"Fossies" - the Fresh Open Source Software Archive

Member "threads/malloc/gmalloc.c" (26 Jul 1999, 35019 Bytes) of package /linux/misc/old/pthreads-3.14.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "gmalloc.c" see the Fossies "Dox" file reference documentation.

    1 /* DO NOT EDIT THIS FILE -- it is automagically generated.  -*- C -*- */
    2 
    3 #define _MALLOC_INTERNAL
    4 
    5 /* The malloc headers and source files from the C library follow here.  */
    6 
    7 /* Declarations for `malloc' and friends.
    8    Copyright 1990, 1991, 1992, 1993 Free Software Foundation, Inc.
    9           Written May 1989 by Mike Haertel.
   10 
   11 This library is free software; you can redistribute it and/or
   12 modify it under the terms of the GNU Library General Public License as
   13 published by the Free Software Foundation; either version 2 of the
   14 License, or (at your option) any later version.
   15 
   16 This library is distributed in the hope that it will be useful,
   17 but WITHOUT ANY WARRANTY; without even the implied warranty of
   18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   19 Library General Public License for more details.
   20 
   21 You should have received a copy of the GNU Library General Public
   22 License along with this library; see the file COPYING.LIB.  If
   23 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
   24 Cambridge, MA 02139, USA.
   25 
   26    The author may be reached (Email) at the address mike@ai.mit.edu,
   27    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
   28 
   29 #ifndef _MALLOC_H
   30 
   31 #define _MALLOC_H   1
   32 
   33 #ifdef _MALLOC_INTERNAL
   34 
   35 #ifdef  HAVE_CONFIG_H
   36 #include "config.h"
   37 #endif
   38 
   39 #if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG) || \
   40           defined(SOLARIS)
   41 #include <string.h>
   42 #else
   43 #ifndef memset
   44 #define memset(s, zero, n)  bzero ((s), (n))
   45 #endif
   46 #ifndef memcpy
   47 #define memcpy(d, s, n)     bcopy ((s), (d), (n))
   48 #endif
   49 #ifndef memmove
   50 #define memmove(d, s, n)    bcopy ((s), (d), (n))
   51 #endif
   52 #endif
   53 
   54 #if defined(__GNU_LIBRARY__) || defined(__STDC__)
   55 #include <limits.h>
   56 #else
   57 #define CHAR_BIT    8
   58 #endif
   59 
   60 #endif  /* _MALLOC_INTERNAL.  */
   61 
   62 
   63 #ifdef  __cplusplus
   64 extern "C"
   65 {
   66 #endif
   67 
   68 #if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
   69 #undef  __P
   70 #define __P(args)   args
   71 #undef  __ptr_t
   72 #define __ptr_t     void *
   73 #else /* Not C++ or ANSI C.  */
   74 #undef  __P
   75 #define __P(args)   ()
   76 #undef  const
   77 #define const
   78 #undef  __ptr_t
   79 #define __ptr_t     char *
   80 #endif /* C++ or ANSI C.  */
   81 
   82 #ifdef  __STDC__
   83 #include <stddef.h>
   84 #else
   85 #undef  size_t
   86 #define size_t      unsigned int
   87 #undef  ptrdiff_t
   88 #define ptrdiff_t   int
   89 #endif
   90 
   91 #ifndef NULL
   92 #define NULL    0
   93 #endif
   94 
   95 
   96 /* Allocate SIZE bytes of memory.  */
   97 extern __ptr_t pthread_malloc __P ((size_t __size));
   98 /* Re-allocate the previously allocated block
   99    in __ptr, making the new block SIZE bytes long.  */
  100 extern __ptr_t pthread_realloc __P ((__ptr_t __ptr, size_t __size));
  101 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
  102 extern __ptr_t pthread_calloc __P ((size_t __nmemb, size_t __size));
  103 /* Free a block allocated by `malloc', `realloc' or `calloc'.  */
  104 extern void pthread_free __P ((__ptr_t __ptr));
  105 
  106 /* Allocate SIZE bytes aligned to a multiple of ALIGNMENT bytes.  */
  107 extern __ptr_t pthread_memalign __P ((size_t __alignment, size_t __size));
  108 
  109 /* Allocate SIZE bytes on a page boundary.  */
  110 extern __ptr_t pthread_valloc __P ((size_t __size));
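/* Illustrative usage sketch, not part of the original source: the entry
   points declared above mirror the standard C allocation interface, only
   with a `pthread_' prefix.  The function name `example_basic_usage' is
   made up, and the block is kept inside `#if 0' so it has no effect on
   the build.  */
#if 0
static void
example_basic_usage (void)
{
  char *buf = (char *) pthread_malloc (64);              /* like malloc */
  int *tab = (int *) pthread_calloc (16, sizeof (int));  /* zero-filled */
  char *bigger;

  if (buf == NULL || tab == NULL)
    {
      pthread_free (buf);       /* pthread_free (NULL) is a no-op */
      pthread_free (tab);
      return;
    }

  /* Grow the character buffer; the block may move, so only adopt the new
     pointer when the call succeeds.  */
  bigger = (char *) pthread_realloc (buf, 256);
  if (bigger != NULL)
    buf = bigger;

  pthread_free (buf);
  pthread_free (tab);
}
#endif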
  111 
  112 
  113 #ifdef _MALLOC_INTERNAL
  114 
  115 /* The allocator divides the heap into blocks of fixed size; large
  116    requests receive one or more whole blocks, and small requests
  117    receive a fragment of a block.  Fragment sizes are powers of two,
  118    and all fragments of a block are the same size.  When all the
  119    fragments in a block have been freed, the block itself is freed.  */
  120 #define INT_BIT     (CHAR_BIT * sizeof(int))
  121 #define BLOCKLOG    (INT_BIT > 16 ? 12 : 9)
  122 #define BLOCKSIZE   (1 << BLOCKLOG)
  123 #define BLOCKIFY(SIZE)  (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
  124 
  125 /* Determine the amount of memory spanned by the initial heap table
  126    (not an absolute limit).  */
  127 #define HEAP        (INT_BIT > 16 ? 4194304 : 65536)
  128 
  129 /* Number of contiguous free blocks allowed to build up at the end of
  130    memory before they will be returned to the system.  */
  131 #define FINAL_FREE_BLOCKS   8
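/* Worked example (illustrative), assuming a 32-bit int so that INT_BIT is
   32: BLOCKLOG is 12 and BLOCKSIZE is 1 << 12 = 4096 bytes, so
   BLOCKIFY (10000) = (10000 + 4095) / 4096 = 3 blocks.  HEAP is then
   4194304 bytes, i.e. the initial info table describes 1024 blocks, and
   FINAL_FREE_BLOCKS means up to 8 contiguous trailing free blocks (32 KB
   here) may accumulate before core is handed back to the system.  */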
  132 
  133 /* Data structure giving per-block information.  */
  134 typedef union
  135   {
  136     /* Heap information for a busy block.  */
  137     struct
  138       {
  139     /* Zero for a large block, or positive giving the
  140        logarithm to the base two of the fragment size.  */
  141     int type;
  142     union
  143       {
  144         struct
  145           {
  146         size_t nfree;   /* Free fragments in a fragmented block.  */
  147         size_t first;   /* First free fragment of the block.  */
  148           } frag;
  149         /* Size (in blocks) of a large cluster.  */
  150         size_t size;
  151       } info;
  152       } busy;
  153     /* Heap information for a free block
  154        (that may be the first of a free cluster).  */
  155     struct
  156       {
  157     size_t size;        /* Size (in blocks) of a free cluster.  */
  158     size_t next;        /* Index of next free cluster.  */
  159     size_t prev;        /* Index of previous free cluster.  */
  160       } free;
  161   } malloc_info;
  162 
  163 /* Pointer to first block of the heap.  */
  164 extern char *_heapbase;
  165 
  166 /* Table indexed by block number giving per-block information.  */
  167 extern malloc_info *_heapinfo;
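/* Illustrative sketch, not part of the original source: how the entry for
   a block that is known to be allocated is interpreted.  (For a block on
   the free list the `free' member of the union applies instead.)  The
   function name `describe_block' is made up; kept inside `#if 0' so it
   has no effect on the build.  */
#if 0
static void
describe_block (size_t block)
{
  if (_heapinfo[block].busy.type == 0)
    {
      /* A large allocation: `size' is the length of the cluster, counted
         in whole blocks.  */
      size_t blocks = _heapinfo[block].busy.info.size;
      (void) blocks;
    }
  else
    {
      /* A fragmented block: each fragment is 1 << type bytes; `nfree' and
         `first' track the free fragments within the block.  */
      size_t nfree = _heapinfo[block].busy.info.frag.nfree;
      size_t first = _heapinfo[block].busy.info.frag.first;
      (void) nfree;
      (void) first;
    }
}
#endif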
  168 
  169 /* Address to block number and vice versa.  */
  170 #define BLOCK(A)    (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
  171 #define ADDRESS(B)  ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
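/* Worked example (illustrative): with BLOCKSIZE == 4096, an address A that
   lies 8192 bytes past _heapbase gives BLOCK (A) = 8192 / 4096 + 1 = 3,
   and ADDRESS (3) maps back to _heapbase + 8192.  Block numbers start at 1
   so that entry 0 of _heapinfo is left free to anchor the list of free
   clusters.  */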
  172 
  173 /* Current search index for the heap table.  */
  174 extern size_t _heapindex;
  175 
  176 /* Limit of valid info table indices.  */
  177 extern size_t _heaplimit;
  178 
  179 /* Doubly linked lists of free fragments.  */
  180 struct list
  181   {
  182     struct list *next;
  183     struct list *prev;
  184   };
  185 
  186 /* Free list headers for each fragment size.  */
  187 extern struct list _fraghead[];
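/* Illustrative sketch, not part of the original source: each _fraghead
   entry is a dummy list header whose `next' field points at the first free
   fragment of that size (or is NULL).  The function name
   `count_free_fragments' is made up; kept inside `#if 0' so it has no
   effect on the build.  */
#if 0
static size_t
count_free_fragments (int log)
{
  size_t n = 0;
  struct list *p;

  for (p = _fraghead[log].next; p != NULL; p = p->next)
    ++n;
  return n;
}
#endif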
  188 
  189 /* List of blocks allocated with `memalign' (or `valloc').  */
  190 struct alignlist
  191   {
  192     struct alignlist *next;
  193     __ptr_t aligned;        /* The address that memalign returned.  */
  194     __ptr_t exact;      /* The address that malloc returned.  */
  195   };
  196 extern struct alignlist *_aligned_blocks;
  197 
  198 /* Instrumentation.  */
  199 extern size_t _chunks_used;
  200 extern size_t _bytes_used;
  201 extern size_t _chunks_free;
  202 extern size_t _bytes_free;
  203 
  204 /* Internal version of `free' used in `morecore' (malloc.c). */
  205 extern void _free_internal __P ((__ptr_t __ptr));
  206 
  207 #endif /* _MALLOC_INTERNAL.  */
  208 
  209 /* Underlying allocation function; successive calls should
  210    return contiguous pieces of memory.  */
  211 extern __ptr_t (*__morecore) __P ((ptrdiff_t __size));
  212 
  213 /* Default value of `__morecore'.  */
  214 extern __ptr_t __default_morecore __P ((ptrdiff_t __size));
  215 
  216 /* If not NULL, this function is called after each time
  217    `__morecore' is called to increase the data size.  */
  218 extern void (*__after_morecore_hook) __P ((void));
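/* Illustrative sketch, not part of the original source: __morecore can be
   pointed at a replacement sbrk-style routine before the first allocation,
   and __after_morecore_hook fires after the allocator has grown the data
   segment.  The names `my_morecore', `note_core_growth' and
   `install_core_hooks' are made up; kept inside `#if 0' so it has no
   effect on the build.  */
#if 0
static __ptr_t
my_morecore (ptrdiff_t increment)
{
  /* Delegate to the default sbrk-based routine; a real replacement could
     carve the space out of a private arena instead.  */
  return __default_morecore (increment);
}

static void
note_core_growth (void)
{
  /* Called each time __morecore has been used to change the data size.  */
}

static void
install_core_hooks (void)
{
  __morecore = my_morecore;
  __after_morecore_hook = note_core_growth;
}
#endif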
  219 
  220 /* Nonzero if `malloc' has been called and done its initialization.  */
  221 extern int __malloc_initialized;
  222 
  223 /* Hooks for debugging versions.  */
  224 extern void (*__free_hook) __P ((__ptr_t __ptr));
  225 extern __ptr_t (*__malloc_hook) __P ((size_t __size));
  226 extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, size_t __size));
  227 
  228 /* Activate a standard collection of debugging hooks.  */
  229 extern int mcheck __P ((void (*__func) __P ((void))));
  230 
  231 /* Activate a standard collection of tracing hooks.  */
  232 extern void mtrace __P ((void));
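/* Illustrative sketch, not part of the original source: a minimal logging
   wrapper installed through __malloc_hook.  Because pthread_malloc hands
   the whole request to the hook, the hook must clear itself before calling
   back in, or it would recurse forever.  The `trace_' names are made up
   and the scheme is not thread-safe; kept inside `#if 0' so it has no
   effect on the build.  */
#if 0
#include <stdio.h>

static __ptr_t
trace_malloc_hook (size_t size)
{
  __ptr_t result;

  __malloc_hook = NULL;                 /* disarm to avoid recursion */
  result = pthread_malloc (size);
  fprintf (stderr, "malloc (%lu) -> %p\n",
           (unsigned long) size, (void *) result);
  __malloc_hook = trace_malloc_hook;    /* re-arm the hook */
  return result;
}

static void
install_trace_hook (void)
{
  __malloc_hook = trace_malloc_hook;
}
#endif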
  233 
  234 /* Statistics available to the user.  */
  235 struct mstats
  236   {
  237     size_t bytes_total;     /* Total size of the heap. */
  238     size_t chunks_used;     /* Chunks allocated by the user. */
  239     size_t bytes_used;      /* Byte total of user-allocated chunks. */
  240     size_t chunks_free;     /* Chunks in the free list. */
  241     size_t bytes_free;      /* Byte total of chunks in the free list. */
  242   };
  243 
  244 /* Pick up the current statistics. */
  245 extern struct mstats mstats __P ((void));
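/* Illustrative sketch, not part of the original source: reading the heap
   statistics.  `mstats' is only declared here; its definition lives in a
   separate module of the package, so this is a sketch of the intended use.
   Kept inside `#if 0' so it has no effect on the build.  */
#if 0
#include <stdio.h>

static void
print_heap_stats (void)
{
  struct mstats s = mstats ();

  printf ("heap: %lu bytes total, %lu used in %lu chunks, %lu free in %lu chunks\n",
          (unsigned long) s.bytes_total,
          (unsigned long) s.bytes_used, (unsigned long) s.chunks_used,
          (unsigned long) s.bytes_free, (unsigned long) s.chunks_free);
}
#endif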
  246 
  247 /* Call WARNFUN with a warning message when memory usage is high.  */
  248 extern void memory_warnings __P ((__ptr_t __start,
  249                   void (*__warnfun) __P ((__const char *))));
  250 
  251 
  252 /* Relocating allocator.  */
  253 
  254 /* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
  255 extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, size_t __size));
  256 
  257 /* Free the storage allocated in HANDLEPTR.  */
  258 extern void r_alloc_free __P ((__ptr_t *__handleptr));
  259 
  260 /* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
  261 extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, size_t __size));
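/* Illustrative sketch, not part of the original source: the relocating
   allocator stores the block's address in a handle owned by the caller,
   so the allocator is free to move the block and update the handle.  The
   implementation is not part of this file, and the sketch assumes r_alloc
   returns NULL on failure.  Kept inside `#if 0' so it has no effect on
   the build.  */
#if 0
static void
relocating_example (void)
{
  __ptr_t handle = NULL;

  if (r_alloc (&handle, 1024) != NULL)   /* handle now holds the address */
    {
      (void) r_re_alloc (&handle, 4096); /* the block may move */
      r_alloc_free (&handle);            /* release it again */
    }
}
#endif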
  262 
  263 
  264 #ifdef  __cplusplus
  265 }
  266 #endif
  267 
  268 #endif /* malloc.h  */
  269 /* Memory allocator `malloc'.
  270    Copyright 1990, 1991, 1992, 1993 Free Software Foundation
  271           Written May 1989 by Mike Haertel.
  272 
  273 This library is free software; you can redistribute it and/or
  274 modify it under the terms of the GNU Library General Public License as
  275 published by the Free Software Foundation; either version 2 of the
  276 License, or (at your option) any later version.
  277 
  278 This library is distributed in the hope that it will be useful,
  279 but WITHOUT ANY WARRANTY; without even the implied warranty of
  280 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  281 Library General Public License for more details.
  282 
  283 You should have received a copy of the GNU Library General Public
  284 License along with this library; see the file COPYING.LIB.  If
  285 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
  286 Cambridge, MA 02139, USA.
  287 
  288    The author may be reached (Email) at the address mike@ai.mit.edu,
  289    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
  290 
  291 #ifndef _MALLOC_INTERNAL
  292 #define _MALLOC_INTERNAL
  293 #include <malloc.h>
  294 #endif
  295 
  296 /* How to really get more memory.  */
  297 __ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
  298 
  299 /* Debugging hook for `malloc'.  */
  300 __ptr_t (*__malloc_hook) __P ((size_t __size));
  301 
  302 /* Pointer to the base of the first block.  */
  303 char *_heapbase;
  304 
  305 /* Block information table.  Allocated with align/__free (not malloc/free).  */
  306 malloc_info *_heapinfo;
  307 
  308 /* Number of info entries.  */
  309 static size_t heapsize;
  310 
  311 /* Search index in the info table.  */
  312 size_t _heapindex;
  313 
  314 /* Limit of valid info table indices.  */
  315 size_t _heaplimit;
  316 
  317 /* Free lists for each fragment size.  */
  318 struct list _fraghead[BLOCKLOG];
  319 
  320 /* Instrumentation.  */
  321 size_t _chunks_used;
  322 size_t _bytes_used;
  323 size_t _chunks_free;
  324 size_t _bytes_free;
  325 
  326 /* Nonzero once the allocator has been initialized.  */
  327 int __malloc_initialized = 0;
  328 
  329 void (*__after_morecore_hook) __P ((void));
  330 
  331 /* Aligned allocation.  */
  332 static __ptr_t align __P ((size_t));
  333 static __ptr_t
  334 align (size)
  335      size_t size;
  336 {
  337   __ptr_t result;
  338   unsigned long int adj;
  339 
  340   result = (*__morecore) (size);
  341   adj = (unsigned long int) ((unsigned long int) ((char *) result -
  342                         (char *) NULL)) % BLOCKSIZE;
  343   if (adj != 0)
  344     {
  345       adj = BLOCKSIZE - adj;
  346       (void) (*__morecore) (adj);
  347       result = (char *) result + adj;
  348     }
  349 
  350   if (__after_morecore_hook)
  351     (*__after_morecore_hook) ();
  352 
  353   return result;
  354 }
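/* Worked example (illustrative): if __morecore returns core 256 bytes past
   a block boundary and BLOCKSIZE is 4096, adj is 256, so align asks for
   another 4096 - 256 = 3840 bytes and advances the returned pointer to the
   next 4096-byte boundary.  */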
  355 
  356 /* Set everything up and remember that we have.  */
  357 static int initialize __P ((void));
  358 static int
  359 initialize ()
  360 {
  361   heapsize = HEAP / BLOCKSIZE;
  362   _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  363   if (_heapinfo == NULL)
  364     return 0;
  365   memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  366   _heapinfo[0].free.size = 0;
  367   _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  368   _heapindex = 0;
  369   _heapbase = (char *) _heapinfo;
  370   __malloc_initialized = 1;
  371   return 1;
  372 }
  373 
  374 /* Get neatly aligned memory, initializing or
  375    growing the heap info table as necessary. */
  376 static __ptr_t morecore __P ((size_t));
  377 static __ptr_t
  378 morecore (size)
  379      size_t size;
  380 {
  381   __ptr_t result;
  382   malloc_info *newinfo, *oldinfo;
  383   size_t newsize;
  384 
  385   result = align (size);
  386   if (result == NULL)
  387     return NULL;
  388 
  389   /* Check if we need to grow the info table.  */
  390   if ((size_t) BLOCK ((char *) result + size) > heapsize)
  391     {
  392       newsize = heapsize;
  393       while ((size_t) BLOCK ((char *) result + size) > newsize)
  394     newsize *= 2;
  395       newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
  396       if (newinfo == NULL)
  397     {
  398       (*__morecore) (-size);
  399       return NULL;
  400     }
  401       memset (newinfo, 0, newsize * sizeof (malloc_info));
  402       memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
  403       oldinfo = _heapinfo;
  404       newinfo[BLOCK (oldinfo)].busy.type = 0;
  405       newinfo[BLOCK (oldinfo)].busy.info.size
  406     = BLOCKIFY (heapsize * sizeof (malloc_info));
  407       _heapinfo = newinfo;
  408       _free_internal (oldinfo);
  409       heapsize = newsize;
  410     }
  411 
  412   _heaplimit = BLOCK ((char *) result + size);
  413   return result;
  414 }
  415 
  416 /* Allocate memory from the heap.  */
  417 __ptr_t
  418 pthread_malloc (size)
  419      size_t size;
  420 {
  421   __ptr_t result;
  422   size_t block, blocks, lastblocks, start;
  423   register size_t i;
  424   struct list *next;
  425 
  426   /* ANSI C allows `malloc (0)' to either return NULL, or to return a
  427      valid address you can realloc and pthread_free (though not dereference).
  428 
  429      It turns out that some extant code (sunrpc, at least Ultrix's version)
  430      expects `malloc (0)' to return non-NULL and breaks otherwise.
  431      Be compatible.  */
  432 
  433 #if 0
  434   if (size == 0)
  435     return NULL;
  436 #endif
  437 
  438   if (__malloc_hook != NULL)
  439     return (*__malloc_hook) (size);
  440 
  441   if (!__malloc_initialized)
  442     if (!initialize ())
  443       return NULL;
  444 
  445   if (size < sizeof (struct list))
  446       size = sizeof (struct list);
  447 
  448   /* Determine the allocation policy based on the request size.  */
  449   if (size <= BLOCKSIZE / 2)
  450     {
  451       /* Small allocation to receive a fragment of a block.
  452      Determine the logarithm to base two of the fragment size. */
  453       register size_t log = 1;
  454       --size;
  455       while ((size /= 2) != 0)
  456     ++log;
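      /* Worked example (illustrative): a request for 100 bytes becomes 99
         after the decrement; the loop halves that down to zero, bumping
         log six times, so log ends up 7 and the request is served from a
         1 << 7 = 128-byte fragment.  */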
  457 
  458       /* Look in the fragment lists for a
  459      free fragment of the desired size. */
  460       next = _fraghead[log].next;
  461       if (next != NULL)
  462     {
  463       /* There are free fragments of this size.
  464          Pop a fragment out of the fragment list and return it.
  465          Update the block's nfree and first counters. */
  466       result = (__ptr_t) next;
  467       next->prev->next = next->next;
  468       if (next->next != NULL)
  469         next->next->prev = next->prev;
  470       block = BLOCK (result);
  471       if (--_heapinfo[block].busy.info.frag.nfree != 0)
  472         _heapinfo[block].busy.info.frag.first = (unsigned long int)
  473           ((unsigned long int) ((char *) next->next - (char *) NULL)
  474            % BLOCKSIZE) >> log;
  475 
  476       /* Update the statistics.  */
  477       ++_chunks_used;
  478       _bytes_used += 1 << log;
  479       --_chunks_free;
  480       _bytes_free -= 1 << log;
  481     }
  482       else
  483     {
  484       /* No free fragments of the desired size, so get a new block
  485          and break it into fragments, returning the first.  */
  486       result = pthread_malloc (BLOCKSIZE);
  487       if (result == NULL)
  488         return NULL;
  489 
  490       /* Link all fragments but the first into the free list.  */
  491       for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i)
  492         {
  493           next = (struct list *) ((char *) result + (i << log));
  494           next->next = _fraghead[log].next;
  495           next->prev = &_fraghead[log];
  496           next->prev->next = next;
  497           if (next->next != NULL)
  498         next->next->prev = next;
  499         }
  500 
  501       /* Initialize the nfree and first counters for this block.  */
  502       block = BLOCK (result);
  503       _heapinfo[block].busy.type = log;
  504       _heapinfo[block].busy.info.frag.nfree = i - 1;
  505       _heapinfo[block].busy.info.frag.first = i - 1;
  506 
  507       _chunks_free += (BLOCKSIZE >> log) - 1;
  508       _bytes_free += BLOCKSIZE - (1 << log);
  509       _bytes_used -= BLOCKSIZE - (1 << log);
  510     }
  511     }
  512   else
  513     {
  514       /* Large allocation to receive one or more blocks.
  515      Search the free list in a circle starting at the last place visited.
  516      If we loop completely around without finding a large enough
  517      space we will have to get more memory from the system.  */
  518       blocks = BLOCKIFY (size);
  519       start = block = _heapindex;
  520       while (_heapinfo[block].free.size < blocks)
  521     {
  522       block = _heapinfo[block].free.next;
  523       if (block == start)
  524         {
  525           /* Need to get more from the system.  Check to see if
  526          the new core will be contiguous with the final free
  527          block; if so we don't need to get as much.  */
  528           block = _heapinfo[0].free.prev;
  529           lastblocks = _heapinfo[block].free.size;
  530           if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
  531           (*__morecore) (0) == ADDRESS (block + lastblocks) &&
  532           (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
  533         {
  534           _heapinfo[block].free.size = blocks;
  535           _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
  536           continue;
  537         }
  538           result = morecore (blocks * BLOCKSIZE);
  539           if (result == NULL)
  540         return NULL;
  541           block = BLOCK (result);
  542           _heapinfo[block].busy.type = 0;
  543           _heapinfo[block].busy.info.size = blocks;
  544           ++_chunks_used;
  545           _bytes_used += blocks * BLOCKSIZE;
  546           return result;
  547         }
  548     }
  549 
  550       /* At this point we have found a suitable free list entry.
  551      Figure out how to remove what we need from the list. */
  552       result = ADDRESS (block);
  553       if (_heapinfo[block].free.size > blocks)
  554     {
  555       /* The block we found has a bit left over,
  556          so relink the tail end back into the free list. */
  557       _heapinfo[block + blocks].free.size
  558         = _heapinfo[block].free.size - blocks;
  559       _heapinfo[block + blocks].free.next
  560         = _heapinfo[block].free.next;
  561       _heapinfo[block + blocks].free.prev
  562         = _heapinfo[block].free.prev;
  563       _heapinfo[_heapinfo[block].free.prev].free.next
  564         = _heapinfo[_heapinfo[block].free.next].free.prev
  565         = _heapindex = block + blocks;
  566     }
  567       else
  568     {
  569       /* The block exactly matches our requirements,
  570          so just remove it from the list. */
  571       _heapinfo[_heapinfo[block].free.next].free.prev
  572         = _heapinfo[block].free.prev;
  573       _heapinfo[_heapinfo[block].free.prev].free.next
  574         = _heapindex = _heapinfo[block].free.next;
  575       --_chunks_free;
  576     }
  577 
  578       _heapinfo[block].busy.type = 0;
  579       _heapinfo[block].busy.info.size = blocks;
  580       ++_chunks_used;
  581       _bytes_used += blocks * BLOCKSIZE;
  582       _bytes_free -= blocks * BLOCKSIZE;
  583     }
  584 
  585   return result;
  586 }
  587 /* Free a block of memory allocated by `malloc'.
  588    Copyright 1990, 1991, 1992 Free Software Foundation
  589           Written May 1989 by Mike Haertel.
  590 
  591 This library is free software; you can redistribute it and/or
  592 modify it under the terms of the GNU Library General Public License as
  593 published by the Free Software Foundation; either version 2 of the
  594 License, or (at your option) any later version.
  595 
  596 This library is distributed in the hope that it will be useful,
  597 but WITHOUT ANY WARRANTY; without even the implied warranty of
  598 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  599 Library General Public License for more details.
  600 
  601 You should have received a copy of the GNU Library General Public
  602 License along with this library; see the file COPYING.LIB.  If
  603 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
  604 Cambridge, MA 02139, USA.
  605 
  606    The author may be reached (Email) at the address mike@ai.mit.edu,
  607    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
  608 
  609 #ifndef _MALLOC_INTERNAL
  610 #define _MALLOC_INTERNAL
  611 #include <malloc.h>
  612 #endif
  613 
  614 /* Debugging hook for free.  */
  615 void (*__free_hook) __P ((__ptr_t __ptr));
  616 
  617 /* List of blocks allocated by memalign.  */
  618 struct alignlist *_aligned_blocks = NULL;
  619 
  620 /* Return memory to the heap.
  621    Like `free' but don't call a __free_hook if there is one.  */
  622 void
  623 _free_internal (ptr)
  624      __ptr_t ptr;
  625 {
  626   int type;
  627   size_t block, blocks;
  628   register size_t i;
  629   struct list *prev, *next;
  630 
  631   block = BLOCK (ptr);
  632 
  633   type = _heapinfo[block].busy.type;
  634   switch (type)
  635     {
  636     case 0:
  637       /* Get as many statistics as early as we can.  */
  638       --_chunks_used;
  639       _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
  640       _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
  641 
  642       /* Find the free cluster previous to this one in the free list.
  643      Start searching at the last block referenced; this may benefit
  644      programs with locality of allocation.  */
  645       i = _heapindex;
  646       if (i > block)
  647     while (i > block)
  648       i = _heapinfo[i].free.prev;
  649       else
  650     {
  651       do
  652         i = _heapinfo[i].free.next;
  653       while (i > 0 && i < block);
  654       i = _heapinfo[i].free.prev;
  655     }
  656 
  657       /* Determine how to link this block into the free list.  */
  658       if (block == i + _heapinfo[i].free.size)
  659     {
  660       /* Coalesce this block with its predecessor.  */
  661       _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
  662       block = i;
  663     }
  664       else
  665     {
  666       /* Really link this block back into the free list.  */
  667       _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
  668       _heapinfo[block].free.next = _heapinfo[i].free.next;
  669       _heapinfo[block].free.prev = i;
  670       _heapinfo[i].free.next = block;
  671       _heapinfo[_heapinfo[block].free.next].free.prev = block;
  672       ++_chunks_free;
  673     }
  674 
  675       /* Now that the block is linked in, see if we can coalesce it
  676      with its successor (by deleting its successor from the list
  677      and adding in its size).  */
  678       if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
  679     {
  680       _heapinfo[block].free.size
  681         += _heapinfo[_heapinfo[block].free.next].free.size;
  682       _heapinfo[block].free.next
  683         = _heapinfo[_heapinfo[block].free.next].free.next;
  684       _heapinfo[_heapinfo[block].free.next].free.prev = block;
  685       --_chunks_free;
  686     }
  687 
  688       /* Now see if we can return stuff to the system.  */
  689       blocks = _heapinfo[block].free.size;
  690       if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
  691       && (*__morecore) (0) == ADDRESS (block + blocks))
  692     {
  693       register size_t bytes = blocks * BLOCKSIZE;
  694       _heaplimit -= blocks;
  695       (*__morecore) (-bytes);
  696       _heapinfo[_heapinfo[block].free.prev].free.next
  697         = _heapinfo[block].free.next;
  698       _heapinfo[_heapinfo[block].free.next].free.prev
  699         = _heapinfo[block].free.prev;
  700       block = _heapinfo[block].free.prev;
  701       --_chunks_free;
  702       _bytes_free -= bytes;
  703     }
  704 
  705       /* Set the next search to begin at this block.  */
  706       _heapindex = block;
  707       break;
  708 
  709     default:
  710       /* Do some of the statistics.  */
  711       --_chunks_used;
  712       _bytes_used -= 1 << type;
  713       ++_chunks_free;
  714       _bytes_free += 1 << type;
  715 
  716       /* Get the address of the first free fragment in this block.  */
  717       prev = (struct list *) ((char *) ADDRESS (block) +
  718                (_heapinfo[block].busy.info.frag.first << type));
  719 
  720       if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
  721     {
  722       /* If all fragments of this block are free, remove them
  723          from the fragment list and free the whole block.  */
  724       next = prev;
  725       for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
  726         next = next->next;
  727       prev->prev->next = next;
  728       if (next != NULL)
  729         next->prev = prev->prev;
  730       _heapinfo[block].busy.type = 0;
  731       _heapinfo[block].busy.info.size = 1;
  732 
  733       /* Keep the statistics accurate.  */
  734       ++_chunks_used;
  735       _bytes_used += BLOCKSIZE;
  736       _chunks_free -= BLOCKSIZE >> type;
  737       _bytes_free -= BLOCKSIZE;
  738 
  739       pthread_free (ADDRESS (block));
  740     }
  741       else if (_heapinfo[block].busy.info.frag.nfree != 0)
  742     {
  743       /* If some fragments of this block are free, link this
  744          fragment into the fragment list after the first free
  745          fragment of this block. */
  746       next = (struct list *) ptr;
  747       next->next = prev->next;
  748       next->prev = prev;
  749       prev->next = next;
  750       if (next->next != NULL)
  751         next->next->prev = next;
  752       ++_heapinfo[block].busy.info.frag.nfree;
  753     }
  754       else
  755     {
  756       /* No fragments of this block are free, so link this
  757          fragment into the fragment list and announce that
  758          it is the first free fragment of this block. */
  759       prev = (struct list *) ptr;
  760       _heapinfo[block].busy.info.frag.nfree = 1;
  761       _heapinfo[block].busy.info.frag.first = (unsigned long int)
  762         ((unsigned long int) ((char *) ptr - (char *) NULL)
  763          % BLOCKSIZE >> type);
  764       prev->next = _fraghead[type].next;
  765       prev->prev = &_fraghead[type];
  766       prev->prev->next = prev;
  767       if (prev->next != NULL)
  768         prev->next->prev = prev;
  769     }
  770       break;
  771     }
  772 }
  773 
  774 /* Return memory to the heap.  */
  775 void
  776 pthread_free (ptr)
  777      __ptr_t ptr;
  778 {
  779   register struct alignlist *l;
  780 
  781   if (ptr == NULL)
  782     return;
  783 
  784   for (l = _aligned_blocks; l != NULL; l = l->next)
  785     if (l->aligned == ptr)
  786       {
  787     l->aligned = NULL;  /* Mark the slot in the list as free.  */
  788     ptr = l->exact;
  789     break;
  790       }
  791 
  792   if (__free_hook != NULL)
  793     (*__free_hook) (ptr);
  794   else
  795     _free_internal (ptr);
  796 }
  797 /* Copyright (C) 1991, 1993 Free Software Foundation, Inc.
  798 This file is part of the GNU C Library.
  799 
  800 The GNU C Library is free software; you can redistribute it and/or
  801 modify it under the terms of the GNU Library General Public License as
  802 published by the Free Software Foundation; either version 2 of the
  803 License, or (at your option) any later version.
  804 
  805 The GNU C Library is distributed in the hope that it will be useful,
  806 but WITHOUT ANY WARRANTY; without even the implied warranty of
  807 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  808 Library General Public License for more details.
  809 
  810 You should have received a copy of the GNU Library General Public
  811 License along with the GNU C Library; see the file COPYING.LIB.  If
  812 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
  813 Cambridge, MA 02139, USA.  */
  814 
  815 #ifndef _MALLOC_INTERNAL
  816 #define _MALLOC_INTERNAL
  817 #include <malloc.h>
  818 #endif
  819 
  820 #undef  cfree
  821 
  822 #ifdef _LIBC
  823 
  824 #include <ansidecl.h>
  825 #include <gnu-stabs.h>
  826 
  827 function_alias(cfree, free, void, (ptr),
  828            DEFUN(cfree, (ptr), PTR ptr))
  829 
  830 #else
  831 
  832 void
  833 pthread_cfree (ptr)
  834      __ptr_t ptr;
  835 {
  836   pthread_free (ptr);
  837 }
  838 
  839 #endif
  840 /* Change the size of a block allocated by `malloc'.
  841    Copyright 1990, 1991, 1992, 1993 Free Software Foundation, Inc.
  842              Written May 1989 by Mike Haertel.
  843 
  844 This library is free software; you can redistribute it and/or
  845 modify it under the terms of the GNU Library General Public License as
  846 published by the Free Software Foundation; either version 2 of the
  847 License, or (at your option) any later version.
  848 
  849 This library is distributed in the hope that it will be useful,
  850 but WITHOUT ANY WARRANTY; without even the implied warranty of
  851 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  852 Library General Public License for more details.
  853 
  854 You should have received a copy of the GNU Library General Public
  855 License along with this library; see the file COPYING.LIB.  If
  856 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
  857 Cambridge, MA 02139, USA.
  858 
  859    The author may be reached (Email) at the address mike@ai.mit.edu,
  860    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
  861 
  862 #ifndef _MALLOC_INTERNAL
  863 #define _MALLOC_INTERNAL
  864 #include <malloc.h>
  865 #endif
  866 
  867 #define min(A, B) ((A) < (B) ? (A) : (B))
  868 
  869 /* Debugging hook for realloc.  */
  870 __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, size_t __size));
  871 
  872 /* Resize the given region to the new size, returning a pointer
  873    to the (possibly moved) region.  This is optimized for speed;
  874    some benchmarks seem to indicate that greater compactness is
  875    achieved by unconditionally allocating and copying to a
  876    new region.  This module has incestuous knowledge of the
  877    internals of both free and malloc. */
  878 __ptr_t
  879 pthread_realloc (ptr, size)
  880      __ptr_t ptr;
  881      size_t size;
  882 {
  883   __ptr_t result;
  884   int type;
  885   size_t block, blocks, oldlimit;
  886 
  887   if (size == 0)
  888     {
  889       pthread_free (ptr);
  890       return pthread_malloc (0);
  891     }
  892   else if (ptr == NULL)
  893     return pthread_malloc (size);
  894 
  895   if (__realloc_hook != NULL)
  896     return (*__realloc_hook) (ptr, size);
  897 
  898   block = BLOCK (ptr);
  899 
  900   type = _heapinfo[block].busy.type;
  901   switch (type)
  902     {
  903     case 0:
  904       /* Maybe reallocate a large block to a small fragment.  */
  905       if (size <= BLOCKSIZE / 2)
  906     {
  907       result = pthread_malloc (size);
  908       if (result != NULL)
  909         {
  910           memcpy (result, ptr, size);
  911           pthread_free (ptr);
  912           return result;
  913         }
  914     }
  915 
  916       /* The new size is a large allocation as well;
  917      see if we can hold it in place. */
  918       blocks = BLOCKIFY (size);
  919       if (blocks < _heapinfo[block].busy.info.size)
  920     {
  921       /* The new size is smaller; return
  922          excess memory to the free list. */
  923       _heapinfo[block + blocks].busy.type = 0;
  924       _heapinfo[block + blocks].busy.info.size
  925         = _heapinfo[block].busy.info.size - blocks;
  926       _heapinfo[block].busy.info.size = blocks;
  927       pthread_free (ADDRESS (block + blocks));
  928       result = ptr;
  929     }
  930       else if (blocks == _heapinfo[block].busy.info.size)
  931     /* No size change necessary.  */
  932     result = ptr;
  933       else
  934     {
  935       /* Won't fit, so allocate a new region that will.
  936          Free the old region first in case there is sufficient
  937          adjacent free space to grow without moving. */
  938       blocks = _heapinfo[block].busy.info.size;
  939       /* Prevent free from actually returning memory to the system.  */
  940       oldlimit = _heaplimit;
  941       _heaplimit = 0;
  942       pthread_free (ptr);
  943       _heaplimit = oldlimit;
  944       result = pthread_malloc (size);
  945       if (result == NULL)
  946         {
  947           /* Now we're really in trouble.  We have to unfree
  948          the thing we just freed.  Unfortunately it might
  949          have been coalesced with its neighbors.  */
  950           if (_heapindex == block)
  951             (void) pthread_malloc (blocks * BLOCKSIZE);
  952           else
  953         {
  954           __ptr_t previous = pthread_malloc ((block - _heapindex) * BLOCKSIZE);
  955           (void) pthread_malloc (blocks * BLOCKSIZE);
  956           pthread_free (previous);
  957         }
  958           return NULL;
  959         }
  960       if (ptr != result)
  961         memmove (result, ptr, blocks * BLOCKSIZE);
  962     }
  963       break;
  964 
  965     default:
  966       /* Old size is a fragment; type is logarithm
  967      to base two of the fragment size.  */
  968       if (size > (size_t) (1 << (type - 1)) && size <= (size_t) (1 << type))
  969     /* The new size is the same kind of fragment.  */
  970     result = ptr;
  971       else
  972     {
  973       /* The new size is different; allocate a new space,
  974          and copy the lesser of the new size and the old. */
  975       result = pthread_malloc (size);
  976       if (result == NULL)
  977         return NULL;
  978       memcpy (result, ptr, min (size, (size_t) 1 << type));
  979       pthread_free (ptr);
  980     }
  981       break;
  982     }
  983 
  984   return result;
  985 }
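/* Illustrative usage sketch, not part of the original source: growing a
   buffer while keeping the old pointer usable if the call fails (the code
   above goes to some trouble to leave the original block intact in that
   case).  The name `grow_buffer' is made up; kept inside `#if 0' so it
   has no effect on the build.  */
#if 0
static char *
grow_buffer (char *buf, size_t newsize)
{
  __ptr_t bigger = pthread_realloc (buf, newsize);

  if (bigger == NULL)
    return buf;                 /* the old block is still usable */
  return (char *) bigger;
}
#endif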
  986 /* Copyright (C) 1991, 1992 Free Software Foundation, Inc.
  987 
  988 This library is free software; you can redistribute it and/or
  989 modify it under the terms of the GNU Library General Public License as
  990 published by the Free Software Foundation; either version 2 of the
  991 License, or (at your option) any later version.
  992 
  993 This library is distributed in the hope that it will be useful,
  994 but WITHOUT ANY WARRANTY; without even the implied warranty of
  995 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  996 Library General Public License for more details.
  997 
  998 You should have received a copy of the GNU Library General Public
  999 License along with this library; see the file COPYING.LIB.  If
 1000 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
 1001 Cambridge, MA 02139, USA.
 1002 
 1003    The author may be reached (Email) at the address mike@ai.mit.edu,
 1004    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
 1005 
 1006 #ifndef _MALLOC_INTERNAL
 1007 #define _MALLOC_INTERNAL
 1008 #include <malloc.h>
 1009 #endif
 1010 
 1011 /* Allocate an array of NMEMB elements each SIZE bytes long.
 1012    The entire array is initialized to zeros.  */
 1013 __ptr_t
 1014 pthread_calloc (nmemb, size)
 1015      register size_t nmemb;
 1016      register size_t size;
 1017 {
 1018   register __ptr_t result = pthread_malloc (nmemb * size);
 1019 
 1020   if (result != NULL)
 1021     (void) memset (result, 0, nmemb * size);
 1022 
 1023   return result;
 1024 }
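/* Illustrative usage sketch, not part of the original source.  Note that
   pthread_calloc multiplies nmemb by size without checking for overflow,
   as was common at the time, so callers must keep the product in range
   themselves.  The name `make_int_table' is made up; kept inside `#if 0'
   so it has no effect on the build.  */
#if 0
static int *
make_int_table (size_t nmemb)
{
  /* nmemb ints, every one of them guaranteed to start out as zero.  */
  return (int *) pthread_calloc (nmemb, sizeof (int));
}
#endif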
 1025 /* Copyright (C) 1991, 1992 Free Software Foundation, Inc.
 1026 This file is part of the GNU C Library.
 1027 
 1028 The GNU C Library is free software; you can redistribute it and/or modify
 1029 it under the terms of the GNU General Public License as published by
 1030 the Free Software Foundation; either version 2, or (at your option)
 1031 any later version.
 1032 
 1033 The GNU C Library is distributed in the hope that it will be useful,
 1034 but WITHOUT ANY WARRANTY; without even the implied warranty of
 1035 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 1036 GNU General Public License for more details.
 1037 
 1038 You should have received a copy of the GNU General Public License
 1039 along with the GNU C Library; see the file COPYING.  If not, write to
 1040 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
 1041 
 1042 #ifndef _MALLOC_INTERNAL
 1043 #define _MALLOC_INTERNAL
 1044 #include <malloc.h>
 1045 #endif
 1046 
 1047 #ifndef __GNU_LIBRARY__
 1048 #define __sbrk  sbrk
 1049 #endif
 1050 
 1051 extern __ptr_t __sbrk __P ((int increment));
 1052 
 1053 #ifndef NULL
 1054 #define NULL 0
 1055 #endif
 1056 
 1057 /* Allocate INCREMENT more bytes of data space,
 1058    and return the start of data space, or NULL on errors.
 1059    If INCREMENT is negative, shrink data space.  */
 1060 __ptr_t
 1061 __default_morecore (increment)
 1062      ptrdiff_t increment;
 1063 {
 1064   __ptr_t result = __sbrk ((int) increment);
 1065   if (result == (__ptr_t) -1)
 1066     return NULL;
 1067   return result;
 1068 }
 1069 /* Copyright (C) 1991, 1992 Free Software Foundation, Inc.
 1070 
 1071 This library is free software; you can redistribute it and/or
 1072 modify it under the terms of the GNU Library General Public License as
 1073 published by the Free Software Foundation; either version 2 of the
 1074 License, or (at your option) any later version.
 1075 
 1076 This library is distributed in the hope that it will be useful,
 1077 but WITHOUT ANY WARRANTY; without even the implied warranty of
 1078 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 1079 Library General Public License for more details.
 1080 
 1081 You should have received a copy of the GNU Library General Public
 1082 License along with this library; see the file COPYING.LIB.  If
 1083 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
 1084 Cambridge, MA 02139, USA.  */
 1085 
 1086 #ifndef _MALLOC_INTERNAL
 1087 #define _MALLOC_INTERNAL
 1088 #include <malloc.h>
 1089 #endif
 1090 
 1091 __ptr_t
 1092 pthread_memalign (alignment, size)
 1093      size_t alignment;
 1094      size_t size;
 1095 {
 1096   __ptr_t result;
 1097   unsigned long int adj;
 1098 
 1099   size = (size + alignment - 1);
 1100 
 1101   result = pthread_malloc (size);
 1102   if (result == NULL)
 1103     return NULL;
 1104   adj = (unsigned long int) ((unsigned long int) ((char *) result -
 1105                         (char *) NULL)) % alignment;
 1106   if (adj != 0)
 1107     {
 1108       struct alignlist *l;
 1109       for (l = _aligned_blocks; l != NULL; l = l->next)
 1110     if (l->aligned == NULL)
 1111       /* This slot is free.  Use it.  */
 1112       break;
 1113       if (l == NULL)
 1114     {
 1115       l = (struct alignlist *) pthread_malloc (sizeof (struct alignlist));
 1116       if (l == NULL)
 1117         {
 1118           pthread_free (result);
 1119           return NULL;
 1120         }
 1121     }
 1122       l->exact = result;
 1123       result = l->aligned = (char *) result + alignment - adj;
 1124       l->next = _aligned_blocks;
 1125       _aligned_blocks = l;
 1126     }
 1127 
 1128   return result;
 1129 }
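/* Illustrative usage sketch, not part of the original source: requesting
   storage aligned to a 64-byte boundary.  The block must still be released
   with pthread_free, which consults _aligned_blocks to recover the exact
   pointer that pthread_malloc handed out.  Kept inside `#if 0' so it has
   no effect on the build.  */
#if 0
static __ptr_t
aligned_scratch (size_t nbytes)
{
  return pthread_memalign (64, nbytes);
}
#endif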
 1130 /* Allocate memory on a page boundary.
 1131    Copyright (C) 1991, 1992, 1993 Free Software Foundation, Inc.
 1132 
 1133 This library is free software; you can redistribute it and/or
 1134 modify it under the terms of the GNU Library General Public License as
 1135 published by the Free Software Foundation; either version 2 of the
 1136 License, or (at your option) any later version.
 1137 
 1138 This library is distributed in the hope that it will be useful,
 1139 but WITHOUT ANY WARRANTY; without even the implied warranty of
 1140 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 1141 Library General Public License for more details.
 1142 
 1143 You should have received a copy of the GNU Library General Public
 1144 License along with this library; see the file COPYING.LIB.  If
 1145 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
 1146 Cambridge, MA 02139, USA.
 1147 
 1148    The author may be reached (Email) at the address mike@ai.mit.edu,
 1149    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
 1150 
 1151 #ifndef _MALLOC_INTERNAL
 1152 #define _MALLOC_INTERNAL
 1153 #include <malloc.h>
 1154 #endif
 1155 
 1156 #ifdef  __GNU_LIBRARY__
 1157 extern size_t __getpagesize __P ((void));
 1158 #else
 1159 #include "getpagesize.h"
 1160 #define  __getpagesize()    getpagesize()
 1161 #endif
 1162 
 1163 static size_t pagesize;
 1164 
 1165 __ptr_t
 1166 pthread_valloc (size)
 1167      size_t size;
 1168 {
 1169   if (pagesize == 0)
 1170     pagesize = __getpagesize ();
 1171 
 1172   return pthread_memalign (pagesize, size);
 1173 }
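/* Illustrative usage sketch, not part of the original source: a
   page-aligned buffer obtained through pthread_valloc, which simply
   forwards to pthread_memalign with the system page size.  Kept inside
   `#if 0' so it has no effect on the build.  */
#if 0
static __ptr_t
page_buffer (size_t nbytes)
{
  return pthread_valloc (nbytes);
}
#endif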