"Fossies" - the Fresh Open Source Software Archive

Member "threads/malloc/malloc.c" (7 Apr 1998, 9371 Bytes) of package /linux/misc/old/pthreads-3.14.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and a code folding option. Alternatively, you can view or download the uninterpreted source code file here. For more information about "malloc.c" see the Fossies "Dox" file reference documentation.

    1 /* Memory allocator `malloc'.
    2    Copyright 1990, 1991, 1992, 1993 Free Software Foundation
    3           Written May 1989 by Mike Haertel.
    4 
    5 This library is free software; you can redistribute it and/or
    6 modify it under the terms of the GNU Library General Public License as
    7 published by the Free Software Foundation; either version 2 of the
    8 License, or (at your option) any later version.
    9 
   10 This library is distributed in the hope that it will be useful,
   11 but WITHOUT ANY WARRANTY; without even the implied warranty of
   12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   13 Library General Public License for more details.
   14 
   15 You should have received a copy of the GNU Library General Public
   16 License along with this library; see the file COPYING.LIB.  If
   17 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
   18 Cambridge, MA 02139, USA.
   19 
   20    The author may be reached (Email) at the address mike@ai.mit.edu,
   21    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
   22 
   23 #ifndef _MALLOC_INTERNAL
   24 #define _MALLOC_INTERNAL
   25 #include <malloc.h>
   26 #endif
   27 
/* How to really get more memory: the function that actually obtains
   core from the system.  Takes a signed increment (sbrk-style), so it
   can also be asked to shrink -- see morecore's failure path.  */
__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  When non-NULL it replaces the whole
   allocator: pthread_malloc tail-calls it immediately.  */
__ptr_t (*__malloc_hook) __P ((size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table, one entry per heap block.
   Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries the table can currently describe.  */
static size_t heapsize;

/* Search index in the info table: where the circular free-list
   search starts (last place visited).  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size (indexed by log2 of the size).  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  (Nonzero once initialize () has succeeded.)  */
int __malloc_initialized = 0;

/* Hook invoked by align () after every call to __morecore.  */
void (*__after_morecore_hook) __P ((void));
   63 /* Aligned allocation.  */
   64 static __ptr_t align __P ((size_t));
   65 static __ptr_t
   66 align (size)
   67      size_t size;
   68 {
   69   __ptr_t result;
   70   unsigned long int adj;
   71 
   72   result = (*__morecore) (size);
   73   adj = (unsigned long int) ((unsigned long int) ((char *) result -
   74                         (char *) NULL)) % BLOCKSIZE;
   75   if (adj != 0)
   76     {
   77       adj = BLOCKSIZE - adj;
   78       (void) (*__morecore) (adj);
   79       result = (char *) result + adj;
   80     }
   81 
   82   if (__after_morecore_hook)
   83     (*__after_morecore_hook) ();
   84 
   85   return result;
   86 }
   87 
   88 /* Set everything up and remember that we have.  */
   89 static int initialize __P ((void));
   90 static int
   91 initialize ()
   92 {
   93   heapsize = HEAP / BLOCKSIZE;
   94   _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
   95   if (_heapinfo == NULL)
   96     return 0;
   97   memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
   98   _heapinfo[0].free.size = 0;
   99   _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  100   _heapindex = 0;
  101   _heapbase = (char *) _heapinfo;
  102   __malloc_initialized = 1;
  103   return 1;
  104 }
  105 
/* Get neatly aligned memory of SIZE bytes, initializing or
   growing the heap info table as necessary so the new core is
   describable.  Returns NULL on failure.  */
static __ptr_t morecore __P ((size_t));
static __ptr_t
morecore (size)
     size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  result = align (size);
  if (result == NULL)
    return NULL;

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Double the table size until it covers the last block of the
	 newly acquired core.  */
      newsize = heapsize;
      while ((size_t) BLOCK ((char *) result + size) > newsize)
	newsize *= 2;
      newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
      if (newinfo == NULL)
	{
	  /* Cannot describe the new core, so give it back.
	     NOTE(review): negating a size_t wraps; the huge unsigned
	     value converts to a negative ptrdiff_t on two's-complement
	     machines, which is what an sbrk-style __morecore expects
	     for shrinking -- confirm on the target platform.  */
	  (*__morecore) (-size);
	  return NULL;
	}
      memset (newinfo, 0, newsize * sizeof (malloc_info));
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      oldinfo = _heapinfo;
      /* Describe the old table, in the new table, as an ordinary busy
	 block so it can be released through the normal free path.  */
      newinfo[BLOCK (oldinfo)].busy.type = 0;
      newinfo[BLOCK (oldinfo)].busy.info.size
	= BLOCKIFY (heapsize * sizeof (malloc_info));
      /* Install the new table before freeing the old one: _free_internal
	 consults _heapinfo.  */
      _heapinfo = newinfo;
      _free_internal (oldinfo);
      heapsize = newsize;
    }

  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
  147 
/* Allocate memory from the heap.  Small requests (<= BLOCKSIZE / 2)
   are served from power-of-two fragments of a shared block; larger
   requests get a run of whole blocks from the free list, extending
   the heap via morecore when nothing fits.  Returns NULL on failure.  */
__ptr_t
pthread_malloc (size)
     size_t size;
{
  __ptr_t result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and pthread_free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  /* An installed debugging hook replaces the allocator outright.  */
  if (__malloc_hook != NULL)
    return (*__malloc_hook) (size);

  if (!__malloc_initialized)
    if (!initialize ())
      return NULL;

  /* Every chunk must be able to hold a struct list while it is free.  */
  if (size < sizeof (struct list))
      size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size,
	 i.e. the smallest LOG with (1 << log) >= size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters. */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  /* FIRST caches the index, within its block, of the fragment
	     now at the head of the free list: its offset into the block
	     shifted down by the fragment size.  */
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
	  result = pthread_malloc (BLOCKSIZE);
	  if (result == NULL)
	    return NULL;

	  /* Link all fragments but the first into the free list.  */
	  for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      if (next->next != NULL)
		next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  /* The recursive call counted the whole block as used; move
	     the still-free fragments back into the free totals.  */
	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Check to see if
		 the new core will be contiguous with the final free
		 block; if so we don't need to get as much.  */
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* (*__morecore) (0) reports the current break without
		 moving it, so equality with the end of the last free
		 block means new core would be contiguous.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  (*__morecore) (0) == ADDRESS (block + lastblocks) &&
		  (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
		{
		  /* The last free block grew in place; resume the
		     search, which will now find it big enough.  */
		  _heapinfo[block].free.size = blocks;
		  _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
		  continue;
		}
	      result = morecore (blocks * BLOCKSIZE);
	      if (result == NULL)
		return NULL;
	      /* Fresh core is never on the free list: mark it busy and
		 return it directly.  */
	      block = BLOCK (result);
	      _heapinfo[block].busy.type = 0;
	      _heapinfo[block].busy.info.size = blocks;
	      ++_chunks_used;
	      _bytes_used += blocks * BLOCKSIZE;
	      return result;
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list. */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list. */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      /* Mark the carved-off run of blocks busy and update totals.  */
      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;
    }

  return result;
}