"Fossies" - the Fresh Open Source Software Archive

Member "pcre-8.43/sljit/sljitExecAllocator.c" (23 Jan 2019, 11297 Bytes) of package /linux/misc/pcre-8.43.tar.bz2:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and a code folding option. Alternatively, you can view or download the uninterpreted source code file here. For more information about "sljitExecAllocator.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 8.42_vs_8.43.

    1 /*
    2  *    Stack-less Just-In-Time compiler
    3  *
    4  *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without modification, are
    7  * permitted provided that the following conditions are met:
    8  *
    9  *   1. Redistributions of source code must retain the above copyright notice, this list of
   10  *      conditions and the following disclaimer.
   11  *
   12  *   2. Redistributions in binary form must reproduce the above copyright notice, this list
   13  *      of conditions and the following disclaimer in the documentation and/or other materials
   14  *      provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
   17  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
   19  * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
   21  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
   22  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
   24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   25  */
   26 
/*
   This file contains a simple executable memory allocator.

   It is assumed that executable code blocks are usually medium (or
   sometimes large) memory blocks, and that the allocator is not called
   too frequently (it is less optimized than other allocators). Thus,
   using it as a generic allocator is not recommended.

   How it works:
     Memory is allocated in contiguous memory areas called chunks by
     alloc_chunk().
     Chunk format:
     [ block ][ block ] ... [ block ][ block terminator ]

   All blocks, as well as the block terminator, start with a
   block_header. The block header contains the size of the current and
   the previous block. These sizes can also hold special values.
     Block size:
       0 - The block is a free_block, with a separate size member.
       1 - The block is a block terminator.
       n - The block is currently in use and the value is its size.
     Previous block size:
       0 - This is the first block of the memory chunk.
       n - The size of the previous block.

   Using these size values we can walk forward or backward along the
   block chain. The unused blocks are kept in a linked list headed by
   free_blocks. This list is used to find a suitable memory area when
   the allocator is called.

   When a block is freed, the new free block is merged with its adjacent
   free blocks where possible. For example, given

     [ free block ][ used block ][ free block ]

   when "used block" is freed, the three blocks are merged into one:

     [           one big free block           ]
*/
   63 
   64 /* --------------------------------------------------------------------- */
   65 /*  System (OS) functions                                                */
   66 /* --------------------------------------------------------------------- */
   67 
   68 /* 64 KByte. */
   69 #define CHUNK_SIZE  0x10000
   70 
   71 /*
   72    alloc_chunk / free_chunk :
   73      * allocate executable system memory chunks
   74      * the size is always divisible by CHUNK_SIZE
   75    allocator_grab_lock / allocator_release_lock :
   76      * make the allocator thread safe
   77      * can be empty if the OS (or the application) does not support threading
   78      * only the allocator requires this lock, sljit is fully thread safe
   79        as it only uses local variables
   80 */
   81 
   82 #ifdef _WIN32
   83 
/* Allocates a chunk of executable memory from the OS (Windows).
   size is always a multiple of CHUNK_SIZE.  Returns NULL on failure.
   NOTE(review): pages are mapped writable AND executable
   (PAGE_EXECUTE_READWRITE), i.e. no W^X separation. */
static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
    return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
}
   88 
/* Returns a chunk allocated by alloc_chunk() to the OS (Windows).
   VirtualFree with MEM_RELEASE releases the whole reservation, so the
   size argument is unused here. */
static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
    SLJIT_UNUSED_ARG(size);
    VirtualFree(chunk, 0, MEM_RELEASE);
}
   94 
   95 #else
   96 
   97 #ifdef __APPLE__
   98 /* Configures TARGET_OS_OSX when appropriate */
   99 #include <TargetConditionals.h>
  100 
  101 #if TARGET_OS_OSX && defined(MAP_JIT)
  102 #include <sys/utsname.h>
  103 #endif /* TARGET_OS_OSX && MAP_JIT */
  104 
  105 #ifdef MAP_JIT
  106 
  107 static SLJIT_INLINE int get_map_jit_flag()
  108 {
  109 #if TARGET_OS_OSX
  110     /* On macOS systems, returns MAP_JIT if it is defined _and_ we're running on a version
  111        of macOS where it's OK to have more than one JIT block. On non-macOS systems, returns
  112        MAP_JIT if it is defined. */
  113     static int map_jit_flag = -1;
  114 
  115     /* The following code is thread safe because multiple initialization
  116        sets map_jit_flag to the same value and the code has no side-effects.
  117        Changing the kernel version witout system restart is (very) unlikely. */
  118     if (map_jit_flag == -1) {
  119         struct utsname name;
  120 
  121         uname(&name);
  122 
  123         /* Kernel version for 10.14.0 (Mojave) */
  124         map_jit_flag = (atoi(name.release) >= 18) ? MAP_JIT : 0;
  125     }
  126 
  127     return map_jit_flag;
  128 #else /* !TARGET_OS_OSX */
  129     return MAP_JIT;
  130 #endif /* TARGET_OS_OSX */
  131 }
  132 
  133 #endif /* MAP_JIT */
  134 
  135 #endif /* __APPLE__ */
  136 
/* Allocates a chunk of executable memory with mmap() (POSIX).
   size is always a multiple of CHUNK_SIZE.  Returns NULL on failure.
   NOTE(review): the mapping is readable, writable and executable
   (PROT_READ | PROT_WRITE | PROT_EXEC), i.e. no W^X separation. */
static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
    void *retval;

#ifdef MAP_ANON

    int flags = MAP_PRIVATE | MAP_ANON;

#ifdef MAP_JIT
    /* Apple targets may require (or forbid) MAP_JIT; see get_map_jit_flag(). */
    flags |= get_map_jit_flag();
#endif

    retval = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0);
#else /* !MAP_ANON */
    /* No anonymous mappings available: map /dev/zero instead.
       dev_zero/open_dev_zero are defined elsewhere in sljit. */
    if (dev_zero < 0) {
        if (open_dev_zero())
            return NULL;
    }
    retval = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, dev_zero, 0);
#endif /* MAP_ANON */

    /* mmap reports failure with MAP_FAILED, not NULL; normalize to NULL. */
    return (retval != MAP_FAILED) ? retval : NULL;
}
  160 
/* Returns a chunk allocated by alloc_chunk() to the OS (POSIX). */
static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
    munmap(chunk, size);
}
  165 
  166 #endif
  167 
  168 /* --------------------------------------------------------------------- */
  169 /*  Common functions                                                     */
  170 /* --------------------------------------------------------------------- */
  171 
#define CHUNK_MASK  (~(CHUNK_SIZE - 1))

/* Header placed at the start of every block (used, free and terminator).
   size == 0 marks a free_block, size == 1 a chunk terminator; any other
   value is the byte size of a used block.  prev_size == 0 marks the
   first block of a chunk. */
struct block_header {
    sljit_uw size;
    sljit_uw prev_size;
};

/* A free block: a header with header.size == 0, followed by the links
   of the doubly linked free list and the real size of the free area. */
struct free_block {
    struct block_header header;
    struct free_block *next;
    struct free_block *prev;
    sljit_uw size;
};

/* Pointer arithmetic helpers for walking the block chain by byte offset. */
#define AS_BLOCK_HEADER(base, offset) \
    ((struct block_header*)(((sljit_u8*)base) + offset))
#define AS_FREE_BLOCK(base, offset) \
    ((struct free_block*)(((sljit_u8*)base) + offset))
/* First usable byte of a block (just past its header). */
#define MEM_START(base)     ((void*)(((sljit_u8*)base) + sizeof(struct block_header)))
/* Rounds size up so that size plus a block_header is a multiple of 8. */
#define ALIGN_SIZE(size)    (((size) + sizeof(struct block_header) + 7) & ~7)

/* Allocator state; access is guarded by allocator_grab_lock(). */
static struct free_block* free_blocks;  /* head of the free list */
static sljit_uw allocated_size;         /* bytes currently handed out to users */
static sljit_uw total_size;             /* bytes obtained from the OS (excluding terminator headers) */
  196 
  197 static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size)
  198 {
  199     free_block->header.size = 0;
  200     free_block->size = size;
  201 
  202     free_block->next = free_blocks;
  203     free_block->prev = NULL;
  204     if (free_blocks)
  205         free_blocks->prev = free_block;
  206     free_blocks = free_block;
  207 }
  208 
  209 static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block)
  210 {
  211     if (free_block->next)
  212         free_block->next->prev = free_block->prev;
  213 
  214     if (free_block->prev)
  215         free_block->prev->next = free_block->next;
  216     else {
  217         SLJIT_ASSERT(free_blocks == free_block);
  218         free_blocks = free_block->next;
  219     }
  220 }
  221 
/* Allocates size bytes of executable memory.

   The free list is searched first; when a fit leaves more than 64
   spare bytes, the tail of the free block is carved off, otherwise the
   whole free block is handed out.  When no free block fits, a fresh
   chunk (rounded up to a CHUNK_SIZE multiple) is obtained from the OS
   and split into a used block, an optional free remainder and a
   terminator header (size == 1).  Returns NULL if the OS allocation
   fails. */
SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
    struct block_header *header;
    struct block_header *next_header;
    struct free_block *free_block;
    sljit_uw chunk_size;

    allocator_grab_lock();
    /* Enforce a minimum size so every block can later hold a
       struct free_block when it is freed. */
    if (size < (64 - sizeof(struct block_header)))
        size = (64 - sizeof(struct block_header));
    size = ALIGN_SIZE(size);

    /* First-fit search of the free list. */
    free_block = free_blocks;
    while (free_block) {
        if (free_block->size >= size) {
            chunk_size = free_block->size;
            if (chunk_size > size + 64) {
                /* We just cut a block from the end of the free block. */
                chunk_size -= size;
                free_block->size = chunk_size;
                header = AS_BLOCK_HEADER(free_block, chunk_size);
                header->prev_size = chunk_size;
                AS_BLOCK_HEADER(header, size)->prev_size = size;
            }
            else {
                /* Remainder too small to keep: hand out the whole free block. */
                sljit_remove_free_block(free_block);
                header = (struct block_header*)free_block;
                size = chunk_size;
            }
            allocated_size += size;
            header->size = size;
            allocator_release_lock();
            return MEM_START(header);
        }
        free_block = free_block->next;
    }

    /* No suitable free block: request a new chunk from the OS, with
       room for the terminator header, rounded up to CHUNK_SIZE. */
    chunk_size = (size + sizeof(struct block_header) + CHUNK_SIZE - 1) & CHUNK_MASK;
    header = (struct block_header*)alloc_chunk(chunk_size);
    if (!header) {
        allocator_release_lock();
        return NULL;
    }

    /* Reserve the terminator header at the end of the chunk. */
    chunk_size -= sizeof(struct block_header);
    total_size += chunk_size;

    header->prev_size = 0;
    if (chunk_size > size + 64) {
        /* Cut the allocated space into a free and a used block. */
        allocated_size += size;
        header->size = size;
        chunk_size -= size;

        free_block = AS_FREE_BLOCK(header, size);
        free_block->header.prev_size = size;
        sljit_insert_free_block(free_block, chunk_size);
        next_header = AS_BLOCK_HEADER(free_block, chunk_size);
    }
    else {
        /* All space belongs to this allocation. */
        allocated_size += chunk_size;
        header->size = chunk_size;
        next_header = AS_BLOCK_HEADER(header, chunk_size);
    }
    /* Terminator marker: 1 can never be a real (8-aligned) block size. */
    next_header->size = 1;
    next_header->prev_size = chunk_size;
    allocator_release_lock();
    return MEM_START(header);
}
  292 
/* Returns the block at ptr (previously returned by sljit_malloc_exec)
   to the allocator.  The freed block is merged with its previous
   and/or next neighbour when those are free; if that leaves an entire
   chunk free and enough free memory remains, the chunk is released
   back to the OS. */
SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
{
    struct block_header *header;
    struct block_header* free_block;

    allocator_grab_lock();
    /* Step back from the user pointer to the block header. */
    header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header));
    allocated_size -= header->size;

    /* Connecting free blocks together if possible. */

    /* If header->prev_size == 0, free_block will equal to header.
       In this case, free_block->header.size will be > 0. */
    free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size);
    if (SLJIT_UNLIKELY(!free_block->header.size)) {
        /* Previous neighbour is free: grow it over this block and
           update the following block's back-pointer. */
        free_block->size += header->size;
        header = AS_BLOCK_HEADER(free_block, free_block->size);
        header->prev_size = free_block->size;
    }
    else {
        /* Previous neighbour in use: this block becomes a new free block. */
        free_block = (struct free_block*)header;
        sljit_insert_free_block(free_block, header->size);
    }

    /* Merge with the next block when it is free as well. */
    header = AS_BLOCK_HEADER(free_block, free_block->size);
    if (SLJIT_UNLIKELY(!header->size)) {
        free_block->size += ((struct free_block*)header)->size;
        sljit_remove_free_block((struct free_block*)header);
        header = AS_BLOCK_HEADER(free_block, free_block->size);
        header->prev_size = free_block->size;
    }

    /* The whole chunk is free (first block of the chunk followed by
       the terminator). */
    if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
        /* If this block is freed, we still have (allocated_size / 2) free space. */
        if (total_size - free_block->size > (allocated_size * 3 / 2)) {
            total_size -= free_block->size;
            sljit_remove_free_block(free_block);
            free_chunk(free_block, free_block->size + sizeof(struct block_header));
        }
    }

    allocator_release_lock();
}
  337 
  338 SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
  339 {
  340     struct free_block* free_block;
  341     struct free_block* next_free_block;
  342 
  343     allocator_grab_lock();
  344 
  345     free_block = free_blocks;
  346     while (free_block) {
  347         next_free_block = free_block->next;
  348         if (!free_block->header.prev_size && 
  349                 AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) {
  350             total_size -= free_block->size;
  351             sljit_remove_free_block(free_block);
  352             free_chunk(free_block, free_block->size + sizeof(struct block_header));
  353         }
  354         free_block = next_free_block;
  355     }
  356 
  357     SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks));
  358     allocator_release_lock();
  359 }