"Fossies" - the Fresh Open Source Software Archive

Member "pcre2-10.36/src/sljit/sljitProtExecAllocator.c" (27 Aug 2020, 13787 Bytes) of package /linux/misc/pcre2-10.36.tar.bz2:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "sljitProtExecAllocator.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 10.35_vs_10.36.

/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
   This file contains a simple executable memory allocator.

   It is assumed that executable code blocks are usually medium (or sometimes
   large) memory blocks, and that the allocator is not called too frequently
   (it is less optimized than other allocators). Thus, using it as a generic
   allocator is not recommended.

   How it works:
     Memory is allocated in contiguous memory areas called chunks by alloc_chunk().
     Chunk format:
     [ block ][ block ] ... [ block ][ block terminator ]

   Every block, including the block terminator, starts with a block_header.
   The block header contains the size of the previous block and of the block
   itself. These sizes can also hold special values.
     Block size:
       0 - The block is a free_block, with a separate size member.
       1 - The block is the block terminator.
       n - The block is currently in use, and the value is its size.
     Previous block size:
       0 - This is the first block of the memory chunk.
       n - The size of the previous block.

   Using these size values we can walk forward or backward along the block
   chain. The unused blocks are kept in a linked list pointed to by
   free_blocks. This list is used to find a suitable memory area when the
   allocator is called.

   When a block is freed, the new free block is merged with its adjacent free
   blocks if possible. For example, if the layout is

     [ free block ][ used block ][ free block ]

   and "used block" is freed, the three blocks are merged together:

     [           one big free block           ]
*/
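
/*
   Illustrative sketch (not part of the allocator): how a caller is expected
   to combine the three public functions defined below. The writable and the
   executable views of a block are linked by sljit_exec_offset(); note that
   sljit_free_exec() subtracts that offset again, so it expects the executable
   address. The helper below is hypothetical and kept compiled out.
*/
#if 0
static void example_usage(void)
{
    sljit_u8 *rw;
    sljit_u8 *rx;

    rw = (sljit_u8 *)sljit_malloc_exec(128);
    if (!rw)
        return;

    /* Emit machine code through the writable view. */
    rw[0] = 0xc3;

    /* Compute the executable view of the same block. */
    rx = rw + sljit_exec_offset(rw);

    /* ... execute the code via rx ... */
    (void)rx;

    /* Free using the executable address. */
    sljit_free_exec(rx);
}
#endif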

/* --------------------------------------------------------------------- */
/*  System (OS) functions                                                */
/* --------------------------------------------------------------------- */

/* 64 KByte. */
#define CHUNK_SIZE  0x10000

struct chunk_header {
    void *executable;
};

/*
   alloc_chunk / free_chunk :
     * allocate executable system memory chunks
     * the size is always divisible by CHUNK_SIZE
   SLJIT_ALLOCATOR_LOCK / SLJIT_ALLOCATOR_UNLOCK :
     * provided as part of sljitUtils
     * only the allocator requires this lock; sljit itself is fully thread
       safe, as it only uses local variables
*/
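
/*
   Both alloc_chunk() implementations below create two views of the same
   physical memory: a PROT_READ | PROT_WRITE view, which is returned to the
   allocator and used for writing code, and a PROT_READ | PROT_EXEC view,
   which is stored in chunk_header->executable. The distance between the two
   views is later recorded in every block header as executable_offset, so no
   single mapping is ever writable and executable at the same time.
*/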

#ifndef __NetBSD__
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

#ifndef O_NOATIME
#define O_NOATIME 0
#endif

/* this is a Linux extension available since kernel 3.11 */
#ifndef O_TMPFILE
#define O_TMPFILE 020200000
#endif

#ifndef _GNU_SOURCE
char *secure_getenv(const char *name);
int mkostemp(char *template, int flags);
#endif

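/*
   create_tempfile() returns a file descriptor that will back both mappings
   of a chunk. It prefers memfd_create() when available; otherwise it picks a
   directory from $TMPDIR, P_tmpdir or /tmp, tries an anonymous O_TMPFILE
   file there on Linux, and finally falls back to mkostemp() followed by an
   immediate unlink(). The file permissions are cleared so that no other
   process can open the file.
*/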
static SLJIT_INLINE int create_tempfile(void)
{
    int fd;
    char tmp_name[256];
    size_t tmp_name_len = 0;
    char *dir;
    struct stat st;
#if defined(SLJIT_SINGLE_THREADED) && SLJIT_SINGLE_THREADED
    mode_t mode;
#endif

#ifdef HAVE_MEMFD_CREATE
    /* this is a GNU extension, make sure to use -D_GNU_SOURCE */
    fd = memfd_create("sljit", MFD_CLOEXEC);
    if (fd != -1) {
        fchmod(fd, 0);
        return fd;
    }
#endif

    dir = secure_getenv("TMPDIR");

    if (dir) {
        tmp_name_len = strlen(dir);
        if (tmp_name_len > 0 && tmp_name_len < sizeof(tmp_name)
                && (stat(dir, &st) == 0) && S_ISDIR(st.st_mode))
            strcpy(tmp_name, dir);
        else
            tmp_name_len = 0; /* ignore an unusable TMPDIR, keep tmp_name consistent */
    }

#ifdef P_tmpdir
    if (!tmp_name_len) {
        tmp_name_len = strlen(P_tmpdir);
        if (tmp_name_len > 0 && tmp_name_len < sizeof(tmp_name))
            strcpy(tmp_name, P_tmpdir);
    }
#endif
    if (!tmp_name_len) {
        strcpy(tmp_name, "/tmp");
        tmp_name_len = 4;
    }

    SLJIT_ASSERT(tmp_name_len > 0 && tmp_name_len < sizeof(tmp_name));

    if (tmp_name[tmp_name_len - 1] == '/')
        tmp_name[--tmp_name_len] = '\0';

#ifdef __linux__
    /*
     * the previous trimming might have left an empty string if TMPDIR="/",
     * so work around the problem below
     */
    fd = open(tmp_name_len ? tmp_name : "/",
        O_TMPFILE | O_EXCL | O_RDWR | O_NOATIME | O_CLOEXEC, 0);
    if (fd != -1)
        return fd;
#endif

    if (tmp_name_len + 7 >= sizeof(tmp_name))
        return -1;

    strcpy(tmp_name + tmp_name_len, "/XXXXXX");
#if defined(SLJIT_SINGLE_THREADED) && SLJIT_SINGLE_THREADED
    mode = umask(0777);
#endif
    fd = mkostemp(tmp_name, O_CLOEXEC | O_NOATIME);
#if defined(SLJIT_SINGLE_THREADED) && SLJIT_SINGLE_THREADED
    umask(mode);
#else
    fchmod(fd, 0);
#endif

    if (fd == -1)
        return -1;

    if (unlink(tmp_name)) {
        close(fd);
        return -1;
    }

    return fd;
}

static SLJIT_INLINE struct chunk_header* alloc_chunk(sljit_uw size)
{
    struct chunk_header *retval;
    int fd;

    fd = create_tempfile();
    if (fd == -1)
        return NULL;

    if (ftruncate(fd, size)) {
        close(fd);
        return NULL;
    }

    retval = (struct chunk_header *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

    if (retval == MAP_FAILED) {
        close(fd);
        return NULL;
    }

    retval->executable = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);

    if (retval->executable == MAP_FAILED) {
        munmap((void *)retval, size);
        close(fd);
        return NULL;
    }

    close(fd);
    return retval;
}
#else
/*
 * MAP_REMAPDUP is a NetBSD extension available since 8.0; make sure to
 * adjust your feature macros (e.g. -D_NETBSD_SOURCE) as needed.
 */
static SLJIT_INLINE struct chunk_header* alloc_chunk(sljit_uw size)
{
    struct chunk_header *retval;

    retval = (struct chunk_header *)mmap(NULL, size,
            PROT_READ | PROT_WRITE | PROT_MPROTECT(PROT_EXEC),
            MAP_ANON | MAP_SHARED, -1, 0);

    if (retval == MAP_FAILED)
        return NULL;

    retval->executable = mremap(retval, size, NULL, size, MAP_REMAPDUP);
    if (retval->executable == MAP_FAILED) {
        munmap((void *)retval, size);
        return NULL;
    }

    if (mprotect(retval->executable, size, PROT_READ | PROT_EXEC) == -1) {
        munmap(retval->executable, size);
        munmap((void *)retval, size);
        return NULL;
    }

    return retval;
}
#endif /* NetBSD */

static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
    struct chunk_header *header = ((struct chunk_header *)chunk) - 1;

    munmap(header->executable, size);
    munmap((void *)header, size);
}

/* --------------------------------------------------------------------- */
/*  Common functions                                                     */
/* --------------------------------------------------------------------- */

#define CHUNK_MASK  (~(CHUNK_SIZE - 1))

struct block_header {
    sljit_uw size;
    sljit_uw prev_size;
    sljit_sw executable_offset;
};

struct free_block {
    struct block_header header;
    struct free_block *next;
    struct free_block *prev;
    sljit_uw size;
};

#define AS_BLOCK_HEADER(base, offset) \
    ((struct block_header*)(((sljit_u8*)base) + offset))
#define AS_FREE_BLOCK(base, offset) \
    ((struct free_block*)(((sljit_u8*)base) + offset))
#define MEM_START(base)     ((void*)((base) + 1))
#define ALIGN_SIZE(size)    (((size) + sizeof(struct block_header) + 7) & ~7)
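
/*
   ALIGN_SIZE() reserves room for the block header in front of the payload
   and rounds the total up to a multiple of 8. For example, on a typical
   64-bit target where sizeof(struct block_header) == 24, ALIGN_SIZE(10)
   evaluates to (10 + 24 + 7) & ~7 == 40.
*/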

static struct free_block* free_blocks;
static sljit_uw allocated_size;
static sljit_uw total_size;

static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size)
{
    free_block->header.size = 0;
    free_block->size = size;

    free_block->next = free_blocks;
    free_block->prev = NULL;
    if (free_blocks)
        free_blocks->prev = free_block;
    free_blocks = free_block;
}

static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block)
{
    if (free_block->next)
        free_block->next->prev = free_block->prev;

    if (free_block->prev)
        free_block->prev->next = free_block->next;
    else {
        SLJIT_ASSERT(free_blocks == free_block);
        free_blocks = free_block->next;
    }
}

SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
    struct chunk_header *chunk_header;
    struct block_header *header;
    struct block_header *next_header;
    struct free_block *free_block;
    sljit_uw chunk_size;
    sljit_sw executable_offset;

    SLJIT_ALLOCATOR_LOCK();
    if (size < (64 - sizeof(struct block_header)))
        size = (64 - sizeof(struct block_header));
    size = ALIGN_SIZE(size);

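    /* First-fit search of the free list. A sufficiently large free block is
       split: the new allocation is carved from its end, so the free block
       simply shrinks in place and stays on the list. */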
    free_block = free_blocks;
    while (free_block) {
        if (free_block->size >= size) {
            chunk_size = free_block->size;
            if (chunk_size > size + 64) {
                /* We just cut a block from the end of the free block. */
                chunk_size -= size;
                free_block->size = chunk_size;
                header = AS_BLOCK_HEADER(free_block, chunk_size);
                header->prev_size = chunk_size;
                header->executable_offset = free_block->header.executable_offset;
                AS_BLOCK_HEADER(header, size)->prev_size = size;
            }
            else {
                sljit_remove_free_block(free_block);
                header = (struct block_header*)free_block;
                size = chunk_size;
            }
            allocated_size += size;
            header->size = size;
            SLJIT_ALLOCATOR_UNLOCK();
            return MEM_START(header);
        }
        free_block = free_block->next;
    }

    chunk_size = sizeof(struct chunk_header) + sizeof(struct block_header);
    chunk_size = (chunk_size + size + CHUNK_SIZE - 1) & CHUNK_MASK;
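    /* For example, with CHUNK_SIZE == 0x10000 and a typical 64-bit layout
       (sizeof(struct chunk_header) == 8, sizeof(struct block_header) == 24),
       a request rounded to size == 40 gives (8 + 24 + 40 + 0xffff) & ~0xffff
       == 0x10000, i.e. a single 64 KByte chunk. */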

    chunk_header = alloc_chunk(chunk_size);
    if (!chunk_header) {
        SLJIT_ALLOCATOR_UNLOCK();
        return NULL;
    }

    executable_offset = (sljit_sw)((sljit_u8*)chunk_header->executable - (sljit_u8*)chunk_header);

    chunk_size -= sizeof(struct chunk_header) + sizeof(struct block_header);
    total_size += chunk_size;

    header = (struct block_header *)(chunk_header + 1);

    header->prev_size = 0;
    header->executable_offset = executable_offset;
    if (chunk_size > size + 64) {
        /* Cut the allocated space into a free and a used block. */
        allocated_size += size;
        header->size = size;
        chunk_size -= size;

        free_block = AS_FREE_BLOCK(header, size);
        free_block->header.prev_size = size;
        free_block->header.executable_offset = executable_offset;
        sljit_insert_free_block(free_block, chunk_size);
        next_header = AS_BLOCK_HEADER(free_block, chunk_size);
    }
    else {
        /* All space belongs to this allocation. */
        allocated_size += chunk_size;
        header->size = chunk_size;
        next_header = AS_BLOCK_HEADER(header, chunk_size);
    }
    next_header->size = 1;
    next_header->prev_size = chunk_size;
    next_header->executable_offset = executable_offset;
    SLJIT_ALLOCATOR_UNLOCK();
    return MEM_START(header);
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
{
    struct block_header *header;
    struct free_block* free_block;

    SLJIT_ALLOCATOR_LOCK();
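    /* ptr is the executable address of the block; subtracting
       executable_offset below moves us back to the writable view, where the
       headers and the free list are maintained. */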
    header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header));
    header = AS_BLOCK_HEADER(header, -header->executable_offset);
    allocated_size -= header->size;

    /* Connect adjacent free blocks together if possible. */

    /* If header->prev_size == 0, free_block will be equal to header.
       In this case, free_block->header.size will be > 0. */
    free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size);
    if (SLJIT_UNLIKELY(!free_block->header.size)) {
        free_block->size += header->size;
        header = AS_BLOCK_HEADER(free_block, free_block->size);
        header->prev_size = free_block->size;
    }
    else {
        free_block = (struct free_block*)header;
        sljit_insert_free_block(free_block, header->size);
    }

    header = AS_BLOCK_HEADER(free_block, free_block->size);
    if (SLJIT_UNLIKELY(!header->size)) {
        free_block->size += ((struct free_block*)header)->size;
        sljit_remove_free_block((struct free_block*)header);
        header = AS_BLOCK_HEADER(free_block, free_block->size);
        header->prev_size = free_block->size;
    }

    /* The whole chunk is free. */
    if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
        /* Unmap the chunk only if more than (allocated_size / 2) of free
           space would still remain afterwards. */
        if (total_size - free_block->size > (allocated_size * 3 / 2)) {
            total_size -= free_block->size;
            sljit_remove_free_block(free_block);
            free_chunk(free_block, free_block->size +
                sizeof(struct chunk_header) +
                sizeof(struct block_header));
        }
    }

    SLJIT_ALLOCATOR_UNLOCK();
}

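/*
   Releases every completely unused chunk (a chunk whose single free block
   starts the chunk and is followed directly by the block terminator) back
   to the operating system.
*/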
SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
{
    struct free_block* free_block;
    struct free_block* next_free_block;

    SLJIT_ALLOCATOR_LOCK();

    free_block = free_blocks;
    while (free_block) {
        next_free_block = free_block->next;
        if (!free_block->header.prev_size &&
                AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) {
            total_size -= free_block->size;
            sljit_remove_free_block(free_block);
            free_chunk(free_block, free_block->size +
                sizeof(struct chunk_header) +
                sizeof(struct block_header));
        }
        free_block = next_free_block;
    }

    SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks));
    SLJIT_ALLOCATOR_UNLOCK();
}

SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr)
{
    return ((struct block_header *)(ptr))[-1].executable_offset;
}