"Fossies" - the Fresh Open Source Software Archive

Member "stress-ng-0.09.56/stress-mmap.c" (15 Mar 2019, 12121 Bytes) of package /linux/privat/stress-ng-0.09.56.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and a code folding option. Alternatively, you can view or download the uninterpreted source code file here. For more information about "stress-mmap.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 0.09.52_vs_0.09.54.

    1 /*
    2  * Copyright (C) 2013-2019 Canonical, Ltd.
    3  *
    4  * This program is free software; you can redistribute it and/or
    5  * modify it under the terms of the GNU General Public License
    6  * as published by the Free Software Foundation; either version 2
    7  * of the License, or (at your option) any later version.
    8  *
    9  * This program is distributed in the hope that it will be useful,
   10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   12  * GNU General Public License for more details.
   13  *
   14  * You should have received a copy of the GNU General Public License
   15  * along with this program; if not, write to the Free Software
   16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
   17  *
   18  * This code is a complete clean re-write of the stress tool by
   19  * Colin Ian King <colin.king@canonical.com> and attempts to be
   20  * backwardly compatible with the stress tool by Amos Waterland
   21  * <apw@rossby.metr.ou.edu> but has more stress tests and more
   22  * functionality.
   23  *
   24  */
   25 #include "stress-ng.h"
   26 
   27 #define NO_MEM_RETRIES_MAX  (65536)
   28 
/*
 *  Misc randomly chosen mmap flags; one entry is picked per mmap
 *  iteration and OR'd into the base flags.  Entries are compiled in
 *  only where the platform defines them.
 */
static const int mmap_flags[] = {
#if defined(MAP_HUGE_2MB) && defined(MAP_HUGETLB)
    /* 2MB huge page mapping (Linux) */
    MAP_HUGE_2MB | MAP_HUGETLB,
#endif
#if defined(MAP_HUGE_1GB) && defined(MAP_HUGETLB)
    /* 1GB huge page mapping (Linux) */
    MAP_HUGE_1GB | MAP_HUGETLB,
#endif
#if defined(MAP_NONBLOCK)
    /* do not read-ahead / block on page-in */
    MAP_NONBLOCK,
#endif
#if defined(MAP_GROWSDOWN)
    /* stack-style mapping that grows downwards */
    MAP_GROWSDOWN,
#endif
#if defined(MAP_LOCKED)
    /* lock pages into memory, mlock() style */
    MAP_LOCKED,
#endif
#if defined(MAP_32BIT) && (defined(__x86_64__) || defined(__x86_64))
    /* map in the low 2GB of the address space (x86-64 only) */
    MAP_32BIT,
#endif
#if defined(MAP_NOCACHE)    /* Mac OS X */
    MAP_NOCACHE,
#endif
#if defined(MAP_HASSEMAPHORE)   /* Mac OS X */
    MAP_HASSEMAPHORE,
#endif
/* This will segv if no backing, so don't use it for now */
#if 0 && defined(MAP_NORESERVE)
    MAP_NORESERVE,
#endif
#if defined(MAP_STACK)
    MAP_STACK,
#endif
    0   /* no extra flag; always present so the table is never empty */
};
   64 
   65 int stress_set_mmap_bytes(const char *opt)
   66 {
   67     size_t mmap_bytes;
   68 
   69     mmap_bytes = (size_t)get_uint64_byte_memory(opt, 1);
   70     check_range_bytes("mmap-bytes", mmap_bytes,
   71         MIN_MMAP_BYTES, MAX_MEM_LIMIT);
   72     return set_setting("mmap-bytes", TYPE_ID_SIZE_T, &mmap_bytes);
   73 }
   74 
   75 /*
   76  *  stress_mmap_mprotect()
   77  *  cycle through page settings on a region of mmap'd memory
   78  */
/*
 *  Cycle a mapped region through a sequence of page protections
 *  (PROT_NONE -> READ -> WRITE -> EXEC -> READ|WRITE), reporting any
 *  mprotect() failure.  Only active with --mmap-mprotect; a no-op when
 *  mprotect() is unavailable.
 */
static void stress_mmap_mprotect(const char *name, void *addr, const size_t len)
{
#if defined(HAVE_MPROTECT)
    if (g_opt_flags & OPT_FLAGS_MMAP_MPROTECT) {
        /* Cycle through protection settings */
        if (mprotect(addr, len, PROT_NONE) < 0)
            pr_fail("%s: mprotect set to PROT_NONE failed\n", name);
        if (mprotect(addr, len, PROT_READ) < 0)
            pr_fail("%s: mprotect set to PROT_READ failed\n", name);
        if (mprotect(addr, len, PROT_WRITE) < 0)
            pr_fail("%s: mprotect set to PROT_WRITE failed\n", name);
        if (mprotect(addr, len, PROT_EXEC) < 0)
            pr_fail("%s: mprotect set to PROT_EXEC failed\n", name);
        /* Finish readable + writable so the caller can keep using the region */
        if (mprotect(addr, len, PROT_READ | PROT_WRITE) < 0)
            pr_fail("%s: mprotect set to PROT_READ | PROT_WRITE failed\n", name);
    }
#else
    (void)name;
    (void)addr;
    (void)len;
#endif
}
  101 
/*
 *  stress_mmap_child()
 *	the stressor work loop, run in a forked child: repeatedly mmap a
 *	region with a randomly chosen extra flag, exercise it (touch,
 *	madvise, mprotect, optional file write-back), unmap its pages in
 *	random order, remap them MAP_FIXED in random order, then tear
 *	everything down.  Loops until keep_stressing() says stop or
 *	memory is exhausted for too long.
 */
static void stress_mmap_child(
    const args_t *args,
    const int fd,           /* backing file, or -1 for anonymous mappings */
    int *flags,             /* base mmap flags; MAP_POPULATE may be stripped */
    const size_t sz,        /* total mapping size, page aligned */
    const size_t pages4k,   /* number of pages in the mapping */
    const size_t mmap_bytes)
{
    const size_t page_size = args->page_size;
    int no_mem_retries = 0;
    /* msync mode for file-backed mappings: async unless --mmap-async is off */
    const int ms_flags = (g_opt_flags & OPT_FLAGS_MMAP_ASYNC) ?
        MS_ASYNC : MS_SYNC;

    do {
        /*
         * NOTE(review): VLAs sized by pages4k; assumes mmap_bytes is
         * bounded (MAX_MMAP_BYTES) such that these fit on the child's
         * stack — TODO confirm for large --mmap-bytes settings.
         */
        uint8_t mapped[pages4k];        /* per-page state: PAGE_MAPPED etc. */
        uint8_t *mappings[pages4k];     /* per-page base addresses */
        size_t n;
        /* pick one random extra mmap flag for this iteration */
        const int rnd = mwc32() % SIZEOF_ARRAY(mmap_flags);
        const int rnd_flag = mmap_flags[rnd];
        uint8_t *buf = NULL;

        if (no_mem_retries >= NO_MEM_RETRIES_MAX) {
            pr_inf("%s: gave up trying to mmap, no available memory\n",
                args->name);
            break;
        }

        if (!g_keep_stressing_flag)
            break;
        buf = (uint8_t *)mmap(NULL, sz,
            PROT_READ | PROT_WRITE, *flags | rnd_flag, fd, 0);
        if (buf == MAP_FAILED) {
            /* Force MAP_POPULATE off, just in case */
#if defined(MAP_POPULATE)
            *flags &= ~MAP_POPULATE;
#endif
            /* back off a little after repeated failures */
            no_mem_retries++;
            if (no_mem_retries > 1)
                (void)shim_usleep(100000);
            continue;   /* Try again */
        }
        if (g_opt_flags & OPT_FLAGS_MMAP_FILE) {
            /* dirty the whole mapping and sync it to the backing file */
            (void)memset(buf, 0xff, sz);
            (void)shim_msync((void *)buf, sz, ms_flags);
        }
        (void)madvise_random(buf, sz);
        (void)mincore_touch_pages(buf, mmap_bytes);
        stress_mmap_mprotect(args->name, buf, sz);
        (void)memset(mapped, PAGE_MAPPED, sizeof(mapped));
        for (n = 0; n < pages4k; n++)
            mappings[n] = buf + (n * page_size);

        /* Ensure we can write to the mapped pages */
        mmap_set(buf, sz, page_size);
        if (g_opt_flags & OPT_FLAGS_VERIFY) {
            if (mmap_check(buf, sz, page_size) < 0)
                pr_fail("%s: mmap'd region of %zu bytes does "
                    "not contain expected data\n", args->name, sz);
        }

        /*
         *  Step #0, write + read the mmap'd data from the file back into
         *  the mappings.
         */
        if ((fd >= 0) && (g_opt_flags & OPT_FLAGS_MMAP_FILE)) {
            off_t offset = 0;

            for (n = 0; n < pages4k; n++, offset += page_size) {
                ssize_t ret;

                if (lseek(fd, offset, SEEK_SET) < 0)
                    continue;

                /* best effort: I/O errors deliberately ignored */
                ret = write(fd, mappings[n], page_size);
                (void)ret;
                ret = read(fd, mappings[n], page_size);
                (void)ret;
            }
        }

        /*
         *  Step #1, unmap all pages in random order
         */
        (void)mincore_touch_pages(buf, mmap_bytes);
        for (n = pages4k; n; ) {
            /* start at a random page, scan forward for the next
               still-mapped page and unmap it */
            uint64_t j, i = mwc64() % pages4k;
            for (j = 0; j < n; j++) {
                uint64_t page = (i + j) % pages4k;
                if (mapped[page] == PAGE_MAPPED) {
                    mapped[page] = 0;
                    (void)madvise_random(mappings[page], page_size);
                    stress_mmap_mprotect(args->name, mappings[page], page_size);
                    (void)munmap((void *)mappings[page], page_size);
                    n--;
                    break;
                }
                if (!g_keep_stressing_flag)
                    goto cleanup;
            }
        }
        /* belt-and-braces: unmap the whole region too */
        (void)munmap((void *)buf, sz);
#if defined(MAP_FIXED)
        /*
         *  Step #2, map them back in random order
         */
        for (n = pages4k; n; ) {
            uint64_t j, i = mwc64() % pages4k;

            for (j = 0; j < n; j++) {
                uint64_t page = (i + j) % pages4k;

                if (!mapped[page]) {
                    /* file-backed pages map at their file offset */
                    off_t offset = (g_opt_flags & OPT_FLAGS_MMAP_FILE) ?
                            page * page_size : 0;
                    int fixed_flags = MAP_FIXED;

                    /*
                     * Attempt to map them back into the original address, this
                     * may fail (it's not the most portable operation), so keep
                     * track of failed mappings too
                     */
#if defined(MAP_FIXED_NOREPLACE)
                    /* randomly use the non-clobbering variant when available */
                    if (mwc1())
                        fixed_flags = MAP_FIXED_NOREPLACE;
#endif
                    mappings[page] = (uint8_t *)mmap((void *)mappings[page],
                        page_size, PROT_READ | PROT_WRITE, fixed_flags | *flags, fd, offset);

                    if (mappings[page] == MAP_FAILED) {
                        mapped[page] = PAGE_MAPPED_FAIL;
                        mappings[page] = NULL;
                    } else {
                        (void)mincore_touch_pages(mappings[page], page_size);
                        (void)madvise_random(mappings[page], page_size);
                        stress_mmap_mprotect(args->name, mappings[page], page_size);
                        mapped[page] = PAGE_MAPPED;
                        /* Ensure we can write to the mapped page */
                        mmap_set(mappings[page], page_size, page_size);
                        if (mmap_check(mappings[page], page_size, page_size) < 0)
                            pr_fail("%s: mmap'd region of %zu bytes does "
                                "not contain expected data\n", args->name, page_size);
                        if (g_opt_flags & OPT_FLAGS_MMAP_FILE) {
                            (void)memset(mappings[page], n, page_size);
                            (void)shim_msync((void *)mappings[page], page_size, ms_flags);
#if defined(FALLOC_FL_KEEP_SIZE) && defined(FALLOC_FL_PUNCH_HOLE)
                            /* punch a hole in the backing file to stress it too */
                            (void)shim_fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                offset, page_size);
#endif
                        }
                    }
                    n--;
                    break;
                }
                if (!g_keep_stressing_flag)
                    goto cleanup;
            }
        }
#endif
cleanup:
        /*
         *  Step #3, unmap them all
         */
        for (n = 0; n < pages4k; n++) {
            if (mapped[n] & PAGE_MAPPED) {
                (void)madvise_random(mappings[n], page_size);
                stress_mmap_mprotect(args->name, mappings[n], page_size);
                (void)munmap((void *)mappings[n], page_size);
            }
        }
        inc_counter(args);
    } while (keep_stressing());
}
  274 
  275 /*
  276  *  stress_mmap()
  277  *  stress mmap
  278  */
/*
 *  stress_mmap()
 *	stressor entry point: set up sizing and the optional backing
 *	file, then run stress_mmap_child() in a forked child so that
 *	OOM kills, SIGSEGV and SIGBUS can be caught and the child
 *	restarted rather than taking down the stressor.
 */
static int stress_mmap(const args_t *args)
{
    const size_t page_size = args->page_size;
    size_t sz, pages4k;
    size_t mmap_bytes = DEFAULT_MMAP_BYTES;
    pid_t pid;
    int fd = -1, flags = MAP_PRIVATE | MAP_ANONYMOUS;
    /* counts of OOM restarts, SIGSEGV restarts and SIGBUS kills */
    uint32_t ooms = 0, segvs = 0, buserrs = 0;
    char filename[PATH_MAX];

#if defined(MAP_POPULATE)
    /* pre-fault pages where supported; stripped again on ENOMEM */
    flags |= MAP_POPULATE;
#endif
    if (!get_setting("mmap-bytes", &mmap_bytes)) {
        if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
            mmap_bytes = MAX_MMAP_BYTES;
        if (g_opt_flags & OPT_FLAGS_MINIMIZE)
            mmap_bytes = MIN_MMAP_BYTES;
    }
    /* share the requested bytes across all stressor instances */
    mmap_bytes /= args->num_instances;
    if (mmap_bytes < MIN_MMAP_BYTES)
        mmap_bytes = MIN_MMAP_BYTES;
    if (mmap_bytes < page_size)
        mmap_bytes = page_size;
    /* round down to a whole number of pages */
    sz = mmap_bytes & ~(page_size - 1);
    pages4k = sz / page_size;

    /* Make sure this is killable by OOM killer */
    set_oom_adjustment(args->name, true);

    if (g_opt_flags & OPT_FLAGS_MMAP_FILE) {
        /*
         * --mmap-file: create an unlinked temporary file of sz bytes
         * (sparse, extended by writing one byte at the end) and switch
         * the mapping to MAP_SHARED on it.
         */
        ssize_t ret, rc;
        char ch = '\0';

        rc = stress_temp_dir_mk_args(args);
        if (rc < 0)
            return exit_status(-rc);

        (void)stress_temp_filename_args(args,
            filename, sizeof(filename), mwc32());

        fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
        if (fd < 0) {
            rc = exit_status(errno);
            pr_fail_err("open");
            (void)unlink(filename);
            (void)stress_temp_dir_rm_args(args);

            return rc;
        }
        /* unlink now; the fd keeps the file alive until close */
        (void)unlink(filename);
        if (lseek(fd, sz - sizeof(ch), SEEK_SET) < 0) {
            pr_fail_err("lseek");
            (void)close(fd);
            (void)stress_temp_dir_rm_args(args);

            return EXIT_FAILURE;
        }
redo:
        ret = write(fd, &ch, sizeof(ch));
        if (ret != sizeof(ch)) {
            /* retry interrupted/short writes */
            if ((errno == EAGAIN) || (errno == EINTR))
                goto redo;
            rc = exit_status(errno);
            pr_fail_err("write");
            (void)close(fd);
            (void)stress_temp_dir_rm_args(args);

            return rc;
        }
        flags &= ~(MAP_ANONYMOUS | MAP_PRIVATE);
        flags |= MAP_SHARED;
    }

again:
    if (!g_keep_stressing_flag)
        goto cleanup;
    pid = fork();
    if (pid < 0) {
        /* transient fork failures: retry */
        if ((errno == EAGAIN) || (errno == ENOMEM))
            goto again;
        pr_err("%s: fork failed: errno=%d: (%s)\n",
            args->name, errno, strerror(errno));
    } else if (pid > 0) {
        int status, ret;

        (void)setpgid(pid, g_pgrp);
        /* Parent, wait for child */
        ret = waitpid(pid, &status, 0);
        if (ret < 0) {
            if (errno != EINTR)
                pr_dbg("%s: waitpid(): errno=%d (%s)\n",
                    args->name, errno, strerror(errno));
            /* make sure the child is gone, then reap it */
            (void)kill(pid, SIGTERM);
            (void)kill(pid, SIGKILL);
            (void)waitpid(pid, &status, 0);
        } else if (WIFSIGNALED(status)) {
            /* If we got killed by sigbus, re-start */
            if (WTERMSIG(status) == SIGBUS) {
                /* Happens frequently, so be silent */
                buserrs++;
                goto again;
            }

            pr_dbg("%s: child died: %s (instance %d)\n",
                args->name, stress_strsignal(WTERMSIG(status)),
                args->instance);
            /* If we got killed by OOM killer, re-start */
            if (WTERMSIG(status) == SIGKILL) {
                if (g_opt_flags & OPT_FLAGS_OOMABLE) {
                    log_system_mem_info();
                    pr_dbg("%s: assuming killed by OOM "
                        "killer, bailing out "
                        "(instance %d)\n",
                        args->name, args->instance);
                    _exit(0);
                } else {
                    log_system_mem_info();
                    pr_dbg("%s: assuming killed by OOM "
                        "killer, restarting again "
                        "(instance %d)\n",
                        args->name, args->instance);
                    ooms++;
                    goto again;
                }
            }
            /* If we got killed by sigsegv, re-start */
            if (WTERMSIG(status) == SIGSEGV) {
                pr_dbg("%s: killed by SIGSEGV, "
                    "restarting again "
                    "(instance %d)\n",
                    args->name, args->instance);
                segvs++;
                goto again;
            }
        }
    } else if (pid == 0) {
        /* Child: do the actual stressing, then exit */
        (void)setpgid(0, g_pgrp);
        stress_parent_died_alarm();

        /* Make sure this is killable by OOM killer */
        set_oom_adjustment(args->name, true);

        stress_mmap_child(args, fd, &flags, sz, pages4k, mmap_bytes);
        _exit(0);
    }

cleanup:
    if (g_opt_flags & OPT_FLAGS_MMAP_FILE) {
        (void)close(fd);
        (void)stress_temp_dir_rm_args(args);
    }
    if (ooms + segvs + buserrs > 0)
        pr_dbg("%s: OOM restarts: %" PRIu32
            ", SEGV restarts: %" PRIu32
            ", SIGBUS signals: %" PRIu32 "\n",
            args->name, ooms, segvs, buserrs);

    return EXIT_SUCCESS;
}
  439 
/* stressor registration: entry point and scheduling classes */
stressor_info_t stress_mmap_info = {
    .stressor = stress_mmap,
    .class = CLASS_VM | CLASS_OS
};