"Fossies" - the Fresh Open Source Software Archive

Member "stress-ng-0.09.56/stress-memthrash.c" (15 Mar 2019, 14595 Bytes) of package /linux/privat/stress-ng-0.09.56.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "stress-memthrash.c" see the Fossies "Dox" file reference documentation and the last Fossies "Diffs" side-by-side code changes report: 0.09.52_vs_0.09.54.

/*
 * Copyright (C) 2013-2019 Canonical, Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * This code is a complete clean re-write of the stress tool by
 * Colin Ian King <colin.king@canonical.com> and attempts to be
 * backwardly compatible with the stress tool by Amos Waterland
 * <apw@rossby.metr.ou.edu> but has more stress tests and more
 * functionality.
 *
 */
#include "stress-ng.h"

#if defined(HAVE_LIB_PTHREAD)

#define MATRIX_SIZE_MAX_SHIFT   (14)
#define MATRIX_SIZE_MIN_SHIFT   (10)
#define MATRIX_SIZE     (1 << MATRIX_SIZE_MAX_SHIFT)
#define MEM_SIZE        (MATRIX_SIZE * MATRIX_SIZE)
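/*
 * MEM_SIZE is (1 << 14) * (1 << 14) bytes = 256 MiB; each worker thread
 * sweeps its working set from 1 << (2 * MATRIX_SIZE_MIN_SHIFT) = 1 MiB
 * up to this maximum.
 */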

typedef void (*memthrash_func_t)(const args_t *args, size_t mem_size);

typedef struct {
    const char      *name;  /* human readable form of stressor */
    memthrash_func_t    func;   /* the method function */
} stress_memthrash_method_info_t;

static const stress_memthrash_method_info_t memthrash_methods[];
static void *mem;
static volatile bool thread_terminate;
static sigset_t set;

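/*
 * MEM_LOCK(ptr) atomically increments the location at ptr, either via the
 * GCC/clang __atomic built-in or an x86 lock-prefixed add, so the "lock"
 * method exercises locked read-modify-write memory traffic.
 */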
#if (((defined(__GNUC__) || defined(__clang__)) && defined(STRESS_X86)) || \
    (defined(__GNUC__) && NEED_GNUC(4,7,0) && defined(STRESS_ARM)))
#if defined(__GNUC__) && NEED_GNUC(4,7,0)
#define MEM_LOCK(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST);
#else
#define MEM_LOCK(ptr)   asm volatile("lock addl %1,%0" : "+m" (*ptr) : "ir" (1));
#endif
#endif

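/*
 *  stress_memthrash_random_chunk()
 *  memset randomly chosen chunks of chunk_size bytes at random offsets,
 *  repeated for a random (mwc16) number of iterations
 */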
static inline HOT OPTIMIZE3 void stress_memthrash_random_chunk(const size_t chunk_size, size_t mem_size)
{
    uint32_t i;
    const uint32_t max = mwc16();
    size_t chunks = mem_size / chunk_size;

    if (chunks < 1)
        chunks = 1;

    for (i = 0; !thread_terminate && (i < max); i++) {
        const size_t chunk = mwc32() % chunks;
        const size_t offset = chunk * chunk_size;
#if defined(__GNUC__)
        (void)__builtin_memset((void *)mem + offset, mwc8(), chunk_size);
#else
        (void)memset((void *)mem + offset, mwc8(), chunk_size);
#endif
    }
}

static void HOT OPTIMIZE3 stress_memthrash_random_chunkpage(const args_t *args, size_t mem_size)
{
    stress_memthrash_random_chunk(args->page_size, mem_size);
}

static void HOT OPTIMIZE3 stress_memthrash_random_chunk256(const args_t *args, size_t mem_size)
{
    (void)args;

    stress_memthrash_random_chunk(256, mem_size);
}

static void HOT OPTIMIZE3 stress_memthrash_random_chunk64(const args_t *args, size_t mem_size)
{
    (void)args;

    stress_memthrash_random_chunk(64, mem_size);
}

static void HOT OPTIMIZE3 stress_memthrash_random_chunk8(const args_t *args, size_t mem_size)
{
    (void)args;

    stress_memthrash_random_chunk(8, mem_size);
}

static void HOT OPTIMIZE3 stress_memthrash_random_chunk1(const args_t *args, size_t mem_size)
{
    (void)args;

    stress_memthrash_random_chunk(1, mem_size);
}

static void stress_memthrash_memset(const args_t *args, size_t mem_size)
{
    (void)args;

#if defined(__GNUC__)
    (void)__builtin_memset((void *)mem, mwc8(), mem_size);
#else
    (void)memset((void *)mem, mwc8(), mem_size);
#endif
}

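/*
 *  stress_memthrash_flip_mem()
 *  invert every 64-bit word in the mapping
 */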
static void HOT OPTIMIZE3 stress_memthrash_flip_mem(const args_t *args, size_t mem_size)
{
    (void)args;

    volatile uint64_t *ptr = (volatile uint64_t *)mem;
    const uint64_t *end = (uint64_t *)(mem + mem_size);

    while (LIKELY(ptr < end)) {
        *ptr = *ptr ^ ~0ULL;
        ptr++;
    }
}

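/*
 *  stress_memthrash_matrix()
 *  swap bytes at transposed positions (i,j) and (j,i) of a
 *  MATRIX_SIZE x MATRIX_SIZE byte matrix, forcing strided accesses
 */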
static void HOT OPTIMIZE3 stress_memthrash_matrix(const args_t *args, size_t mem_size)
{
    (void)args;
    (void)mem_size;

    size_t i, j;
    volatile uint8_t *vmem = mem;

    for (i = 0; !thread_terminate && (i < MATRIX_SIZE); i+= ((mwc8() & 0xf) + 1)) {
        for (j = 0; j < MATRIX_SIZE; j+= 16) {
            size_t i1 = (i * MATRIX_SIZE) + j;
            size_t i2 = (j * MATRIX_SIZE) + i;
            uint8_t tmp;

            tmp = vmem[i1];
            vmem[i1] = vmem[i2];
            vmem[i2] = tmp;
        }
    }
}

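/*
 *  stress_memthrash_prefetch()
 *  prefetch a random location for writing, then store to it
 */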
static void HOT OPTIMIZE3 stress_memthrash_prefetch(const args_t *args, size_t mem_size)
{
    uint32_t i;
    const uint32_t max = mwc16();

    (void)args;

    for (i = 0; !thread_terminate && (i < max); i++) {
        size_t offset = mwc32() % mem_size;
        uint8_t *const ptr = mem + offset;
        volatile uint8_t *const vptr = ptr;

        __builtin_prefetch(ptr, 1, 1);
        //(void)*vptr;
        *vptr = i & 0xff;
    }
}

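/*
 *  stress_memthrash_flush()
 *  store to a random location, then flush its cache line
 */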
static void HOT OPTIMIZE3 stress_memthrash_flush(const args_t *args, size_t mem_size)
{
    uint32_t i;
    const uint32_t max = mwc16();

    (void)args;

    for (i = 0; !thread_terminate && (i < max); i++) {
        size_t offset = mwc32() % mem_size;
        uint8_t *const ptr = mem + offset;
        volatile uint8_t *const vptr = ptr;

        *vptr = i & 0xff;
        clflush(ptr);
    }
}

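/*
 *  stress_memthrash_mfence()
 *  store to a random location, then issue a full memory fence
 */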
static void HOT OPTIMIZE3 stress_memthrash_mfence(const args_t *args, size_t mem_size)
{
    uint32_t i;
    const uint32_t max = mwc16();

    (void)args;

    for (i = 0; !thread_terminate && (i < max); i++) {
        size_t offset = mwc32() % mem_size;
        volatile uint8_t *ptr = mem + offset;

        *ptr = i & 0xff;
        mfence();
    }
}

#if defined(MEM_LOCK)
static void HOT OPTIMIZE3 stress_memthrash_lock(const args_t *args, size_t mem_size)
{
    uint32_t i;

    (void)args;

    for (i = 0; !thread_terminate && (i < 64); i++) {
        size_t offset = mwc32() % mem_size;
        volatile uint8_t *ptr = mem + offset;

        MEM_LOCK(ptr);
    }
}
#endif

static void HOT OPTIMIZE3 stress_memthrash_spinread(const args_t *args, size_t mem_size)
{
    uint32_t i;
    const size_t offset = mwc32() % mem_size;
    volatile uint32_t *ptr = (uint32_t *)(mem + offset);

    (void)args;

    for (i = 0; !thread_terminate && (i < 65536); i++) {
        (void)*ptr;
        (void)*ptr;
        (void)*ptr;
        (void)*ptr;

        (void)*ptr;
        (void)*ptr;
        (void)*ptr;
        (void)*ptr;
    }
}

static void HOT OPTIMIZE3 stress_memthrash_spinwrite(const args_t *args, size_t mem_size)
{
    uint32_t i;
    const size_t offset = mwc32() % mem_size;
    volatile uint32_t *ptr = (uint32_t *)(mem + offset);

    (void)args;

    for (i = 0; !thread_terminate && (i < 65536); i++) {
        *ptr = i;
        *ptr = i;
        *ptr = i;
        *ptr = i;

        *ptr = i;
        *ptr = i;
        *ptr = i;
        *ptr = i;
    }
}


static void stress_memthrash_all(const args_t *args, size_t mem_size);
static void stress_memthrash_random(const args_t *args, size_t mem_size);

static const stress_memthrash_method_info_t memthrash_methods[] = {
    { "all",    stress_memthrash_all },     /* MUST always be first! */

    { "chunk1", stress_memthrash_random_chunk1 },
    { "chunk8", stress_memthrash_random_chunk8 },
    { "chunk64",    stress_memthrash_random_chunk64 },
    { "chunk256",   stress_memthrash_random_chunk256 },
    { "chunkpage",  stress_memthrash_random_chunkpage },
    { "flip",   stress_memthrash_flip_mem },
    { "flush",  stress_memthrash_flush },
#if defined(MEM_LOCK)
    { "lock",   stress_memthrash_lock },
#endif
    { "matrix", stress_memthrash_matrix },
    { "memset", stress_memthrash_memset },
    { "mfence", stress_memthrash_mfence },
    { "prefetch",   stress_memthrash_prefetch },
    { "random", stress_memthrash_random },
    { "spinread",   stress_memthrash_spinread },
    { "spinwrite",  stress_memthrash_spinwrite }
};

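/*
 *  stress_memthrash_all()
 *  cycle through the method table, running the next method for ~10ms
 *  per call (index 0, "all" itself, is skipped)
 */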
static void stress_memthrash_all(const args_t *args, size_t mem_size)
{
    static size_t i = 1;
    const double t = time_now();

    do {
        memthrash_methods[i].func(args, mem_size);
    } while (!thread_terminate && (time_now() - t < 0.01));

    i++;
    if (UNLIKELY(i >= SIZEOF_ARRAY(memthrash_methods)))
        i = 1;
}

static void stress_memthrash_random(const args_t *args, size_t mem_size)
{
    /* loop until we find a good candidate */
    for (;;) {
        size_t i = mwc8() % SIZEOF_ARRAY(memthrash_methods);
        const memthrash_func_t func = (memthrash_func_t)memthrash_methods[i].func;

        /* Don't run stress_memthrash_random/all to avoid recursion */
        if ((func != stress_memthrash_random) &&
            (func != stress_memthrash_all)) {
            func(args, mem_size);
            return;
        }
    }
}

/*
 *  stress_set_memthrash_method()
 *  set the default memthrash method
 */
int stress_set_memthrash_method(const char *name)
{
    size_t i;

    for (i = 0; i < SIZEOF_ARRAY(memthrash_methods); i++) {
        const stress_memthrash_method_info_t *info = &memthrash_methods[i];
        if (!strcmp(memthrash_methods[i].name, name)) {
            set_setting("memthrash-method", TYPE_ID_UINTPTR_T, &info);
            return 0;
        }
    }

    (void)fprintf(stderr, "memthrash-method must be one of:");
    for (i = 0; i < SIZEOF_ARRAY(memthrash_methods); i++) {
        (void)fprintf(stderr, " %s", memthrash_methods[i].name);
    }
    (void)fprintf(stderr, "\n");

    return -1;
}


/*
 *  stress_memthrash_func()
 *  pthread that repeatedly exercises the memory thrashing method
 */
static void *stress_memthrash_func(void *arg)
{
    uint8_t stack[SIGSTKSZ + STACK_ALIGNMENT];
    static void *nowt = NULL;
    const pthread_args_t *parg = (pthread_args_t *)arg;
    const args_t *args = parg->args;
    const memthrash_func_t func = (memthrash_func_t)parg->data;

    /*
     *  Block all signals, let controlling thread
     *  handle these
     */
    (void)sigprocmask(SIG_BLOCK, &set, NULL);

    /*
     *  According to POSIX.1 a thread should have
     *  a distinct alternative signal stack.
     *  However, we block signals in this thread
     *  so this is probably just totally unnecessary.
     */
    (void)memset(stack, 0, sizeof(stack));
    if (stress_sigaltstack(stack, SIGSTKSZ) < 0)
        goto die;

    while (!thread_terminate && keep_stressing()) {
        size_t j;

        for (j = MATRIX_SIZE_MIN_SHIFT; j <= MATRIX_SIZE_MAX_SHIFT &&
             keep_stressing(); j++) {
            size_t mem_size = 1 << (2 * j);

            size_t i;
            for (i = 0; i < SIZEOF_ARRAY(memthrash_methods); i++)
                if (func == memthrash_methods[i].func)
                    break;
            func(args, mem_size);
            inc_counter(args);
        }
    }

    /* Wake parent up, all done! */
    (void)kill(args->pid, SIGALRM);
die:
    return &nowt;
}

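/*
 *  stress_memthrash_max()
 *  threads per stressor instance: enough so that instances * threads
 *  covers all configured CPUs (rounded up), minimum of 1
 */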
static inline uint32_t stress_memthrash_max(
    const uint32_t instances,
    const uint32_t total_cpus)
{
    if ((instances >= total_cpus) || (instances == 0)) {
        return 1;
    } else {
        uint32_t max = total_cpus / instances;
        return ((total_cpus % instances) == 0) ? max : max + 1;
    }
}

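/*
 *  stress_memthash_optimal()
 *  largest instance count <= instances that divides total_cpus evenly
 */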
static inline uint32_t stress_memthash_optimal(
    const uint32_t instances,
    const uint32_t total_cpus)
{
    uint32_t n = instances;

    while (n > 1) {
        if (total_cpus % n == 0)
            return n;
        n--;
    }
    return 1;
}

static inline char *plural(uint32_t n)
{
    return n > 1 ? "s" : "";
}


/*
 *  stress_memthrash()
 *  stress by creating pthreads
 */
static int stress_memthrash(const args_t *args)
{
    const stress_memthrash_method_info_t *memthrash_method = &memthrash_methods[0];
    const uint32_t total_cpus = stress_get_processors_configured();
    const uint32_t max_threads = stress_memthrash_max(args->num_instances, total_cpus);
    pthread_t pthreads[max_threads];
    int ret[max_threads];
    pthread_args_t pargs;
    memthrash_func_t func;
    pid_t pid;

    (void)get_setting("memthrash-method", &memthrash_method);
    func = memthrash_method->func;

    pr_dbg("%s: using method '%s'\n", args->name, memthrash_method->name);
    if (args->instance == 0) {
        pr_inf("%s: starting %" PRIu32 " thread%s on each of the %"
            PRIu32 " stressors on a %" PRIu32 " CPU system\n",
            args->name, max_threads, plural(max_threads),
            args->num_instances, total_cpus);
        if (max_threads * args->num_instances > total_cpus) {
            pr_inf("%s: this is not an optimal choice of stressors, "
                "try %" PRIu32 " instead\n",
            args->name,
            stress_memthash_optimal(args->num_instances, total_cpus));
        }
    }

    pargs.args = args;
    pargs.data = func;

    (void)memset(pthreads, 0, sizeof(pthreads));
    (void)memset(ret, 0, sizeof(ret));
    (void)sigfillset(&set);

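    /*
     * Run the threads inside a forked child so that, if the OOM killer
     * takes the child out, the parent can log it and fork a new one.
     */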
again:
    if (!g_keep_stressing_flag)
        return EXIT_SUCCESS;
    pid = fork();
    if (pid < 0) {
        if ((errno == EAGAIN) || (errno == ENOMEM))
            goto again;
        pr_err("%s: fork failed: errno=%d: (%s)\n",
            args->name, errno, strerror(errno));
    } else if (pid > 0) {
        int status, waitret;

        /* Parent, wait for child */
        (void)setpgid(pid, g_pgrp);
        waitret = waitpid(pid, &status, 0);
        if (waitret < 0) {
            if (errno != EINTR)
                pr_dbg("%s: waitpid(): errno=%d (%s)\n",
                    args->name, errno, strerror(errno));
            (void)kill(pid, SIGTERM);
            (void)kill(pid, SIGKILL);
            (void)waitpid(pid, &status, 0);
        } else if (WIFSIGNALED(status)) {
            pr_dbg("%s: child died: %s (instance %d)\n",
                args->name, stress_strsignal(WTERMSIG(status)),
                args->instance);
            /* If we got killed by OOM killer, re-start */
            if (WTERMSIG(status) == SIGKILL) {
                log_system_mem_info();
                pr_dbg("%s: assuming killed by OOM killer, "
                    "restarting again (instance %d)\n",
                    args->name, args->instance);
                goto again;
            }
        }
    } else if (pid == 0) {
        uint32_t i;

        /* Make sure this is killable by OOM killer */
        set_oom_adjustment(args->name, true);

        int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(MAP_POPULATE)
        flags |= MAP_POPULATE;
#endif

mmap_retry:
        mem = mmap(NULL, MEM_SIZE, PROT_READ | PROT_WRITE, flags, -1, 0);
        if (mem == MAP_FAILED) {
#if defined(MAP_POPULATE)
            flags &= ~MAP_POPULATE; /* Less aggressive, more OOMable */
#endif
            if (!g_keep_stressing_flag) {
                pr_dbg("%s: mmap failed: %d %s\n",
                    args->name, errno, strerror(errno));
                return EXIT_NO_RESOURCE;
            }
            (void)shim_usleep(100000);
            if (!g_keep_stressing_flag)
                goto reap_mem;
            goto mmap_retry;
        }

        for (i = 0; i < max_threads; i++) {
            ret[i] = pthread_create(&pthreads[i], NULL,
                stress_memthrash_func, (void *)&pargs);
            if (ret[i]) {
                /* Just give up and go to next thread */
                if (ret[i] == EAGAIN)
                    continue;
                /* Something really unexpected */
                pr_fail_errno("pthread create", ret[i]);
                goto reap;
            }
            if (!g_keep_stressing_flag)
                goto reap;
        }
        /* Wait for SIGALRM or SIGINT/SIGHUP etc */
        (void)pause();

reap:
        thread_terminate = true;
        for (i = 0; i < max_threads; i++) {
            if (!ret[i]) {
                ret[i] = pthread_join(pthreads[i], NULL);
                if (ret[i])
                    pr_fail_errno("pthread join", ret[i]);
            }
        }
reap_mem:
        (void)munmap(mem, MEM_SIZE);
    }
    return EXIT_SUCCESS;
}

stressor_info_t stress_memthrash_info = {
    .stressor = stress_memthrash,
    .class = CLASS_MEMORY
};
#else

int stress_set_memthrash_method(const char *name)
{
    (void)name;

    (void)pr_inf("warning: --memthrash-method not available on this system\n");
    return 0;
}

stressor_info_t stress_memthrash_info = {
    .stressor = stress_not_implemented,
    .class = CLASS_MEMORY
};
#endif