"Fossies" - the Fresh Open Source Software Archive

Member "stress-ng-0.09.56/stress-vm.c" (15 Mar 2019, 49429 Bytes) of package /linux/privat/stress-ng-0.09.56.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and a code folding option. Alternatively you can view or download the uninterpreted source code file here. For more information about "stress-vm.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 0.09.49_vs_0.09.50.

    1 /*
    2  * Copyright (C) 2013-2019 Canonical, Ltd.
    3  *
    4  * This program is free software; you can redistribute it and/or
    5  * modify it under the terms of the GNU General Public License
    6  * as published by the Free Software Foundation; either version 2
    7  * of the License, or (at your option) any later version.
    8  *
    9  * This program is distributed in the hope that it will be useful,
   10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   12  * GNU General Public License for more details.
   13  *
   14  * You should have received a copy of the GNU General Public License
   15  * along with this program; if not, write to the Free Software
   16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
   17  *
   18  * This code is a complete clean re-write of the stress tool by
   19  * Colin Ian King <colin.king@canonical.com> and attempts to be
   20  * backwardly compatible with the stress tool by Amos Waterland
   21  * <apw@rossby.metr.ou.edu> but has more stress tests and more
   22  * functionality.
   23  *
   24  */
   25 #include "stress-ng.h"
   26 
   27 /*
   28  *  For testing, set this to 1 to simulate random memory errors
   29  */
   30 #define INJECT_BIT_ERRORS   (0)
   31 
   32 #define VM_BOGO_SHIFT       (12)
   33 #define VM_ROWHAMMER_LOOPS  (1000000)
   34 
   35 #define NO_MEM_RETRIES_MAX  (100)
   36 
/*
 *  the VM stress test has different methods of vm stressor
 */
/*
 *  Common prototype for every vm stressor: exercise the buffer buf of
 *  sz bytes, honouring the bogo-op budget max_ops, and return the
 *  number of bit errors detected.
 */
typedef size_t (*stress_vm_func)(uint8_t *buf, const size_t sz,
        const args_t *args, const uint64_t max_ops);

/* name -> implementation entry for the vm method lookup table */
typedef struct {
    const char *name;           /* method name as given on the command line */
    const stress_vm_func func;  /* stressor implementation */
} stress_vm_method_info_t;
   47 
/* name -> madvise(2) advice value for the --vm-madvise option */
typedef struct {
    const char *name;   /* advice name as given on the command line */
    const int advice;   /* corresponding MADV_* value */
} vm_madvise_info_t;

/* forward declaration; the table itself is defined later in this file */
static const stress_vm_method_info_t vm_methods[];
   54 
   55 static const vm_madvise_info_t vm_madvise_info[] = {
   56 #if defined(HAVE_MADVISE)
   57 #if defined(MADV_DONTNEED)
   58     { "dontneed",   MADV_DONTNEED},
   59 #endif
   60 #if defined(MADV_HUGEPAGE)
   61     { "hugepage",   MADV_HUGEPAGE },
   62 #endif
   63 #if defined(MADV_MERGEABLE)
   64     { "mergeable",  MADV_MERGEABLE },
   65 #endif
   66 #if defined(MADV_NOHUGEPAGE)
   67     { "nohugepage", MADV_NOHUGEPAGE },
   68 #endif
   69 #if defined(MADV_NORMAL)
   70     { "normal", MADV_NORMAL },
   71 #endif
   72 #if defined(MADV_RANDOM)
   73     { "random", MADV_RANDOM },
   74 #endif
   75 #if defined(MADV_SEQUENTIAL)
   76     { "sequential", MADV_SEQUENTIAL },
   77 #endif
   78 #if defined(MADV_UNMERGEABLE)
   79     { "unmergeable",MADV_UNMERGEABLE },
   80 #endif
   81 #if defined(MADV_WILLNEED)
   82     { "willneed",   MADV_WILLNEED},
   83 #endif
   84         { NULL,         0 },
   85 #else
   86     /* No MADVISE, default to normal, ignored */
   87     { "normal", 0 },
   88 #endif
   89 };
   90 
/*
 *  keep_stressing_vm()
 *  returns true if we can keep on running a stressor
 */
static bool HOT OPTIMIZE3 keep_stressing_vm(const args_t *args)
{
    /* the counter is bumped once per low-level operation, so it is
     * scaled down by 2^VM_BOGO_SHIFT before comparing against the
     * user-visible bogo-op budget in args->max_ops (0 = unlimited) */
    return (LIKELY(g_keep_stressing_flag) &&
            LIKELY(!args->max_ops || ((get_counter(args) >> VM_BOGO_SHIFT) < args->max_ops)));
}
  100 
  101 int stress_set_vm_hang(const char *opt)
  102 {
  103     uint64_t vm_hang;
  104 
  105     vm_hang = get_uint64_time(opt);
  106     check_range("vm-hang", vm_hang,
  107         MIN_VM_HANG, MAX_VM_HANG);
  108     return set_setting("vm-hang", TYPE_ID_UINT64, &vm_hang);
  109 }
  110 
  111 int stress_set_vm_bytes(const char *opt)
  112 {
  113     size_t vm_bytes;
  114 
  115     vm_bytes = (size_t)get_uint64_byte_memory(opt, 1);
  116     check_range_bytes("vm-bytes", vm_bytes,
  117         MIN_VM_BYTES, MAX_MEM_LIMIT);
  118     return set_setting("vm-bytes", TYPE_ID_SIZE_T, &vm_bytes);
  119 }
  120 
#if defined(MAP_LOCKED) || defined(MAP_POPULATE)
/*
 *  stress_set_vm_flags()
 *  OR the given mmap() flag into the accumulated "vm-flags" setting
 */
int stress_set_vm_flags(const int flag)
{
    int vm_flags = 0;

    /* start from any flags set by earlier calls; a failed lookup
     * just leaves vm_flags at the default of 0 */
    (void)get_setting("vm-flags", &vm_flags);
    vm_flags |= flag;
    return set_setting("vm-flags", TYPE_ID_INT, &vm_flags);
}
#endif
  131 
  132 int stress_set_vm_madvise(const char *opt)
  133 {
  134     const vm_madvise_info_t *info;
  135 
  136     for (info = vm_madvise_info; info->name; info++) {
  137         if (!strcmp(opt, info->name)) {
  138             set_setting("vm-madvise", TYPE_ID_INT, &info->advice);
  139             return 0;
  140         }
  141     }
  142     (void)fprintf(stderr, "invalid vm-madvise advice '%s', allowed advice options are:", opt);
  143     for (info = vm_madvise_info; info->name; info++) {
  144         (void)fprintf(stderr, " %s", info->name);
  145         }
  146     (void)fprintf(stderr, "\n");
  147     return -1;
  148 }
  149 
  150 #define SET_AND_TEST(ptr, val, bit_errors)  \
  151 {                       \
  152     *ptr = val;             \
  153     bit_errors += (*ptr != val);        \
  154 }
  155 
  156 /*
  157  *  This compiles down to a load, ror, store in x86
  158  */
  159 #define ROR64(val)              \
  160 {                       \
  161     uint64_t tmp = val;         \
  162     const uint64_t bit0 = (tmp & 1) << 63;  \
  163     tmp >>= 1;              \
  164     tmp |= bit0;                \
  165     val = tmp;              \
  166 }
  167 
  168 #define ROR8(val)               \
  169 {                       \
  170     uint8_t tmp = val;          \
  171     const uint8_t bit0 = (tmp & 1) << 7;    \
  172     tmp >>= 1;              \
  173     tmp |= bit0;                \
  174     val = tmp;              \
  175 }
  176 
  177 #define INC_LO_NYBBLE(val)          \
  178 {                       \
  179     uint8_t lo = (val);         \
  180     lo += 1;                \
  181     lo &= 0xf;              \
  182     (val) = ((val) & 0xf0) | lo;        \
  183 }
  184 
  185 #define INC_HI_NYBBLE(val)          \
  186 {                       \
  187     uint8_t hi = (val);         \
  188     hi += 0xf0;             \
  189     hi &= 0xf0;             \
  190     (val) = ((val) & 0x0f) | hi;        \
  191 }
  192 
  193 #define UNSIGNED_ABS(a, b)          \
  194     ((a) > (b)) ? (a) - (b) : (b) - (a)
  195 
#if INJECT_BIT_ERRORS
/*
 *  inject_random_bit_errors()
 *  for testing purposes, we can insert various faults
 */
static void inject_random_bit_errors(uint8_t *buf, const size_t sz)
{
    int i;

    for (i = 0; i < 8; i++) {
        /* 1 bit errors: flip, set and clear a single bit at random offsets */
        buf[random() % sz] ^= (1 << i);
        buf[random() % sz] |= (1 << i);
        buf[random() % sz] &= ~(1 << i);
    }

    for (i = 0; i < 7; i++) {
        /* 2 bit errors: same, with two adjacent bits */
        buf[random() % sz] ^= (3 << i);
        buf[random() % sz] |= (3 << i);
        buf[random() % sz] &= ~(3 << i);
    }

    for (i = 0; i < 6; i++) {
        /* 3 bit errors: same, with three adjacent bits */
        buf[random() % sz] ^= (7 << i);
        buf[random() % sz] |= (7 << i);
        buf[random() % sz] &= ~(7 << i);
    }
}
#else
/* No-op in normal builds: no deliberate corruption is injected */
static inline void inject_random_bit_errors(uint8_t *buf, const size_t sz)
{
    (void)buf;
    (void)sz;
}
#endif
  234 
  235 
/*
 *  stress_vm_check()
 *  report back on bit errors found
 */
static void stress_vm_check(const char *name, const size_t bit_errors)
{
    /* only report when errors were found and verification is enabled */
    if (bit_errors && (g_opt_flags & OPT_FLAGS_VERIFY))
#if INJECT_BIT_ERRORS
        /* errors are self-inflicted in this test build, log as debug */
        pr_dbg("%s: detected %zu memory error%s\n",
            name, bit_errors, bit_errors == 1 ? "" : "s");
#else
        pr_fail("%s: detected %zu memory error%s\n",
            name, bit_errors, bit_errors == 1 ? "" : "s");
#endif
}
  251 
  252 /*
  253  *  stress_vm_count_bits()
  254  *  count number of bits set (K and R)
  255  */
  256 static inline size_t stress_vm_count_bits(uint64_t v)
  257 {
  258     size_t n;
  259 
  260     for (n = 0; v; n++)
  261         v &= v - 1;
  262 
  263     return n;
  264 }
  265 
/*
 *  stress_vm_moving_inversion()
 *  work sequentially through memory setting 8 bytes at a time
 *  with a random value, then check if it is correct, invert it and
 *  then check if that is correct.
 */
static size_t TARGET_CLONES stress_vm_moving_inversion(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    uint64_t w, z, *buf_end, c = get_counter(args);
    volatile uint64_t *ptr;
    size_t bit_errors;

    buf_end = (uint64_t *)(buf + sz);

    /* capture a fresh seed pair so the PRNG stream can be replayed
     * identically for each verify pass below */
    mwc_reseed();
    w = mwc64();
    z = mwc64();

    /* pass 1: fill forwards with the pseudo-random stream */
    mwc_seed(w, z);
    for (ptr = (uint64_t *)buf; ptr < buf_end; ) {
        *(ptr++) = mwc64();
    }

    /* pass 2: replay the stream, verify each word, store its inversion */
    mwc_seed(w, z);
    for (bit_errors = 0, ptr = (uint64_t *)buf; ptr < buf_end; ) {
        uint64_t val = mwc64();

        if (UNLIKELY(*ptr != val))
            bit_errors++;
        *(ptr++) = ~val;
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }

    (void)mincore_touch_pages(buf, sz);

    inject_random_bit_errors(buf, sz);

    /* pass 3: verify the inverted data.
     * NOTE(review): bit_errors is reset here, discarding any errors
     * counted during pass 2 — confirm this is intended */
    mwc_seed(w, z);
    for (bit_errors = 0, ptr = (uint64_t *)buf; ptr < buf_end; ) {
        uint64_t val = mwc64();
        if (UNLIKELY(*(ptr++) != ~val))
            bit_errors++;
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }

    /* pass 4: refill backwards with the same stream */
    mwc_seed(w, z);
    for (ptr = (uint64_t *)buf_end; ptr > (uint64_t *)buf; ) {
        *--ptr = mwc64();
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
    }

    inject_random_bit_errors(buf, sz);

    /* pass 5: verify backwards, storing the inversion again */
    (void)mincore_touch_pages(buf, sz);
    mwc_seed(w, z);
    for (ptr = (uint64_t *)buf_end; ptr > (uint64_t *)buf; ) {
        uint64_t val = mwc64();
        if (UNLIKELY(*--ptr != val))
            bit_errors++;
        *ptr = ~val;
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }
    /* pass 6: final backwards verify of the inverted data.
     * NOTE(review): unlike the other passes this one does not bump c
     * nor honour max_ops — confirm this is intended */
    mwc_seed(w, z);
    for (ptr = (uint64_t *)buf_end; ptr > (uint64_t *)buf; ) {
        uint64_t val = mwc64();
        if (UNLIKELY(*--ptr != ~val))
            bit_errors++;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }

abort:
    stress_vm_check("moving inversion", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
  361 
/*
 *  stress_vm_modulo_x()
 *  set every 23rd byte to a random pattern and then set
 *  all the other bytes to the complement of this. Check
 *  that the random patterns are still set.
 */
static size_t TARGET_CLONES stress_vm_modulo_x(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    uint32_t i, j;
    const uint32_t stride = 23; /* Small prime to hit cache */
    uint8_t pattern, compliment;    /* sic: the complement of pattern */
    volatile uint8_t *ptr;
    uint8_t *buf_end = buf + sz;
    size_t bit_errors = 0;
    uint64_t c = get_counter(args);

    mwc_reseed();
    pattern = mwc8();
    compliment = ~pattern;

    /* repeat the test with the pattern at each offset within a stride */
    for (i = 0; i < stride; i++) {
        /* write the pattern at offsets i, i+stride, i+2*stride, ... */
        for (ptr = buf + i; ptr < buf_end; ptr += stride) {
            *ptr = pattern;
            if (UNLIKELY(!g_keep_stressing_flag))
                goto abort;
        }
        /* overwrite the other bytes of the group with the complement,
         * skipping position i (the pattern byte).
         * NOTE(review): the inner loops already advance ptr by up to a
         * full stride, and the for-loop increment adds another stride
         * on top, so complement writes land on alternating stride-sized
         * groups — confirm whether the `ptr += stride` increment is
         * intended */
        for (ptr = buf; ptr < buf_end; ptr += stride) {
            for (j = 0; j < i && ptr < buf_end; j++) {
                *ptr++ = compliment;
                c++;
                if (UNLIKELY(max_ops && c >= max_ops))
                    goto abort;
            }
            if (!g_keep_stressing_flag)
                goto abort;
            ptr++;  /* skip over the pattern byte */
            for (j = i + 1; j < stride && ptr < buf_end; j++) {
                *ptr++ = compliment;
                c++;
                if (UNLIKELY(max_ops && c >= max_ops))
                    goto abort;
            }
            if (UNLIKELY(!g_keep_stressing_flag))
                goto abort;
        }
        inject_random_bit_errors(buf, sz);

        /* the pattern bytes must have survived the complement writes */
        for (ptr = buf + i; ptr < buf_end; ptr += stride) {
            if (UNLIKELY(*ptr != pattern))
                bit_errors++;
            /* NOTE(review): this early return bypasses stress_vm_check()
             * and set_counter() below — confirm intended */
            if (UNLIKELY(!g_keep_stressing_flag))
                return bit_errors;
        }
    }

abort:
    stress_vm_check("modulo X", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
  427 
  428 /*
  429  *  stress_vm_walking_one_data()
  430  *  for each byte, walk through each data line setting them to high
  431  *  setting each bit to see if none of the lines are stuck
  432  */
  433 static size_t TARGET_CLONES stress_vm_walking_one_data(
  434     uint8_t *buf,
  435     const size_t sz,
  436     const args_t *args,
  437     const uint64_t max_ops)
  438 {
  439     size_t bit_errors = 0;
  440     volatile uint8_t *ptr;
  441     uint8_t *buf_end = buf + sz;
  442     uint64_t c = get_counter(args);
  443 
  444     for (ptr = buf; ptr < buf_end; ptr++) {
  445         SET_AND_TEST(ptr, 0x01, bit_errors);
  446         SET_AND_TEST(ptr, 0x02, bit_errors);
  447         SET_AND_TEST(ptr, 0x04, bit_errors);
  448         SET_AND_TEST(ptr, 0x08, bit_errors);
  449         SET_AND_TEST(ptr, 0x10, bit_errors);
  450         SET_AND_TEST(ptr, 0x20, bit_errors);
  451         SET_AND_TEST(ptr, 0x40, bit_errors);
  452         SET_AND_TEST(ptr, 0x80, bit_errors);
  453         c++;
  454         if (UNLIKELY(max_ops && c >= max_ops))
  455             break;
  456         if (UNLIKELY(!g_keep_stressing_flag))
  457             break;
  458     }
  459     stress_vm_check("walking one (data)", bit_errors);
  460     set_counter(args, c);
  461 
  462     return bit_errors;
  463 }
  464 
  465 /*
  466  *  stress_vm_walking_zero_data()
  467  *  for each byte, walk through each data line setting them to low
  468  *  setting each bit to see if none of the lines are stuck
  469  */
  470 static size_t TARGET_CLONES stress_vm_walking_zero_data(
  471     uint8_t *buf,
  472     const size_t sz,
  473     const args_t *args,
  474     const uint64_t max_ops)
  475 {
  476     size_t bit_errors = 0;
  477     volatile uint8_t *ptr;
  478     uint8_t *buf_end = buf + sz;
  479     uint64_t c = get_counter(args);
  480 
  481     for (ptr = buf; ptr < buf_end; ptr++) {
  482         SET_AND_TEST(ptr, 0xfe, bit_errors);
  483         SET_AND_TEST(ptr, 0xfd, bit_errors);
  484         SET_AND_TEST(ptr, 0xfb, bit_errors);
  485         SET_AND_TEST(ptr, 0xf7, bit_errors);
  486         SET_AND_TEST(ptr, 0xef, bit_errors);
  487         SET_AND_TEST(ptr, 0xdf, bit_errors);
  488         SET_AND_TEST(ptr, 0xbf, bit_errors);
  489         SET_AND_TEST(ptr, 0x7f, bit_errors);
  490         c++;
  491         if (UNLIKELY(max_ops && c >= max_ops))
  492             break;
  493         if (UNLIKELY(!g_keep_stressing_flag))
  494             break;
  495     }
  496     stress_vm_check("walking zero (data)", bit_errors);
  497     set_counter(args, c);
  498 
  499     return bit_errors;
  500 }
  501 
/*
 *  stress_vm_walking_one_addr()
 *  work through a range of addresses setting each address bit in
 *  the given memory mapped range to high to see if any address bits
 *  are stuck.
 */
static size_t TARGET_CLONES stress_vm_walking_one_addr(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    volatile uint8_t *ptr;
    uint8_t *buf_end = buf + sz;
    uint8_t d1 = 0, d2 = ~d1;   /* background byte and its inverse */
    size_t bit_errors = 0;
    size_t tests = 0;           /* number of aliased writes; counted but not reported */
    uint64_t c = get_counter(args);

    (void)memset(buf, d1, sz);
    /* probe every 256th byte to keep the walk cheap */
    for (ptr = buf; ptr < buf_end; ptr += 256) {
        uint16_t i;
        uint64_t mask;

        *ptr = d1;
        /* flip one address bit at a time and write d2 through the
         * aliased address; the probe byte must stay unchanged */
        for (mask = 1, i = 1; i < 64; i++) {
            uintptr_t uintptr = ((uintptr_t)ptr) ^ mask;
            uint8_t *addr = (uint8_t *)uintptr;
            /* NOTE(review): this continue also skips the mask <<= 1
             * below, so once an aliased address falls outside the
             * mapping the same mask is retried for all remaining
             * iterations — confirm the shift was not meant to be in
             * the for-loop increment */
            if ((addr < buf) || (addr >= buf_end) || (addr == ptr))
                continue;
            *addr = d2;
            tests++;
            if (UNLIKELY(*ptr != d1))
                bit_errors++;
            mask <<= 1;
        }
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            break;
        if (UNLIKELY(!g_keep_stressing_flag))
            break;
    }
    stress_vm_check("walking one (address)", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
  549 
/*
 *  stress_vm_walking_zero_addr()
 *  work through a range of addresses setting each address bit in
 *  the given memory mapped range to low to see if any address bits
 *  are stuck.
 */
static size_t TARGET_CLONES stress_vm_walking_zero_addr(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    volatile uint8_t *ptr;
    uint8_t *buf_end = buf + sz;
    uint8_t d1 = 0, d2 = ~d1;   /* background byte and its inverse */
    size_t bit_errors = 0;
    size_t tests = 0;           /* number of aliased writes; counted but not reported */
    uint64_t sz_mask;
    uint64_t c = get_counter(args);

    /* sz_mask = (smallest power of two >= sz) - 1; limits the
     * inverted-bit aliasing to the buffer's address span */
    for (sz_mask = 1; sz_mask < sz; sz_mask <<= 1)
        ;

    sz_mask--;

    (void)memset(buf, d1, sz);
    /* probe every 256th byte to keep the walk cheap */
    for (ptr = buf; ptr < buf_end; ptr += 256) {
        uint16_t i;
        uint64_t mask;

        *ptr = d1;
        /* clear one address bit at a time (all other in-range bits
         * flipped) and write d2 through the aliased address; the
         * probe byte must stay unchanged */
        for (mask = 1, i = 1; i < 64; i++) {
            uintptr_t uintptr = ((uintptr_t)ptr) ^ (~mask & sz_mask);
            uint8_t *addr = (uint8_t *)uintptr;
            /* NOTE(review): as in walking_one_addr, continue skips
             * the mask <<= 1 below, so an out-of-range alias stalls
             * the mask for the remaining iterations — confirm */
            if (addr < buf || addr >= buf_end || addr == ptr)
                continue;
            *addr = d2;
            tests++;
            if (UNLIKELY(*ptr != d1))
                bit_errors++;
            mask <<= 1;
        }
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            break;
        if (UNLIKELY(!g_keep_stressing_flag))
            break;
    }
    stress_vm_check("walking zero (address)", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
  603 
/*
 *  stress_vm_gray()
 *  fill all of memory with a gray code and check that
 *  all the bits are set correctly. gray codes just change
 *  one bit at a time.
 */
static size_t TARGET_CLONES stress_vm_gray(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    static uint8_t val;     /* starting phase; persists across calls, bumped at end */
    uint8_t v, *buf_end = buf + sz;
    volatile uint8_t *ptr;
    size_t bit_errors = 0;
    uint64_t c = get_counter(args);

    /* fill with the gray code of a counter starting at val */
    for (v = val, ptr = buf; ptr < buf_end; ptr++, v++) {
        /* NOTE(review): this early return skips stress_vm_check() and
         * set_counter() below, dropping the c increments made so far —
         * confirm intended */
        if (UNLIKELY(!g_keep_stressing_flag))
            return 0;
        *ptr = (v >> 1) ^ v;    /* binary -> gray code conversion */
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            break;
    }
    (void)mincore_touch_pages(buf, sz);
    inject_random_bit_errors(buf, sz);

    /* regenerate the same gray code sequence and verify */
    for (v = val, ptr = buf; ptr < buf_end; ptr++, v++) {
        if (UNLIKELY(!g_keep_stressing_flag))
            break;
        if (UNLIKELY(*ptr != ((v >> 1) ^ v)))
            bit_errors++;
    }
    val++;  /* different starting phase on the next invocation */

    stress_vm_check("gray code", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
  646 
  647 /*
  648  *  stress_vm_incdec()
  649  *  work through memory incrementing it and then decrementing
  650  *  it by a value that changes on each test iteration.
  651  *  Check that the memory has not changed by the inc + dec
  652  *  operations.
  653  */
  654 static size_t TARGET_CLONES stress_vm_incdec(
  655     uint8_t *buf,
  656     const size_t sz,
  657     const args_t *args,
  658     const uint64_t max_ops)
  659 {
  660     static uint8_t val = 0;
  661     uint8_t *buf_end = buf + sz;
  662     volatile uint8_t *ptr;
  663     size_t bit_errors = 0;
  664     uint64_t c = get_counter(args);
  665 
  666     val++;
  667     (void)memset(buf, 0x00, sz);
  668 
  669     for (ptr = buf; ptr < buf_end; ptr++) {
  670         *ptr += val;
  671         c++;
  672         if (UNLIKELY(max_ops && c >= max_ops))
  673             break;
  674     }
  675     (void)mincore_touch_pages(buf, sz);
  676     inject_random_bit_errors(buf, sz);
  677     for (ptr = buf; ptr < buf_end; ptr++) {
  678         *ptr -= val;
  679         c++;
  680         if (UNLIKELY(max_ops && c >= max_ops))
  681             break;
  682     }
  683 
  684     for (ptr = buf; ptr < buf_end; ptr++) {
  685         if (UNLIKELY(*ptr != 0))
  686             bit_errors++;
  687     }
  688 
  689     stress_vm_check("incdec code", bit_errors);
  690     set_counter(args, c);
  691 
  692     return bit_errors;
  693 }
  694 
/*
 *  stress_vm_prime_incdec()
 *  walk through memory in large prime steps incrementing
 *  bytes and then re-walk again decrementing; then sanity
 *  check.
 */
static size_t TARGET_CLONES stress_vm_prime_incdec(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    /* NOTE(review): unlike stress_vm_incdec, val is never incremented,
     * so it stays 0 and the inc/dec passes add/subtract zero — confirm
     * whether a val++ was intended here */
    static uint8_t val = 0;
    uint8_t *buf_end = buf + sz;
    volatile uint8_t *ptr = buf;
    size_t bit_errors = 0, i;
    const uint64_t prime = PRIME_64;
    uint64_t j, c = get_counter(args);

#if SIZE_MAX > UINT32_MAX
    /* Unlikely.. guard so j += prime cannot wrap before j % sz cycles */
    if (UNLIKELY(sz > (1ULL << 63)))
        return 0;
#endif

    (void)memset(buf, 0x00, sz);

    /* sequential pass: add val to every byte */
    for (i = 0; i < sz; i++) {
        ptr[i] += val;
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            break;
    }
    (void)mincore_touch_pages(buf, sz);
    inject_random_bit_errors(buf, sz);
    /*
     *  Step through memory in prime sized steps
     *  in a totally sub-optimal way to exercise
     *  memory and cache stalls
     */
    for (i = 0, j = prime; i < sz; i++, j += prime) {
        ptr[j % sz] -= val;
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            break;
    }

    /* every byte should be back to zero */
    for (ptr = buf; ptr < buf_end; ptr++) {
        if (UNLIKELY(*ptr != 0))
            bit_errors++;
    }

    stress_vm_check("prime-incdec", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
  752 
/*
 *  stress_vm_swap()
 *  forward swap and then reverse swap chunks of memory
 *  and see that nothing got corrupted.
 */
static size_t TARGET_CLONES stress_vm_swap(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    const size_t chunk_sz = 64, chunks = sz / chunk_sz;
    uint64_t w1, z1, c = get_counter(args);
    uint8_t *buf_end = buf + sz;
    uint8_t *ptr;
    size_t bit_errors = 0, i;
    size_t *swaps;      /* random partner chunk offset for each chunk */

    /* capture a seed pair so the fill pattern can be replayed for verify */
    mwc_reseed();
    z1 = mwc64();
    w1 = mwc64();

    if ((swaps = calloc(chunks, sizeof(*swaps))) == NULL) {
        pr_fail("stress-vm: calloc failed on vm_swap\n");
        return 0;
    }

    for (i = 0; i < chunks; i++) {
        swaps[i] = (mwc64() % chunks) * chunk_sz;
    }

    /* fill each chunk with its own random byte value */
    mwc_seed(w1, z1);
    for (ptr = buf; ptr < buf_end; ptr += chunk_sz) {
        uint8_t val = mwc8();
        (void)memset((void *)ptr, val, chunk_sz);
    }

    /* Forward swaps: swap chunk i with its random partner */
    for (i = 0, ptr = buf; ptr < buf_end; ptr += chunk_sz, i++) {
        size_t offset = swaps[i];

        volatile uint8_t *dst = buf + offset;
        volatile uint8_t *src = (volatile uint8_t *)ptr;
        volatile uint8_t *src_end = src + chunk_sz;

        while (src < src_end) {
            uint8_t tmp = *src;
            *src++ = *dst;
            *dst++ = tmp;
        }
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }
    /* Reverse swaps: same pairs in reverse order, undoing the forward
     * pass so each chunk should end up holding its original fill value.
     * NOTE(review): the final ptr -= chunk_sz steps the pointer below
     * buf before the ptr >= buf test terminates the loop — technically
     * out-of-bounds pointer arithmetic; confirm acceptable */
    for (i = chunks - 1, ptr = buf_end - chunk_sz; ptr >= buf; ptr -= chunk_sz, i--) {
        size_t offset = swaps[i];

        volatile uint8_t *dst = buf + offset;
        volatile uint8_t *src = (volatile uint8_t *)ptr;
        volatile uint8_t *src_end = src + chunk_sz;

        while (src < src_end) {
            uint8_t tmp = *src;
            *src++ = *dst;
            *dst++ = tmp;
        }
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }

    (void)mincore_touch_pages(buf, sz);
    inject_random_bit_errors(buf, sz);

    /* replay the fill stream and verify every byte of every chunk */
    mwc_seed(w1, z1);
    for (ptr = buf; ptr < buf_end; ptr += chunk_sz) {
        volatile uint8_t *p = (volatile uint8_t *)ptr;
        volatile uint8_t *p_end = (volatile uint8_t *)ptr + chunk_sz;
        uint8_t val = mwc8();

        while (p < p_end) {
            if (UNLIKELY(*p != val))
                bit_errors++;
            p++;
        }
        if (UNLIKELY(!g_keep_stressing_flag))
            break;
    }
abort:
    free(swaps);
    stress_vm_check("swap bytes", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
  853 
/*
 *  stress_vm_rand_set()
 *  fill 64 bit chunks of memory with a random pattern and
 *  and then sanity check they are all set correctly.
 */
static size_t TARGET_CLONES stress_vm_rand_set(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    volatile uint8_t *ptr;
    const size_t chunk_sz = sizeof(*ptr) * 8;   /* 8 bytes per chunk */
    uint8_t *buf_end = buf + sz;
    uint64_t w, z, c = get_counter(args);
    size_t bit_errors = 0;

    /* capture a seed pair so the byte stream can be replayed for verify */
    mwc_reseed();
    w = mwc64();
    z = mwc64();

    /* fill: each 8-byte chunk gets one random byte value, unrolled */
    mwc_seed(w, z);
    for (ptr = buf; ptr < buf_end; ptr += chunk_sz) {
        uint8_t val = mwc8();

        *(ptr + 0) = val;
        *(ptr + 1) = val;
        *(ptr + 2) = val;
        *(ptr + 3) = val;
        *(ptr + 4) = val;
        *(ptr + 5) = val;
        *(ptr + 6) = val;
        *(ptr + 7) = val;
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }

    (void)mincore_touch_pages(buf, sz);
    inject_random_bit_errors(buf, sz);

    /* replay the same byte stream and verify each chunk */
    mwc_seed(w, z);
    for (ptr = buf; ptr < buf_end; ptr += chunk_sz) {
        uint8_t val = mwc8();

        bit_errors += (*(ptr + 0) != val);
        bit_errors += (*(ptr + 1) != val);
        bit_errors += (*(ptr + 2) != val);
        bit_errors += (*(ptr + 3) != val);
        bit_errors += (*(ptr + 4) != val);
        bit_errors += (*(ptr + 5) != val);
        bit_errors += (*(ptr + 6) != val);
        bit_errors += (*(ptr + 7) != val);
        if (UNLIKELY(!g_keep_stressing_flag))
            break;
    }
abort:
    stress_vm_check("rand-set", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
  918 
/*
 *  stress_vm_ror()
 *  fill memory with a random pattern and then rotate
 *  right all the bits in an 8 byte (64 bit) chunk
 *  and then sanity check they are all shifted at the
 *  end.
 */
static size_t TARGET_CLONES stress_vm_ror(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    volatile uint8_t *ptr;
    uint8_t *buf_end = buf + sz;
    uint64_t w, z, c = get_counter(args);
    size_t bit_errors = 0;
    const size_t chunk_sz = sizeof(*ptr) * 8;   /* 8 bytes per chunk */

    /* capture a seed pair so the byte stream can be replayed for verify */
    mwc_reseed();
    w = mwc64();
    z = mwc64();

    /* fill: each 8-byte chunk gets one random byte value, unrolled */
    mwc_seed(w, z);
    for (ptr = buf; ptr < buf_end; ptr += chunk_sz) {
        uint8_t val = mwc8();

        *(ptr + 0) = val;
        *(ptr + 1) = val;
        *(ptr + 2) = val;
        *(ptr + 3) = val;
        *(ptr + 4) = val;
        *(ptr + 5) = val;
        *(ptr + 6) = val;
        *(ptr + 7) = val;
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }
    (void)mincore_touch_pages(buf, sz);

    /* rotate every byte.
     * NOTE(review): ROR64 applied to a uint8_t lvalue truncates the
     * rotated-in top bit on store, making this effectively a logical
     * shift right of each byte; the verify below applies the same
     * macro to a uint8_t so the check is self-consistent — confirm
     * whether ROR8 was intended */
    for (ptr = buf; ptr < buf_end; ptr += chunk_sz) {
        ROR64(*(ptr + 0));
        ROR64(*(ptr + 1));
        ROR64(*(ptr + 2));
        ROR64(*(ptr + 3));
        ROR64(*(ptr + 4));
        ROR64(*(ptr + 5));
        ROR64(*(ptr + 6));
        ROR64(*(ptr + 7));
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }
    (void)mincore_touch_pages(buf, sz);

    inject_random_bit_errors(buf, sz);

    /* replay the stream, apply the same rotation, and verify */
    mwc_seed(w, z);
    for (ptr = buf; ptr < buf_end; ptr += chunk_sz) {
        uint8_t val = mwc8();
        ROR64(val);

        bit_errors += (*(ptr + 0) != val);
        bit_errors += (*(ptr + 1) != val);
        bit_errors += (*(ptr + 2) != val);
        bit_errors += (*(ptr + 3) != val);
        bit_errors += (*(ptr + 4) != val);
        bit_errors += (*(ptr + 5) != val);
        bit_errors += (*(ptr + 6) != val);
        bit_errors += (*(ptr + 7) != val);
        if (UNLIKELY(!g_keep_stressing_flag))
            break;
    }
abort:
    stress_vm_check("ror", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
 1003 
/*
 *  stress_vm_flip()
 *  set all memory to a random pattern, then work through
 *  memory 8 times XORing a rotating two-bit mask; every bit
 *  position is flipped exactly twice across the 8 passes, so
 *  the buffer should end up back at the original pattern,
 *  which is then sanity checked.
 */
static size_t TARGET_CLONES stress_vm_flip(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    volatile uint8_t *ptr;
    uint8_t *buf_end = buf + sz, bit = 0x03;    /* two-bit rotating XOR mask */
    uint64_t w, z, c = get_counter(args);
    size_t bit_errors = 0, i;
    const size_t chunk_sz = sizeof(*ptr) * 8;   /* 8 bytes per loop pass */

    /* save the PRNG seed so the verify pass can replay the fill */
    mwc_reseed();
    w = mwc64();
    z = mwc64();

    mwc_seed(w, z);
    /* fill: each successive byte in a chunk is the previous byte's
     * value rotated right one bit */
    for (ptr = buf; ptr < buf_end; ptr += chunk_sz) {
        uint8_t val = mwc8();

        *(ptr + 0) = val;
        ROR8(val);
        *(ptr + 1) = val;
        ROR8(val);
        *(ptr + 2) = val;
        ROR8(val);
        *(ptr + 3) = val;
        ROR8(val);
        *(ptr + 4) = val;
        ROR8(val);
        *(ptr + 5) = val;
        ROR8(val);
        *(ptr + 6) = val;
        ROR8(val);
        *(ptr + 7) = val;
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }
    (void)mincore_touch_pages(buf, sz);

    /* 8 XOR passes with a rotating two-bit mask (0x03 rotated right
     * each pass).  Each bit position appears in exactly two of the 8
     * masks, so every bit is flipped twice and the buffer returns to
     * the fill pattern — which is what the verify pass expects. */
    for (i = 0; i < 8; i++) {
        ROR8(bit);
        for (ptr = buf; ptr < buf_end; ptr += chunk_sz) {
            *(ptr + 0) ^= bit;
            *(ptr + 1) ^= bit;
            *(ptr + 2) ^= bit;
            *(ptr + 3) ^= bit;
            *(ptr + 4) ^= bit;
            *(ptr + 5) ^= bit;
            *(ptr + 6) ^= bit;
            *(ptr + 7) ^= bit;
            c++;
            if (UNLIKELY(max_ops && c >= max_ops))
                goto abort;
            if (UNLIKELY(!g_keep_stressing_flag))
                goto abort;
        }
        (void)mincore_touch_pages(buf, sz);
    }

    inject_random_bit_errors(buf, sz);

    /* verify: replay the PRNG and compare against the fill pattern */
    mwc_seed(w, z);
    for (ptr = buf; ptr < buf_end; ptr += chunk_sz) {
        uint8_t val = mwc8();

        bit_errors += (*(ptr + 0) != val);
        ROR8(val);
        bit_errors += (*(ptr + 1) != val);
        ROR8(val);
        bit_errors += (*(ptr + 2) != val);
        ROR8(val);
        bit_errors += (*(ptr + 3) != val);
        ROR8(val);
        bit_errors += (*(ptr + 4) != val);
        ROR8(val);
        bit_errors += (*(ptr + 5) != val);
        ROR8(val);
        bit_errors += (*(ptr + 6) != val);
        ROR8(val);
        bit_errors += (*(ptr + 7) != val);
        if (UNLIKELY(!g_keep_stressing_flag))
            break;
    }

abort:
    stress_vm_check("flip", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
 1105 
 1106 /*
 1107  *  stress_vm_zero_one()
 1108  *  set all memory to zero and see if any bits are stuck at one and
 1109  *  set all memory to one and see if any bits are stuck at zero
 1110  */
static size_t TARGET_CLONES stress_vm_zero_one(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    volatile uint64_t *ptr;
    uint64_t *buf_end = (uint64_t *)(buf + sz);
    uint64_t c = get_counter(args);
    size_t bit_errors = 0;

    (void)max_ops;  /* unused: both passes always run to completion */

    /* pass 1: zero the buffer; any set bit found is stuck-at-one */
    (void)memset(buf, 0x00, sz);
    (void)mincore_touch_pages(buf, sz);
    inject_random_bit_errors(buf, sz);
    c += sz / 8;    /* account one op per 64-bit word */

    /* popcount 8 x 64-bit words per iteration; expect all zeros */
    for (ptr = (uint64_t *)buf; ptr < buf_end; ptr += 8) {
        bit_errors += stress_vm_count_bits(*(ptr + 0));
        bit_errors += stress_vm_count_bits(*(ptr + 1));
        bit_errors += stress_vm_count_bits(*(ptr + 2));
        bit_errors += stress_vm_count_bits(*(ptr + 3));
        bit_errors += stress_vm_count_bits(*(ptr + 4));
        bit_errors += stress_vm_count_bits(*(ptr + 5));
        bit_errors += stress_vm_count_bits(*(ptr + 6));
        bit_errors += stress_vm_count_bits(*(ptr + 7));

        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }

    /* pass 2: all-ones; any clear bit found is stuck-at-zero */
    (void)memset(buf, 0xff, sz);
    (void)mincore_touch_pages(buf, sz);
    inject_random_bit_errors(buf, sz);
    c += sz / 8;

    /* popcount the complement; expect all bits set */
    for (ptr = (uint64_t *)buf; ptr < buf_end; ptr += 8) {
        bit_errors += stress_vm_count_bits(~*(ptr + 0));
        bit_errors += stress_vm_count_bits(~*(ptr + 1));
        bit_errors += stress_vm_count_bits(~*(ptr + 2));
        bit_errors += stress_vm_count_bits(~*(ptr + 3));
        bit_errors += stress_vm_count_bits(~*(ptr + 4));
        bit_errors += stress_vm_count_bits(~*(ptr + 5));
        bit_errors += stress_vm_count_bits(~*(ptr + 6));
        bit_errors += stress_vm_count_bits(~*(ptr + 7));

        if (UNLIKELY(!g_keep_stressing_flag))
            break;
    }
abort:
    stress_vm_check("zero-one", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
 1167 
/*
 *  stress_vm_galpat_zero()
 *  galloping pattern. Set all bits to zero and flip a few
 *  random bits to one.  Check if this one is pulled down
 *  or pulls its neighbours up.
 */
 1174 static size_t TARGET_CLONES stress_vm_galpat_zero(
 1175     uint8_t *buf,
 1176     const size_t sz,
 1177     const args_t *args,
 1178     const uint64_t max_ops)
 1179 {
 1180     volatile uint64_t *ptr;
 1181     uint64_t *buf_end = (uint64_t *)(buf + sz);
 1182     size_t i, bit_errors = 0, bits_set = 0;
 1183     size_t bits_bad = sz / 4096;
 1184     uint64_t c = get_counter(args);
 1185 
 1186     (void)memset(buf, 0x00, sz);
 1187 
 1188     mwc_reseed();
 1189 
 1190     for (i = 0; i < bits_bad; i++) {
 1191         for (;;) {
 1192             size_t offset = mwc64() % sz;
 1193             uint8_t bit = mwc32() & 3;
 1194 
 1195             if (!buf[offset]) {
 1196                 buf[offset] |= (1 << bit);
 1197                 break;
 1198             }
 1199         }
 1200         c++;
 1201         if (UNLIKELY(max_ops && c >= max_ops))
 1202             break;
 1203     }
 1204     (void)mincore_touch_pages(buf, sz);
 1205     inject_random_bit_errors(buf, sz);
 1206 
 1207     for (ptr = (uint64_t *)buf; ptr < buf_end; ptr += 8) {
 1208         bits_set += stress_vm_count_bits(*(ptr + 0));
 1209         bits_set += stress_vm_count_bits(*(ptr + 1));
 1210         bits_set += stress_vm_count_bits(*(ptr + 2));
 1211         bits_set += stress_vm_count_bits(*(ptr + 3));
 1212         bits_set += stress_vm_count_bits(*(ptr + 4));
 1213         bits_set += stress_vm_count_bits(*(ptr + 5));
 1214         bits_set += stress_vm_count_bits(*(ptr + 6));
 1215         bits_set += stress_vm_count_bits(*(ptr + 7));
 1216 
 1217         if (UNLIKELY(!g_keep_stressing_flag))
 1218             break;
 1219     }
 1220 
 1221     if (bits_set != bits_bad)
 1222         bit_errors += UNSIGNED_ABS(bits_set, bits_bad);
 1223 
 1224     stress_vm_check("galpat-zero", bit_errors);
 1225     set_counter(args, c);
 1226 
 1227     return bit_errors;
 1228 }
 1229 
 1230 /*
 1231  *  stress_vm_galpat_one()
 1232  *  galloping pattern. Set all bits to one and flip a few
 1233  *  random bits to zero.  Check if this zero is pulled up
 1234  *  or pulls its neighbours down.
 1235  */
 1236 static size_t TARGET_CLONES stress_vm_galpat_one(
 1237     uint8_t *buf,
 1238     const size_t sz,
 1239     const args_t *args,
 1240     const uint64_t max_ops)
 1241 {
 1242     volatile uint64_t *ptr;
 1243     uint64_t *buf_end = (uint64_t *)(buf + sz);
 1244     size_t i, bit_errors = 0, bits_set = 0;
 1245     size_t bits_bad = sz / 4096;
 1246     uint64_t c = get_counter(args);
 1247 
 1248     (void)memset(buf, 0xff, sz);
 1249 
 1250     mwc_reseed();
 1251 
 1252     for (i = 0; i < bits_bad; i++) {
 1253         for (;;) {
 1254             size_t offset = mwc64() % sz;
 1255             uint8_t bit = mwc32() & 3;
 1256 
 1257             if (buf[offset] == 0xff) {
 1258                 buf[offset] &= ~(1 << bit);
 1259                 break;
 1260             }
 1261         }
 1262         c++;
 1263         if (UNLIKELY(max_ops && c >= max_ops))
 1264             break;
 1265     }
 1266     (void)mincore_touch_pages(buf, sz);
 1267     inject_random_bit_errors(buf, sz);
 1268 
 1269     for (ptr = (uint64_t *)buf; ptr < buf_end; ptr += 8) {
 1270         bits_set += stress_vm_count_bits(~(*(ptr + 0)));
 1271         bits_set += stress_vm_count_bits(~(*(ptr + 1)));
 1272         bits_set += stress_vm_count_bits(~(*(ptr + 2)));
 1273         bits_set += stress_vm_count_bits(~(*(ptr + 3)));
 1274         bits_set += stress_vm_count_bits(~(*(ptr + 4)));
 1275         bits_set += stress_vm_count_bits(~(*(ptr + 5)));
 1276         bits_set += stress_vm_count_bits(~(*(ptr + 6)));
 1277         bits_set += stress_vm_count_bits(~(*(ptr + 7)));
 1278         if (UNLIKELY(!g_keep_stressing_flag))
 1279             break;
 1280     }
 1281 
 1282     if (bits_set != bits_bad)
 1283         bit_errors += UNSIGNED_ABS(bits_set, bits_bad);
 1284 
 1285     stress_vm_check("galpat-one", bit_errors);
 1286     set_counter(args, c);
 1287 
 1288     return bit_errors;
 1289 }
 1290 
/*
 *  stress_vm_inc_nybble()
 *  work through memory and increment the lower nybbles by
 *  1 and the upper nybbles by 0xf and sanity check each byte.
 */
static size_t TARGET_CLONES stress_vm_inc_nybble(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    /* val persists across invocations: the expected pattern advances
     * by one lo-nybble + one hi-nybble increment per call */
    static uint8_t val = 0;
    volatile uint8_t *ptr;
    uint8_t *buf_end = buf + sz;
    size_t bit_errors = 0;
    uint64_t c = get_counter(args);

    (void)memset(buf, val, sz);
    /* advance val to the value every byte should hold after the
     * two increment passes below */
    INC_LO_NYBBLE(val);
    INC_HI_NYBBLE(val);

    mwc_reseed();
    /* pass 1: increment the low nybble of every byte, 8 at a time */
    for (ptr = buf; ptr < buf_end; ptr += 8) {
        INC_LO_NYBBLE(*(ptr + 0));
        INC_LO_NYBBLE(*(ptr + 1));
        INC_LO_NYBBLE(*(ptr + 2));
        INC_LO_NYBBLE(*(ptr + 3));
        INC_LO_NYBBLE(*(ptr + 4));
        INC_LO_NYBBLE(*(ptr + 5));
        INC_LO_NYBBLE(*(ptr + 6));
        INC_LO_NYBBLE(*(ptr + 7));
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }

    /* pass 2: increment the high nybble of every byte */
    for (ptr = buf; ptr < buf_end; ptr += 8) {
        INC_HI_NYBBLE(*(ptr + 0));
        INC_HI_NYBBLE(*(ptr + 1));
        INC_HI_NYBBLE(*(ptr + 2));
        INC_HI_NYBBLE(*(ptr + 3));
        INC_HI_NYBBLE(*(ptr + 4));
        INC_HI_NYBBLE(*(ptr + 5));
        INC_HI_NYBBLE(*(ptr + 6));
        INC_HI_NYBBLE(*(ptr + 7));
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!g_keep_stressing_flag))
            goto abort;
    }
    (void)mincore_touch_pages(buf, sz);
    inject_random_bit_errors(buf, sz);

    /* verify: every byte should now equal the advanced val */
    for (ptr = buf; ptr < buf_end; ptr += 8) {
        bit_errors += (*(ptr + 0) != val);
        bit_errors += (*(ptr + 1) != val);
        bit_errors += (*(ptr + 2) != val);
        bit_errors += (*(ptr + 3) != val);
        bit_errors += (*(ptr + 4) != val);
        bit_errors += (*(ptr + 5) != val);
        bit_errors += (*(ptr + 6) != val);
        bit_errors += (*(ptr + 7) != val);
        if (UNLIKELY(!g_keep_stressing_flag))
            break;
    }

abort:
    stress_vm_check("inc-nybble", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
 1366 
 1367 /*
 1368  *  stress_vm_rand_sum()
 1369  *  sequentially set all memory to random values and then
 1370  *  check if they are still set correctly.
 1371  */
 1372 static size_t TARGET_CLONES stress_vm_rand_sum(
 1373     uint8_t *buf,
 1374     const size_t sz,
 1375     const args_t *args,
 1376     const uint64_t max_ops)
 1377 {
 1378     volatile uint64_t *ptr;
 1379     uint64_t *buf_end = (uint64_t *)(buf + sz);
 1380     uint64_t w, z, c = get_counter(args);
 1381     size_t bit_errors = 0;
 1382     const size_t chunk_sz = sizeof(*ptr) * 8;
 1383 
 1384     mwc_reseed();
 1385     w = mwc64();
 1386     z = mwc64();
 1387 
 1388     mwc_seed(w, z);
 1389     for (ptr = (uint64_t *)buf; ptr < buf_end; ptr += chunk_sz) {
 1390         *(ptr + 0) = mwc64();
 1391         *(ptr + 1) = mwc64();
 1392         *(ptr + 2) = mwc64();
 1393         *(ptr + 3) = mwc64();
 1394         *(ptr + 4) = mwc64();
 1395         *(ptr + 5) = mwc64();
 1396         *(ptr + 6) = mwc64();
 1397         *(ptr + 7) = mwc64();
 1398         c++;
 1399         if (UNLIKELY(max_ops && c >= max_ops))
 1400             goto abort;
 1401         if (UNLIKELY(!g_keep_stressing_flag))
 1402             goto abort;
 1403     }
 1404 
 1405     (void)mincore_touch_pages(buf, sz);
 1406     inject_random_bit_errors(buf, sz);
 1407 
 1408     mwc_seed(w, z);
 1409     for (ptr = (uint64_t *)buf; ptr < buf_end; ptr += chunk_sz) {
 1410         bit_errors += stress_vm_count_bits(*(ptr + 0) ^ mwc64());
 1411         bit_errors += stress_vm_count_bits(*(ptr + 1) ^ mwc64());
 1412         bit_errors += stress_vm_count_bits(*(ptr + 2) ^ mwc64());
 1413         bit_errors += stress_vm_count_bits(*(ptr + 3) ^ mwc64());
 1414         bit_errors += stress_vm_count_bits(*(ptr + 4) ^ mwc64());
 1415         bit_errors += stress_vm_count_bits(*(ptr + 5) ^ mwc64());
 1416         bit_errors += stress_vm_count_bits(*(ptr + 6) ^ mwc64());
 1417         bit_errors += stress_vm_count_bits(*(ptr + 7) ^ mwc64());
 1418         if (UNLIKELY(!g_keep_stressing_flag))
 1419             break;
 1420     }
 1421 abort:
 1422     stress_vm_check("rand-sum", bit_errors);
 1423     set_counter(args, c);
 1424 
 1425     return bit_errors;
 1426 }
 1427 
 1428 /*
 1429  *  stress_vm_prime_zero()
 1430  *  step through memory in non-contiguous large steps
 1431  *  and clearing each bit to one (one bit per complete memory cycle)
 1432  *  and check if they are clear.
 1433  */
 1434 static size_t TARGET_CLONES stress_vm_prime_zero(
 1435     uint8_t *buf,
 1436     const size_t sz,
 1437     const args_t *args,
 1438     const uint64_t max_ops)
 1439 {
 1440     size_t i;
 1441     volatile uint8_t *ptr = buf;
 1442     uint8_t j;
 1443     size_t bit_errors = 0;
 1444     const uint64_t prime = PRIME_64;
 1445     uint64_t k, c = get_counter(args);
 1446 
 1447 #if SIZE_MAX > UINT32_MAX
 1448     /* Unlikely.. */
 1449     if (sz > (1ULL << 63))
 1450         return 0;
 1451 #endif
 1452 
 1453     (void)memset(buf, 0xff, sz);
 1454 
 1455     for (j = 0; j < 8; j++) {
 1456         uint8_t mask = ~(1 << j);
 1457         /*
 1458          *  Step through memory in prime sized steps
 1459          *  in a totally sub-optimal way to exercise
 1460          *  memory and cache stalls
 1461          */
 1462         for (i = 0, k = prime; i < sz; i++, k += prime) {
 1463             ptr[k % sz] &= mask;
 1464             c++;
 1465             if (UNLIKELY(max_ops && c >= max_ops))
 1466                 goto abort;
 1467             if (UNLIKELY(!g_keep_stressing_flag))
 1468                 goto abort;
 1469         }
 1470     }
 1471     (void)mincore_touch_pages(buf, sz);
 1472     inject_random_bit_errors(buf, sz);
 1473 
 1474     for (i = 0; i < sz; i++) {
 1475         bit_errors += stress_vm_count_bits(buf[i]);
 1476     }
 1477 
 1478 abort:
 1479     stress_vm_check("prime-zero", bit_errors);
 1480     set_counter(args, c);
 1481 
 1482     return bit_errors;
 1483 }
 1484 
 1485 /*
 1486  *  stress_vm_prime_one()
 1487  *  step through memory in non-contiguous large steps
 1488  *  and set each bit to one (one bit per complete memory cycle)
 1489  *  and check if they are set.
 1490  */
 1491 static size_t TARGET_CLONES stress_vm_prime_one(
 1492     uint8_t *buf,
 1493     const size_t sz,
 1494     const args_t *args,
 1495     const uint64_t max_ops)
 1496 {
 1497     size_t i;
 1498     volatile uint8_t *ptr = buf;
 1499     uint8_t j;
 1500     size_t bit_errors = 0;
 1501     const uint64_t prime = PRIME_64;
 1502     uint64_t k, c = get_counter(args);
 1503 
 1504 #if SIZE_MAX > UINT32_MAX
 1505     /* Unlikely.. */
 1506     if (sz > (1ULL << 63))
 1507         return 0;
 1508 #endif
 1509 
 1510     (void)memset(buf, 0x00, sz);
 1511 
 1512     for (j = 0; j < 8; j++) {
 1513         uint8_t mask = 1 << j;
 1514         /*
 1515          *  Step through memory in prime sized steps
 1516          *  in a totally sub-optimal way to exercise
 1517          *  memory and cache stalls
 1518          */
 1519         for (i = 0, k = prime; i < sz; i++, k += prime) {
 1520             ptr[k % sz] |= mask;
 1521             c++;
 1522             if (UNLIKELY(max_ops && c >= max_ops))
 1523                 goto abort;
 1524             if (UNLIKELY(!g_keep_stressing_flag))
 1525                 goto abort;
 1526         }
 1527     }
 1528     (void)mincore_touch_pages(buf, sz);
 1529     inject_random_bit_errors(buf, sz);
 1530 
 1531     for (i = 0; i < sz; i++) {
 1532         bit_errors += 8 - stress_vm_count_bits(buf[i]);
 1533         if (UNLIKELY(!g_keep_stressing_flag))
 1534             break;
 1535     }
 1536 abort:
 1537     stress_vm_check("prime-one", bit_errors);
 1538     set_counter(args, c);
 1539 
 1540     return bit_errors;
 1541 }
 1542 
/*
 *  stress_vm_prime_gray_zero()
 *  step through memory in non-contiguous large steps
 *  and first clear just one bit (based on gray code) and then
 *  clear all the other bits and finally check if they are all clear
 */
 1549 static size_t TARGET_CLONES stress_vm_prime_gray_zero(
 1550     uint8_t *buf,
 1551     const size_t sz,
 1552     const args_t *args,
 1553     const uint64_t max_ops)
 1554 {
 1555     size_t i;
 1556     volatile uint8_t *ptr = buf;
 1557     size_t bit_errors = 0;
 1558     const uint64_t prime = PRIME_64;
 1559     uint64_t j, c = get_counter(args);
 1560 
 1561 #if SIZE_MAX > UINT32_MAX
 1562     /* Unlikely.. */
 1563     if (sz > (1ULL << 63))
 1564         return 0;
 1565 #endif
 1566 
 1567     (void)memset(buf, 0xff, sz);
 1568 
 1569     for (i = 0, j = prime; i < sz; i++, j += prime) {
 1570         /*
 1571          *  Step through memory in prime sized steps
 1572          *  in a totally sub-optimal way to exercise
 1573          *  memory and cache stalls
 1574          */
 1575         ptr[j % sz] &= ((i >> 1) ^ i);
 1576         if (!g_keep_stressing_flag)
 1577             goto abort;
 1578         c++;
 1579         if (max_ops && c >= max_ops)
 1580             goto abort;
 1581     }
 1582     for (i = 0, j = prime; i < sz; i++, j += prime) {
 1583         /*
 1584          *  Step through memory in prime sized steps
 1585          *  in a totally sub-optimal way to exercise
 1586          *  memory and cache stalls
 1587          */
 1588         ptr[j % sz] &= ~((i >> 1) ^ i);
 1589         if (UNLIKELY(!g_keep_stressing_flag))
 1590             goto abort;
 1591         c++;
 1592         if (UNLIKELY(max_ops && c >= max_ops))
 1593             goto abort;
 1594     }
 1595     (void)mincore_touch_pages(buf, sz);
 1596     inject_random_bit_errors(buf, sz);
 1597 
 1598     for (i = 0; i < sz; i++) {
 1599         bit_errors += stress_vm_count_bits(buf[i]);
 1600         if (UNLIKELY(!g_keep_stressing_flag))
 1601             break;
 1602     }
 1603 abort:
 1604     stress_vm_check("prime-gray-zero", bit_errors);
 1605     set_counter(args, c);
 1606 
 1607     return bit_errors;
 1608 }
 1609 
/*
 *  stress_vm_prime_gray_one()
 *  step through memory in non-contiguous large steps
 *  and first set just one bit (based on gray code) and then
 *  set all the other bits and finally check if they are all set
 */
 1616 static size_t TARGET_CLONES stress_vm_prime_gray_one(
 1617     uint8_t *buf,
 1618     const size_t sz,
 1619     const args_t *args,
 1620     const uint64_t max_ops)
 1621 {
 1622     size_t i;
 1623     volatile uint8_t *ptr = buf;
 1624     size_t bit_errors = 0;
 1625     const uint64_t prime = PRIME_64;
 1626     uint64_t j, c = get_counter(args);
 1627 
 1628 #if SIZE_MAX > UINT32_MAX
 1629     /* Unlikely.. */
 1630     if (sz > (1ULL << 63))
 1631         return 0;
 1632 #endif
 1633 
 1634     (void)memset(buf, 0x00, sz);
 1635 
 1636     for (i = 0, j = prime; i < sz; i++, j += prime) {
 1637         /*
 1638          *  Step through memory in prime sized steps
 1639          *  in a totally sub-optimal way to exercise
 1640          *  memory and cache stalls
 1641          */
 1642         ptr[j % sz] |= ((i >> 1) ^ i);
 1643         if (UNLIKELY(!g_keep_stressing_flag))
 1644             goto abort;
 1645         c++;
 1646         if (UNLIKELY(max_ops && c >= max_ops))
 1647             goto abort;
 1648     }
 1649     (void)mincore_touch_pages(buf, sz);
 1650     for (i = 0, j = prime; i < sz; i++, j += prime) {
 1651         /*
 1652          *  Step through memory in prime sized steps
 1653          *  in a totally sub-optimal way to exercise
 1654          *  memory and cache stalls
 1655          */
 1656         ptr[j % sz] |= ~((i >> 1) ^ i);
 1657         if (UNLIKELY(!g_keep_stressing_flag))
 1658             goto abort;
 1659         c++;
 1660         if (UNLIKELY(max_ops && c >= max_ops))
 1661             goto abort;
 1662     }
 1663     (void)mincore_touch_pages(buf, sz);
 1664     inject_random_bit_errors(buf, sz);
 1665 
 1666     for (i = 0; i < sz; i++) {
 1667         bit_errors += 8 - stress_vm_count_bits(buf[i]);
 1668         if (UNLIKELY(!g_keep_stressing_flag))
 1669             break;
 1670     }
 1671 abort:
 1672     stress_vm_check("prime-gray-one", bit_errors);
 1673     set_counter(args, c);
 1674 
 1675     return bit_errors;
 1676 }
 1677 
 1678 /*
 1679  *  stress_vm_write_64()
 1680  *  simple 64 bit write, no read check
 1681  */
static size_t TARGET_CLONES stress_vm_write64(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    /* val persists across calls so each invocation writes a new value */
    static uint64_t val;
    uint64_t *ptr = (uint64_t *)buf;
    register uint64_t v = val;
    /* n = number of 32-word chunks; the loop body below is
     * deliberately unrolled x32 to minimise loop overhead */
    register size_t i = 0, n = sz / (sizeof(*ptr) * 32);

    while (i < n) {
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;

        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;

        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;

        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        *ptr++ = v;
        i++;
        /* one bogo-op per 32-word chunk */
        if (UNLIKELY(!g_keep_stressing_flag || (max_ops && i >= max_ops)))
            break;
    }
    add_counter(args, i);
    val++;

    /* write-only stressor: no read-back check, so no bit errors */
    return 0;
}
 1738 
 1739 /*
 1740  *  stress_vm_read_64()
 1741  *  simple 64 bit read
 1742  */
static size_t TARGET_CLONES stress_vm_read64(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    /* volatile forces each read to actually touch memory */
    volatile uint64_t *ptr = (uint64_t *)buf;
    /* n = number of 32-word chunks; the loop body below is
     * deliberately unrolled x32 to minimise loop overhead */
    register size_t i = 0, n = sz / (sizeof(*ptr) * 32);

    while (i < n) {
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);

        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);

        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);

        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);
        (void)*(ptr++);

        i++;
        /* one bogo-op per 32-word chunk */
        if (UNLIKELY(!g_keep_stressing_flag || (max_ops && i >= max_ops)))
            break;
    }
    add_counter(args, i);

    /* read-only stressor: nothing to verify, so no bit errors */
    return 0;
}
 1797 
 1798 /*
 1799  *  stress_vm_rowhammer()
 1800  *
 1801  */
static size_t TARGET_CLONES stress_vm_rowhammer(
    uint8_t *buf,
    const size_t sz,
    const args_t *args,
    const uint64_t max_ops)
{
    size_t bit_errors = 0;
    uint32_t *buf32 = (uint32_t *)buf;
    /* pattern persists across calls; rotated left one bit per call below */
    static uint32_t val = 0xff5a00a5;
    register size_t j;
    register volatile uint32_t *addr0, *addr1;
    register size_t errors = 0;
    const size_t n = sz / sizeof(*addr0);   /* number of 32-bit words */

    (void)max_ops;  /* unused: hammer count is fixed at VM_ROWHAMMER_LOOPS */

    if (!n) {
        pr_dbg("stress-vm: rowhammer: zero uint32_t integers could "
            "be hammered, aborting\n");
        return 0;
    }

    (void)mincore_touch_pages(buf, sz);

    /* fill the entire buffer with the known pattern */
    for (j = 0; j < n; j++)
        buf32[j] = val;

    /* Pick two random addresses */
    /* NOTE(review): the << 12 before the modulo presumably spreads
     * the picks at page-sized granularity — confirm intent */
    addr0 = &buf32[(mwc64() << 12) % n];
    addr1 = &buf32[(mwc64() << 12) % n];

    /* Hammer the rows: each volatile read followed by a cache-line
     * flush forces the next access out to DRAM; 4 read/flush pairs
     * per iteration, hence the loop count of VM_ROWHAMMER_LOOPS / 4 */
    for (j = VM_ROWHAMMER_LOOPS / 4; j; j--) {
        *addr0;
        *addr1;
        clflush(addr0);
        clflush(addr1);
        *addr0;
        *addr1;
        clflush(addr0);
        clflush(addr1);
        *addr0;
        *addr1;
        clflush(addr0);
        clflush(addr1);
        *addr0;
        *addr1;
        clflush(addr0);
        clflush(addr1);
    }
    /* any word that no longer matches the pattern has a flipped bit */
    for (j = 0; j < n; j++)
        if (UNLIKELY(buf32[j] != val))
            errors++;
    if (errors) {
        bit_errors += errors;
        pr_dbg("stress-vm: rowhammer: %zu errors on addresses "
            "%p and %p\n", errors, addr0, addr1);
    }
    add_counter(args, VM_ROWHAMMER_LOOPS);
    /* rotate the pattern left one bit for the next invocation */
    val = (val >> 31) | (val << 1);

    stress_vm_check("rowhammer", bit_errors);

    return bit_errors;
}
 1867 
 1868 /*
 1869  *  stress_vm_all()
 1870  *  work through all vm stressors sequentially
 1871  */
 1872 static size_t stress_vm_all(
 1873     uint8_t *buf,
 1874     const size_t sz,
 1875     const args_t *args,
 1876     const uint64_t max_ops)
 1877 {
 1878     static int i = 1;
 1879     size_t bit_errors = 0;
 1880 
 1881     bit_errors = vm_methods[i].func(buf, sz, args, max_ops);
 1882     i++;
 1883     if (vm_methods[i].func == NULL)
 1884         i = 1;
 1885 
 1886     return bit_errors;
 1887 }
 1888 
/*
 *  Table of vm stress methods; the "all" entry must be first since
 *  stress_vm_all() cycles through the entries starting at index 1,
 *  and the table is NULL terminated.
 */
static const stress_vm_method_info_t vm_methods[] = {
    { "all",    stress_vm_all },
    { "flip",   stress_vm_flip },
    { "galpat-0",   stress_vm_galpat_zero },
    { "galpat-1",   stress_vm_galpat_one },
    { "gray",   stress_vm_gray },
    { "rowhammer",  stress_vm_rowhammer },
    { "incdec", stress_vm_incdec },
    { "inc-nybble", stress_vm_inc_nybble },
    { "rand-set",   stress_vm_rand_set },
    { "rand-sum",   stress_vm_rand_sum },
    { "read64", stress_vm_read64 },
    { "ror",    stress_vm_ror },
    { "swap",   stress_vm_swap },
    { "move-inv",   stress_vm_moving_inversion },
    { "modulo-x",   stress_vm_modulo_x },
    { "prime-0",    stress_vm_prime_zero },
    { "prime-1",    stress_vm_prime_one },
    { "prime-gray-0",stress_vm_prime_gray_zero },
    { "prime-gray-1",stress_vm_prime_gray_one },
    { "prime-incdec",stress_vm_prime_incdec },
    { "walk-0d",    stress_vm_walking_zero_data },
    { "walk-1d",    stress_vm_walking_one_data },
    { "walk-0a",    stress_vm_walking_zero_addr },
    { "walk-1a",    stress_vm_walking_one_addr },
    { "write64",    stress_vm_write64 },
    { "zero-one",   stress_vm_zero_one },
    { NULL,     NULL  }
};
 1918 
 1919 /*
 1920  *  stress_set_vm_method()
 1921  *      set default vm stress method
 1922  */
 1923 int stress_set_vm_method(const char *name)
 1924 {
 1925     stress_vm_method_info_t const *info;
 1926 
 1927     for (info = vm_methods; info->func; info++) {
 1928         if (!strcmp(info->name, name)) {
 1929             set_setting("vm-method", TYPE_ID_UINTPTR_T, &info);
 1930             return 0;
 1931         }
 1932     }
 1933 
 1934     (void)fprintf(stderr, "vm-method must be one of:");
 1935     for (info = vm_methods; info->func; info++) {
 1936         (void)fprintf(stderr, " %s", info->name);
 1937     }
 1938     (void)fprintf(stderr, "\n");
 1939 
 1940     return -1;
 1941 }
 1942 
 1943 
 1944 /*
 1945  *  stress_vm()
 1946  *  stress virtual memory
 1947  */
 1948 static int stress_vm(const args_t *args)
 1949 {
 1950     uint64_t *bit_error_count = MAP_FAILED;
 1951     uint64_t vm_hang = DEFAULT_VM_HANG;
 1952     uint64_t tmp_counter;
 1953     uint32_t restarts = 0, nomems = 0;
 1954     size_t vm_bytes = DEFAULT_VM_BYTES;
 1955     uint8_t *buf = NULL;
 1956     pid_t pid;
 1957     const bool keep = (g_opt_flags & OPT_FLAGS_VM_KEEP);
 1958         const size_t page_size = args->page_size;
 1959     size_t buf_sz, retries;
 1960     int err = 0, ret = EXIT_SUCCESS;
 1961     int vm_flags = 0;                      /* VM mmap flags */
 1962     int vm_madvise = -1;
 1963     const stress_vm_method_info_t *vm_method = &vm_methods[0];
 1964     stress_vm_func func;
 1965 
 1966     (void)get_setting("vm-hang", &vm_hang);
 1967     (void)get_setting("vm-flags", &vm_flags);
 1968     (void)get_setting("vm-method", &vm_method);
 1969     (void)get_setting("vm-madvise", &vm_madvise);
 1970 
 1971     func = vm_method->func;
 1972     pr_dbg("%s using method '%s'\n", args->name, vm_method->name);
 1973 
 1974     if (!get_setting("vm-bytes", &vm_bytes)) {
 1975         if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
 1976             vm_bytes = MAX_VM_BYTES;
 1977         if (g_opt_flags & OPT_FLAGS_MINIMIZE)
 1978             vm_bytes = MIN_VM_BYTES;
 1979     }
 1980     vm_bytes /= args->num_instances;
 1981     if (vm_bytes < MIN_VM_BYTES)
 1982         vm_bytes = MIN_VM_BYTES;
 1983     buf_sz = vm_bytes & ~(page_size - 1);
 1984 
 1985     for (retries = 0; (retries < 100) && g_keep_stressing_flag; retries++) {
 1986         bit_error_count = (uint64_t *)
 1987             mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 1988                 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 1989         err = errno;
 1990         if (bit_error_count != MAP_FAILED)
 1991             break;
 1992         (void)shim_usleep(100);
 1993     }
 1994 
 1995     /* Cannot allocate a single page for bit error counter */
 1996     if (bit_error_count == MAP_FAILED) {
 1997         if (g_keep_stressing_flag) {
 1998             pr_err("%s: could not mmap bit error counter: "
 1999                 "retry count=%zu, errno=%d (%s)\n",
 2000                 args->name, retries, err, strerror(err));
 2001         }
 2002         return EXIT_NO_RESOURCE;
 2003     }
 2004 
 2005     *bit_error_count = 0ULL;
 2006 
 2007 again:
 2008     if (!g_keep_stressing_flag)
 2009         goto clean_up;
 2010     pid = fork();
 2011     if (pid < 0) {
 2012         if (errno == EAGAIN)
 2013             goto again;
 2014         pr_err("%s: fork failed: errno=%d: (%s)\n",
 2015             args->name, errno, strerror(errno));
 2016     } else if (pid > 0) {
 2017         int status, waitret;
 2018 
 2019         /* Parent, wait for child */
 2020         (void)setpgid(pid, g_pgrp);
 2021         waitret = waitpid(pid, &status, 0);
 2022         if (waitret < 0) {
 2023             if (errno != EINTR)
 2024                 pr_dbg("%s: waitpid(): errno=%d (%s)\n",
 2025                     args->name, errno, strerror(errno));
 2026             (void)kill(pid, SIGTERM);
 2027             (void)kill(pid, SIGKILL);
 2028             (void)waitpid(pid, &status, 0);
 2029         } else if (WIFSIGNALED(status)) {
 2030             pr_dbg("%s: child died: %s (instance %d)\n",
 2031                 args->name, stress_strsignal(WTERMSIG(status)),
 2032                 args->instance);
 2033             /* If we got killed by OOM killer, re-start */
 2034             if (WTERMSIG(status) == SIGKILL) {
 2035                 log_system_mem_info();
 2036                 pr_dbg("%s: assuming killed by OOM killer, "
 2037                     "restarting again (instance %d)\n",
 2038                     args->name, args->instance);
 2039                 restarts++;
 2040                 goto again;
 2041             }
 2042         }
 2043     } else if (pid == 0) {
 2044         int no_mem_retries = 0;
 2045         const uint64_t max_ops = args->max_ops << VM_BOGO_SHIFT;
 2046 
 2047         (void)setpgid(0, g_pgrp);
 2048         stress_parent_died_alarm();
 2049 
 2050         /* Make sure this is killable by OOM killer */
 2051         set_oom_adjustment(args->name, true);
 2052 
 2053         do {
 2054             if (no_mem_retries >= NO_MEM_RETRIES_MAX) {
 2055                 pr_err("%s: gave up trying to mmap, no available memory\n",
 2056                     args->name);
 2057                 break;
 2058             }
 2059             if (!keep || (buf == NULL)) {
 2060                 if (!g_keep_stressing_flag)
 2061                     return EXIT_SUCCESS;
 2062                 buf = (uint8_t *)mmap(NULL, buf_sz,
 2063                     PROT_READ | PROT_WRITE,
 2064                     MAP_PRIVATE | MAP_ANONYMOUS |
 2065                     vm_flags, -1, 0);
 2066                 if (buf == MAP_FAILED) {
 2067                     buf = NULL;
 2068                     no_mem_retries++;
 2069                     (void)shim_usleep(100000);
 2070                     continue;   /* Try again */
 2071                 }
 2072                 if (vm_madvise < 0)
 2073                     (void)madvise_random(buf, buf_sz);
 2074                 else
 2075                     (void)shim_madvise(buf, buf_sz, vm_madvise);
 2076             }
 2077 
 2078             no_mem_retries = 0;
 2079             (void)mincore_touch_pages(buf, buf_sz);
 2080             *bit_error_count += func(buf, buf_sz, args, max_ops);
 2081 
 2082             if (vm_hang == 0) {
 2083                 while (keep_stressing_vm(args)) {
 2084                     (void)sleep(3600);
 2085                 }
 2086             } else if (vm_hang != DEFAULT_VM_HANG) {
 2087                 (void)sleep((int)vm_hang);
 2088             }
 2089 
 2090             if (!keep) {
 2091                 (void)madvise_random(buf, buf_sz);
 2092                 (void)munmap((void *)buf, buf_sz);
 2093             }
 2094         } while (keep_stressing_vm(args));
 2095 
 2096         if (keep && buf != NULL)
 2097             (void)munmap((void *)buf, buf_sz);
 2098 
 2099         _exit(EXIT_SUCCESS);
 2100     }
 2101 clean_up:
 2102     (void)shim_msync(bit_error_count, page_size, MS_SYNC);
 2103     if (*bit_error_count > 0) {
 2104         pr_fail("%s: detected %" PRIu64 " bit errors while "
 2105             "stressing memory\n",
 2106             args->name, *bit_error_count);
 2107         ret = EXIT_FAILURE;
 2108     }
 2109     (void)munmap((void *)bit_error_count, page_size);
 2110 
 2111     tmp_counter = get_counter(args) >> VM_BOGO_SHIFT;
 2112     set_counter(args, tmp_counter);
 2113 
 2114     if (restarts + nomems > 0)
 2115         pr_dbg("%s: OOM restarts: %" PRIu32
 2116             ", out of memory restarts: %" PRIu32 ".\n",
 2117             args->name, restarts, nomems);
 2118 
 2119     return ret;
 2120 }
 2121 
 2122 static void stress_vm_set_default(void)
 2123 {
 2124     stress_set_vm_method("all");
 2125 }
 2126 
/* Stressor registration: entry point, default-option setter and class flags */
stressor_info_t stress_vm_info = {
    .stressor = stress_vm,
    .set_default = stress_vm_set_default,
    .class = CLASS_VM | CLASS_MEMORY | CLASS_OS
};