"Fossies" - the Fresh Open Source Software Archive

Member "stress-ng-0.13.05/stress-vm.c" (11 Oct 2021, 54557 Bytes) of package /linux/privat/stress-ng-0.13.05.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and a code folding option. Alternatively, you can view or download the uninterpreted source code file here. For more information about "stress-vm.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 0.13.04_vs_0.13.05.

    1 /*
    2  * Copyright (C) 2013-2021 Canonical, Ltd.
    3  *
    4  * This program is free software; you can redistribute it and/or
    5  * modify it under the terms of the GNU General Public License
    6  * as published by the Free Software Foundation; either version 2
    7  * of the License, or (at your option) any later version.
    8  *
    9  * This program is distributed in the hope that it will be useful,
   10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   12  * GNU General Public License for more details.
   13  *
   14  * You should have received a copy of the GNU General Public License
   15  * along with this program; if not, write to the Free Software
   16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
   17  *
   18  * This code is a complete clean re-write of the stress tool by
   19  * Colin Ian King <colin.king@canonical.com> and attempts to be
   20  * backwardly compatible with the stress tool by Amos Waterland
   21  * <apw@rossby.metr.ou.edu> but has more stress tests and more
   22  * functionality.
   23  *
   24  */
   25 #include "stress-ng.h"
   26 
   27 /*
   28  *  For testing, set this to 1 to simulate random memory errors
   29  */
   30 #define INJECT_BIT_ERRORS   (0)
   31 
   32 #define VM_BOGO_SHIFT       (12)
   33 #define VM_ROWHAMMER_LOOPS  (1000000)
   34 
   35 #define NO_MEM_RETRIES_MAX  (100)
   36 
   37 /*
   38  *  the VM stress test has diffent methods of vm stressor
   39  */
/*
 *  prototype of a vm stressor method: exercises the region [buf, buf_end)
 *  of sz bytes, bumping the bogo counter in args and stopping once max_ops
 *  is reached (0 == no limit); returns the number of bit errors detected
 */
typedef size_t (*stress_vm_func)(void *buf, void *buf_end, const size_t sz,
        const stress_args_t *args, const uint64_t max_ops);

/* maps a --vm-method name to its stressor function */
typedef struct {
    const char *name;
    const stress_vm_func func;
} stress_vm_method_info_t;

/* maps a --vm-madvise name to its madvise(2) advice value */
typedef struct {
    const char *name;
    const int advice;
} stress_vm_madvise_info_t;

/* per-run context for a vm stressor */
typedef struct {
    uint64_t *bit_error_count;                  /* cumulative bit error tally */
    const stress_vm_method_info_t *vm_method;   /* selected stressor method */
} stress_vm_context_t;

static const stress_vm_method_info_t vm_methods[];  /* forward declaration, defined later in the file */
   59 
/* command line help: { short option, long option, description } */
static const stress_help_t help[] = {
    { "m N", "vm N",     "start N workers spinning on anonymous mmap" },
    { NULL,  "vm-bytes N",   "allocate N bytes per vm worker (default 256MB)" },
    { NULL,  "vm-hang N",    "sleep N seconds before freeing memory" },
    { NULL,  "vm-keep",  "redirty memory instead of reallocating" },
    { NULL,  "vm-ops N",     "stop after N vm bogo operations" },
#if defined(MAP_LOCKED)
    { NULL,  "vm-locked",    "lock the pages of the mapped region into memory" },
#endif
    { NULL,  "vm-madvise M", "specify mmap'd vm buffer madvise advice" },
    { NULL,  "vm-method M",  "specify stress vm method M, default is all" },
#if defined(MAP_POPULATE)
    { NULL,  "vm-populate",  "populate (prefault) page tables for a mapping" },
#endif
    { NULL,  NULL,       NULL }
};
   76 
/*
 * madvise advice name to value table; only advice values supported by
 * the build target are compiled in.  NULL-name terminated when madvise
 * is available, otherwise a single ignored "normal" entry.
 */
static const stress_vm_madvise_info_t vm_madvise_info[] = {
#if defined(HAVE_MADVISE)
#if defined(MADV_DONTNEED)
    { "dontneed",   MADV_DONTNEED},
#endif
#if defined(MADV_HUGEPAGE)
    { "hugepage",   MADV_HUGEPAGE },
#endif
#if defined(MADV_MERGEABLE)
    { "mergeable",  MADV_MERGEABLE },
#endif
#if defined(MADV_NOHUGEPAGE)
    { "nohugepage", MADV_NOHUGEPAGE },
#endif
#if defined(MADV_NORMAL)
    { "normal", MADV_NORMAL },
#endif
#if defined(MADV_RANDOM)
    { "random", MADV_RANDOM },
#endif
#if defined(MADV_SEQUENTIAL)
    { "sequential", MADV_SEQUENTIAL },
#endif
#if defined(MADV_UNMERGEABLE)
    { "unmergeable",MADV_UNMERGEABLE },
#endif
#if defined(MADV_WILLNEED)
    { "willneed",   MADV_WILLNEED},
#endif
    { NULL,         0 },
#else
    /* No MADVISE, default to normal, ignored */
    { "normal", 0 },
#endif
};
  112 
  113 /*
  114  *  keep_stressing(args)
  115  *  returns true if we can keep on running a stressor
  116  */
  117 static bool HOT OPTIMIZE3 keep_stressing_vm(const stress_args_t *args)
  118 {
  119     return (LIKELY(keep_stressing_flag()) &&
  120             LIKELY(!args->max_ops || ((get_counter(args) >> VM_BOGO_SHIFT) < args->max_ops)));
  121 }
  122 
  123 static int stress_set_vm_hang(const char *opt)
  124 {
  125     uint64_t vm_hang;
  126 
  127     vm_hang = stress_get_uint64_time(opt);
  128     stress_check_range("vm-hang", vm_hang,
  129         MIN_VM_HANG, MAX_VM_HANG);
  130     return stress_set_setting("vm-hang", TYPE_ID_UINT64, &vm_hang);
  131 }
  132 
  133 static int stress_set_vm_bytes(const char *opt)
  134 {
  135     size_t vm_bytes;
  136 
  137     vm_bytes = (size_t)stress_get_uint64_byte_memory(opt, 1);
  138     stress_check_range_bytes("vm-bytes", vm_bytes,
  139         MIN_VM_BYTES, MAX_MEM_LIMIT);
  140     return stress_set_setting("vm-bytes", TYPE_ID_SIZE_T, &vm_bytes);
  141 }
  142 
  143 #if defined(MAP_LOCKED) || defined(MAP_POPULATE)
  144 static int stress_set_vm_flags(const int flag)
  145 {
  146     int vm_flags = 0;
  147 
  148     (void)stress_get_setting("vm-flags", &vm_flags);
  149     vm_flags |= flag;
  150     return stress_set_setting("vm-flags", TYPE_ID_INT, &vm_flags);
  151 }
  152 #endif
  153 
  154 static int stress_set_vm_mmap_locked(const char *opt)
  155 {
  156     (void)opt;
  157 
  158 #if defined(MAP_LOCKED)
  159     return stress_set_vm_flags(MAP_LOCKED);
  160 #else
  161     return 0;
  162 #endif
  163 }
  164 
  165 static int stress_set_vm_mmap_populate(const char *opt)
  166 {
  167     (void)opt;
  168 
  169 #if defined(MAP_POPULATE)
  170     return stress_set_vm_flags(MAP_POPULATE);
  171 #else
  172     return 0;
  173 #endif
  174 }
  175 
  176 static int stress_set_vm_madvise(const char *opt)
  177 {
  178     const stress_vm_madvise_info_t *info;
  179 
  180     for (info = vm_madvise_info; info->name; info++) {
  181         if (!strcmp(opt, info->name)) {
  182             stress_set_setting("vm-madvise", TYPE_ID_INT, &info->advice);
  183             return 0;
  184         }
  185     }
  186     (void)fprintf(stderr, "invalid vm-madvise advice '%s', allowed advice options are:", opt);
  187     for (info = vm_madvise_info; info->name; info++) {
  188         (void)fprintf(stderr, " %s", info->name);
  189     }
  190     (void)fprintf(stderr, "\n");
  191     return -1;
  192 }
  193 
  194 static int stress_set_vm_keep(const char *opt)
  195 {
  196     bool vm_keep = true;
  197 
  198     (void)opt;
  199     return stress_set_setting("vm-keep", TYPE_ID_BOOL, &vm_keep);
  200 }
  201 
/*
 *  SET_AND_TEST()
 *	store val through ptr and bump bit_errors if the immediate
 *	read-back differs; ptr is used via volatile-qualified pointers
 *	so the store/load pair is not optimized away
 */
#define SET_AND_TEST(ptr, val, bit_errors)  \
do {                        \
    *ptr = val;             \
    bit_errors += (*ptr != val);        \
} while (0)
  207 
/*
 *  ROR8()
 *	rotate the 8 bit value val right by one bit
 *	(bit 0 wraps around into bit 7)
 */
#define ROR8(val)               \
do {                        \
    uint8_t tmp = val;          \
    const uint8_t bit0 = (uint8_t)((tmp & 1) << 7); \
    tmp >>= 1;              \
    tmp |= bit0;                \
    val = tmp;              \
} while (0)
  216 
/*
 *  INC_LO_NYBBLE()
 *	increment the low nybble of val modulo 16,
 *	leaving the high nybble unchanged
 */
#define INC_LO_NYBBLE(val)          \
do {                        \
    uint8_t lo = (val);         \
    lo += 1;                \
    lo &= 0xf;              \
    (val) = ((val) & 0xf0) | lo;        \
} while (0)
  224 
/*
 *  INC_HI_NYBBLE()
 *	step the high nybble of val modulo 16, leaving the low
 *	nybble unchanged.
 *	NOTE(review): adding 0xf0 is -0x10 modulo 256, so this
 *	actually *decrements* the high nybble despite the macro
 *	name; confirm whether += 0x10 was intended.  (Behavior is
 *	self-consistent as long as writers and verifiers both use
 *	this macro.)
 */
#define INC_HI_NYBBLE(val)          \
do {                        \
    uint8_t hi = (val);         \
    hi += 0xf0;             \
    hi &= 0xf0;             \
    (val) = ((val) & 0x0f) | hi;        \
} while (0)
  232 
  233 #define UNSIGNED_ABS(a, b)          \
  234     ((a) > (b)) ? (a) - (b) : (b) - (a)
  235 
#if INJECT_BIT_ERRORS
/*
 *  inject_random_bit_errors()
 *	for testing purposes, we can insert various faults:
 *	flip/set/clear runs of 1, 2 and 3 adjacent bits at
 *	random offsets in the buffer
 */
static void inject_random_bit_errors(uint8_t *buf, const size_t sz)
{
    int i;

    for (i = 0; i < 8; i++) {
        /* 1 bit errors */
        buf[stress_mwc64() % sz] ^= (1 << i);
        buf[stress_mwc64() % sz] |= (1 << i);
        buf[stress_mwc64() % sz] &= ~(1 << i);
    }

    for (i = 0; i < 7; i++) {
        /* 2 bit errors */
        buf[stress_mwc64() % sz] ^= (3 << i);
        buf[stress_mwc64() % sz] |= (3 << i);
        buf[stress_mwc64() % sz] &= ~(3 << i);
    }

    for (i = 0; i < 6; i++) {
        /* 3 bit errors */
        buf[stress_mwc64() % sz] ^= (7 << i);
        buf[stress_mwc64() % sz] |= (7 << i);
        buf[stress_mwc64() % sz] &= ~(7 << i);
    }
}
#else
/* No-op in normal builds: fault injection compiled out */
static inline void inject_random_bit_errors(uint8_t *buf, const size_t sz)
{
    (void)buf;
    (void)sz;
}
#endif
  274 
  275 
  276 /*
  277  *  stress_vm_check()
  278  *  report back on bit errors found
  279  */
  280 static void stress_vm_check(const char *name, const size_t bit_errors)
  281 {
  282     if (bit_errors && (g_opt_flags & OPT_FLAGS_VERIFY))
  283 #if INJECT_BIT_ERRORS
  284         pr_dbg("%s: detected %zu memory error%s\n",
  285             name, bit_errors, bit_errors == 1 ? "" : "s");
  286 #else
  287         pr_fail("%s: detected %zu memory error%s\n",
  288             name, bit_errors, bit_errors == 1 ? "" : "s");
  289 #endif
  290 }
  291 
  292 /*
  293  *  stress_vm_count_bits8()
  294  *  count number of bits set (K and R)
  295  */
  296 static inline size_t stress_vm_count_bits8(uint8_t v)
  297 {
  298     size_t n;
  299 
  300     for (n = 0; v; n++)
  301         v &= v - 1;
  302 
  303     return n;
  304 }
  305 
  306 /*
  307  *  stress_vm_count_bits()
  308  *  count number of bits set (K and R)
  309  */
  310 static inline size_t stress_vm_count_bits(uint64_t v)
  311 {
  312     size_t n;
  313 
  314     for (n = 0; v; n++)
  315         v &= v - 1;
  316 
  317     return n;
  318 }
  319 
  320 /*
  321  *  stress_vm_moving_inversion()
  322  *  work sequentially through memory setting 8 bytes at at a time
  323  *  with a random value, then check if it is correct, invert it and
  324  *  then check if that is correct.
  325  */
static size_t TARGET_CLONES stress_vm_moving_inversion(
    void *buf,
    void *buf_end,
    const size_t sz,
    const stress_args_t *args,
    const uint64_t max_ops)
{
    uint64_t c = get_counter(args);
    uint32_t w, z;              /* saved PRNG seeds so the random fill can be replayed */
    volatile uint64_t *ptr;
    size_t bit_errors;

    stress_mwc_reseed();
    w = stress_mwc32();
    z = stress_mwc32();

    /* pass 1: fill forwards with a reproducible random stream */
    stress_mwc_seed(w, z);
    for (ptr = (uint64_t *)buf; ptr < (uint64_t *)buf_end; ) {
        *(ptr++) = stress_mwc64();
    }

    /* pass 2: replay the stream, verify each word, store the inverse */
    stress_mwc_seed(w, z);
    for (bit_errors = 0, ptr = (uint64_t *)buf; ptr < (uint64_t *)buf_end; ) {
        uint64_t val = stress_mwc64();

        if (UNLIKELY(*ptr != val))
            bit_errors++;
        *(ptr++) = ~val;
        c++;
    }
    if (UNLIKELY(max_ops && c >= max_ops))
        goto ret;
    if (UNLIKELY(!keep_stressing_flag()))
        goto ret;

    (void)stress_mincore_touch_pages(buf, sz);

    inject_random_bit_errors(buf, sz);

    /* pass 3: verify the inverted data survived (resets bit_errors) */
    stress_mwc_seed(w, z);
    for (bit_errors = 0, ptr = (uint64_t *)buf; ptr < (uint64_t *)buf_end; ) {
        uint64_t val = stress_mwc64();

        if (UNLIKELY(*(ptr++) != ~val))
            bit_errors++;
        c++;
    }
    if (UNLIKELY(max_ops && c >= max_ops))
        goto ret;
    if (UNLIKELY(!keep_stressing_flag()))
        goto ret;

    /* pass 4: refill backwards with a fresh replayable stream */
    stress_mwc_seed(w, z);
    for (ptr = (uint64_t *)buf_end; ptr > (uint64_t *)buf; ) {
        *--ptr = stress_mwc64();
    }
    if (UNLIKELY(!keep_stressing_flag()))
        goto ret;

    inject_random_bit_errors(buf, sz);

    /* pass 5: walk backwards, verify and invert (errors accumulate from pass 3) */
    (void)stress_mincore_touch_pages(buf, sz);
    stress_mwc_seed(w, z);
    for (ptr = (uint64_t *)buf_end; ptr > (uint64_t *)buf; ) {
        uint64_t val = stress_mwc64();

        if (UNLIKELY(*--ptr != val))
            bit_errors++;
        *ptr = ~val;
        c++;
    }
    if (UNLIKELY(max_ops && c >= max_ops))
        goto ret;
    if (UNLIKELY(!keep_stressing_flag()))
        goto ret;

    /* pass 6: final backwards verify of the inverted data */
    stress_mwc_seed(w, z);
    for (ptr = (uint64_t *)buf_end; ptr > (uint64_t *)buf; ) {
        uint64_t val = stress_mwc64();

        if (UNLIKELY(*--ptr != ~val))
            bit_errors++;
        c++;
    }
    if (UNLIKELY(max_ops && c >= max_ops))
        goto ret;
    if (UNLIKELY(!keep_stressing_flag()))
        goto ret;

ret:
    stress_vm_check("moving inversion", bit_errors);
    if (UNLIKELY(max_ops && c >= max_ops))
        c = max_ops;    /* clamp so the bogo-op accounting is exact */
    set_counter(args, c);

    return bit_errors;
}
  423 
  424 /*
  425  *  stress_vm_modulo_x()
  426  *  set every 23rd byte to a random pattern and then set
  427  *  all the other bytes to the complement of this. Check
  428  *  that the random patterns are still set.
  429  */
  430 static size_t TARGET_CLONES stress_vm_modulo_x(
  431     void *buf,
  432     void *buf_end,
  433     const size_t sz,
  434     const stress_args_t *args,
  435     const uint64_t max_ops)
  436 {
  437     uint32_t i, j;
  438     const uint32_t stride = 23; /* Small prime to hit cache */
  439     uint8_t pattern, compliment;
  440     volatile uint8_t *ptr;
  441     size_t bit_errors = 0;
  442     uint64_t c = get_counter(args);
  443 
  444     stress_mwc_reseed();
  445     pattern = stress_mwc8();
  446     compliment = ~pattern;
  447 
  448     for (i = 0; i < stride; i++) {
  449         for (ptr = (uint8_t *)buf + i; ptr < (uint8_t *)buf_end; ptr += stride) {
  450             *ptr = pattern;
  451         }
  452         if (UNLIKELY(!keep_stressing_flag()))
  453             goto ret;
  454         for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += stride) {
  455             for (j = 0; j < i && ptr < (uint8_t *)buf_end; j++) {
  456                 *ptr++ = compliment;
  457                 c++;
  458             }
  459             if (UNLIKELY(!keep_stressing_flag()))
  460                 goto ret;
  461             ptr++;
  462             for (j = i + 1; j < stride && ptr < (uint8_t *)buf_end; j++) {
  463                 *ptr++ = compliment;
  464                 c++;
  465             }
  466             if (UNLIKELY(!keep_stressing_flag()))
  467                 goto ret;
  468         }
  469         inject_random_bit_errors(buf, sz);
  470 
  471         for (ptr = (uint8_t *)buf + i; ptr < (uint8_t *)buf_end; ptr += stride) {
  472             if (UNLIKELY(*ptr != pattern))
  473                 bit_errors++;
  474         }
  475         if (UNLIKELY(!keep_stressing_flag()))
  476             break;
  477         if (UNLIKELY(max_ops && c >= max_ops))
  478             break;
  479     }
  480 
  481     if (UNLIKELY(max_ops && c >= max_ops))
  482         c = max_ops;
  483     stress_vm_check("modulo X", bit_errors);
  484 ret:
  485     set_counter(args, c);
  486 
  487     return bit_errors;
  488 }
  489 
  490 /*
  491  *  stress_vm_walking_one_data()
  492  *  for each byte, walk through each data line setting them to high
  493  *  setting each bit to see if none of the lines are stuck
  494  */
  495 static size_t TARGET_CLONES stress_vm_walking_one_data(
  496     void *buf,
  497     void *buf_end,
  498     const size_t sz,
  499     const stress_args_t *args,
  500     const uint64_t max_ops)
  501 {
  502     size_t bit_errors = 0;
  503     volatile uint8_t *ptr;
  504     uint64_t c = get_counter(args);
  505 
  506     (void)sz;
  507 
  508     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr++) {
  509         SET_AND_TEST(ptr, 0x01, bit_errors);
  510         SET_AND_TEST(ptr, 0x02, bit_errors);
  511         SET_AND_TEST(ptr, 0x04, bit_errors);
  512         SET_AND_TEST(ptr, 0x08, bit_errors);
  513         SET_AND_TEST(ptr, 0x10, bit_errors);
  514         SET_AND_TEST(ptr, 0x20, bit_errors);
  515         SET_AND_TEST(ptr, 0x40, bit_errors);
  516         SET_AND_TEST(ptr, 0x80, bit_errors);
  517         c++;
  518         if (UNLIKELY(max_ops && c >= max_ops))
  519             break;
  520         if (UNLIKELY(!keep_stressing_flag()))
  521             break;
  522     }
  523     stress_vm_check("walking one (data)", bit_errors);
  524     set_counter(args, c);
  525 
  526     return bit_errors;
  527 }
  528 
  529 /*
  530  *  stress_vm_walking_zero_data()
  531  *  for each byte, walk through each data line setting them to low
  532  *  setting each bit to see if none of the lines are stuck
  533  */
  534 static size_t TARGET_CLONES stress_vm_walking_zero_data(
  535     void *buf,
  536     void *buf_end,
  537     const size_t sz,
  538     const stress_args_t *args,
  539     const uint64_t max_ops)
  540 {
  541     size_t bit_errors = 0;
  542     volatile uint8_t *ptr;
  543     uint64_t c = get_counter(args);
  544 
  545     (void)sz;
  546 
  547     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr++) {
  548         SET_AND_TEST(ptr, 0xfe, bit_errors);
  549         SET_AND_TEST(ptr, 0xfd, bit_errors);
  550         SET_AND_TEST(ptr, 0xfb, bit_errors);
  551         SET_AND_TEST(ptr, 0xf7, bit_errors);
  552         SET_AND_TEST(ptr, 0xef, bit_errors);
  553         SET_AND_TEST(ptr, 0xdf, bit_errors);
  554         SET_AND_TEST(ptr, 0xbf, bit_errors);
  555         SET_AND_TEST(ptr, 0x7f, bit_errors);
  556         c++;
  557         if (UNLIKELY(max_ops && c >= max_ops))
  558             break;
  559         if (UNLIKELY(!keep_stressing_flag()))
  560             break;
  561     }
  562     stress_vm_check("walking zero (data)", bit_errors);
  563     set_counter(args, c);
  564 
  565     return bit_errors;
  566 }
  567 
  568 /*
  569  *  stress_vm_walking_one_addr()
  570  *  work through a range of addresses setting each address bit in
  571  *  the given memory mapped range to high to see if any address bits
  572  *  are stuck.
  573  */
  574 static size_t TARGET_CLONES stress_vm_walking_one_addr(
  575     void *buf,
  576     void *buf_end,
  577     const size_t sz,
  578     const stress_args_t *args,
  579     const uint64_t max_ops)
  580 {
  581     volatile uint8_t *ptr;
  582     uint8_t d1 = 0, d2 = ~d1;
  583     size_t bit_errors = 0;
  584     size_t tests = 0;
  585     uint64_t c = get_counter(args);
  586 
  587     (void)memset(buf, d1, sz);
  588     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += 256) {
  589         uint16_t i;
  590         uint64_t mask;
  591 
  592         *ptr = d1;
  593         for (mask = 1, i = 1; i < 64; i++) {
  594             uintptr_t uintptr = ((uintptr_t)ptr) ^ mask;
  595             uint8_t *addr = (uint8_t *)uintptr;
  596             if ((addr < (uint8_t *)buf) || (addr >= (uint8_t *)buf_end) || (addr == ptr))
  597                 continue;
  598             *addr = d2;
  599             tests++;
  600             if (UNLIKELY(*ptr != d1)) /* cppcheck-suppress knownConditionTrueFalse */
  601                 bit_errors++;
  602             mask <<= 1;
  603         }
  604         c++;
  605         if (UNLIKELY(max_ops && c >= max_ops))
  606             break;
  607         if (UNLIKELY(!keep_stressing_flag()))
  608             break;
  609     }
  610     stress_vm_check("walking one (address)", bit_errors);
  611     set_counter(args, c);
  612 
  613     return bit_errors;
  614 }
  615 
  616 /*
  617  *  stress_vm_walking_zero_addr()
  618  *  work through a range of addresses setting each address bit in
  619  *  the given memory mapped range to low to see if any address bits
  620  *  are stuck.
  621  */
  622 static size_t TARGET_CLONES stress_vm_walking_zero_addr(
  623     void *buf,
  624     void *buf_end,
  625     const size_t sz,
  626     const stress_args_t *args,
  627     const uint64_t max_ops)
  628 {
  629     volatile uint8_t *ptr;
  630     uint8_t d1 = 0, d2 = ~d1;
  631     size_t bit_errors = 0;
  632     size_t tests = 0;
  633     uint64_t sz_mask;
  634     uint64_t c = get_counter(args);
  635 
  636     for (sz_mask = 1; sz_mask < sz; sz_mask <<= 1)
  637         ;
  638 
  639     sz_mask--;
  640 
  641     (void)memset(buf, d1, sz);
  642     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += 256) {
  643         uint16_t i;
  644         uint64_t mask;
  645 
  646         *ptr = d1;
  647         for (mask = 1, i = 1; i < 64; i++) {
  648             uintptr_t uintptr = ((uintptr_t)ptr) ^ (~mask & sz_mask);
  649             uint8_t *addr = (uint8_t *)uintptr;
  650             if ((addr < (uint8_t *)buf) || (addr >= (uint8_t *)buf_end) || (addr == ptr))
  651                 continue;
  652             *addr = d2;
  653             tests++;
  654             if (UNLIKELY(*ptr != d1)) /* cppcheck-suppress knownConditionTrueFalse */
  655                 bit_errors++;
  656             mask <<= 1;
  657         }
  658         c++;
  659         if (UNLIKELY(max_ops && c >= max_ops))
  660             break;
  661         if (UNLIKELY(!keep_stressing_flag()))
  662             break;
  663     }
  664     stress_vm_check("walking zero (address)", bit_errors);
  665     set_counter(args, c);
  666 
  667     return bit_errors;
  668 }
  669 
  670 /*
  671  *  stress_vm_gray()
  672  *  fill all of memory with a gray code and check that
  673  *  all the bits are set correctly. gray codes just change
  674  *  one bit at a time.
  675  */
  676 static size_t TARGET_CLONES stress_vm_gray(
  677     void *buf,
  678     void *buf_end,
  679     const size_t sz,
  680     const stress_args_t *args,
  681     const uint64_t max_ops)
  682 {
  683     static uint8_t val;
  684     uint8_t v;
  685     volatile uint8_t *ptr;
  686     size_t bit_errors = 0;
  687     const uint64_t c_orig = get_counter(args);
  688     uint64_t c;
  689 
  690     for (c = c_orig, v = val, ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr++, v++) {
  691         if (UNLIKELY(!keep_stressing_flag()))
  692             return 0;
  693         *ptr = (v >> 1) ^ v;
  694     }
  695     (void)stress_mincore_touch_pages(buf, sz);
  696     inject_random_bit_errors(buf, sz);
  697 
  698     for (v = val, ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr++, v++) {
  699         if (UNLIKELY(!keep_stressing_flag()))
  700             break;
  701         if (UNLIKELY(*ptr != ((v >> 1) ^ v)))
  702             bit_errors++;
  703         c++;
  704         if (UNLIKELY(max_ops && c >= max_ops))
  705             break;
  706     }
  707     val++;
  708 
  709     stress_vm_check("gray code", bit_errors);
  710     set_counter(args, c);
  711 
  712     return bit_errors;
  713 }
  714 
  715 /*
  716  *  stress_vm_incdec()
  717  *  work through memory incrementing it and then decrementing
  718  *  it by a value that changes on each test iteration.
  719  *  Check that the memory has not changed by the inc + dec
  720  *  operations.
  721  */
  722 static size_t TARGET_CLONES stress_vm_incdec(
  723     void *buf,
  724     void *buf_end,
  725     const size_t sz,
  726     const stress_args_t *args,
  727     const uint64_t max_ops)
  728 {
  729     static uint8_t val = 0;
  730     volatile uint8_t *ptr;
  731     size_t bit_errors = 0;
  732     uint64_t c = get_counter(args);
  733 
  734     val++;
  735     (void)memset(buf, 0x00, sz);
  736 
  737     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr++) {
  738         *ptr += val;
  739     }
  740     (void)stress_mincore_touch_pages(buf, sz);
  741     inject_random_bit_errors(buf, sz);
  742     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr++) {
  743         *ptr -= val;
  744     }
  745     c += sz;
  746     if (UNLIKELY(max_ops && c >= max_ops))
  747         c = max_ops;
  748 
  749     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr++) {
  750         if (UNLIKELY(*ptr != 0))
  751             bit_errors++;
  752     }
  753 
  754     stress_vm_check("incdec code", bit_errors);
  755     set_counter(args, c);
  756 
  757     return bit_errors;
  758 }
  759 
  760 /*
  761  *  stress_vm_prime_incdec()
  762  *  walk through memory in large prime steps incrementing
  763  *  bytes and then re-walk again decrementing; then sanity
  764  *  check.
  765  */
  766 static size_t TARGET_CLONES stress_vm_prime_incdec(
  767     void *buf,
  768     void *buf_end,
  769     const size_t sz,
  770     const stress_args_t *args,
  771     const uint64_t max_ops)
  772 {
  773     static uint8_t val = 0;
  774     volatile uint8_t *ptr = buf;
  775     size_t bit_errors = 0, i;
  776     const uint64_t prime = PRIME_64;
  777     uint64_t j, c = get_counter(args);
  778 
  779 #if SIZE_MAX > UINT32_MAX
  780     /* Unlikely.. */
  781     if (UNLIKELY(sz > (1ULL << 63)))
  782         return 0;
  783 #endif
  784 
  785     (void)memset(buf, 0x00, sz);
  786 
  787     for (i = 0; i < sz; i++) {
  788         ptr[i] += val;
  789         c++;
  790         if (UNLIKELY(max_ops && c >= max_ops))
  791             break;
  792     }
  793     (void)stress_mincore_touch_pages(buf, sz);
  794     inject_random_bit_errors(buf, sz);
  795     /*
  796      *  Step through memory in prime sized steps
  797      *  in a totally sub-optimal way to exercise
  798      *  memory and cache stalls
  799      */
  800     for (i = 0, j = prime; i < sz; i++, j += prime) {
  801         ptr[j % sz] -= val;
  802         c++;
  803         if (UNLIKELY(max_ops && c >= max_ops))
  804             break;
  805     }
  806 
  807     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr++) {
  808         if (UNLIKELY(*ptr != 0))
  809             bit_errors++;
  810     }
  811 
  812     stress_vm_check("prime-incdec", bit_errors);
  813     set_counter(args, c);
  814 
  815     return bit_errors;
  816 }
  817 
  818 /*
  819  *  stress_vm_swap()
  820  *  forward swap and then reverse swap chunks of memory
  821  *  and see that nothing got corrupted.
  822  */
  823 static size_t TARGET_CLONES stress_vm_swap(
  824     void *buf,
  825     void *buf_end,
  826     const size_t sz,
  827     const stress_args_t *args,
  828     const uint64_t max_ops)
  829 {
  830     const size_t chunk_sz = 64, chunks = sz / chunk_sz;
  831     uint64_t c = get_counter(args);
  832     uint32_t w1, z1;
  833     uint8_t *ptr;
  834     size_t bit_errors = 0, i;
  835     size_t *swaps;
  836 
  837     stress_mwc_reseed();
  838     z1 = stress_mwc32();
  839     w1 = stress_mwc32();
  840 
  841     if ((swaps = calloc(chunks, sizeof(*swaps))) == NULL) {
  842         pr_fail("%s: calloc failed on vm_swap\n", args->name);
  843         return 0;
  844     }
  845 
  846     for (i = 0; i < chunks; i++) {
  847         swaps[i] = (stress_mwc64() % chunks) * chunk_sz;
  848     }
  849 
  850     stress_mwc_seed(w1, z1);
  851     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += chunk_sz) {
  852         uint8_t val = stress_mwc8();
  853         (void)memset((void *)ptr, val, chunk_sz);
  854     }
  855 
  856     /* Forward swaps */
  857     for (i = 0, ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += chunk_sz, i++) {
  858         size_t offset = swaps[i];
  859 
  860         volatile uint8_t *dst = (volatile uint8_t *)buf + offset;
  861         volatile uint8_t *src = (volatile uint8_t *)ptr;
  862         const volatile uint8_t *src_end = src + chunk_sz;
  863 
  864         while (src < src_end) {
  865             uint8_t tmp = *src;
  866             *src++ = *dst;
  867             *dst++ = tmp;
  868         }
  869         c++;
  870         if (UNLIKELY(max_ops && c >= max_ops))
  871             goto abort;
  872         if (UNLIKELY(!keep_stressing_flag()))
  873             goto abort;
  874     }
  875     /* Reverse swaps */
  876     for (i = chunks - 1, ptr = (uint8_t *)buf_end - chunk_sz; ptr >= (uint8_t *)buf; ptr -= chunk_sz, i--) {
  877         size_t offset = swaps[i];
  878 
  879         volatile uint8_t *dst = (volatile uint8_t *)buf + offset;
  880         volatile uint8_t *src = (volatile uint8_t *)ptr;
  881         const volatile uint8_t *src_end = src + chunk_sz;
  882 
  883         while (src < src_end) {
  884             uint8_t tmp = *src;
  885             *src++ = *dst;
  886             *dst++ = tmp;
  887         }
  888         c++;
  889         if (UNLIKELY(max_ops && c >= max_ops))
  890             goto abort;
  891         if (UNLIKELY(!keep_stressing_flag()))
  892             goto abort;
  893     }
  894 
  895     (void)stress_mincore_touch_pages(buf, sz);
  896     inject_random_bit_errors(buf, sz);
  897 
  898     stress_mwc_seed(w1, z1);
  899     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += chunk_sz) {
  900         volatile uint8_t *p = (volatile uint8_t *)ptr;
  901         const volatile uint8_t *p_end = (volatile uint8_t *)ptr + chunk_sz;
  902         uint8_t val = stress_mwc8();
  903 
  904         while (p < p_end) {
  905             if (UNLIKELY(*p != val))
  906                 bit_errors++;
  907             p++;
  908         }
  909         if (UNLIKELY(!keep_stressing_flag()))
  910             break;
  911     }
  912 abort:
  913     free(swaps);
  914     stress_vm_check("swap bytes", bit_errors);
  915     set_counter(args, c);
  916 
  917     return bit_errors;
  918 }
  919 
  920 /*
  921  *  stress_vm_rand_set()
  922  *  fill 64 bit chunks of memory with a random pattern and
  923  *  and then sanity check they are all set correctly.
  924  */
static size_t TARGET_CLONES stress_vm_rand_set(
    void *buf,
    void *buf_end,
    const size_t sz,
    const stress_args_t *args,
    const uint64_t max_ops)
{
    volatile uint8_t *ptr;
    const size_t chunk_sz = sizeof(*ptr) * 8;   /* 8 byte chunks */
    uint64_t c = get_counter(args);
    uint32_t w, z;      /* saved PRNG seeds so the fill can be replayed */
    size_t bit_errors = 0;

    stress_mwc_reseed();
    w = stress_mwc32();
    z = stress_mwc32();

    /* fill each 8 byte chunk with one random byte value (unrolled) */
    stress_mwc_seed(w, z);
    for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += chunk_sz) {
        uint8_t val = stress_mwc8();

        *(ptr + 0) = val;
        *(ptr + 1) = val;
        *(ptr + 2) = val;
        *(ptr + 3) = val;
        *(ptr + 4) = val;
        *(ptr + 5) = val;
        *(ptr + 6) = val;
        *(ptr + 7) = val;
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!keep_stressing_flag()))
            goto abort;
    }

    (void)stress_mincore_touch_pages(buf, sz);
    inject_random_bit_errors(buf, sz);

    /* replay the PRNG stream and verify each chunk (unrolled) */
    stress_mwc_seed(w, z);
    for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += chunk_sz) {
        uint8_t val = stress_mwc8();

        bit_errors += (*(ptr + 0) != val);
        bit_errors += (*(ptr + 1) != val);
        bit_errors += (*(ptr + 2) != val);
        bit_errors += (*(ptr + 3) != val);
        bit_errors += (*(ptr + 4) != val);
        bit_errors += (*(ptr + 5) != val);
        bit_errors += (*(ptr + 6) != val);
        bit_errors += (*(ptr + 7) != val);
        if (UNLIKELY(!keep_stressing_flag()))
            break;
    }
abort:
    stress_vm_check("rand-set", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
  985 
  986 /*
  987  *  stress_vm_ror()
  988  *  fill memory with a random pattern and then rotate
  989  *  right all the bits in an 8 byte (64 bit) chunk
  990  *  and then sanity check they are all shifted at the
  991  *  end.
  992  */
  993 static size_t TARGET_CLONES stress_vm_ror(
  994     void *buf,
  995     void *buf_end,
  996     const size_t sz,
  997     const stress_args_t *args,
  998     const uint64_t max_ops)
  999 {
 1000     volatile uint8_t *ptr;
 1001     uint64_t c = get_counter(args);
 1002     uint32_t w, z;
 1003     size_t bit_errors = 0;
 1004     const size_t chunk_sz = sizeof(*ptr) * 8;
 1005 
 1006     stress_mwc_reseed();
 1007     w = stress_mwc32();
 1008     z = stress_mwc32();
 1009 
 1010     stress_mwc_seed(w, z);
 1011     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += chunk_sz) {
 1012         uint8_t val = stress_mwc8();
 1013 
 1014         *(ptr + 0) = val;
 1015         *(ptr + 1) = val;
 1016         *(ptr + 2) = val;
 1017         *(ptr + 3) = val;
 1018         *(ptr + 4) = val;
 1019         *(ptr + 5) = val;
 1020         *(ptr + 6) = val;
 1021         *(ptr + 7) = val;
 1022         c++;
 1023         if (UNLIKELY(max_ops && c >= max_ops))
 1024             goto abort;
 1025         if (UNLIKELY(!keep_stressing_flag()))
 1026             goto abort;
 1027     }
 1028     (void)stress_mincore_touch_pages(buf, sz);
 1029 
 1030     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += chunk_sz) {
 1031         ROR8(*(ptr + 0));
 1032         ROR8(*(ptr + 1));
 1033         ROR8(*(ptr + 2));
 1034         ROR8(*(ptr + 3));
 1035         ROR8(*(ptr + 4));
 1036         ROR8(*(ptr + 5));
 1037         ROR8(*(ptr + 6));
 1038         ROR8(*(ptr + 7));
 1039 
 1040         c++;
 1041         if (UNLIKELY(max_ops && c >= max_ops))
 1042             goto abort;
 1043         if (UNLIKELY(!keep_stressing_flag()))
 1044             goto abort;
 1045     }
 1046     (void)stress_mincore_touch_pages(buf, sz);
 1047 
 1048     inject_random_bit_errors(buf, sz);
 1049 
 1050     stress_mwc_seed(w, z);
 1051     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += chunk_sz) {
 1052         uint8_t val = stress_mwc8();
 1053         ROR8(val);
 1054 
 1055         bit_errors += (*(ptr + 0) != val);
 1056         bit_errors += (*(ptr + 1) != val);
 1057         bit_errors += (*(ptr + 2) != val);
 1058         bit_errors += (*(ptr + 3) != val);
 1059         bit_errors += (*(ptr + 4) != val);
 1060         bit_errors += (*(ptr + 5) != val);
 1061         bit_errors += (*(ptr + 6) != val);
 1062         bit_errors += (*(ptr + 7) != val);
 1063         if (UNLIKELY(!keep_stressing_flag()))
 1064             break;
 1065     }
 1066 abort:
 1067     stress_vm_check("ror", bit_errors);
 1068     set_counter(args, c);
 1069 
 1070     return bit_errors;
 1071 }
 1072 
 1073 /*
 1074  *  stress_vm_flip()
 1075  *  set all memory to random pattern, then work through
 *  memory 8 times flipping bits 0..7 one by one to eventually
 1077  *  invert all the bits.  Check if the final bits are all
 1078  *  correctly inverted.
 1079  */
static size_t TARGET_CLONES stress_vm_flip(
    void *buf,
    void *buf_end,
    const size_t sz,
    const stress_args_t *args,
    const uint64_t max_ops)
{
    volatile uint8_t *ptr;
    uint8_t bit = 0x03;	/* rotated once before first use, so first XOR mask is 0x81 */
    uint64_t c = get_counter(args);
    uint32_t w, z;	/* saved PRNG seed pair, replayed in the verify pass */
    size_t bit_errors = 0, i;
    const size_t chunk_sz = sizeof(*ptr) * 8;

    stress_mwc_reseed();
    w = stress_mwc32();
    z = stress_mwc32();

    /*
     *  Fill pass: each 8 byte chunk holds one random value rotated
     *  right by one extra bit per byte position.
     */
    stress_mwc_seed(w, z);
    for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += chunk_sz) {
        uint8_t val = stress_mwc8();

        *(ptr + 0) = val;
        ROR8(val);
        *(ptr + 1) = val;
        ROR8(val);
        *(ptr + 2) = val;
        ROR8(val);
        *(ptr + 3) = val;
        ROR8(val);
        *(ptr + 4) = val;
        ROR8(val);
        *(ptr + 5) = val;
        ROR8(val);
        *(ptr + 6) = val;
        ROR8(val);
        *(ptr + 7) = val;
        c++;
        if (UNLIKELY(max_ops && c >= max_ops))
            goto abort;
        if (UNLIKELY(!keep_stressing_flag()))
            goto abort;
    }
    (void)stress_mincore_touch_pages(buf, sz);

    /*
     *  Flip pass: 8 sweeps, each XORing a different (rotating) 2-bit
     *  mask into every byte; after all 8 sweeps each bit position has
     *  been flipped an even number of times, restoring the original
     *  pattern... except the combined effect is a full inversion per
     *  the rotating 0x03 mask, which the verify pass accounts for by
     *  replaying the same PRNG values.
     */
    for (i = 0; i < 8; i++) {
        ROR8(bit);
        for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += chunk_sz) {
            *(ptr + 0) ^= bit;
            *(ptr + 1) ^= bit;
            *(ptr + 2) ^= bit;
            *(ptr + 3) ^= bit;
            *(ptr + 4) ^= bit;
            *(ptr + 5) ^= bit;
            *(ptr + 6) ^= bit;
            *(ptr + 7) ^= bit;
            c++;
            if (UNLIKELY(max_ops && c >= max_ops))
                goto abort;
            if (UNLIKELY(!keep_stressing_flag()))
                goto abort;
        }
        (void)stress_mincore_touch_pages(buf, sz);
    }

    inject_random_bit_errors(buf, sz);

    /* Verify pass: regenerate the same per-chunk values and compare */
    stress_mwc_seed(w, z);
    for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += chunk_sz) {
        uint8_t val = stress_mwc8();

        bit_errors += (*(ptr + 0) != val);
        ROR8(val);
        bit_errors += (*(ptr + 1) != val);
        ROR8(val);
        bit_errors += (*(ptr + 2) != val);
        ROR8(val);
        bit_errors += (*(ptr + 3) != val);
        ROR8(val);
        bit_errors += (*(ptr + 4) != val);
        ROR8(val);
        bit_errors += (*(ptr + 5) != val);
        ROR8(val);
        bit_errors += (*(ptr + 6) != val);
        ROR8(val);
        bit_errors += (*(ptr + 7) != val);
        if (UNLIKELY(!keep_stressing_flag()))
            break;
    }

abort:
    stress_vm_check("flip", bit_errors);
    set_counter(args, c);

    return bit_errors;
}
 1176 
 1177 /*
 1178  *  stress_vm_zero_one()
 1179  *  set all memory to zero and see if any bits are stuck at one and
 1180  *  set all memory to one and see if any bits are stuck at zero
 1181  */
 1182 static size_t TARGET_CLONES stress_vm_zero_one(
 1183     void *buf,
 1184     void *buf_end,
 1185     const size_t sz,
 1186     const stress_args_t *args,
 1187     const uint64_t max_ops)
 1188 {
 1189     volatile uint64_t *ptr;
 1190     uint64_t c = get_counter(args);
 1191     size_t bit_errors = 0;
 1192 
 1193     (void)max_ops;
 1194 
 1195     (void)memset(buf, 0x00, sz);
 1196     (void)stress_mincore_touch_pages(buf, sz);
 1197     inject_random_bit_errors(buf, sz);
 1198     c += sz / 8;
 1199 
 1200     for (ptr = (uint64_t *)buf; ptr < (uint64_t *)buf_end; ptr += 8) {
 1201         bit_errors += stress_vm_count_bits(*(ptr + 0));
 1202         bit_errors += stress_vm_count_bits(*(ptr + 1));
 1203         bit_errors += stress_vm_count_bits(*(ptr + 2));
 1204         bit_errors += stress_vm_count_bits(*(ptr + 3));
 1205         bit_errors += stress_vm_count_bits(*(ptr + 4));
 1206         bit_errors += stress_vm_count_bits(*(ptr + 5));
 1207         bit_errors += stress_vm_count_bits(*(ptr + 6));
 1208         bit_errors += stress_vm_count_bits(*(ptr + 7));
 1209 
 1210         if (UNLIKELY(!keep_stressing_flag()))
 1211             goto abort;
 1212     }
 1213 
 1214     (void)memset(buf, 0xff, sz);
 1215     (void)stress_mincore_touch_pages(buf, sz);
 1216     inject_random_bit_errors(buf, sz);
 1217     c += sz / 8;
 1218 
 1219     for (ptr = (uint64_t *)buf; ptr < (uint64_t *)buf_end; ptr += 8) {
 1220         bit_errors += stress_vm_count_bits(~*(ptr + 0));
 1221         bit_errors += stress_vm_count_bits(~*(ptr + 1));
 1222         bit_errors += stress_vm_count_bits(~*(ptr + 2));
 1223         bit_errors += stress_vm_count_bits(~*(ptr + 3));
 1224         bit_errors += stress_vm_count_bits(~*(ptr + 4));
 1225         bit_errors += stress_vm_count_bits(~*(ptr + 5));
 1226         bit_errors += stress_vm_count_bits(~*(ptr + 6));
 1227         bit_errors += stress_vm_count_bits(~*(ptr + 7));
 1228 
 1229         if (UNLIKELY(!keep_stressing_flag()))
 1230             break;
 1231     }
 1232 abort:
 1233     stress_vm_check("zero-one", bit_errors);
 1234     set_counter(args, c);
 1235 
 1236     return bit_errors;
 1237 }
 1238 
 1239 /*
 *  stress_vm_galpat_zero()
 1241  *  galloping pattern. Set all bits to zero and flip a few
 1242  *  random bits to one.  Check if this one is pulled down
 1243  *  or pulls its neighbours up.
 1244  */
static size_t TARGET_CLONES stress_vm_galpat_zero(
    void *buf,
    void *buf_end,
    const size_t sz,
    const stress_args_t *args,
    const uint64_t max_ops)
{
    volatile uint64_t *ptr;
    size_t i, bit_errors = 0, bits_set = 0;
    size_t bits_bad = sz / 4096;	/* number of single bits to set, ~1 per 4K */
    uint64_t c = get_counter(args);

    (void)memset(buf, 0x00, sz);

    stress_mwc_reseed();

    /*
     *  Set bits_bad single bits at random byte offsets; retry until a
     *  still-zero byte is found so no two bits land in the same byte.
     *  NOTE(review): the bit index is masked with 3, so only bits 0..3
     *  are ever chosen — confirm this is intended rather than & 7.
     */
    for (i = 0; i < bits_bad; i++) {
        for (;;) {
            size_t offset = stress_mwc64() % sz;
            uint8_t bit = stress_mwc32() & 3;
            register uint8_t *ptr8 = (uint8_t *)buf + offset;

            if (!*ptr8) {
                *ptr8 |= (1 << bit);
                break;
            }
        }
    }
    (void)stress_mincore_touch_pages(buf, sz);
    inject_random_bit_errors(buf, sz);

    /* Count every set bit in the buffer; total should equal bits_bad */
    for (ptr = (uint64_t *)buf; ptr < (uint64_t *)buf_end; ptr += 8) {
        bits_set += stress_vm_count_bits(*(ptr + 0));
        bits_set += stress_vm_count_bits(*(ptr + 1));
        bits_set += stress_vm_count_bits(*(ptr + 2));
        bits_set += stress_vm_count_bits(*(ptr + 3));
        bits_set += stress_vm_count_bits(*(ptr + 4));
        bits_set += stress_vm_count_bits(*(ptr + 5));
        bits_set += stress_vm_count_bits(*(ptr + 6));
        bits_set += stress_vm_count_bits(*(ptr + 7));

        c++;
        if (UNLIKELY(!keep_stressing_flag()))
            goto ret;
    }

    /* error count is the absolute difference from the expected tally */
    if (bits_set != bits_bad)
        bit_errors += UNSIGNED_ABS(bits_set, bits_bad);

    stress_vm_check("galpat-zero", bit_errors);
ret:
    if (UNLIKELY(max_ops && c >= max_ops))
        c = max_ops;
    set_counter(args, c);

    return bit_errors;
}
 1302 
 1303 /*
 1304  *  stress_vm_galpat_one()
 1305  *  galloping pattern. Set all bits to one and flip a few
 1306  *  random bits to zero.  Check if this zero is pulled up
 1307  *  or pulls its neighbours down.
 1308  */
static size_t TARGET_CLONES stress_vm_galpat_one(
    void *buf,
    void *buf_end,
    const size_t sz,
    const stress_args_t *args,
    const uint64_t max_ops)
{
    volatile uint64_t *ptr;
    size_t i, bit_errors = 0, bits_set = 0;
    size_t bits_bad = sz / 4096;	/* number of single bits to clear, ~1 per 4K */
    uint64_t c = get_counter(args);

    (void)memset(buf, 0xff, sz);

    stress_mwc_reseed();

    /*
     *  Clear bits_bad single bits at random byte offsets; retry until a
     *  still-0xff byte is found so no two cleared bits share a byte.
     *  NOTE(review): the bit index is masked with 3, so only bits 0..3
     *  are ever chosen — confirm this is intended rather than & 7.
     */
    for (i = 0; i < bits_bad; i++) {
        for (;;) {
            size_t offset = stress_mwc64() % sz;
            uint8_t bit = stress_mwc32() & 3;
            register uint8_t *ptr8 = (uint8_t *)buf + offset;

            if (*ptr8 == 0xff) {
                *ptr8 &= ~(1 << bit);
                break;
            }
        }
    }
    (void)stress_mincore_touch_pages(buf, sz);
    inject_random_bit_errors(buf, sz);

    /* Count every clear bit (via complement); total should be bits_bad */
    for (ptr = (uint64_t *)buf; ptr < (uint64_t *)buf_end; ptr += 8) {
        bits_set += stress_vm_count_bits(~(*(ptr + 0)));
        bits_set += stress_vm_count_bits(~(*(ptr + 1)));
        bits_set += stress_vm_count_bits(~(*(ptr + 2)));
        bits_set += stress_vm_count_bits(~(*(ptr + 3)));
        bits_set += stress_vm_count_bits(~(*(ptr + 4)));
        bits_set += stress_vm_count_bits(~(*(ptr + 5)));
        bits_set += stress_vm_count_bits(~(*(ptr + 6)));
        bits_set += stress_vm_count_bits(~(*(ptr + 7)));

        c++;
        if (UNLIKELY(!keep_stressing_flag()))
            goto ret;
    }

    /* error count is the absolute difference from the expected tally */
    if (bits_set != bits_bad)
        bit_errors += UNSIGNED_ABS(bits_set, bits_bad);

    stress_vm_check("galpat-one", bit_errors);
ret:
    if (UNLIKELY(max_ops && c >= max_ops))
        c = max_ops;
    set_counter(args, c);

    return bit_errors;
}
 1366 
 1367 /*
 1368  *  stress_vm_inc_nybble()
 *  work through memory and increment lower nybbles by
 *  1 and upper nybbles by 0xf and sanity check each byte.
 1371  */
 1372 static size_t TARGET_CLONES stress_vm_inc_nybble(
 1373     void *buf,
 1374     void *buf_end,
 1375     const size_t sz,
 1376     const stress_args_t *args,
 1377     const uint64_t max_ops)
 1378 {
 1379     static uint8_t val = 0;
 1380     volatile uint8_t *ptr;
 1381     size_t bit_errors = 0;
 1382     uint64_t c = get_counter(args);
 1383 
 1384     (void)memset(buf, val, sz);
 1385     INC_LO_NYBBLE(val);
 1386     INC_HI_NYBBLE(val);
 1387 
 1388     stress_mwc_reseed();
 1389     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += 8) {
 1390         INC_LO_NYBBLE(*(ptr + 0));
 1391         INC_LO_NYBBLE(*(ptr + 1));
 1392         INC_LO_NYBBLE(*(ptr + 2));
 1393         INC_LO_NYBBLE(*(ptr + 3));
 1394         INC_LO_NYBBLE(*(ptr + 4));
 1395         INC_LO_NYBBLE(*(ptr + 5));
 1396         INC_LO_NYBBLE(*(ptr + 6));
 1397         INC_LO_NYBBLE(*(ptr + 7));
 1398         c++;
 1399         if (UNLIKELY(max_ops && c >= max_ops))
 1400             goto abort;
 1401         if (UNLIKELY(!keep_stressing_flag()))
 1402             goto abort;
 1403     }
 1404 
 1405     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += 8) {
 1406         INC_HI_NYBBLE(*(ptr + 0));
 1407         INC_HI_NYBBLE(*(ptr + 1));
 1408         INC_HI_NYBBLE(*(ptr + 2));
 1409         INC_HI_NYBBLE(*(ptr + 3));
 1410         INC_HI_NYBBLE(*(ptr + 4));
 1411         INC_HI_NYBBLE(*(ptr + 5));
 1412         INC_HI_NYBBLE(*(ptr + 6));
 1413         INC_HI_NYBBLE(*(ptr + 7));
 1414         c++;
 1415         if (UNLIKELY(max_ops && c >= max_ops))
 1416             goto abort;
 1417         if (UNLIKELY(!keep_stressing_flag()))
 1418             goto abort;
 1419     }
 1420     (void)stress_mincore_touch_pages(buf, sz);
 1421     inject_random_bit_errors(buf, sz);
 1422 
 1423     for (ptr = (uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr += 8) {
 1424         bit_errors += (*(ptr + 0) != val);
 1425         bit_errors += (*(ptr + 1) != val);
 1426         bit_errors += (*(ptr + 2) != val);
 1427         bit_errors += (*(ptr + 3) != val);
 1428         bit_errors += (*(ptr + 4) != val);
 1429         bit_errors += (*(ptr + 5) != val);
 1430         bit_errors += (*(ptr + 6) != val);
 1431         bit_errors += (*(ptr + 7) != val);
 1432         if (UNLIKELY(!keep_stressing_flag()))
 1433             break;
 1434     }
 1435 
 1436 abort:
 1437     stress_vm_check("inc-nybble", bit_errors);
 1438     set_counter(args, c);
 1439 
 1440     return bit_errors;
 1441 }
 1442 
 1443 /*
 1444  *  stress_vm_rand_sum()
 1445  *  sequentially set all memory to random values and then
 1446  *  check if they are still set correctly.
 1447  */
 1448 static size_t TARGET_CLONES stress_vm_rand_sum(
 1449     void *buf,
 1450     void *buf_end,
 1451     const size_t sz,
 1452     const stress_args_t *args,
 1453     const uint64_t max_ops)
 1454 {
 1455     volatile uint64_t *ptr;
 1456     uint64_t c = get_counter(args);
 1457     uint32_t w, z;
 1458     size_t bit_errors = 0;
 1459     const size_t chunk_sz = sizeof(*ptr) * 8;
 1460 
 1461     (void)buf_end;
 1462 
 1463     stress_mwc_reseed();
 1464     w = stress_mwc32();
 1465     z = stress_mwc32();
 1466 
 1467     stress_mwc_seed(w, z);
 1468     for (ptr = (uint64_t *)buf; ptr < (uint64_t *)buf_end; ptr += chunk_sz) {
 1469         *(ptr + 0) = stress_mwc64();
 1470         *(ptr + 1) = stress_mwc64();
 1471         *(ptr + 2) = stress_mwc64();
 1472         *(ptr + 3) = stress_mwc64();
 1473         *(ptr + 4) = stress_mwc64();
 1474         *(ptr + 5) = stress_mwc64();
 1475         *(ptr + 6) = stress_mwc64();
 1476         *(ptr + 7) = stress_mwc64();
 1477         c++;
 1478         if (UNLIKELY(max_ops && c >= max_ops))
 1479             goto abort;
 1480         if (UNLIKELY(!keep_stressing_flag()))
 1481             goto abort;
 1482     }
 1483 
 1484     (void)stress_mincore_touch_pages(buf, sz);
 1485     inject_random_bit_errors(buf, sz);
 1486 
 1487     stress_mwc_seed(w, z);
 1488     for (ptr = (uint64_t *)buf; ptr < (uint64_t *)buf_end; ptr += chunk_sz) {
 1489         bit_errors += stress_vm_count_bits(*(ptr + 0) ^ stress_mwc64());
 1490         bit_errors += stress_vm_count_bits(*(ptr + 1) ^ stress_mwc64());
 1491         bit_errors += stress_vm_count_bits(*(ptr + 2) ^ stress_mwc64());
 1492         bit_errors += stress_vm_count_bits(*(ptr + 3) ^ stress_mwc64());
 1493         bit_errors += stress_vm_count_bits(*(ptr + 4) ^ stress_mwc64());
 1494         bit_errors += stress_vm_count_bits(*(ptr + 5) ^ stress_mwc64());
 1495         bit_errors += stress_vm_count_bits(*(ptr + 6) ^ stress_mwc64());
 1496         bit_errors += stress_vm_count_bits(*(ptr + 7) ^ stress_mwc64());
 1497         if (UNLIKELY(!keep_stressing_flag()))
 1498             break;
 1499     }
 1500 abort:
 1501     stress_vm_check("rand-sum", bit_errors);
 1502     set_counter(args, c);
 1503 
 1504     return bit_errors;
 1505 }
 1506 
 1507 /*
 1508  *  stress_vm_prime_zero()
 1509  *  step through memory in non-contiguous large steps
 *  and clearing each bit to zero (one bit per complete memory cycle)
 1511  *  and check if they are clear.
 1512  */
 1513 static size_t TARGET_CLONES stress_vm_prime_zero(
 1514     void *buf,
 1515     void *buf_end,
 1516     const size_t sz,
 1517     const stress_args_t *args,
 1518     const uint64_t max_ops)
 1519 {
 1520     size_t i;
 1521     volatile uint8_t *ptr = buf;
 1522     uint8_t j, *ptr8;
 1523     size_t bit_errors = 0;
 1524     const uint64_t prime = PRIME_64;
 1525     uint64_t k, c = get_counter(args);
 1526 
 1527     (void)buf_end;
 1528 #if SIZE_MAX > UINT32_MAX
 1529     /* Unlikely.. */
 1530     if (sz > (1ULL << 63))
 1531         return 0;
 1532 #endif
 1533 
 1534     (void)memset(buf, 0xff, sz);
 1535 
 1536     for (j = 0; j < 8; j++) {
 1537         uint8_t mask = (uint8_t)~(1 << j);
 1538         /*
 1539          *  Step through memory in prime sized steps
 1540          *  in a totally sub-optimal way to exercise
 1541          *  memory and cache stalls
 1542          */
 1543         for (i = 0, k = prime; i < sz; i++, k += prime) {
 1544             ptr[k % sz] &= mask;
 1545             c++;
 1546             if (UNLIKELY(max_ops && c >= max_ops))
 1547                 goto abort;
 1548             if (UNLIKELY(!keep_stressing_flag()))
 1549                 goto abort;
 1550         }
 1551     }
 1552     (void)stress_mincore_touch_pages(buf, sz);
 1553     inject_random_bit_errors(buf, sz);
 1554 
 1555     ptr8 = (uint8_t *)buf;
 1556     for (i = 0; i < sz; i++) {
 1557         bit_errors += stress_vm_count_bits8(ptr8[i]);
 1558     }
 1559 
 1560 abort:
 1561     stress_vm_check("prime-zero", bit_errors);
 1562     set_counter(args, c);
 1563 
 1564     return bit_errors;
 1565 }
 1566 
 1567 /*
 1568  *  stress_vm_prime_one()
 1569  *  step through memory in non-contiguous large steps
 1570  *  and set each bit to one (one bit per complete memory cycle)
 1571  *  and check if they are set.
 1572  */
 1573 static size_t TARGET_CLONES stress_vm_prime_one(
 1574     void *buf,
 1575     void *buf_end,
 1576     const size_t sz,
 1577     const stress_args_t *args,
 1578     const uint64_t max_ops)
 1579 {
 1580     size_t i;
 1581     volatile uint8_t *ptr = buf;
 1582     uint8_t j, *ptr8;
 1583     size_t bit_errors = 0;
 1584     const uint64_t prime = PRIME_64;
 1585     uint64_t k, c = get_counter(args);
 1586 
 1587     (void)buf_end;
 1588 #if SIZE_MAX > UINT32_MAX
 1589     /* Unlikely.. */
 1590     if (sz > (1ULL << 63))
 1591         return 0;
 1592 #endif
 1593 
 1594     (void)memset(buf, 0x00, sz);
 1595 
 1596     for (j = 0; j < 8; j++) {
 1597         uint8_t mask = (uint8_t)(1 << j);
 1598         /*
 1599          *  Step through memory in prime sized steps
 1600          *  in a totally sub-optimal way to exercise
 1601          *  memory and cache stalls
 1602          */
 1603         for (i = 0, k = prime; i < sz; i++, k += prime) {
 1604             ptr[k % sz] |= mask;
 1605             c++;
 1606             if (UNLIKELY(max_ops && c >= max_ops))
 1607                 goto abort;
 1608             if (UNLIKELY(!keep_stressing_flag()))
 1609                 goto abort;
 1610         }
 1611     }
 1612     (void)stress_mincore_touch_pages(buf, sz);
 1613     inject_random_bit_errors(buf, sz);
 1614 
 1615     ptr8 = (uint8_t *)buf;
 1616     for (i = 0; i < sz; i++) {
 1617         bit_errors += 8 - stress_vm_count_bits8(ptr8[i]);
 1618         if (UNLIKELY(!keep_stressing_flag()))
 1619             break;
 1620     }
 1621 abort:
 1622     stress_vm_check("prime-one", bit_errors);
 1623     set_counter(args, c);
 1624 
 1625     return bit_errors;
 1626 }
 1627 
 1628 /*
 1629  *  stress_vm_prime_gray_zero()
 1630  *  step through memory in non-contiguous large steps
 1631  *  and first clear just one bit (based on gray code) and then
 1632  *  clear all the other bits and finally check if they are all clear
 1633  */
 1634 static size_t TARGET_CLONES stress_vm_prime_gray_zero(
 1635     void *buf,
 1636     void *buf_end,
 1637     const size_t sz,
 1638     const stress_args_t *args,
 1639     const uint64_t max_ops)
 1640 {
 1641     size_t i;
 1642     volatile uint8_t *ptr = buf;
 1643     uint8_t *ptr8;
 1644     size_t bit_errors = 0;
 1645     const uint64_t prime = PRIME_64;
 1646     uint64_t j, c = get_counter(args);
 1647 
 1648     (void)buf_end;
 1649 #if SIZE_MAX > UINT32_MAX
 1650     /* Unlikely.. */
 1651     if (sz > (1ULL << 63))
 1652         return 0;
 1653 #endif
 1654 
 1655     (void)memset(buf, 0xff, sz);
 1656 
 1657     for (i = 0, j = prime; i < sz; i++, j += prime) {
 1658         /*
 1659          *  Step through memory in prime sized steps
 1660          *  in a totally sub-optimal way to exercise
 1661          *  memory and cache stalls
 1662          */
 1663         ptr[j % sz] &= ((i >> 1) ^ i);
 1664         if (!keep_stressing_flag())
 1665             goto abort;
 1666         c++;
 1667         if (max_ops && c >= max_ops)
 1668             goto abort;
 1669     }
 1670     for (i = 0, j = prime; i < sz; i++, j += prime) {
 1671         /*
 1672          *  Step through memory in prime sized steps
 1673          *  in a totally sub-optimal way to exercise
 1674          *  memory and cache stalls
 1675          */
 1676         ptr[j % sz] &= ~((i >> 1) ^ i);
 1677         if (UNLIKELY(!keep_stressing_flag()))
 1678             goto abort;
 1679         c++;
 1680         if (UNLIKELY(max_ops && c >= max_ops))
 1681             goto abort;
 1682     }
 1683     (void)stress_mincore_touch_pages(buf, sz);
 1684     inject_random_bit_errors(buf, sz);
 1685 
 1686     ptr8 = (uint8_t *)buf;
 1687     for (i = 0; i < sz; i++) {
 1688         bit_errors += stress_vm_count_bits8(ptr8[i]);
 1689         if (UNLIKELY(!keep_stressing_flag()))
 1690             break;
 1691     }
 1692 abort:
 1693     stress_vm_check("prime-gray-zero", bit_errors);
 1694     set_counter(args, c);
 1695 
 1696     return bit_errors;
 1697 }
 1698 
 1699 /*
 *  stress_vm_prime_gray_one()
 1701  *  step through memory in non-contiguous large steps
 1702  *  and first set just one bit (based on gray code) and then
 1703  *  set all the other bits and finally check if they are all set
 1704  */
 1705 static size_t TARGET_CLONES stress_vm_prime_gray_one(
 1706     void *buf,
 1707     void *buf_end,
 1708     const size_t sz,
 1709     const stress_args_t *args,
 1710     const uint64_t max_ops)
 1711 {
 1712     size_t i;
 1713     volatile uint8_t *ptr = buf;
 1714     uint8_t *ptr8;
 1715     size_t bit_errors = 0;
 1716     const uint64_t prime = PRIME_64;
 1717     uint64_t j, c = get_counter(args);
 1718 
 1719     (void)buf_end;
 1720 #if SIZE_MAX > UINT32_MAX
 1721     /* Unlikely.. */
 1722     if (sz > (1ULL << 63))
 1723         return 0;
 1724 #endif
 1725 
 1726     (void)memset(buf, 0x00, sz);
 1727 
 1728     for (i = 0, j = prime; i < sz; i++, j += prime) {
 1729         /*
 1730          *  Step through memory in prime sized steps
 1731          *  in a totally sub-optimal way to exercise
 1732          *  memory and cache stalls
 1733          */
 1734         ptr[j % sz] |= ((i >> 1) ^ i);
 1735         if (UNLIKELY(!keep_stressing_flag()))
 1736             goto abort;
 1737         c++;
 1738         if (UNLIKELY(max_ops && c >= max_ops))
 1739             goto abort;
 1740     }
 1741     (void)stress_mincore_touch_pages(buf, sz);
 1742     for (i = 0, j = prime; i < sz; i++, j += prime) {
 1743         /*
 1744          *  Step through memory in prime sized steps
 1745          *  in a totally sub-optimal way to exercise
 1746          *  memory and cache stalls
 1747          */
 1748         ptr[j % sz] |= ~((i >> 1) ^ i);
 1749         if (UNLIKELY(!keep_stressing_flag()))
 1750             goto abort;
 1751         c++;
 1752         if (UNLIKELY(max_ops && c >= max_ops))
 1753             goto abort;
 1754     }
 1755     (void)stress_mincore_touch_pages(buf, sz);
 1756     inject_random_bit_errors(buf, sz);
 1757 
 1758     ptr8 = (uint8_t *)buf;
 1759     for (i = 0; i < sz; i++) {
 1760         bit_errors += 8 - stress_vm_count_bits8(ptr8[i]);
 1761         if (UNLIKELY(!keep_stressing_flag()))
 1762             break;
 1763     }
 1764 abort:
 1765     stress_vm_check("prime-gray-one", bit_errors);
 1766     set_counter(args, c);
 1767 
 1768     return bit_errors;
 1769 }
 1770 
 1771 /*
 *  stress_vm_write64()
 1773  *  simple 64 bit write, no read check
 1774  */
 1775 static size_t TARGET_CLONES stress_vm_write64(
 1776     void *buf,
 1777     void *buf_end,
 1778     const size_t sz,
 1779     const stress_args_t *args,
 1780     const uint64_t max_ops)
 1781 {
 1782     static uint64_t val;
 1783     uint64_t *ptr = (uint64_t *)buf;
 1784     register uint64_t v = val;
 1785     register size_t i = 0, n = sz / (sizeof(*ptr) * 32);
 1786 
 1787     (void)buf_end;
 1788 
 1789     while (i < n) {
 1790         *ptr++ = v;
 1791         *ptr++ = v;
 1792         *ptr++ = v;
 1793         *ptr++ = v;
 1794         *ptr++ = v;
 1795         *ptr++ = v;
 1796         *ptr++ = v;
 1797         *ptr++ = v;
 1798 
 1799         *ptr++ = v;
 1800         *ptr++ = v;
 1801         *ptr++ = v;
 1802         *ptr++ = v;
 1803         *ptr++ = v;
 1804         *ptr++ = v;
 1805         *ptr++ = v;
 1806         *ptr++ = v;
 1807 
 1808         *ptr++ = v;
 1809         *ptr++ = v;
 1810         *ptr++ = v;
 1811         *ptr++ = v;
 1812         *ptr++ = v;
 1813         *ptr++ = v;
 1814         *ptr++ = v;
 1815         *ptr++ = v;
 1816 
 1817         *ptr++ = v;
 1818         *ptr++ = v;
 1819         *ptr++ = v;
 1820         *ptr++ = v;
 1821         *ptr++ = v;
 1822         *ptr++ = v;
 1823         *ptr++ = v;
 1824         *ptr++ = v;
 1825         i++;
 1826         if (UNLIKELY(!keep_stressing_flag() || (max_ops && (i >= max_ops))))
 1827             break;
 1828     }
 1829     add_counter(args, i);
 1830     val++;
 1831 
 1832     return 0;
 1833 }
 1834 
 1835 /*
 *  stress_vm_read64()
 1837  *  simple 64 bit read
 1838  */
 1839 static size_t TARGET_CLONES stress_vm_read64(
 1840     void *buf,
 1841     void *buf_end,
 1842     const size_t sz,
 1843     const stress_args_t *args,
 1844     const uint64_t max_ops)
 1845 {
 1846     volatile uint64_t *ptr = (uint64_t *)buf;
 1847     register size_t i = 0, n = sz / (sizeof(*ptr) * 32);
 1848 
 1849     (void)buf_end;
 1850 
 1851     while (i < n) {
 1852         (void)*(ptr++);
 1853         (void)*(ptr++);
 1854         (void)*(ptr++);
 1855         (void)*(ptr++);
 1856         (void)*(ptr++);
 1857         (void)*(ptr++);
 1858         (void)*(ptr++);
 1859         (void)*(ptr++);
 1860 
 1861         (void)*(ptr++);
 1862         (void)*(ptr++);
 1863         (void)*(ptr++);
 1864         (void)*(ptr++);
 1865         (void)*(ptr++);
 1866         (void)*(ptr++);
 1867         (void)*(ptr++);
 1868         (void)*(ptr++);
 1869 
 1870         (void)*(ptr++);
 1871         (void)*(ptr++);
 1872         (void)*(ptr++);
 1873         (void)*(ptr++);
 1874         (void)*(ptr++);
 1875         (void)*(ptr++);
 1876         (void)*(ptr++);
 1877         (void)*(ptr++);
 1878 
 1879         (void)*(ptr++);
 1880         (void)*(ptr++);
 1881         (void)*(ptr++);
 1882         (void)*(ptr++);
 1883         (void)*(ptr++);
 1884         (void)*(ptr++);
 1885         (void)*(ptr++);
 1886         (void)*(ptr++);
 1887 
 1888         i++;
 1889         if (UNLIKELY(!keep_stressing_flag() || (max_ops && (i >= max_ops))))
 1890             break;
 1891     }
 1892     add_counter(args, i);
 1893 
 1894     return 0;
 1895 }
 1896 
 1897 /*
 1898  *  stress_vm_rowhammer()
 1899  *
 1900  */
static size_t TARGET_CLONES stress_vm_rowhammer(
    void *buf,
    void *buf_end,
    const size_t sz,
    const stress_args_t *args,
    const uint64_t max_ops)
{
    size_t bit_errors = 0;
    uint32_t *buf32 = (uint32_t *)buf;
    static uint32_t val = 0xff5a00a5;	/* fill pattern, rotated left once per call */
    register size_t j;
    register volatile uint32_t *addr0, *addr1;
    register size_t errors = 0;
    const size_t n = sz / sizeof(*addr0);	/* number of 32 bit words in buf */

    (void)buf_end;
    (void)max_ops;

    if (!n) {
        pr_dbg("stress-vm: rowhammer: zero uint32_t integers could "
            "be hammered, aborting\n");
        return 0;
    }

    (void)stress_mincore_touch_pages(buf, sz);

    /* fill entire buffer with the known pattern */
    for (j = 0; j < n; j++)
        buf32[j] = val;

    /*
     *  Pick two random addresses.
     *  NOTE(review): the << 12 shift before % n presumably spreads the
     *  picks to page-sized granularity — confirm intent; for small n
     *  the modulo dominates anyway.
     */
    addr0 = &buf32[(stress_mwc64() << 12) % n];
    addr1 = &buf32[(stress_mwc64() << 12) % n];

    /*
     *  Hammer the rows: volatile reads force actual loads; clflush
     *  evicts the lines so each read hits DRAM, mfence orders the
     *  flushes — the loop body is unrolled 4x, hence LOOPS / 4.
     */
    for (j = VM_ROWHAMMER_LOOPS / 4; j; j--) {
        *addr0;
        *addr1;
        shim_clflush(addr0);
        shim_clflush(addr1);
        shim_mfence();
        *addr0;
        *addr1;
        shim_clflush(addr0);
        shim_clflush(addr1);
        shim_mfence();
        *addr0;
        *addr1;
        shim_clflush(addr0);
        shim_clflush(addr1);
        shim_mfence();
        *addr0;
        *addr1;
        shim_clflush(addr0);
        shim_clflush(addr1);
        shim_mfence();
    }
    /* any word that no longer matches the pattern is a flipped bit hit */
    for (j = 0; j < n; j++)
        if (UNLIKELY(buf32[j] != val))
            errors++;
    if (errors) {
        bit_errors += errors;
        pr_dbg("stress-vm: rowhammer: %zu errors on addresses "
            "%p and %p\n", errors, (volatile void *)addr0, (volatile void *)addr1);
    }
    add_counter(args, VM_ROWHAMMER_LOOPS);
    val = (val >> 31) | (val << 1);	/* rotate pattern left by one bit */

    stress_vm_check("rowhammer", bit_errors);

    return bit_errors;
}
 1972 
 1973 /*
 1974  *  stress_vm_popcount()
 1975  *  population count - count number of 1 bits in val (K & R method)
 1976  */
 1977 static inline unsigned int OPTIMIZE3 stress_vm_popcount(register uint8_t val)
 1978 {
 1979     register unsigned int count;
 1980 
 1981     for (count = 0; val; count++)
 1982         val &= val - 1;
 1983 
 1984     return count;
 1985 }
 1986 
 1987 /*
 1988  *  stress_vm_mscan()
 1989  *  for each byte, walk through each bit set to 0, check, set to 1, check
 1990  */
 1991 static size_t TARGET_CLONES stress_vm_mscan(
 1992     void *buf,
 1993     void *buf_end,
 1994     const size_t sz,
 1995     const stress_args_t *args,
 1996     const uint64_t max_ops)
 1997 {
 1998     size_t bit_errors = 0;
 1999     volatile uint8_t *ptr = (volatile uint8_t *)buf, *end;
 2000     uint64_t c = get_counter(args);
 2001 
 2002     (void)sz;
 2003 
 2004     for (ptr = (volatile uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr++, c++) {
 2005         *ptr |= 0x01;
 2006         *ptr |= 0x02;
 2007         *ptr |= 0x04;
 2008         *ptr |= 0x08;
 2009         *ptr |= 0x10;
 2010         *ptr |= 0x20;
 2011         *ptr |= 0x40;
 2012         *ptr |= 0x80;
 2013 
 2014         if (UNLIKELY(!keep_stressing_flag() || (max_ops && (c >= max_ops))))
 2015             break;
 2016     }
 2017     end = (volatile uint8_t *)ptr;
 2018 
 2019     add_counter(args, c);
 2020 
 2021     for (ptr = (volatile uint8_t *)buf; ptr < end; ptr++) {
 2022         bit_errors += 8 - stress_vm_popcount(*ptr);
 2023     }
 2024 
 2025     for (ptr = (volatile uint8_t *)buf; ptr < (uint8_t *)buf_end; ptr++, c++) {
 2026         *ptr &= 0xfe;
 2027         *ptr &= 0xfd;
 2028         *ptr &= 0xfb;
 2029         *ptr &= 0xf7;
 2030         *ptr &= 0xef;
 2031         *ptr &= 0xdf;
 2032         *ptr &= 0xbf;
 2033         *ptr &= 0x7f;
 2034 
 2035         if (UNLIKELY(!keep_stressing_flag() || (max_ops && (c >= max_ops))))
 2036             break;
 2037     }
 2038     end = (volatile uint8_t *)ptr;
 2039 
 2040     add_counter(args, c);
 2041 
 2042     for (ptr = (volatile uint8_t *)buf; ptr < end; ptr++) {
 2043         bit_errors += stress_vm_popcount(*ptr);
 2044     }
 2045 
 2046     stress_vm_check("mscan", bit_errors);
 2047     set_counter(args, c);
 2048 
 2049     return bit_errors;
 2050 }
 2051 
 2052 
 2053 /*
 2054  *  stress_vm_all()
 2055  *  work through all vm stressors sequentially
 2056  */
 2057 static size_t stress_vm_all(
 2058     void *buf,
 2059     void *buf_end,
 2060     const size_t sz,
 2061     const stress_args_t *args,
 2062     const uint64_t max_ops)
 2063 {
 2064     static int i = 1;
 2065     size_t bit_errors = 0;
 2066 
 2067     bit_errors = vm_methods[i].func(buf, buf_end, sz, args, max_ops);
 2068     i++;
 2069     if (vm_methods[i].func == NULL)
 2070         i = 1;
 2071 
 2072     return bit_errors;
 2073 }
 2074 
/*
 *  Table of vm stress methods, terminated by a NULL entry.
 *  Entry 0 ("all") cycles through the other methods in turn and
 *  must remain first: stress_vm_all starts iterating at index 1.
 */
static const stress_vm_method_info_t vm_methods[] = {
    { "all",    stress_vm_all },
    { "flip",   stress_vm_flip },
    { "galpat-0",   stress_vm_galpat_zero },
    { "galpat-1",   stress_vm_galpat_one },
    { "gray",   stress_vm_gray },
    { "rowhammer",  stress_vm_rowhammer },
    { "incdec", stress_vm_incdec },
    { "inc-nybble", stress_vm_inc_nybble },
    { "rand-set",   stress_vm_rand_set },
    { "rand-sum",   stress_vm_rand_sum },
    { "read64", stress_vm_read64 },
    { "ror",    stress_vm_ror },
    { "swap",   stress_vm_swap },
    { "move-inv",   stress_vm_moving_inversion },
    { "modulo-x",   stress_vm_modulo_x },
    { "mscan",  stress_vm_mscan },
    { "prime-0",    stress_vm_prime_zero },
    { "prime-1",    stress_vm_prime_one },
    { "prime-gray-0",stress_vm_prime_gray_zero },
    { "prime-gray-1",stress_vm_prime_gray_one },
    { "prime-incdec",stress_vm_prime_incdec },
    { "walk-0d",    stress_vm_walking_zero_data },
    { "walk-1d",    stress_vm_walking_one_data },
    { "walk-0a",    stress_vm_walking_zero_addr },
    { "walk-1a",    stress_vm_walking_one_addr },
    { "write64",    stress_vm_write64 },
    { "zero-one",   stress_vm_zero_one },
    { NULL,     NULL  }
};
 2105 
 2106 /*
 2107  *  stress_set_vm_method()
 2108  *      set default vm stress method
 2109  */
 2110 static int stress_set_vm_method(const char *name)
 2111 {
 2112     stress_vm_method_info_t const *info;
 2113 
 2114     for (info = vm_methods; info->func; info++) {
 2115         if (!strcmp(info->name, name)) {
 2116             stress_set_setting("vm-method", TYPE_ID_UINTPTR_T, &info);
 2117             return 0;
 2118         }
 2119     }
 2120 
 2121     (void)fprintf(stderr, "vm-method must be one of:");
 2122     for (info = vm_methods; info->func; info++) {
 2123         (void)fprintf(stderr, " %s", info->name);
 2124     }
 2125     (void)fprintf(stderr, "\n");
 2126 
 2127     return -1;
 2128 }
 2129 
/*
 *  stress_vm_child()
 *	body of the OOM-able child process: mmap a buffer, exercise it
 *	with the selected vm method, and accumulate bit errors into the
 *	shared context->bit_error_count. The buffer is unmapped and
 *	remapped each iteration unless vm-keep is set.
 */
static int stress_vm_child(const stress_args_t *args, void *ctxt)
{
    int no_mem_retries = 0;
    /* bogo-op counts are scaled up by VM_BOGO_SHIFT inside the vm methods */
    const uint64_t max_ops = args->max_ops << VM_BOGO_SHIFT;
    uint64_t vm_hang = DEFAULT_VM_HANG;
    void *buf = NULL, *buf_end = NULL;
    int vm_flags = 0;                      /* VM mmap flags */
    int vm_madvise = -1;                   /* < 0: use random madvise hints */
    size_t buf_sz;
    size_t vm_bytes = DEFAULT_VM_BYTES;
    const size_t page_size = args->page_size;
    bool vm_keep = false;                  /* true: keep mapping across iterations */
    stress_vm_context_t *context = (stress_vm_context_t *)ctxt;
    const stress_vm_func func = context->vm_method->func;

    (void)stress_get_setting("vm-hang", &vm_hang);
    (void)stress_get_setting("vm-keep", &vm_keep);
    (void)stress_get_setting("vm-flags", &vm_flags);

    if (!stress_get_setting("vm-bytes", &vm_bytes)) {
        if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
            vm_bytes = MAX_32;
        if (g_opt_flags & OPT_FLAGS_MINIMIZE)
            vm_bytes = MIN_VM_BYTES;
    }
    /* share the requested bytes across all instances, but keep a minimum */
    vm_bytes /= args->num_instances;
    if (vm_bytes < MIN_VM_BYTES)
        vm_bytes = MIN_VM_BYTES;
    /* round down to a whole number of pages (page_size is a power of two) */
    buf_sz = vm_bytes & ~(page_size - 1);
    (void)stress_get_setting("vm-madvise", &vm_madvise);

    do {
        if (no_mem_retries >= NO_MEM_RETRIES_MAX) {
            pr_err("%s: gave up trying to mmap, no available memory\n",
                args->name);
            break;
        }
        /* (re-)map the buffer unless keeping a previous mapping */
        if (!vm_keep || (buf == NULL)) {
            if (!keep_stressing_flag())
                return EXIT_SUCCESS;
            buf = (uint8_t *)mmap(NULL, buf_sz,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS |
                vm_flags, -1, 0);
            if (buf == MAP_FAILED) {
                buf = NULL;
                no_mem_retries++;
                (void)shim_usleep(100000);
                continue;   /* Try again */
            }
            buf_end = (void *)((uint8_t *)buf + buf_sz);
            if (vm_madvise < 0)
                (void)stress_madvise_random(buf, buf_sz);
            else
                (void)shim_madvise(buf, buf_sz, vm_madvise);
        }

        no_mem_retries = 0;
        /* fault pages in before the method exercises them */
        (void)stress_mincore_touch_pages(buf, buf_sz);
        *(context->bit_error_count) += func(buf, buf_end, buf_sz, args, max_ops);

        /* vm-hang of 0 means hold the mapping until the run ends */
        if (vm_hang == 0) {
            while (keep_stressing_vm(args)) {
                (void)sleep(3600);
            }
        } else if (vm_hang != DEFAULT_VM_HANG) {
            (void)sleep((unsigned int)vm_hang);
        }

        if (!vm_keep) {
            (void)stress_madvise_random(buf, buf_sz);
            (void)munmap(buf, buf_sz);
        }
    } while (keep_stressing_vm(args));

    /* with vm-keep the final mapping is still live; release it */
    if (vm_keep && buf != NULL)
        (void)munmap((void *)buf, buf_sz);

    return EXIT_SUCCESS;
}
 2210 
 2211 /*
 2212  *  stress_vm()
 2213  *  stress virtual memory
 2214  */
 2215 static int stress_vm(const stress_args_t *args)
 2216 {
 2217     uint64_t tmp_counter;
 2218     const size_t page_size = args->page_size;
 2219     size_t retries;
 2220     int err = 0, ret = EXIT_SUCCESS;
 2221     stress_vm_context_t context;
 2222 
 2223     context.vm_method = &vm_methods[0];
 2224     context.bit_error_count = MAP_FAILED;
 2225 
 2226     (void)stress_get_setting("vm-method", &context.vm_method);
 2227 
 2228     pr_dbg("%s using method '%s'\n", args->name, context.vm_method->name);
 2229 
 2230     for (retries = 0; (retries < 100) && keep_stressing_flag(); retries++) {
 2231         context.bit_error_count = (uint64_t *)
 2232             mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 2233                 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 2234         err = errno;
 2235         if (context.bit_error_count != MAP_FAILED)
 2236             break;
 2237         (void)shim_usleep(100);
 2238     }
 2239 
 2240     /* Cannot allocate a single page for bit error counter */
 2241     if (context.bit_error_count == MAP_FAILED) {
 2242         if (keep_stressing_flag()) {
 2243             pr_err("%s: could not mmap bit error counter: "
 2244                 "retry count=%zu, errno=%d (%s)\n",
 2245                 args->name, retries, err, strerror(err));
 2246         }
 2247         return EXIT_NO_RESOURCE;
 2248     }
 2249 
 2250     *context.bit_error_count = 0ULL;
 2251 
 2252     stress_set_proc_state(args->name, STRESS_STATE_RUN);
 2253 
 2254     ret = stress_oomable_child(args, &context, stress_vm_child, STRESS_OOMABLE_NORMAL);
 2255 
 2256     (void)shim_msync(context.bit_error_count, page_size, MS_SYNC);
 2257     if (*context.bit_error_count > 0) {
 2258         pr_fail("%s: detected %" PRIu64 " bit errors while "
 2259             "stressing memory\n",
 2260             args->name, *context.bit_error_count);
 2261         ret = EXIT_FAILURE;
 2262     }
 2263 
 2264     stress_set_proc_state(args->name, STRESS_STATE_DEINIT);
 2265     (void)munmap((void *)context.bit_error_count, page_size);
 2266 
 2267     tmp_counter = get_counter(args) >> VM_BOGO_SHIFT;
 2268     set_counter(args, tmp_counter);
 2269 
 2270     return ret;
 2271 }
 2272 
 2273 static void stress_vm_set_default(void)
 2274 {
 2275     stress_set_vm_method("all");
 2276 }
 2277 
/*
 *  Mapping of vm command line options to their setter functions,
 *  terminated by a zero/NULL entry.
 */
static const stress_opt_set_func_t opt_set_funcs[] = {
    { OPT_vm_bytes,     stress_set_vm_bytes },
    { OPT_vm_hang,      stress_set_vm_hang },
    { OPT_vm_keep,      stress_set_vm_keep },
    { OPT_vm_madvise,   stress_set_vm_madvise },
    { OPT_vm_method,    stress_set_vm_method },
    { OPT_vm_mmap_locked,   stress_set_vm_mmap_locked },
    { OPT_vm_mmap_populate, stress_set_vm_mmap_populate },
    { 0,            NULL }
};
 2288 
/*
 *  stress_vm_info
 *	stressor descriptor exported to the stress-ng core
 */
stressor_info_t stress_vm_info = {
    .stressor = stress_vm,
    .set_default = stress_vm_set_default,
    .class = CLASS_VM | CLASS_MEMORY | CLASS_OS,
    .opt_set_funcs = opt_set_funcs,
    .help = help
};