"Fossies" - the Fresh Open Source Software Archive

Member "stress-ng-0.09.56/stress-dev.c" (15 Mar 2019, 26149 Bytes) of package /linux/privat/stress-ng-0.09.56.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "stress-dev.c" see the Fossies "Dox" file reference documentation and the last Fossies "Diffs" side-by-side code changes report: 0.09.52_vs_0.09.54.

    1 /*
    2  * Copyright (C) 2013-2019 Canonical, Ltd.
    3  *
    4  * This program is free software; you can redistribute it and/or
    5  * modify it under the terms of the GNU General Public License
    6  * as published by the Free Software Foundation; either version 2
    7  * of the License, or (at your option) any later version.
    8  *
    9  * This program is distributed in the hope that it will be useful,
   10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   12  * GNU General Public License for more details.
   13  *
   14  * You should have received a copy of the GNU General Public License
   15  * along with this program; if not, write to the Free Software
   16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
   17  *
   18  * This code is a complete clean re-write of the stress tool by
   19  * Colin Ian King <colin.king@canonical.com> and attempts to be
   20  * backwardly compatible with the stress tool by Amos Waterland
   21  * <apw@rossby.metr.ou.edu> but has more stress tests and more
   22  * functionality.
   23  *
   24  */
   25 #include "stress-ng.h"
   26 
#if defined(HAVE_POLL_H) &&	\
    defined(HAVE_LIB_PTHREAD) &&	\
    !defined(__sun__) &&	\
    !defined(__HAIKU__)

/* Number of concurrent /dev exerciser pthreads per stressor instance */
#define MAX_DEV_THREADS		(4)

static sigset_t set;			/* signals blocked in the worker threads */
static shim_pthread_spinlock_t lock;	/* protects dev_path and dev_scsi_list */
static char *dev_path;			/* current /dev entry being exercised */
static uint32_t mixup;			/* per-instance hash seed, randomizes scan order */

/* Maps a /dev path prefix to a device specific exerciser function */
typedef struct dev_func {
	const char *devpath;		/* path prefix, e.g. "/dev/vcs" */
	const size_t devpath_len;	/* precomputed strlen of devpath */
	void (*func)(const char *name, const int fd, const char *devpath);
} dev_func_t;

/* Singly linked cache node for paths known to be SCSI block devices */
typedef struct dev_scsi {
	struct dev_scsi *next;
	char *devpath;			/* strdup'd device path, owned by the node */
} dev_scsi_t;

#if defined(__linux__)
static dev_scsi_t *dev_scsi_list;
#endif
   53 
   54 /*
   55  *  path_sum()
   56  *  simple hash on path (hash pjw), from
   57  *  from Aho, Sethi, Ullman, Compiling Techniques.
   58  */
   59 static uint32_t path_sum(const char *path)
   60 {
   61     const char *ptr = path;
   62     register uint32_t h = mixup;
   63 
   64     while (*ptr) {
   65         register uint32_t g;
   66 
   67         h = (h << 4) + (*(ptr++));
   68         if (0 != (g = h & 0xf0000000)) {
   69             h ^= (g >> 24);
   70             h ^= g;
   71         }
   72     }
   73     return h;
   74 }
   75 
   76 /*
   77  *  mixup_sort()
   78  *  sort helper based on hash to mix up ordering
   79  */
   80 #if defined(__NetBSD__)
   81 static int mixup_sort(const void *p1, const void *p2)
   82 {
   83     uint32_t s1, s2;
   84     const struct dirent *d1 = p1;
   85     const struct dirent *d2 = p2;
   86 
   87     s1 = path_sum(d1->d_name);
   88     s2 = path_sum(d2->d_name);
   89 
   90     if (s1 == s2)
   91         return 0;
   92     return (s1 < s2) ? -1 : 1;
   93 }
   94 #else
   95 static int mixup_sort(const struct dirent **d1, const struct dirent **d2)
   96 {
   97     uint32_t s1, s2;
   98 
   99     s1 = path_sum((*d1)->d_name);
  100     s2 = path_sum((*d2)->d_name);
  101 
  102     if (s1 == s2)
  103         return 0;
  104     return (s1 < s2) ? -1 : 1;
  105 }
  106 #endif
  107 
#if defined(__linux__) && defined(HAVE_LINUX_MEDIA_H) && \
    defined(MEDIA_IOC_DEVICE_INFO)
/*
 *  stress_dev_media_linux()
 *	exercise media controller devices (/dev/media*); queries
 *	device info and reports unexpectedly empty identity fields
 */
static void stress_dev_media_linux(
	const char *name,
	const int fd,
	const char *devpath)
{
	/* args unused if MEDIA_IOC_DEVICE_INFO is not defined */
	(void)name;
	(void)fd;
	(void)devpath;

#if defined(MEDIA_IOC_DEVICE_INFO)
	{
		struct media_device_info mdi;
		int ret;

		ret = ioctl(fd, MEDIA_IOC_DEVICE_INFO, &mdi);
		if (ret < 0)
			return;

		/* a successful query should fill these in; report empties */
		if (!mdi.driver[0])
			pr_inf("%s: ioctl MEDIA_IOC_DEVICE_INFO %s: null driver name\n",
				name, devpath);
		if (!mdi.model[0])
			pr_inf("%s: ioctl MEDIA_IOC_DEVICE_INFO %s: null model name\n",
				name, devpath);
		if (!mdi.bus_info[0])
			pr_inf("%s: ioctl MEDIA_IOC_DEVICE_INFO %s: null bus_info field\n",
				name, devpath);
	}
#endif
}
#endif
  141 
  142 #if defined(HAVE_LINUX_VT_H)
  143 static void stress_dev_vcs_linux(
  144     const char *name,
  145     const int fd,
  146     const char *devpath)
  147 {
  148     (void)name;
  149     (void)fd;
  150     (void)devpath;
  151 
  152 #if defined(VT_GETMODE)
  153     {
  154         struct vt_mode mode;
  155         int ret;
  156 
  157         ret = ioctl(fd, VT_GETMODE, &mode);
  158         (void)ret;
  159     }
  160 #endif
  161 #if defined(VT_GETSTATE)
  162     {
  163         struct vt_stat vt_stat;
  164         int ret;
  165 
  166         ret = ioctl(fd, VT_GETSTATE, &vt_stat);
  167         (void)ret;
  168     }
  169 #endif
  170 }
  171 #endif
  172 
  173 #if defined(HAVE_LINUX_DM_IOCTL_H)
  174 static void stress_dev_dm_linux(
  175     const char *name,
  176     const int fd,
  177     const char *devpath)
  178 {
  179     (void)name;
  180     (void)fd;
  181     (void)devpath;
  182 
  183 #if defined(DM_VERSION)
  184     {
  185         struct dm_ioctl dm;
  186         int ret;
  187 
  188         ret = ioctl(fd, DM_VERSION, &dm);
  189         (void)ret;
  190     }
  191 #endif
  192 #if defined(DM_STATUS)
  193     {
  194         struct dm_ioctl dm;
  195         int ret;
  196 
  197         ret = ioctl(fd, DM_STATUS, &dm);
  198         (void)ret;
  199     }
  200 #endif
  201 }
  202 #endif
  203 
  204 #if defined(HAVE_LINUX_VIDEODEV2_H)
  205 static void stress_dev_video_linux(
  206     const char *name,
  207     const int fd,
  208     const char *devpath)
  209 {
  210     (void)name;
  211     (void)fd;
  212     (void)devpath;
  213 
  214 #if defined(VIDIOC_QUERYCAP)
  215     {
  216         struct v4l2_capability c;
  217         int ret;
  218 
  219         ret = ioctl(fd, VIDIOC_QUERYCAP, &c);
  220         (void)ret;
  221     }
  222 #endif
  223 }
  224 #endif
  225 
#if defined(HAVE_TERMIOS_H) && defined(TCGETS)
/*
 *  stress_dev_tty()
 *	exercise read-only tty/pty ioctls; every result is
 *	deliberately ignored, the aim is only to drive the
 *	kernel ioctl paths
 */
static void stress_dev_tty(
	const char *name,
	const int fd,
	const char *devpath)
{
	int ret;
	struct termios t;

	(void)name;
	(void)devpath;

	/* fetch termios attributes, portable API first */
	ret = tcgetattr(fd, &t);
	(void)ret;
#if defined(TCGETS)
	{
		/* ... then the equivalent raw ioctl */
		ret = ioctl(fd, TCGETS, &t);
		(void)ret;
	}
#endif
#if defined(TIOCGPTLCK)
	{
		/* pty lock state */
		int lck;

		ret = ioctl(fd, TIOCGPTLCK, &lck);
		(void)ret;
	}
#endif
#if defined(TIOCGPKT)
	{
		/* pty packet mode */
		int pktmode;

		ret = ioctl(fd, TIOCGPKT, &pktmode);
		(void)ret;
	}
#endif
#if defined(TIOCGPTN)
	{
		/* pty number */
		int ptnum;

		ret = ioctl(fd, TIOCGPTN, &ptnum);
		(void)ret;
	}
#endif
#if defined(TIOCGWINSZ)
	{
		/* window size */
		struct winsize ws;

		ret = ioctl(fd, TIOCGWINSZ, &ws);
		(void)ret;
	}
#endif
#if defined(FIONREAD)
	{
		/* bytes pending in input queue */
		int n;

		ret = ioctl(fd, FIONREAD, &n);
		(void)ret;
	}
#endif
#if defined(TIOCINQ)
	{
		/* same as FIONREAD on Linux */
		int n;

		ret = ioctl(fd, TIOCINQ, &n);
		(void)ret;
	}
#endif
#if defined(TIOCOUTQ)
	{
		/* bytes pending in output queue */
		int n;

		ret = ioctl(fd, TIOCOUTQ, &n);
		(void)ret;
	}
#endif
#if defined(TIOCGPGRP)
	{
		/* foreground process group */
		pid_t pgrp;

		ret = ioctl(fd, TIOCGPGRP, &pgrp);
		(void)ret;
	}
#endif
#if defined(TIOCGSID)
	{
		/* session id */
		pid_t gsid;

		ret = ioctl(fd, TIOCGSID, &gsid);
		(void)ret;
	}
#endif
#if defined(TIOCGEXCL)
	{
		/* exclusive mode flag */
		int excl;

		ret = ioctl(fd, TIOCGEXCL, &excl);
		(void)ret;
	}
#endif
#if defined(TIOCGETD)
	{
		/* line discipline */
		int ldis;

		ret = ioctl(fd, TIOCGETD, &ldis);
		(void)ret;
	}
#endif
	/* Modem */
#if defined(TIOCGSOFTCAR)
	{
		/* software carrier detect flag */
		int flag;

		ret = ioctl(fd, TIOCGSOFTCAR, &flag);
		(void)ret;
	}
#endif
}
#endif
  345 
/*
 *  stress_dev_blk()
 *	block device specific ioctls; every result is ignored,
 *	the aim is only to exercise the kernel paths
 */
static void stress_dev_blk(
	const char *name,
	const int fd,
	const char *devpath)
{
	(void)name;
	(void)fd;
	(void)devpath;

#if defined(BLKFLSBUF)
	/* flush the buffer cache */
	{
		int ret;
		ret = ioctl(fd, BLKFLSBUF, 0);
		(void)ret;
	}
#endif
#if defined(BLKRAGET)
	/* readahead */
	{
		unsigned long ra;
		int ret;

		ret = ioctl(fd, BLKRAGET, &ra);
		(void)ret;
	}
#endif
#if defined(BLKROGET)
	/* readonly state */
	{
		int ret, ro;

		ret = ioctl(fd, BLKROGET, &ro);
		(void)ret;
	}
#endif
#if defined(BLKBSZGET)
	/* get block device soft block size */
	{
		int ret, sz;

		ret = ioctl(fd, BLKBSZGET, &sz);
		(void)ret;
	}
#endif
#if defined(BLKPBSZGET)
	/* get block device physical block size */
	{
		unsigned int sz;
		int ret;

		ret = ioctl(fd, BLKPBSZGET, &sz);
		(void)ret;
	}
#endif
#if defined(BLKIOMIN)
	/* minimum I/O size */
	{
		unsigned int sz;
		int ret;

		ret = ioctl(fd, BLKIOMIN, &sz);
		(void)ret;
	}
#endif
#if defined(BLKIOOPT)
	/* optimal I/O size */
	{
		unsigned int sz;
		int ret;

		ret = ioctl(fd, BLKIOOPT, &sz);
		(void)ret;
	}
#endif
#if defined(BLKALIGNOFF)
	/* alignment offset */
	{
		unsigned int sz;
		int ret;

		ret = ioctl(fd, BLKALIGNOFF, &sz);
		(void)ret;
	}
#endif
#if defined(BLKROTATIONAL)
	/* rotational (spinning media) flag */
	{
		unsigned short rotational;
		int ret;

		ret = ioctl(fd, BLKROTATIONAL, &rotational);
		(void)ret;
	}
#endif
#if defined(BLKSECTGET)
	/* max sectors per request */
	{
		unsigned short max_sectors;
		int ret;

		ret = ioctl(fd, BLKSECTGET, &max_sectors);
		(void)ret;
	}
#endif
#if defined(BLKGETSIZE)
	/* size in 512 byte sectors */
	{
		unsigned long sz;
		int ret;

		ret = ioctl(fd, BLKGETSIZE, &sz);
		(void)ret;
	}
#endif
#if defined(BLKGETSIZE64)
	/* size in bytes */
	{
		uint64_t sz;
		int ret;

		ret = ioctl(fd, BLKGETSIZE64, &sz);
		(void)ret;
	}
#endif
#if defined(BLKGETZONESZ)
	/* zone size for zoned block devices */
	{
		uint32_t sz;
		int ret;

		ret = ioctl(fd, BLKGETZONESZ, &sz);
		(void)ret;
	}
#endif
#if defined(BLKGETNRZONES)
	/* number of zones for zoned block devices */
	{
		uint32_t sz;
		int ret;

		ret = ioctl(fd, BLKGETNRZONES, &sz);
		(void)ret;
	}
#endif
}
  486 
  487 #if defined(__linux__)
  488 static inline const char *dev_basename(const char *devpath)
  489 {
  490     const char *ptr = devpath;
  491     const char *base = devpath;
  492 
  493     while (*ptr) {
  494         if ((*ptr == '/') && (*(ptr + 1)))
  495             base = ptr + 1;
  496         ptr++;
  497     }
  498 
  499     return base;
  500 }
  501 
/*
 *  add_scsi_dev()
 *	add devpath to the SCSI device cache; best effort, any
 *	allocation or lock failure just leaves the cache unchanged
 */
static inline void add_scsi_dev(const char *devpath)
{
	dev_scsi_t *dev_scsi, *dev_scsi_new;
	int ret;

	/*
	 *  Try to add new device to cache list, don't
	 *  cause a failure if we can't do this.
	 */
	dev_scsi_new = malloc(sizeof(*dev_scsi));
	if (!dev_scsi_new)
		return;

	dev_scsi_new->devpath = strdup(devpath);
	if (!dev_scsi_new->devpath)
		goto free_dev_scsi;

	ret = shim_pthread_spin_lock(&lock);
	if (ret)
		goto free_devpath;

	/*
	 *  We may have had another thread add the same devpath to
	 *  the list, so check first before adding a duplicate
	 */
	for (dev_scsi = dev_scsi_list; dev_scsi; dev_scsi = dev_scsi->next) {
		if (!strcmp(dev_scsi->devpath, devpath))
			break;
	}
	/* Not found, add to list */
	if (!dev_scsi) {
		/* push new node to head; list now owns the allocation */
		dev_scsi_new->next = dev_scsi_list;
		dev_scsi_list = dev_scsi_new;
		(void)shim_pthread_spin_unlock(&lock);

		return;
	}
	(void)shim_pthread_spin_unlock(&lock);

	/* duplicate found (or lock failed): discard the new node */
free_devpath:
	free(dev_scsi_new->devpath);
free_dev_scsi:
	free(dev_scsi_new);
}
  546 
  547 static inline bool is_scsi_dev_cached(const char *devpath)
  548 {
  549     dev_scsi_t *dev_scsi;
  550     int ret;
  551     bool is_scsi = false;
  552 
  553     ret = shim_pthread_spin_lock(&lock);
  554     if (ret)
  555         return false;
  556 
  557     for (dev_scsi = dev_scsi_list; dev_scsi; dev_scsi = dev_scsi->next) {
  558         if (!strcmp(dev_scsi->devpath, devpath)) {
  559             is_scsi = true;
  560             break;
  561         }
  562     }
  563     (void)shim_pthread_spin_unlock(&lock);
  564 
  565     return is_scsi;
  566 }
  567 
/*
 *  is_scsi_dev()
 *	determine if devpath is a SCSI block device by scanning
 *	/sys/class/scsi_device/<dev>/device/block for a matching
 *	device name; positive results are cached to avoid rescans
 */
static inline bool is_scsi_dev(const char *devpath)
{
	int i, n;
	static const char scsi_device_path[] = "/sys/class/scsi_device/";
	struct dirent **scsi_device_list;
	bool is_scsi = false;
	const char *devname = dev_basename(devpath);

	if (!*devname)
		return false;

	/* fast path: previously determined to be SCSI */
	if (is_scsi_dev_cached(devpath))
		return true;

	scsi_device_list = NULL;
	n = scandir(scsi_device_path, &scsi_device_list, NULL, alphasort);
	if (n <= 0)
		return is_scsi;

	for (i = 0; !is_scsi && (i < n); i++) {
		int j, m;
		char scsi_block_path[PATH_MAX];
		struct dirent **scsi_block_list;

		/* skip "." and ".." (and any hidden entries) */
		if (scsi_device_list[i]->d_name[0] == '.')
			continue;

		(void)snprintf(scsi_block_path, sizeof(scsi_block_path),
			"%s/%s/device/block", scsi_device_path,
			scsi_device_list[i]->d_name);
		scsi_block_list = NULL;
		m = scandir(scsi_block_path, &scsi_block_list, NULL, alphasort);
		if (m <= 0)
			continue;

		/* does any block entry match the device name? */
		for (j = 0; j < m; j++) {
			if (!strcmp(devname, scsi_block_list[j]->d_name)) {
				is_scsi = true;
				break;
			}
		}

		/* release this scandir's allocations */
		for (j = 0; j < m; j++)
			free(scsi_block_list[j]);
		free(scsi_block_list);
	}

	for (i = 0; i < n; i++)
		free(scsi_device_list[i]);
	free(scsi_device_list);

	if (is_scsi)
		add_scsi_dev(devpath);

	return is_scsi;
}
  624 
  625 static inline void free_scsi_list(void)
  626 {
  627     /*
  628      *  We don't need locking as there is
  629      *  just one thread running at this point
  630      */
  631     dev_scsi_t *dev_scsi = dev_scsi_list;
  632 
  633     while (dev_scsi) {
  634         dev_scsi_t *next = dev_scsi->next;
  635 
  636         free(dev_scsi->devpath);
  637         free(dev_scsi);
  638 
  639         dev_scsi = next;
  640     }
  641     dev_scsi_list = NULL;
  642 }
  643 
  644 #else
  645 
/* Non-Linux: no SCSI device cache exists, nothing to free */
static inline void free_scsi_list(void)
{
}

/* Non-Linux: no way to detect SCSI devices here */
static inline bool is_scsi_dev(const char *name)
{
	(void)name;

	/* Assume not */
	return false;
}
  657 #endif
  658 
  659 /*
  660  *  stress_dev_scsi_blk()
  661  *  SCSI block device specific ioctls
  662  */
  663 static void stress_dev_scsi_blk(
  664     const char *name,
  665     const int fd,
  666     const char *devpath)
  667 {
  668     (void)name;
  669     (void)fd;
  670 
  671     if (!is_scsi_dev(devpath))
  672         return;
  673 
  674 #if defined(SG_GET_VERSION_NUM)
  675     {
  676         int ret, ver;
  677 
  678         ret = ioctl(fd, SG_GET_VERSION_NUM, &ver);
  679         (void)ret;
  680     }
  681 #endif
  682 #if defined(SCSI_IOCTL_GET_IDLUN)
  683     {
  684         int ret;
  685         struct sng_scsi_idlun {
  686             int four_in_one;
  687             int host_unique_id;
  688         } lun;
  689 
  690         (void)memset(&lun, 0, sizeof(lun));
  691         ret = ioctl(fd, SCSI_IOCTL_GET_IDLUN, &lun);
  692         (void)ret;
  693     }
  694 #endif
  695 #if defined(SCSI_IOCTL_GET_BUS_NUMBER)
  696     {
  697         int ret, bus;
  698 
  699         ret = ioctl(fd, SCSI_IOCTL_GET_BUS_NUMBER, &bus);
  700         (void)ret;
  701     }
  702 #endif
  703 #if defined(SCSI_IOCTL_GET_TIMEOUT)
  704     {
  705         int ret;
  706 
  707         ret = ioctl(fd, SCSI_IOCTL_GET_TIMEOUT, 0);
  708         (void)ret;
  709     }
  710 #endif
  711 #if defined(SCSI_IOCTL_GET_RESERVED_SIZE)
  712     {
  713         int ret, sz;
  714 
  715         ret = ioctl(fd, SCSI_IOCTL_GET_RESERVED_SIZE, &sz);
  716         (void)ret;
  717     }
  718 #endif
  719 }
  720 
  721 #if defined(HAVE_LINUX_RANDOM_H)
  722 /*
  723  *  stress_dev_random_linux()
  724  *  Linux /dev/random ioctls
  725  */
  726 static void stress_dev_random_linux(
  727     const char *name,
  728     const int fd,
  729     const char *devpath)
  730 {
  731     (void)name;
  732     (void)fd;
  733     (void)devpath;
  734 
  735 #if defined(RNDGETENTCNT)
  736     {
  737         long entropy;
  738         int ret;
  739 
  740         ret = ioctl(fd, RNDGETENTCNT, &entropy);
  741         (void)ret;
  742     }
  743 #endif
  744 }
  745 #endif
  746 
  747 #if defined(__linux__)
  748 /*
  749  *  stress_dev_mem_mmap_linux()
  750  *  Linux mmap'ing on a device
  751  */
  752 static void stress_dev_mem_mmap_linux(const int fd, const bool read_page)
  753 {
  754     void *ptr;
  755     const size_t page_size = stress_get_pagesize();
  756 
  757     ptr = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0);
  758     if (ptr != MAP_FAILED) {
  759         (void)munmap(ptr, page_size);
  760     }
  761     if (read_page) {
  762         char buffer[page_size];
  763         ssize_t ret;
  764 
  765         ret = read(fd, buffer, page_size);
  766         (void)ret;
  767     }
  768 
  769     ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  770     if (ptr != MAP_FAILED) {
  771         (void)munmap(ptr, page_size);
  772     }
  773 
  774 }
  775 
  776 static void stress_dev_mem_linux(
  777     const char *name,
  778     const int fd,
  779     const char *devpath)
  780 {
  781     (void)name;
  782     (void)devpath;
  783 
  784     stress_dev_mem_mmap_linux(fd, false);
  785 }
  786 #endif
  787 
  788 #if defined(__linux__)
  789 static void stress_dev_kmem_linux(
  790     const char *name,
  791     const int fd,
  792     const char *devpath)
  793 {
  794     (void)name;
  795     (void)devpath;
  796 
  797     stress_dev_mem_mmap_linux(fd, false);
  798 }
  799 #endif
  800 
  801 #if defined(__linux__)
  802 static void stress_dev_kmsg_linux(
  803     const char *name,
  804     const int fd,
  805     const char *devpath)
  806 {
  807     (void)name;
  808     (void)devpath;
  809 
  810     stress_dev_mem_mmap_linux(fd, true);
  811 }
  812 #endif
  813 
  814 #if defined(__linux__) && defined(STRESS_X86)
  815 static void stress_dev_port_linux(
  816     const char *name,
  817     const int fd,
  818     const char *devpath)
  819 {
  820     off_t off;
  821     uint8_t *ptr;
  822     const size_t page_size = stress_get_pagesize();
  823 
  824     (void)name;
  825     (void)devpath;
  826 
  827     /* seek and read port 0x80 */
  828     off = lseek(fd, (off_t)0x80, SEEK_SET);
  829     if (off == 0) {
  830         char data[1];
  831         ssize_t ret;
  832 
  833         ret = read(fd, data, sizeof(data));
  834         (void)ret;
  835     }
  836 
  837     /* Should fail */
  838     ptr = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0);
  839     if (ptr != MAP_FAILED)
  840         (void)munmap(ptr, page_size);
  841 }
  842 #endif
  843 
  844 #if defined(HAVE_LINUX_HDREG_H)
  845 static void stress_dev_hd_linux_ioctl_long(int fd, int cmd)
  846 {
  847     long val;
  848     int ret;
  849 
  850     ret = ioctl(fd, cmd, &val);
  851     (void)ret;
  852 }
  853 
/*
 *  stress_dev_hd_linux()
 *	Linux HDIO ioctls; all results are ignored, the aim is
 *	just to exercise the hard disk driver query paths
 */
static void stress_dev_hd_linux(
	const char *name,
	const int fd,
	const char *devpath)
{
	(void)name;
	(void)devpath;

#if defined(HDIO_GETGEO)
	/* disk geometry */
	{
		struct hd_geometry geom;
		int ret;

		ret = ioctl(fd, HDIO_GETGEO, &geom);
		(void)ret;
	}
#endif

#if defined(HDIO_GET_UNMASKINTR)
	/* interrupt unmask setting */
	stress_dev_hd_linux_ioctl_long(fd, HDIO_GET_UNMASKINTR);
#endif

#if defined(HDIO_GET_MULTCOUNT)
	/* sectors per interrupt */
	{
		int val, ret;

		ret = ioctl(fd, HDIO_GET_MULTCOUNT, &val);
		(void)ret;
	}
#endif

#if defined(HDIO_GET_IDENTITY)
	/* raw IDE identify data */
	{
		unsigned char identity[512];
		int ret;

		ret = ioctl(fd, HDIO_GET_IDENTITY, identity);
		(void)ret;
	}
#endif

#if defined(HDIO_GET_KEEPSETTINGS)
	stress_dev_hd_linux_ioctl_long(fd, HDIO_GET_KEEPSETTINGS);
#endif

#if defined(HDIO_GET_32BIT)
	stress_dev_hd_linux_ioctl_long(fd, HDIO_GET_32BIT);
#endif

#if defined(HDIO_GET_NOWERR)
	stress_dev_hd_linux_ioctl_long(fd, HDIO_GET_NOWERR);
#endif

#if defined(HDIO_GET_DMA)
	stress_dev_hd_linux_ioctl_long(fd, HDIO_GET_DMA);
#endif

#if defined(HDIO_GET_NICE)
	stress_dev_hd_linux_ioctl_long(fd, HDIO_GET_NICE);
#endif

#if defined(HDIO_GET_WCACHE)
	stress_dev_hd_linux_ioctl_long(fd, HDIO_GET_WCACHE);
#endif

#if defined(HDIO_GET_ACOUSTIC)
	stress_dev_hd_linux_ioctl_long(fd, HDIO_GET_ACOUSTIC);
#endif

#if defined(HDIO_GET_ADDRESS)
	stress_dev_hd_linux_ioctl_long(fd, HDIO_GET_ADDRESS);
#endif

#if defined(HDIO_GET_BUSSTATE)
	stress_dev_hd_linux_ioctl_long(fd, HDIO_GET_BUSSTATE);
#endif
}
  935 #endif
  936 
/*
 *  stress_dev_null_nop()
 *	/dev/null has nothing device specific to exercise; no-op
 *	placeholder so the dev_funcs table has an entry for it
 */
static void stress_dev_null_nop(
	const char *name,
	const int fd,
	const char *devpath)
{
	(void)name;
	(void)fd;
	(void)devpath;
}
  946 
/* build a dev_func_t entry: prefix string, its length, handler */
#define DEV_FUNC(dev, func) \
	{ dev, sizeof(dev) - 1, func }

/*
 *  Table of /dev path prefixes and their device specific
 *  exercisers; matched by strncmp over devpath_len bytes
 */
static const dev_func_t dev_funcs[] = {
#if defined(__linux__) && defined(HAVE_LINUX_MEDIA_H) && \
    defined(MEDIA_IOC_DEVICE_INFO)
	DEV_FUNC("/dev/media",	stress_dev_media_linux),
#endif
#if defined(HAVE_LINUX_VT_H)
	DEV_FUNC("/dev/vcs",	stress_dev_vcs_linux),
#endif
#if defined(HAVE_LINUX_DM_IOCTL_H)
	DEV_FUNC("/dev/dm",	stress_dev_dm_linux),
#endif
#if defined(HAVE_LINUX_VIDEODEV2_H)
	DEV_FUNC("/dev/video",	stress_dev_video_linux),
#endif
#if defined(HAVE_LINUX_RANDOM_H)
	DEV_FUNC("/dev/random",	stress_dev_random_linux),
#endif
#if defined(__linux__)
	DEV_FUNC("/dev/mem",	stress_dev_mem_linux),
	DEV_FUNC("/dev/kmem",	stress_dev_kmem_linux),
	DEV_FUNC("/dev/kmsg",	stress_dev_kmsg_linux),
#endif
#if defined(__linux__) && defined(STRESS_X86)
	DEV_FUNC("/dev/port",	stress_dev_port_linux),
#endif
	DEV_FUNC("/dev/null",	stress_dev_null_nop),
};
  977 
/*
 *  stress_dev_rw()
 *	exercise a dev entry; loops == -1 means loop until the
 *	stressing flag drops, otherwise do at most 'loops' passes.
 *	Each pass is bounded by 'threshold' seconds so a slow or
 *	blocking device cannot stall the stressor.
 */
static inline void stress_dev_rw(
	const args_t *args,
	int32_t loops)
{
	int fd, ret;
	off_t off;
	struct stat buf;
	struct pollfd fds[1];
	fd_set rfds;
	void *ptr;
	size_t i;
	char path[PATH_MAX];
	const double threshold = 0.25;	/* max seconds per device per pass */

	while (loops == -1 || loops > 0) {
		double t_start;
		bool timeout = false;
#if defined(HAVE_TERMIOS_H) && defined(TCGETS)
		struct termios tios;
#endif

		/* snapshot the shared dev_path under the lock */
		ret = shim_pthread_spin_lock(&lock);
		if (ret)
			return;
		(void)shim_strlcpy(path, dev_path, sizeof(path));
		(void)shim_pthread_spin_unlock(&lock);

		if (!*path || !g_keep_stressing_flag)
			break;

		t_start = time_now();

		if ((fd = open(path, O_RDONLY | O_NONBLOCK)) < 0)
			goto rdwr;

		/* some devices are slow to open; give up if over budget */
		if (time_now() - t_start > threshold) {
			timeout = true;
			(void)close(fd);
			goto next;
		}

		if (fstat(fd, &buf) < 0) {
			pr_fail_err("stat");
			/*
			 * NOTE(review): on fstat failure buf is left
			 * uninitialized yet is still read in the
			 * S_ISBLK check below - looks unintended, confirm
			 */
		} else {
			/* only block and character devices are interesting */
			if ((S_ISBLK(buf.st_mode) | (S_ISCHR(buf.st_mode))) == 0) {
				(void)close(fd);
				goto next;
			}
		}

		if (S_ISBLK(buf.st_mode)) {
			stress_dev_blk(args->name, fd, path);
			stress_dev_scsi_blk(args->name, fd, path);
#if defined(HAVE_LINUX_HDREG_H)
			stress_dev_hd_linux(args->name, fd, path);
#endif
		}
#if defined(HAVE_TERMIOS_H) && defined(TCGETS)
		/* TCGETS succeeding means this is a tty-like device */
		if (ioctl(fd, TCGETS, &tios) == 0)
			stress_dev_tty(args->name, fd, path);
#endif

		/* exercise seeks to start, current position and end */
		off = lseek(fd, 0, SEEK_SET);
		(void)off;
		off = lseek(fd, 0, SEEK_CUR);
		(void)off;
		off = lseek(fd, 0, SEEK_END);
		(void)off;

		if (time_now() - t_start > threshold) {
			timeout = true;
			(void)close(fd);
			goto next;
		}

		/* exercise poll */
		FD_ZERO(&rfds);
		fds[0].fd = fd;
		fds[0].events = POLLIN;
		ret = poll(fds, 1, 0);
		(void)ret;

		if (time_now() - t_start > threshold) {
			timeout = true;
			(void)close(fd);
			goto next;
		}

#if !defined(__NetBSD__)
		/* exercise select with a short timeout */
		{
			struct timeval tv;
			fd_set wfds;

			FD_ZERO(&rfds);
			FD_SET(fd, &rfds);
			FD_ZERO(&wfds);
			FD_SET(fd, &wfds);
			tv.tv_sec = 0;
			tv.tv_usec = 10000;
			ret = select(fd + 1, &rfds, &wfds, NULL, &tv);
			(void)ret;

			if (time_now() - t_start > threshold) {
				timeout = true;
				(void)close(fd);
				goto next;
			}
		}
#endif

		/* exercise various fcntl queries */
#if defined(F_GETFD)
		ret = fcntl(fd, F_GETFD, NULL);
		(void)ret;

		if (time_now() - t_start > threshold) {
			timeout = true;
			(void)close(fd);
			goto next;
		}
#endif
#if defined(F_GETFL)
		ret = fcntl(fd, F_GETFL, NULL);
		(void)ret;

		if (time_now() - t_start > threshold) {
			timeout = true;
			(void)close(fd);
			goto next;
		}
#endif
#if defined(F_GETSIG)
		ret = fcntl(fd, F_GETSIG, NULL);
		(void)ret;

		if (time_now() - t_start > threshold) {
			timeout = true;
			(void)close(fd);
			goto next;
		}
#endif
		/* read-only mappings on the read-only fd */
		ptr = mmap(NULL, args->page_size, PROT_READ, MAP_PRIVATE, fd, 0);
		if (ptr != MAP_FAILED)
			(void)munmap(ptr, args->page_size);
		ptr = mmap(NULL, args->page_size, PROT_READ, MAP_SHARED, fd, 0);
		if (ptr != MAP_FAILED)
			(void)munmap(ptr, args->page_size);
		(void)close(fd);

		if (time_now() - t_start > threshold) {
			timeout = true;
			goto next;
		}

		/* re-open and try write mappings (expected to mostly fail) */
		if ((fd = open(path, O_RDONLY | O_NONBLOCK)) < 0)
			goto rdwr;
		ptr = mmap(NULL, args->page_size, PROT_WRITE, MAP_PRIVATE, fd, 0);
		if (ptr != MAP_FAILED)
			(void)munmap(ptr, args->page_size);
		ptr = mmap(NULL, args->page_size, PROT_WRITE, MAP_SHARED, fd, 0);
		if (ptr != MAP_FAILED)
			(void)munmap(ptr, args->page_size);

		ret = shim_fsync(fd);
		(void)ret;

		/* dispatch any path-prefix specific exercisers */
		for (i = 0; i < SIZEOF_ARRAY(dev_funcs); i++) {
			if (!strncmp(path, dev_funcs[i].devpath, dev_funcs[i].devpath_len))
				dev_funcs[i].func(args->name, fd, path);
		}
		(void)close(fd);
		if (time_now() - t_start > threshold) {
			timeout = true;
			goto next;
		}
rdwr:
		/*
		 *   O_RDONLY | O_WRONLY allows one to
		 *   use the fd for ioctl() only operations
		 */
		fd = open(path, O_RDONLY | O_WRONLY | O_NONBLOCK);
		if (fd >= 0)
			(void)close(fd);

next:
		if (loops > 0) {
			if (timeout)
				break;
			loops--;
		}
	}
}
 1172 
 1173 /*
 1174  *  stress_dev_thread
 1175  *  keep exercising a /dev entry until
 1176  *  controlling thread triggers an exit
 1177  */
 1178 static void *stress_dev_thread(void *arg)
 1179 {
 1180     static void *nowt = NULL;
 1181     uint8_t stack[SIGSTKSZ + STACK_ALIGNMENT];
 1182     const pthread_args_t *pa = (pthread_args_t *)arg;
 1183     const args_t *args = pa->args;
 1184 
 1185     /*
 1186      *  Block all signals, let controlling thread
 1187      *  handle these
 1188      */
 1189     (void)sigprocmask(SIG_BLOCK, &set, NULL);
 1190 
 1191     /*
 1192      *  According to POSIX.1 a thread should have
 1193      *  a distinct alternative signal stack.
 1194      *  However, we block signals in this thread
 1195      *  so this is probably just totally unncessary.
 1196      */
 1197     (void)memset(stack, 0, sizeof(stack));
 1198     if (stress_sigaltstack(stack, SIGSTKSZ) < 0)
 1199         return &nowt;
 1200 
 1201     while (g_keep_stressing_flag)
 1202         stress_dev_rw(args, -1);
 1203 
 1204     return &nowt;
 1205 }
 1206 
 1207 /*
 1208  *  stress_dev_dir()
 1209  *  read directory
 1210  */
 1211 static void stress_dev_dir(
 1212     const args_t *args,
 1213     const char *path,
 1214     const bool recurse,
 1215     const int depth,
 1216     const uid_t euid)
 1217 {
 1218     struct dirent **dlist;
 1219     const mode_t flags = S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
 1220     int32_t loops = args->instance < 8 ? args->instance + 1 : 8;
 1221     int i, n;
 1222 
 1223     if (!g_keep_stressing_flag)
 1224         return;
 1225 
 1226     /* Don't want to go too deep */
 1227     if (depth > 20)
 1228         return;
 1229 
 1230     dlist = NULL;
 1231     n = scandir(path, &dlist, NULL, mixup_sort);
 1232     if (n <= 0)
 1233         goto done;
 1234 
 1235     for (i = 0; i < n; i++) {
 1236         int ret;
 1237         struct stat buf;
 1238         char filename[PATH_MAX];
 1239         char tmp[PATH_MAX];
 1240         struct dirent *d = dlist[i];
 1241         size_t len;
 1242 
 1243         if (!keep_stressing())
 1244             break;
 1245         if (stress_is_dot_filename(d->d_name))
 1246             continue;
 1247         /*
 1248          * Xen clients hang on hpet when running as root
 1249          * see: LP#1741409, so avoid opening /dev/hpet
 1250          */
 1251         if (!euid && !strcmp(d->d_name, "hpet"))
 1252             continue;
 1253 
 1254         len = strlen(d->d_name);
 1255 
 1256         /*
 1257          *  Exercise no more than 3 of the same device
 1258          *  driver, e.g. ttyS0..ttyS2
 1259          */
 1260         if (len > 1) {
 1261             int dev_n;
 1262             char *ptr = d->d_name + len - 1;
 1263 
 1264             while (ptr > d->d_name && isdigit((int)*ptr))
 1265                 ptr--;
 1266             ptr++;
 1267             dev_n = atoi(ptr);
 1268             if (dev_n > 2)
 1269                 continue;
 1270         }
 1271 
 1272         (void)snprintf(tmp, sizeof(tmp), "%s/%s", path, d->d_name);
 1273         switch (d->d_type) {
 1274         case DT_DIR:
 1275             if (!recurse)
 1276                 continue;
 1277 
 1278             ret = stat(tmp, &buf);
 1279             if (ret < 0)
 1280                 continue;
 1281             if ((buf.st_mode & flags) == 0)
 1282                 continue;
 1283 
 1284             inc_counter(args);
 1285             stress_dev_dir(args, tmp, recurse, depth + 1, euid);
 1286             break;
 1287         case DT_BLK:
 1288         case DT_CHR:
 1289             if (strstr(tmp, "watchdog"))
 1290                 continue;
 1291             ret = shim_pthread_spin_lock(&lock);
 1292             if (!ret) {
 1293                 (void)shim_strlcpy(filename, tmp, sizeof(filename));
 1294                 dev_path = filename;
 1295                 (void)shim_pthread_spin_unlock(&lock);
 1296                 stress_dev_rw(args, loops);
 1297                 inc_counter(args);
 1298             }
 1299             break;
 1300         default:
 1301             break;
 1302         }
 1303     }
 1304 done:
 1305     if (dlist) {
 1306         for (i = 0; i < n; i++)
 1307             free(dlist[i]);
 1308         free(dlist);
 1309     }
 1310 }
 1311 
 1312 /*
 1313  *  stress_dev
 1314  *  stress reading all of /dev
 1315  */
 1316 static int stress_dev(const args_t *args)
 1317 {
 1318     pthread_t pthreads[MAX_DEV_THREADS];
 1319     int ret[MAX_DEV_THREADS], rc = EXIT_SUCCESS;
 1320     uid_t euid = geteuid();
 1321     pthread_args_t pa;
 1322 
 1323     dev_path = "/dev/null";
 1324     pa.args = args;
 1325     pa.data = NULL;
 1326 
 1327     (void)memset(ret, 0, sizeof(ret));
 1328 
 1329     do {
 1330         pid_t pid;
 1331 
 1332 again:
 1333         if (!keep_stressing())
 1334             break;
 1335         pid = fork();
 1336         if (pid < 0) {
 1337             if ((errno == EAGAIN) || (errno == ENOMEM))
 1338                 goto again;
 1339         } else if (pid > 0) {
 1340             int status, wret;
 1341 
 1342             (void)setpgid(pid, g_pgrp);
 1343             /* Parent, wait for child */
 1344             wret = waitpid(pid, &status, 0);
 1345             if (wret < 0) {
 1346                 if (errno != EINTR)
 1347                     pr_dbg("%s: waitpid(): errno=%d (%s)\n",
 1348                         args->name, errno, strerror(errno));
 1349                 (void)kill(pid, SIGTERM);
 1350                 (void)kill(pid, SIGKILL);
 1351                 (void)waitpid(pid, &status, 0);
 1352             } else {
 1353                 if (WIFEXITED(status) &&
 1354                     WEXITSTATUS(status) != 0) {
 1355                     rc = EXIT_FAILURE;
 1356                     break;
 1357                 }
 1358             }
 1359         } else if (pid == 0) {
 1360             size_t i;
 1361             int r;
 1362 
 1363             (void)setpgid(0, g_pgrp);
 1364             stress_parent_died_alarm();
 1365             rc = shim_pthread_spin_init(&lock, SHIM_PTHREAD_PROCESS_SHARED);
 1366             if (rc) {
 1367                 pr_inf("%s: pthread_spin_init failed, errno=%d (%s)\n",
 1368                     args->name, rc, strerror(rc));
 1369                 return EXIT_NO_RESOURCE;
 1370             }
 1371 
 1372             /* Make sure this is killable by OOM killer */
 1373             set_oom_adjustment(args->name, true);
 1374             mixup = mwc32();
 1375 
 1376             for (i = 0; i < MAX_DEV_THREADS; i++) {
 1377                 ret[i] = pthread_create(&pthreads[i], NULL,
 1378                         stress_dev_thread, (void *)&pa);
 1379             }
 1380 
 1381             do {
 1382                 stress_dev_dir(args, "/dev", true, 0, euid);
 1383             } while (keep_stressing());
 1384 
 1385             r = shim_pthread_spin_lock(&lock);
 1386             if (r) {
 1387                 pr_dbg("%s: failed to lock spin lock for dev_path\n", args->name);
 1388             } else {
 1389                 dev_path = "";
 1390                 r = shim_pthread_spin_unlock(&lock);
 1391                 (void)r;
 1392             }
 1393 
 1394             for (i = 0; i < MAX_DEV_THREADS; i++) {
 1395                 if (ret[i] == 0)
 1396                     pthread_join(pthreads[i], NULL);
 1397             }
 1398             (void)free_scsi_list();
 1399             _exit(!g_keep_stressing_flag);
 1400         }
 1401     } while (keep_stressing());
 1402 
 1403     (void)shim_pthread_spin_destroy(&lock);
 1404 
 1405     return rc;
 1406 }
/* Stressor registration (built when the feature test at the top of the file passes) */
stressor_info_t stress_dev_info = {
    .stressor = stress_dev,
    .class = CLASS_DEV | CLASS_OS
};
 1411 #else
/* Fallback registration when the build requirements (poll.h, pthreads, non-SunOS) are not met */
stressor_info_t stress_dev_info = {
    .stressor = stress_not_implemented,
    .class = CLASS_DEV | CLASS_OS
};
 1416 #endif