"Fossies" - the Fresh Open Source Software Archive

Member "stress-ng-0.09.56/stress-epoll.c" (15 Mar 2019, 14426 Bytes) of package /linux/privat/stress-ng-0.09.56.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively, you can view or download the uninterpreted source code file here. For more information about "stress-epoll.c" see the Fossies "Dox" file reference documentation and the last Fossies "Diffs" side-by-side code changes report: 0.09.52_vs_0.09.54.

    1 /*
    2  * Copyright (C) 2013-2019 Canonical, Ltd.
    3  *
    4  * This program is free software; you can redistribute it and/or
    5  * modify it under the terms of the GNU General Public License
    6  * as published by the Free Software Foundation; either version 2
    7  * of the License, or (at your option) any later version.
    8  *
    9  * This program is distributed in the hope that it will be useful,
   10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   12  * GNU General Public License for more details.
   13  *
   14  * You should have received a copy of the GNU General Public License
   15  * along with this program; if not, write to the Free Software
   16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
   17  *
   18  * This code is a complete clean re-write of the stress tool by
   19  * Colin Ian King <colin.king@canonical.com> and attempts to be
   20  * backwardly compatible with the stress tool by Amos Waterland
   21  * <apw@rossby.metr.ou.edu> but has more stress tests and more
   22  * functionality.
   23  *
   24  */
   25 #include "stress-ng.h"
   26 
   27 #define MAX_EPOLL_EVENTS    (1024)
   28 #define MAX_SERVERS     (4)
   29 
   30 #if defined(HAVE_SYS_EPOLL_H) &&    \
   31     defined(HAVE_LIB_RT) &&     \
   32     defined(HAVE_TIMER_CREATE) &&   \
   33     defined(HAVE_TIMER_DELETE) &&   \
   34     defined(HAVE_TIMER_SETTIME) &&  \
   35     NEED_GLIBC(2,3,2)
   36 
   37 typedef void (epoll_func_t)(
   38     const args_t *args,
   39     const int child,
   40     const pid_t ppid,
   41     const int epoll_port,
   42     const int epoll_domain);
   43 
   44 static timer_t epoll_timerid;
   45 
   46 #endif
   47 
   48 static int max_servers = 1;
   49 
   50 /*
   51  *  stress_set_epoll_port()
   52  *  set the default port base
   53  */
   54 int stress_set_epoll_port(const char *opt)
   55 {
   56     int epoll_port;
   57 
   58     stress_set_net_port("epoll-port", opt,
   59         MIN_EPOLL_PORT,
   60         MAX_EPOLL_PORT - (STRESS_PROCS_MAX * MAX_SERVERS),
   61         &epoll_port);
   62     return set_setting("epoll-port", TYPE_ID_INT, &epoll_port);
   63 }
   64 
   65 /*
   66  *  stress_set_epoll_domain()
   67  *  set the socket domain option
   68  */
   69 int stress_set_epoll_domain(const char *name)
   70 {
   71     int ret, epoll_domain;
   72 
   73     ret = stress_set_net_domain(DOMAIN_ALL, "epoll-domain",
   74         name, &epoll_domain);
   75     set_setting("epoll-domain", TYPE_ID_INT, &epoll_domain);
   76 
   77     switch (epoll_domain) {
   78     case AF_INET:
   79     case AF_INET6:
   80         max_servers = 4;
   81         break;
   82     case AF_UNIX:
   83     default:
   84         max_servers = 1;
   85     }
   86 
   87     return ret;
   88 }
   89 
   90 #if defined(HAVE_SYS_EPOLL_H) &&    \
   91     defined(HAVE_LIB_RT) &&     \
   92     defined(HAVE_TIMER_CREATE) &&   \
   93     defined(HAVE_TIMER_DELETE) &&   \
   94     defined(HAVE_TIMER_SETTIME) &&  \
   95     NEED_GLIBC(2,3,2)
   96 
   97 /*
   98  * epoll_timer_handler()
   99  *  catch timer signal and cancel if no more runs flagged
  100  */
  101 static void MLOCKED_TEXT epoll_timer_handler(int sig)
  102 {
  103     (void)sig;
  104 
  105     /* Cancel timer if we detect no more runs */
  106     if (!g_keep_stressing_flag) {
  107         struct itimerspec timer;
  108 
  109         timer.it_value.tv_sec = 0;
  110         timer.it_value.tv_nsec = 0;
  111         timer.it_interval.tv_sec = timer.it_value.tv_sec;
  112         timer.it_interval.tv_nsec = timer.it_value.tv_nsec;
  113 
  114         (void)timer_settime(epoll_timerid, 0, &timer, NULL);
  115     }
  116 }
  117 
  118 /*
  119  *  epoll_spawn()
  120  *  spawn a process
  121  */
  122 static pid_t epoll_spawn(
  123     const args_t *args,
  124     epoll_func_t func,
  125     const int child,
  126     const pid_t ppid,
  127     const int epoll_port,
  128     const int epoll_domain)
  129 {
  130     pid_t pid;
  131 
  132 again:
  133     pid = fork();
  134     if (pid < 0) {
  135         if (g_keep_stressing_flag &&
  136             ((errno == EAGAIN) || (errno == ENOMEM)))
  137             goto again;
  138         return -1;
  139     }
  140     if (pid == 0) {
  141         (void)setpgid(0, g_pgrp);
  142         stress_parent_died_alarm();
  143         func(args, child, ppid, epoll_port, epoll_domain);
  144         _exit(EXIT_SUCCESS);
  145     }
  146     (void)setpgid(pid, g_pgrp);
  147     return pid;
  148 }
  149 
  150 /*
  151  *  epoll_set_fd_nonblock()
  152  *  set non-blocking mode on fd
  153  */
  154 static int epoll_set_fd_nonblock(const int fd)
  155 {
  156     int flags;
  157 
  158     if ((flags = fcntl(fd, F_GETFL, 0)) < 0)
  159         return -1;
  160     if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0)
  161         return -1;
  162     return 0;
  163 }
  164 
  165 /*
  166  *  epoll_recv_data()
  167  *  receive data on fd
  168  */
  169 static void epoll_recv_data(const int fd)
  170 {
  171     while (g_keep_stressing_flag) {
  172         char buf[8192];
  173         ssize_t n;
  174 
  175         n = recv(fd, buf, sizeof(buf), 0);
  176         if (n == -1) {
  177             if (errno != EAGAIN)
  178                 (void)close(fd);
  179             break;
  180         } else if (n == 0) {
  181             (void)close(fd);
  182             break;
  183         }
  184     }
  185 }
  186 
  187 /*
  188  *  epoll_ctl_add()
  189  *  add fd to epoll list
  190  */
  191 static int epoll_ctl_add(const int efd, const int fd)
  192 {
  193     struct epoll_event event;
  194 
  195     (void)memset(&event, 0, sizeof(event));
  196     event.data.fd = fd;
  197     event.events = EPOLLIN | EPOLLET;
  198     if (epoll_ctl(efd, EPOLL_CTL_ADD, fd, &event) < 0)
  199         return -1;
  200 
  201     return 0;
  202 }
  203 
  204 /*
  205  *  epoll_notification()
  206  *  handle accept notification on sfd, add
  207  *  fd's to epoll event list
  208  */
  209 static int epoll_notification(
  210     const args_t *args,
  211     const int efd,
  212     const int sfd)
  213 {
  214     for (;;) {
  215         struct sockaddr saddr;
  216         socklen_t slen = sizeof(saddr);
  217         int fd;
  218 
  219         if (!keep_stressing())
  220             return -1;
  221 
  222         if ((fd = accept(sfd, &saddr, &slen)) < 0) {
  223             if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
  224                 /* all incoming connections handled so finish */
  225                 return 0;
  226             }
  227             if ((errno == EMFILE) || (errno == ENFILE)) {
  228                 /* out of file descriptors! */
  229                 return 0;
  230             }
  231             pr_fail_err("accept");
  232             return -1;
  233         }
  234         /*
  235          *  Add non-blocking fd to epoll event list
  236          */
  237         if (epoll_set_fd_nonblock(fd) < 0) {
  238             pr_fail_err("setting socket to non-blocking");
  239             (void)close(fd);
  240             return -1;
  241         }
  242         if (epoll_ctl_add(efd, fd) < 0) {
  243             pr_fail_err("epoll ctl add");
  244             (void)close(fd);
  245             return -1;
  246         }
  247     }
  248     return 0;
  249 }
  250 
/*
 *  epoll_client()
 *	rapidly try to connect to server(s) and send a relatively
 *	short message. Each connect() is bounded by a 0.25s POSIX
 *	timer whose SIGRTMIN handler interrupts a blocked connect
 *	with EINTR. Returns -1 on hard failure, EXIT_SUCCESS when
 *	the stressing period ends.
 *	NOTE(review): failure paths return -1 rather than EXIT_FAILURE;
 *	the sole caller (stress_epoll) ignores the return value.
 */
static int epoll_client(
	const args_t *args,
	const pid_t ppid,
	const int epoll_port,
	const int epoll_domain)
{
	int port_counter = 0;
	uint64_t connect_timeouts = 0;	/* count of EINTR'd connects */
	struct sigevent sev;
	struct itimerspec timer;
	struct sockaddr *addr = NULL;	/* set by stress_set_sockaddr */

	/* SIGRTMIN fires when the connect timeout timer expires */
	if (stress_sighandler(args->name, SIGRTMIN, epoll_timer_handler, NULL) < 0)
		return -1;

	do {
		char buf[4096];
		int fd, saved_errno;
		int retries = 0;
		int ret = -1;
		int port = epoll_port + port_counter +
				(max_servers * args->instance);
		socklen_t addr_len = 0;

		/* Cycle through the servers */
		port_counter = (port_counter + 1) % max_servers;
retry:
		/* re-entered after each failed connect attempt */
		if (!g_keep_stressing_flag)
			break;

		if ((fd = socket(epoll_domain, SOCK_STREAM, 0)) < 0) {
			pr_fail_dbg("socket");
			return -1;
		}

		/* arm a one-shot(ish) timer delivering SIGRTMIN */
		sev.sigev_notify = SIGEV_SIGNAL;
		sev.sigev_signo = SIGRTMIN;
		sev.sigev_value.sival_ptr = &epoll_timerid;
		if (timer_create(CLOCK_REALTIME, &sev, &epoll_timerid) < 0) {
			pr_fail_err("timer_create");
			(void)close(fd);
			return -1;
		}

		/*
		 * Allow 0.25 seconds for connection to occur,
		 * connect can block if the connection table
		 * fills up because we're waiting for TIME-OUTs
		 * to occur on previously closed connections
		 */
		timer.it_value.tv_sec = 0;
		timer.it_value.tv_nsec = 250000000;
		timer.it_interval.tv_sec = timer.it_value.tv_sec;
		timer.it_interval.tv_nsec = timer.it_value.tv_nsec;
		if (timer_settime(epoll_timerid, 0, &timer, NULL) < 0) {
			pr_fail_err("timer_settime");
			(void)close(fd);
			return -1;
		}

		stress_set_sockaddr(args->name, args->instance, ppid,
			epoll_domain, port, &addr, &addr_len, NET_ADDR_ANY);

		/* errno must be cleared: the timer signal yields EINTR */
		errno = 0;
		ret = connect(fd, addr, addr_len);
		saved_errno = errno;	/* timer_delete below may clobber errno */

		/* No longer need timer */
		if (timer_delete(epoll_timerid) < 0) {
			pr_fail_err("timer_delete");
			(void)close(fd);
			return -1;
		}

		if (ret < 0) {
			switch (saved_errno) {
			case EINTR:
				/* 0.25s timer fired: connect timed out */
				connect_timeouts++;
				break;
			case ECONNREFUSED: /* No servers yet running */
			case ENOENT:       /* unix domain not yet created */
				break;
			default:
				pr_dbg("%s: connect failed: %d (%s)\n",
					args->name, saved_errno, strerror(saved_errno));
				break;
			}
			(void)close(fd);
			(void)shim_usleep(100000);	/* Twiddle fingers for a moment */

			retries++;
			if (retries > 1000) {
				/* Sigh, give up.. */
				errno = saved_errno;
				pr_fail_dbg("too many connects");
				return -1;
			}
			goto retry;
		}

		/* connected: push one buffer of cycling filler data */
		(void)memset(buf, 'A' + (get_counter(args) % 26), sizeof(buf));
		if (send(fd, buf, sizeof(buf), 0) < 0) {
			(void)close(fd);
			pr_fail_dbg("send");
			break;
		}
		(void)close(fd);
		(void)shim_sched_yield();
		inc_counter(args);
	} while (keep_stressing());

#if defined(AF_UNIX)
	/* remove the unix domain socket path the server bound to */
	if (addr && (epoll_domain == AF_UNIX)) {
		struct sockaddr_un *addr_un = (struct sockaddr_un *)addr;
		(void)unlink(addr_un->sun_path);
	}
#endif
	if (connect_timeouts)
		pr_dbg("%s: %" PRIu64 " x 0.25 second "
			"connect timeouts, connection table full "
			"(instance %" PRIu32 ")\n",
			args->name, connect_timeouts, args->instance);
	return EXIT_SUCCESS;
}
  380 
  381 /*
  382  *  epoll_server()
  383  *  wait on connections and read data
  384  */
  385 static void epoll_server(
  386     const args_t *args,
  387     const int child,
  388     const pid_t ppid,
  389     const int epoll_port,
  390     const int epoll_domain)
  391 {
  392     int efd = -1, sfd = -1, rc = EXIT_SUCCESS;
  393     int so_reuseaddr = 1;
  394     int port = epoll_port + child + (max_servers * args->instance);
  395     struct epoll_event *events = NULL;
  396     struct sockaddr *addr = NULL;
  397     socklen_t addr_len = 0;
  398 
  399     if (stress_sig_stop_stressing(args->name, SIGALRM) < 0) {
  400         rc = EXIT_FAILURE;
  401         goto die;
  402     }
  403     if ((sfd = socket(epoll_domain, SOCK_STREAM, 0)) < 0) {
  404         pr_fail_err("socket");
  405         rc = EXIT_FAILURE;
  406         goto die;
  407     }
  408     if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR,
  409             &so_reuseaddr, sizeof(so_reuseaddr)) < 0) {
  410         pr_fail_err("setsockopt");
  411         rc = EXIT_FAILURE;
  412         goto die_close;
  413     }
  414 
  415     stress_set_sockaddr(args->name, args->instance, ppid,
  416         epoll_domain, port, &addr, &addr_len, NET_ADDR_ANY);
  417 
  418     if (bind(sfd, addr, addr_len) < 0) {
  419         pr_fail_err("bind");
  420         rc = EXIT_FAILURE;
  421         goto die_close;
  422     }
  423     if (epoll_set_fd_nonblock(sfd) < 0) {
  424         pr_fail_err("setting socket to non-blocking");
  425         rc = EXIT_FAILURE;
  426         goto die_close;
  427     }
  428     if (listen(sfd, SOMAXCONN) < 0) {
  429         pr_fail_err("listen");
  430         rc = EXIT_FAILURE;
  431         goto die_close;
  432     }
  433 
  434     /*
  435      *  Due to historical reasons we have two ways of
  436      *  creating the epoll fd, so randomly select one
  437      *  or the other to get more test coverage
  438      */
  439 #if defined(HAVE_EPOLL_CREATE1)
  440     if (mwc1()) {
  441         efd = epoll_create1(0); /* flag version */
  442         if (efd < 0) {
  443             pr_fail_err("epoll_create1");
  444             rc = EXIT_FAILURE;
  445             goto die_close;
  446         }
  447     } else {
  448         efd = epoll_create(1);  /* size version */
  449         if (efd < 0) {
  450             pr_fail_err("epoll_create");
  451             rc = EXIT_FAILURE;
  452             goto die_close;
  453         }
  454     }
  455 #else
  456     efd = epoll_create(1);  /* size version */
  457     if (efd < 0) {
  458         pr_fail_err("epoll_create");
  459         rc = EXIT_FAILURE;
  460         goto die_close;
  461     }
  462 #endif
  463     if (epoll_ctl_add(efd, sfd) < 0) {
  464         pr_fail_err("epoll ctl add");
  465         rc = EXIT_FAILURE;
  466         goto die_close;
  467     }
  468     if ((events = calloc(MAX_EPOLL_EVENTS,
  469                 sizeof(struct epoll_event))) == NULL) {
  470         pr_fail_err("epoll ctl add");
  471         rc = EXIT_FAILURE;
  472         goto die_close;
  473     }
  474 
  475     do {
  476         int n, i;
  477         sigset_t sigmask;
  478 
  479         (void)sigemptyset(&sigmask);
  480         (void)sigaddset(&sigmask, SIGALRM);
  481 
  482         (void)memset(events, 0, MAX_EPOLL_EVENTS * sizeof(struct epoll_event));
  483         errno = 0;
  484 
  485         /*
  486          * Wait for 100ms for an event, allowing us to
  487          * to break out if keep_stressing_flag has been changed.
  488          * Note: epoll_wait maps to epoll_pwait in glibc, ho hum.
  489          */
  490         if (mwc1()) {
  491             n = epoll_wait(efd, events, MAX_EPOLL_EVENTS, 100);
  492         } else {
  493             n = epoll_pwait(efd, events, MAX_EPOLL_EVENTS, 100, &sigmask);
  494         }
  495         if (n < 0) {
  496             if (errno != EINTR) {
  497                 pr_fail_err("epoll_wait");
  498                 rc = EXIT_FAILURE;
  499                 goto die_close;
  500             }
  501             break;
  502         }
  503 
  504         for (i = 0; i < n; i++) {
  505             if ((events[i].events & EPOLLERR) ||
  506                 (events[i].events & EPOLLHUP) ||
  507                 (!(events[i].events & EPOLLIN))) {
  508                 /*
  509                  *  Error has occurred or fd is not
  510                  *  for reading anymore.. so reap fd
  511                  */
  512                 (void)close(events[i].data.fd);
  513             } else if (sfd == events[i].data.fd) {
  514                 /*
  515                  *  The listening socket has notification(s)
  516                  *  pending, so handle incoming connections
  517                  */
  518                 if (epoll_notification(args, efd, sfd) < 0)
  519                     break;
  520             } else {
  521                 /*
  522                  *  The fd has data available, so read it
  523                  */
  524                 epoll_recv_data(events[i].data.fd);
  525             }
  526         }
  527     } while (keep_stressing());
  528 
  529 die_close:
  530     if (efd != -1)
  531         (void)close(efd);
  532     if (sfd != -1)
  533         (void)close(sfd);
  534 die:
  535 #if defined(AF_UNIX)
  536     if (addr && (epoll_domain == AF_UNIX)) {
  537         struct sockaddr_un *addr_un = (struct sockaddr_un *)addr;
  538         (void)unlink(addr_un->sun_path);
  539     }
  540 #endif
  541     free(events);
  542 
  543     _exit(rc);
  544 }
  545 
  546 /*
  547  *  stress_epoll
  548  *  stress by heavy socket I/O
  549  */
  550 static int stress_epoll(const args_t *args)
  551 {
  552     pid_t pids[MAX_SERVERS], ppid = getppid();
  553     int i, rc = EXIT_SUCCESS;
  554     int epoll_port = DEFAULT_EPOLL_PORT;
  555     int epoll_domain = AF_UNIX;
  556 
  557     (void)get_setting("epoll-port", &epoll_port);
  558     (void)get_setting("epoll-domain", &epoll_domain);
  559 
  560     if (max_servers == 1) {
  561         pr_dbg("%s: process [%d] using socket port %d\n",
  562             args->name, args->pid,
  563             epoll_port + args->instance);
  564     } else {
  565         pr_dbg("%s: process [%d] using socket ports %d..%d\n",
  566             args->name, args->pid,
  567             epoll_port + (max_servers * args->instance),
  568             epoll_port + (max_servers * (args->instance + 1)) - 1);
  569     }
  570 
  571     /*
  572      *  Spawn off servers to handle multi port connections.
  573      *  The (src address, src port, dst address, dst port) tuple
  574      *  is kept in the connection table for a default of 60 seconds
  575      *  which means for many fast short connections we can
  576      *  fill this table up and new connections get blocked until
  577      *  this table empties. One strategy is to reduce TIME_WAIT (not
  578      *  good) so the easiest way forward is to just increase the
  579      *  number of ports being listened to to increase the tuple
  580      *  range and hence allow more connections.  See
  581      *  http://vincent.bernat.im/en/blog/2014-tcp-time-wait-state-linux.html
  582      *  Typically, we are limited to ~500 connections per second
  583      *  on a default Linux configuration.
  584      */
  585     (void)memset(pids, 0, sizeof(pids));
  586     for (i = 0; i < max_servers; i++) {
  587         pids[i] = epoll_spawn(args, epoll_server, i, ppid, epoll_port, epoll_domain);
  588         if (pids[i] < 0) {
  589             pr_fail_dbg("fork");
  590             goto reap;
  591         }
  592     }
  593 
  594     epoll_client(args, ppid, epoll_port, epoll_domain);
  595 reap:
  596     for (i = 0; i < max_servers; i++) {
  597         int status;
  598 
  599         if (pids[i] > 0) {
  600             (void)kill(pids[i], SIGKILL);
  601             if (waitpid(pids[i], &status, 0) < 0) {
  602                 pr_fail_dbg("waitpid");
  603             }
  604         }
  605     }
  606 
  607     return rc;
  608 }
/* stressor descriptor: full epoll implementation */
stressor_info_t stress_epoll_info = {
	.stressor = stress_epoll,
	.class = CLASS_NETWORK | CLASS_OS
};
#else
/* epoll/POSIX-timer support unavailable: register a no-op stressor */
stressor_info_t stress_epoll_info = {
	.stressor = stress_not_implemented,
	.class = CLASS_NETWORK | CLASS_OS
};
#endif