"Fossies" - the Fresh Open Source Software Archive

Member "stress-ng-0.09.56/stress-cyclic.c" (15 Mar 2019, 19926 Bytes) of package /linux/privat/stress-ng-0.09.56.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "stress-cyclic.c" see the Fossies "Dox" file reference documentation and the last Fossies "Diffs" side-by-side code changes report: 0.09.52_vs_0.09.54.

/*
 * Copyright (C) 2013-2019 Canonical, Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * This code is a complete clean re-write of the stress tool by
 * Colin Ian King <colin.king@canonical.com> and attempts to be
 * backwardly compatible with the stress tool by Amos Waterland
 * <apw@rossby.metr.ou.edu> but has more stress tests and more
 * functionality.
 *
 */
#include "stress-ng.h"

#define DEFAULT_DELAY_NS    (100000)
#define MAX_SAMPLES     (10000)
#define MAX_BUCKETS     (250)
#define NANOSECS        (1000000000)
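/*
 *  defaults: a 100000 ns (100 microsecond) wakeup interval, with at most
 *  10000 latency samples kept and at most 250 distribution buckets shown
 */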

typedef struct {
    const int   policy;     /* scheduler policy */
    const char  *name;      /* name of scheduler policy */
    const char  *opt_name;  /* option name */
} policy_t;

typedef struct {
    int64_t     min_ns;     /* min latency */
    int64_t     max_ns;     /* max latency */
    int64_t     latencies[MAX_SAMPLES];
    size_t      index;      /* index into latencies */
    int32_t     min_prio;   /* min priority allowed */
    int32_t     max_prio;   /* max priority allowed */
    double      ns;         /* total nanosecond latency */
    double      latency_mean;   /* average latency */
    int64_t     latency_mode;   /* first mode */
    double      std_dev;    /* standard deviation */
} rt_stats_t;

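/*
 *  a cyclic measurement method: sleeps or polls for cyclic_sleep
 *  nanoseconds and records the observed latency in rt_stats
 */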
typedef int (*cyclic_func)(const args_t *args, rt_stats_t *rt_stats, uint64_t cyclic_sleep);

typedef struct {
    const char      *name;
    const cyclic_func   func;
} stress_cyclic_method_info_t;

static const policy_t policies[] = {
#if defined(SCHED_DEADLINE)
    { SCHED_DEADLINE, "SCHED_DEADLINE",  "deadline" },
#endif
#if defined(SCHED_FIFO)
    { SCHED_FIFO,     "SCHED_FIFO",      "fifo" },
#endif
#if defined(SCHED_RR)
    { SCHED_RR,       "SCHED_RR",        "rr" },
#endif
};

static const size_t num_policies = SIZEOF_ARRAY(policies);

int stress_set_cyclic_sleep(const char *opt)
{
    uint64_t cyclic_sleep;

    cyclic_sleep = get_uint64(opt);
    check_range("cyclic-sleep", cyclic_sleep, 1, NANOSECS);
    return set_setting("cyclic-sleep", TYPE_ID_UINT64, &cyclic_sleep);
}

int stress_set_cyclic_policy(const char *opt)
{
    size_t policy;

    for (policy = 0; policy < num_policies; policy++) {
        if (!strcmp(opt, policies[policy].opt_name)) {
            set_setting("cyclic-policy", TYPE_ID_SIZE_T, &policy);
            return 0;
        }
    }
    (void)fprintf(stderr, "invalid cyclic-policy '%s', policies allowed are:", opt);
    for (policy = 0; policy < num_policies; policy++) {
        (void)fprintf(stderr, " %s", policies[policy].opt_name);
    }
    (void)fprintf(stderr, "\n");
    return -1;
}

int stress_set_cyclic_prio(const char *opt)
{
    int32_t cyclic_prio;

    cyclic_prio = get_int32(opt);
    check_range("cyclic-prio", cyclic_prio, 1, 100);
    return set_setting("cyclic-prio", TYPE_ID_INT32, &cyclic_prio);
}

int stress_set_cyclic_dist(const char *opt)
{
    uint64_t cyclic_dist;

    cyclic_dist = get_uint64(opt);
    check_range("cyclic-dist", cyclic_dist, 1, 10000000);
    return set_setting("cyclic-dist", TYPE_ID_UINT64, &cyclic_dist);
}
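
/*
 *  Note: the setters above back the --cyclic-sleep, --cyclic-policy,
 *  --cyclic-prio and --cyclic-dist command line options of stress-ng
 */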

#if defined(HAVE_CLOCK_GETTIME) &&  \
    defined(HAVE_CLOCK_NANOSLEEP)
/*
 *  stress_cyclic_clock_nanosleep()
 *  measure latencies with clock_nanosleep
 */
static int stress_cyclic_clock_nanosleep(
    const args_t *args,
    rt_stats_t *rt_stats,
    uint64_t cyclic_sleep)
{
    struct timespec t1, t2, t, trem;
    int ret;

    (void)args;

    t.tv_sec = cyclic_sleep / NANOSECS;
    t.tv_nsec = cyclic_sleep % NANOSECS;
    (void)clock_gettime(CLOCK_REALTIME, &t1);
    ret = clock_nanosleep(CLOCK_REALTIME, 0, &t, &trem);
    (void)clock_gettime(CLOCK_REALTIME, &t2);
    if (ret == 0) {
        int64_t delta_ns;

        delta_ns = ((int64_t)(t2.tv_sec - t1.tv_sec) * NANOSECS) +
               (t2.tv_nsec - t1.tv_nsec);
        delta_ns -= cyclic_sleep;

        if (rt_stats->index < MAX_SAMPLES)
            rt_stats->latencies[rt_stats->index++] = delta_ns;

        rt_stats->ns += (double)delta_ns;
    }
    return 0;
}
#endif
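
/*
 *  Note: each measurement method uses the same pattern: latency is
 *  (t2 - t1) - cyclic_sleep, the observed overshoot beyond the requested
 *  interval; CLOCK_REALTIME is used throughout, so wall clock adjustments
 *  can perturb individual samples
 */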

#if defined(HAVE_CLOCK_GETTIME) &&  \
    defined(HAVE_NANOSLEEP)
/*
 *  stress_cyclic_posix_nanosleep()
 *  measure latencies with POSIX nanosleep
 */
static int stress_cyclic_posix_nanosleep(
    const args_t *args,
    rt_stats_t *rt_stats,
    uint64_t cyclic_sleep)
{
    struct timespec t1, t2, t, trem;
    int ret;

    (void)args;

    t.tv_sec = cyclic_sleep / NANOSECS;
    t.tv_nsec = cyclic_sleep % NANOSECS;
    (void)clock_gettime(CLOCK_REALTIME, &t1);
    ret = nanosleep(&t, &trem);
    (void)clock_gettime(CLOCK_REALTIME, &t2);
    if (ret == 0) {
        int64_t delta_ns;

        delta_ns = ((int64_t)(t2.tv_sec - t1.tv_sec) * NANOSECS) +
               (t2.tv_nsec - t1.tv_nsec);
        delta_ns -= cyclic_sleep;

        if (rt_stats->index < MAX_SAMPLES)
            rt_stats->latencies[rt_stats->index++] = delta_ns;

        rt_stats->ns += (double)delta_ns;
    }
    return 0;
}
#endif

#if defined(HAVE_CLOCK_GETTIME)
/*
 *  stress_cyclic_poll()
 *  measure latencies by heavily polling the clock
 */
static int stress_cyclic_poll(
    const args_t *args,
    rt_stats_t *rt_stats,
    uint64_t cyclic_sleep)
{
    struct timespec t1, t2;

    (void)args;

    /* find nearest point to clock roll over */
    (void)clock_gettime(CLOCK_REALTIME, &t1);
    for (;;) {
        (void)clock_gettime(CLOCK_REALTIME, &t2);
        if ((t1.tv_sec != t2.tv_sec) || (t1.tv_nsec != t2.tv_nsec))
            break;
    }
    t1 = t2;

    for (;;) {
        int64_t delta_ns;

        (void)clock_gettime(CLOCK_REALTIME, &t2);

        delta_ns = ((int64_t)(t2.tv_sec - t1.tv_sec) * NANOSECS) +
               (t2.tv_nsec - t1.tv_nsec);
        if (delta_ns >= (int64_t)cyclic_sleep) {
            delta_ns -= cyclic_sleep;

            if (rt_stats->index < MAX_SAMPLES)
                rt_stats->latencies[rt_stats->index++] = delta_ns;

            rt_stats->ns += (double)delta_ns;
            break;
        }
    }
    return 0;
}
#endif
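
/*
 *  Note: the poll method never sleeps; it measures how far past the
 *  requested interval a busy-wait loop observes the clock, so it gauges
 *  clock_gettime overhead and preemption rather than wakeup latency
 */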

#if defined(HAVE_PSELECT) &&        \
    defined(HAVE_CLOCK_GETTIME)
/*
 *  stress_cyclic_pselect()
 *  measure latencies with pselect sleep
 */
static int stress_cyclic_pselect(
    const args_t *args,
    rt_stats_t *rt_stats,
    uint64_t cyclic_sleep)
{
    struct timespec t1, t2, t;
    int ret;

    (void)args;

    t.tv_sec = cyclic_sleep / NANOSECS;
    t.tv_nsec = cyclic_sleep % NANOSECS;
    (void)clock_gettime(CLOCK_REALTIME, &t1);
    ret = pselect(0, NULL, NULL, NULL, &t, NULL);
    (void)clock_gettime(CLOCK_REALTIME, &t2);
    if (ret == 0) {
        int64_t delta_ns;

        delta_ns = ((int64_t)(t2.tv_sec - t1.tv_sec) * NANOSECS) +
               (t2.tv_nsec - t1.tv_nsec);
        delta_ns -= cyclic_sleep;

        if (rt_stats->index < MAX_SAMPLES)
            rt_stats->latencies[rt_stats->index++] = delta_ns;

        rt_stats->ns += (double)delta_ns;
    }
    return 0;
}
#endif

#if defined(HAVE_CLOCK_GETTIME) &&  \
    defined(HAVE_TIMER_CREATE) &&   \
    defined(HAVE_TIMER_DELETE) &&   \
    defined(HAVE_TIMER_SETTIME)
static struct timespec itimer_time;

static void MLOCKED_TEXT stress_cyclic_itimer_handler(int sig)
{
    (void)sig;

    (void)clock_gettime(CLOCK_REALTIME, &itimer_time);
}

/*
 *  stress_cyclic_itimer()
 *  measure latencies with itimers
 */
static int stress_cyclic_itimer(
    const args_t *args,
    rt_stats_t *rt_stats,
    uint64_t cyclic_sleep)
{
    struct itimerspec timer;
    struct timespec t1;
    int64_t delta_ns;
    struct sigaction old_action;
    struct sigevent sev;
    timer_t timerid;
    int ret = -1;

    timer.it_interval.tv_sec = timer.it_value.tv_sec = cyclic_sleep / NANOSECS;
    timer.it_interval.tv_nsec = timer.it_value.tv_nsec = cyclic_sleep % NANOSECS;

    if (stress_sighandler(args->name, SIGRTMIN, stress_cyclic_itimer_handler, &old_action) < 0)
        return ret;

    sev.sigev_notify = SIGEV_SIGNAL;
    sev.sigev_signo = SIGRTMIN;
    sev.sigev_value.sival_ptr = &timerid;
    if (timer_create(CLOCK_REALTIME, &sev, &timerid) < 0)
        goto restore;

    (void)memset(&itimer_time, 0, sizeof(itimer_time));
    (void)clock_gettime(CLOCK_REALTIME, &t1);
    if (timer_settime(timerid, 0, &timer, NULL) < 0)
        goto tidy;  /* the timer exists, so cancel and delete it */

    (void)pause();
    if ((itimer_time.tv_sec == 0) &&
        (itimer_time.tv_nsec == 0))
        goto tidy;

    delta_ns = ((int64_t)(itimer_time.tv_sec - t1.tv_sec) * NANOSECS) +
           (itimer_time.tv_nsec - t1.tv_nsec);
    delta_ns -= cyclic_sleep;

    if (rt_stats->index < MAX_SAMPLES)
        rt_stats->latencies[rt_stats->index++] = delta_ns;

    rt_stats->ns += (double)delta_ns;

    ret = 0;
tidy:
    /* cancel, then delete the timer */
    (void)memset(&timer, 0, sizeof(timer));
    (void)timer_settime(timerid, 0, &timer, NULL);
    (void)timer_delete(timerid);
restore:
    stress_sigrestore(args->name, SIGRTMIN, &old_action);
    return ret;
}
#endif
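
/*
 *  Note: unlike the sleep based methods above, the itimer method measures
 *  the delay between arming a periodic POSIX timer and the delivery of its
 *  first SIGRTMIN signal
 */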

static sigjmp_buf jmp_env;

/*
 *  stress_rlimit_handler()
 *      rlimit generic handler
 */
static void MLOCKED_TEXT stress_rlimit_handler(int signum)
{
    (void)signum;

    g_keep_stressing_flag = 1;
    siglongjmp(jmp_env, 1);
}

/*
 *  stress_cyclic_cmp()
 *  sort latencies into order, least first
 */
static int stress_cyclic_cmp(const void *p1, const void *p2)
{
    const int64_t *i1 = (const int64_t *)p1;
    const int64_t *i2 = (const int64_t *)p2;

    if (*i1 > *i2)
        return 1;
    else if (*i1 < *i2)
        return -1;
    return 0;
}

/*
 *  stress_rt_stats()
 *  compute statistics on gathered latencies
 */
static void stress_rt_stats(rt_stats_t *rt_stats)
{
    size_t i;
    size_t n = 0, best_n = 0;
    int64_t current;
    double variance = 0.0;

    rt_stats->latency_mean = 0.0;
    rt_stats->latency_mode = 0;

    for (i = 0; i < rt_stats->index; i++) {
        int64_t ns = rt_stats->latencies[i];

        if (ns > rt_stats->max_ns)
            rt_stats->max_ns = ns;
        if (ns < rt_stats->min_ns)
            rt_stats->min_ns = ns;

        rt_stats->latency_mean += (double)ns;
    }
    if (rt_stats->index)
        rt_stats->latency_mean /= (double)rt_stats->index;

    qsort(rt_stats->latencies, rt_stats->index, sizeof(int64_t), stress_cyclic_cmp);

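    /*
     *  scan for the mode; this relies on the latencies having just been
     *  sorted so that equal values are adjacent
     */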
    current = rt_stats->latency_mode = rt_stats->latencies[0];

    for (i = 0; i < rt_stats->index; i++) {
        int64_t ns = rt_stats->latencies[i];
        double diff;

        if (ns == current) {
            n++;
            if (n > best_n) {
                rt_stats->latency_mode = current;
                best_n = n;
            }
        } else {
            current = ns;
            n = 1;  /* start of a new run, which already contains this sample */
        }
        diff = ((double)ns - rt_stats->latency_mean);
        variance += (diff * diff);
    }
    if (rt_stats->index) {
        variance /= rt_stats->index;
        rt_stats->std_dev = sqrt(variance);
    }
}

/*
 *  cyclic methods
 */
static const stress_cyclic_method_info_t cyclic_methods[] = {
#if defined(HAVE_CLOCK_GETTIME) &&  \
    defined(HAVE_CLOCK_NANOSLEEP)
    { "clock_ns",   stress_cyclic_clock_nanosleep },
#endif

#if defined(HAVE_CLOCK_GETTIME) &&  \
    defined(HAVE_TIMER_CREATE) &&   \
    defined(HAVE_TIMER_DELETE) &&   \
    defined(HAVE_TIMER_SETTIME)
    { "itimer", stress_cyclic_itimer },
#endif

#if defined(HAVE_CLOCK_GETTIME)
    { "poll",   stress_cyclic_poll },
#endif

#if defined(HAVE_CLOCK_GETTIME) &&  \
    defined(HAVE_NANOSLEEP)
    { "posix_ns",   stress_cyclic_posix_nanosleep },
#endif

#if defined(HAVE_PSELECT) &&        \
    defined(HAVE_CLOCK_GETTIME)
    { "pselect",    stress_cyclic_pselect },
#endif

    { NULL,     NULL }
};

/*
 *  stress_set_cyclic_method()
 *  set the default cyclic method
 */
int stress_set_cyclic_method(const char *name)
{
    stress_cyclic_method_info_t const *info;

    for (info = cyclic_methods; info->func; info++) {
        if (!strcmp(info->name, name)) {
            set_setting("cyclic-method", TYPE_ID_UINTPTR_T, &info);
            return 0;
        }
    }

    (void)fprintf(stderr, "cyclic-method must be one of:");
    for (info = cyclic_methods; info->func; info++) {
        (void)fprintf(stderr, " %s", info->name);
    }
    (void)fprintf(stderr, "\n");

    return -1;
}

/*
 *  stress_rt_dist()
 *  show real time distribution
 */
static void stress_rt_dist(
    const char *name,
    bool *lock,
    rt_stats_t *rt_stats,
    const uint64_t cyclic_dist)
{
    /* guard against a non-positive max_ns yielding a non-positive VLA size */
    ssize_t dist_max_size = (cyclic_dist > 0 && rt_stats->max_ns > 0) ?
        (ssize_t)(rt_stats->max_ns / (int64_t)cyclic_dist) + 1 : 1;
    ssize_t dist_size = STRESS_MINIMUM(MAX_BUCKETS, dist_max_size);
    const ssize_t dist_min = STRESS_MINIMUM(5, dist_max_size);
    ssize_t i, n;
    int64_t dist[dist_size];

    if (!cyclic_dist)
        return;

    (void)memset(dist, 0, sizeof(dist));

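    /*
     *  bucket each latency sample into cyclic_dist ns wide intervals;
     *  samples beyond the last bucket are not displayed
     */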
    for (i = 0; i < (ssize_t)rt_stats->index; i++) {
        int64_t lat = rt_stats->latencies[i] / cyclic_dist;

        if (lat < (int64_t)dist_size)
            dist[lat]++;
    }

    for (n = dist_size; n >= 1; n--) {
        if (dist[n - 1])
            break;
    }
    if (n < dist_min)
        n = dist_min;
    if (n >= dist_size - 3)
        n = dist_size;

    pr_inf_lock(lock, "%s: latency distribution (%" PRIu64 " ns intervals):\n", name, cyclic_dist);
    pr_inf_lock(lock, "%s: (for the first %zd buckets of %zd)\n", name, dist_size, dist_max_size);
    pr_inf_lock(lock, "%s: %12s %10s\n", name, "latency (ns)", "frequency");
    for (i = 0; i < n; i++) {
        pr_inf_lock(lock, "%s: %12" PRIu64 " %10" PRId64 "\n",
            name, cyclic_dist * i, dist[i]);
    }

    /*
     *  This caters for the case where there are lots of zeros at
     *  the end of the distribution
     */
    if (n < dist_size) {
        pr_inf_lock(lock, "%s: %12s %10s (all zeros hereafter)\n", name, "..", "..");
        pr_inf_lock(lock, "%s: %12s %10s\n", name, "..", "..");
        for (i = STRESS_MAXIMUM(dist_size - 3, n); i < dist_size; i++) {
            pr_inf_lock(lock, "%s: %12" PRIu64 " %10" PRId64 "\n",
                name, cyclic_dist * i, (int64_t)0);
        }
    }
}

/*
 *  stress_cyclic_supported()
 *      check if this stressor can run; root privilege is needed
 *      to set the real time scheduling policies
 */
static int stress_cyclic_supported(void)
{
    if (geteuid() != 0) {
        pr_inf("stress-cyclic stressor needs to be run as root to "
            "set SCHED_RR, SCHED_FIFO or SCHED_DEADLINE priorities, "
            "skipping this stressor\n");
        return -1;
    }
    return 0;
}

static int stress_cyclic(const args_t *args)
{
    const stress_cyclic_method_info_t *cyclic_method = &cyclic_methods[0];
    const uint32_t num_instances = args->num_instances;
    struct sigaction old_action_xcpu;
    struct rlimit rlim;
    pid_t pid;
    NOCLOBBER uint64_t timeout;
    uint64_t cyclic_sleep = DEFAULT_DELAY_NS;
    uint64_t cyclic_dist = 0;
    int32_t cyclic_prio = INT32_MAX;
    int policy;
    size_t cyclic_policy = 0;
    const double start = time_now();
    rt_stats_t *rt_stats;
    const size_t page_size = args->page_size;
    const size_t size = (sizeof(rt_stats_t) + page_size - 1) & (~(page_size - 1));
    cyclic_func func;

    timeout = g_opt_timeout;
    (void)get_setting("cyclic-sleep", &cyclic_sleep);
    (void)get_setting("cyclic-prio", &cyclic_prio);
    (void)get_setting("cyclic-policy", &cyclic_policy);
    (void)get_setting("cyclic-dist", &cyclic_dist);
    (void)get_setting("cyclic-method", &cyclic_method);

    func = cyclic_method->func;
    policy = policies[cyclic_policy].policy;

    if (!args->instance) {
        if (num_policies == 0) {
            pr_inf("%s: no scheduling policies "
                "available, skipping test\n",
                args->name);
            return EXIT_NOT_IMPLEMENTED;
        }
    }

    if (g_opt_timeout == TIMEOUT_NOT_SET) {
        timeout = 60;
        pr_inf("%s: timeout has not been set, forcing timeout to "
            "be %" PRIu64 " seconds\n", args->name, timeout);
    }

    if ((num_instances > 1) && (args->instance == 0)) {
        pr_inf("%s: for best results, run just 1 instance of "
            "this stressor\n", args->name);
    }

    rt_stats = (rt_stats_t *)mmap(NULL, size, PROT_READ | PROT_WRITE,
            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (rt_stats == MAP_FAILED) {
        pr_inf("%s: mmap of shared policy data failed: %d (%s)\n",
            args->name, errno, strerror(errno));
        return EXIT_NO_RESOURCE;
    }
    rt_stats->min_ns = INT64_MAX;
    rt_stats->max_ns = INT64_MIN;
    rt_stats->ns = 0.0;
#if defined(HAVE_SCHED_GET_PRIORITY_MIN)
    rt_stats->min_prio = sched_get_priority_min(policy);
#else
    rt_stats->min_prio = 0;
#endif

#if defined(HAVE_SCHED_GET_PRIORITY_MAX)
    rt_stats->max_prio = sched_get_priority_max(policy);
#else
    rt_stats->max_prio = 0;
#endif
    /* if the user has set a maximum priority, cap max_prio to it */
    if (cyclic_prio != INT32_MAX) {
        if (rt_stats->max_prio > cyclic_prio) {
            rt_stats->max_prio = cyclic_prio;
        }
    }

    if (args->instance == 0)
        pr_dbg("%s: using method '%s'\n", args->name, cyclic_method->name);

    pid = fork();
    if (pid < 0) {
        pr_inf("%s: cannot fork, errno=%d (%s)\n",
            args->name, errno, strerror(errno));
        return EXIT_NO_RESOURCE;
    } else if (pid == 0) {
#if defined(HAVE_SCHED_GET_PRIORITY_MIN) && \
    defined(HAVE_SCHED_GET_PRIORITY_MAX)
        const pid_t mypid = getpid();
#endif
#if defined(HAVE_ATOMIC)
        uint32_t count;
#endif
        int ret;
        NOCLOBBER int rc = EXIT_FAILURE;

#if defined(HAVE_ATOMIC)
        __sync_fetch_and_add(&g_shared->softlockup_count, 1);

        /*
         * Wait until all instances have reached this point
         */
        do {
            if ((time_now() - start) > (double)timeout)
                goto tidy_ok;
            (void)usleep(50000);
            __atomic_load(&g_shared->softlockup_count, &count, __ATOMIC_RELAXED);
        } while (keep_stressing() && count < num_instances);
#endif

        /*
         * We run the stressor as a child so that if we hit the
         * hard time limits the child is terminated with a SIGKILL
         * and the parent can catch that
         */
        rlim.rlim_cur = timeout;
        rlim.rlim_max = timeout;
        (void)setrlimit(RLIMIT_CPU, &rlim);

#if defined(RLIMIT_RTTIME)
        rlim.rlim_cur = 1000000 * timeout;
        rlim.rlim_max = 1000000 * timeout;
        (void)setrlimit(RLIMIT_RTTIME, &rlim);
#endif

        if (stress_sighandler(args->name, SIGXCPU, stress_rlimit_handler, &old_action_xcpu) < 0)
            goto tidy;

        ret = sigsetjmp(jmp_env, 1);
        if (ret)
            goto tidy_ok;

#if defined(HAVE_SCHED_GET_PRIORITY_MIN) && \
    defined(HAVE_SCHED_GET_PRIORITY_MAX)
        ret = stress_set_sched(mypid, policy, rt_stats->max_prio, args->instance != 0);
        if (ret < 0) {
            if (errno != EPERM) {
                pr_fail("%s: sched_setscheduler "
                    "failed: errno=%d (%s) "
                    "for scheduler policy %s\n",
                    args->name, errno, strerror(errno),
                    policies[cyclic_policy].name);
            }
            goto tidy;
        }
#endif

        do {
            func(args, rt_stats, cyclic_sleep);
            inc_counter(args);

            /* Ensure we NEVER spin forever */
            if ((time_now() - start) > (double)timeout)
                break;
        } while (keep_stressing());

tidy_ok:
        rc = EXIT_SUCCESS;
tidy:
        (void)fflush(stdout);
        _exit(rc);
    } else {
        int status, ret;

        ret = stress_set_sched(args->pid, policy, rt_stats->max_prio, true);
        (void)ret;

        (void)pause();
        (void)kill(pid, SIGKILL);
#if defined(HAVE_ATOMIC)
        __sync_fetch_and_sub(&g_shared->softlockup_count, 1);
#endif

        (void)waitpid(pid, &status, 0);
    }

    stress_rt_stats(rt_stats);

    if (args->instance == 0) {
        if (rt_stats->index) {
            size_t i;
            bool lock = false;

            static const float percentiles[] = {
                25.0,
                50.0,
                75.0,
                90.0,
                95.40,
                99.0,
                99.5,
                99.9,
                99.99,
            };

            pr_lock(&lock);
            pr_inf_lock(&lock, "%s: sched %s: %" PRIu64 " ns delay, %zd samples\n",
                args->name,
                policies[cyclic_policy].name,
                cyclic_sleep,
                rt_stats->index);
            pr_inf_lock(&lock, "%s:   mean: %.2f ns, mode: %" PRId64 " ns\n",
                args->name,
                rt_stats->latency_mean,
                rt_stats->latency_mode);
            pr_inf_lock(&lock, "%s:   min: %" PRId64 " ns, max: %" PRId64 " ns, std.dev. %.2f\n",
                args->name,
                rt_stats->min_ns,
                rt_stats->max_ns,
                rt_stats->std_dev);

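            /*
             *  percentiles are read straight off the sorted latency
             *  array: sample index = samples * percentile / 100
             */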
            pr_inf_lock(&lock, "%s: latency percentiles:\n", args->name);
            for (i = 0; i < sizeof(percentiles) / sizeof(percentiles[0]); i++) {
                size_t j = (size_t)(((double)rt_stats->index * percentiles[i]) / 100.0);
                pr_inf_lock(&lock, "%s:   %5.2f%%: %10" PRId64 " ns\n",
                    args->name,
                    percentiles[i],
                    rt_stats->latencies[j]);
            }
            stress_rt_dist(args->name, &lock, rt_stats, cyclic_dist);
            pr_unlock(&lock);
        } else {
            pr_inf("%s: %10s: no latency information available\n",
                args->name,
                policies[cyclic_policy].name);
        }
    }

    (void)munmap((void *)rt_stats, size);

    return EXIT_SUCCESS;
}

stressor_info_t stress_cyclic_info = {
    .stressor = stress_cyclic,
    .supported = stress_cyclic_supported,
    .class = CLASS_SCHEDULER | CLASS_OS
};
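
/*
 *  Example use (illustrative; see stress-ng(1) for the authoritative
 *  option list):
 *      sudo stress-ng --cyclic 1 --cyclic-method clock_ns \
 *          --cyclic-sleep 100000 --cyclic-dist 1000 -t 60
 */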