"Fossies" - the Fresh Open Source Software Archive

Member "stress-ng-0.09.59.1/stress-pthread.c" (8 Jun 2019, 10152 Bytes) of package /linux/privat/stress-ng-0.09.59.1.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can view or download the uninterpreted source code file here. For more information about "stress-pthread.c" see the Fossies "Dox" file reference documentation and the last Fossies "Diffs" side-by-side code changes report: 0.09.57_vs_0.09.58.

    1 /*
    2  * Copyright (C) 2013-2019 Canonical, Ltd.
    3  *
    4  * This program is free software; you can redistribute it and/or
    5  * modify it under the terms of the GNU General Public License
    6  * as published by the Free Software Foundation; either version 2
    7  * of the License, or (at your option) any later version.
    8  *
    9  * This program is distributed in the hope that it will be useful,
   10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
   11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   12  * GNU General Public License for more details.
   13  *
   14  * You should have received a copy of the GNU General Public License
   15  * along with this program; if not, write to the Free Software
   16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
   17  *
   18  * This code is a complete clean re-write of the stress tool by
   19  * Colin Ian King <colin.king@canonical.com> and attempts to be
   20  * backwardly compatible with the stress tool by Amos Waterland
   21  * <apw@rossby.metr.ou.edu> but has more stress tests and more
   22  * functionality.
   23  *
   24  */
   25 #include "stress-ng.h"
   26 
/* Command line help strings for the pthread stressor options */
static const help_t help[] = {
    { NULL, "pthread N",     "start N workers that create multiple threads" },
    { NULL, "pthread-ops N", "stop pthread workers after N bogo threads created" },
    { NULL, "pthread-max P", "create P threads at a time by each worker" },
    { NULL, NULL,        NULL }
};
   33 
#if defined(HAVE_LIB_PTHREAD)

/* Per-thread bookkeeping: pthread handle plus its kernel thread id */
typedef struct {
    pthread_t pthread;  /* handle from pthread_create() */
    pid_t     tid;      /* kernel tid (shim_gettid()), 0 until thread sets it */
} pthread_info_t;

/* State shared between the parent stressor and its threads */
static pthread_cond_t cond;              /* broadcast to tell threads to exit */
static pthread_mutex_t mutex;            /* pairs with cond for wait/broadcast */
static shim_pthread_spinlock_t spinlock; /* guards pthread_count updates */
static bool thread_terminate;            /* set true to ask all threads to die */
static uint64_t pthread_count;           /* count of threads that have started */
static sigset_t set;                     /* full signal set, blocked in threads */
static pthread_info_t pthreads[MAX_PTHREAD]; /* info for threads in flight */

#endif
   50 
   51 static int stress_set_pthread_max(const char *opt)
   52 {
   53     uint64_t pthread_max;
   54 
   55     pthread_max = get_uint64(opt);
   56     check_range("pthread-max", pthread_max,
   57         MIN_PTHREAD, MAX_PTHREAD);
   58     return set_setting("pthread-max", TYPE_ID_UINT64, &pthread_max);
   59 }
   60 
/* Option parser dispatch table for this stressor */
static const opt_set_func_t opt_set_funcs[] = {
    { OPT_pthread_max,  stress_set_pthread_max },
    { 0,            NULL }
};
   65 
   66 #if defined(HAVE_LIB_PTHREAD)
   67 
#if defined(HAVE_GET_ROBUST_LIST) && defined(HAVE_LINUX_FUTEX_H)
/*
 *  sys_get_robust_list()
 *	raw syscall wrapper for get_robust_list(2); glibc provides
 *	no wrapper for this call
 */
static inline long sys_get_robust_list(int pid, struct robust_list_head **head_ptr, size_t *len_ptr)
{
    return syscall(__NR_get_robust_list, pid, head_ptr, len_ptr);
}
#endif
   74 
#if defined(HAVE_SET_ROBUST_LIST) && defined(HAVE_LINUX_FUTEX_H)
/*
 *  sys_set_robust_list()
 *	raw syscall wrapper for set_robust_list(2); glibc provides
 *	no wrapper for this call
 */
static inline long sys_set_robust_list(struct robust_list_head *head, size_t len)
{
    return syscall(__NR_set_robust_list, head, len);
}
#endif
   81 
   82 /*
   83  *  stress_pthread_func()
   84  *  pthread that exits immediately
   85  */
   86 static void *stress_pthread_func(void *parg)
   87 {
   88     uint8_t stack[SIGSTKSZ + STACK_ALIGNMENT];
   89     static void *nowt = NULL;
   90     int ret;
   91     const pid_t tid = shim_gettid();
   92 #if defined(HAVE_GET_ROBUST_LIST) && defined(HAVE_LINUX_FUTEX_H)
   93     struct robust_list_head *head;
   94     size_t len;
   95 #endif
   96     const args_t *args = ((pthread_args_t *)parg)->args;
   97 
   98     /*
   99      *  Block all signals, let controlling thread
  100      *  handle these
  101      */
  102 #if !defined(__APPLE__) && !defined(__DragonFly__)
  103     (void)sigprocmask(SIG_BLOCK, &set, NULL);
  104 #endif
  105 
  106     /*
  107      *  According to POSIX.1 a thread should have
  108      *  a distinct alternative signal stack.
  109      *  However, we block signals in this thread
  110      *  so this is probably just totally unncessary.
  111      */
  112     (void)memset(stack, 0, sizeof(stack));
  113     if (stress_sigaltstack(stack, SIGSTKSZ) < 0)
  114         goto die;
  115 
  116 #if defined(HAVE_GETTID)
  117     {
  118         pthread_info_t *pi = ((pthread_args_t *)parg)->data;
  119         pi->tid = shim_gettid();
  120     }
  121 #endif
  122 
  123 #if defined(HAVE_GET_ROBUST_LIST) && defined(HAVE_LINUX_FUTEX_H)
  124     /*
  125      *  Check that get_robust_list() works OK
  126      */
  127     if (sys_get_robust_list(0, &head, &len) < 0) {
  128         if (errno != ENOSYS) {
  129             pr_fail("%s: get_robust_list failed, tid=%d, errno=%d (%s)",
  130                 args->name, tid, errno, strerror(errno));
  131             goto die;
  132         }
  133     } else {
  134 #if defined(HAVE_SET_ROBUST_LIST) && defined(HAVE_LINUX_FUTEX_H)
  135         if (sys_set_robust_list(head, len) < 0) {
  136             if (errno != ENOSYS) {
  137                 pr_fail("%s: set_robust_list failed, tid=%d, errno=%d (%s)",
  138                     args->name, tid, errno, strerror(errno));
  139                 goto die;
  140             }
  141         }
  142 #endif
  143     }
  144 #endif
  145 
  146     /*
  147      *  Bump count of running threads
  148      */
  149     ret = shim_pthread_spin_lock(&spinlock);
  150     if (ret) {
  151         pr_fail("%s: pthread_spin_lock failed, tid=%d, errno=%d (%s)",
  152             args->name, (int)tid, ret, strerror(ret));
  153         goto die;
  154     }
  155     pthread_count++;
  156     ret = shim_pthread_spin_unlock(&spinlock);
  157     if (ret) {
  158         pr_fail("%s: pthread_spin_unlock failed, tid=%d, errno=%d (%s)",
  159             args->name, (int)tid, ret, strerror(ret));
  160         goto die;
  161     }
  162 
  163     if (thread_terminate)
  164         goto die;
  165 
  166     /*
  167      *  Wait for controlling thread to
  168      *  indicate it is time to die
  169      */
  170     ret = pthread_mutex_lock(&mutex);
  171     if (ret) {
  172         pr_fail("%s: pthread_mutex_lock failed, tid=%d, errno=%d (%s)",
  173             args->name, (int)tid, ret, strerror(ret));
  174         goto die;
  175     }
  176     while (!thread_terminate) {
  177         ret = pthread_cond_wait(&cond, &mutex);
  178         if (ret) {
  179             pr_fail("%s: pthread_cond_wait failed, tid=%d, errno=%d (%s)",
  180                 args->name, (int)tid, ret, strerror(ret));
  181             break;
  182         }
  183         (void)shim_sched_yield();
  184     }
  185     ret = pthread_mutex_unlock(&mutex);
  186     if (ret)
  187         pr_fail("%s: pthread_mutex_unlock failed, tid=%d, errno=%d (%s)",
  188             args->name, (int)tid, ret, strerror(ret));
  189 
  190 #if defined(HAVE_SETNS)
  191     {
  192         int fd;
  193 
  194         fd = open("/proc/self/ns/uts", O_RDONLY);
  195         if (fd >= 0) {
  196             /*
  197              *  Capabilities have been dropped
  198              *  so this will always fail, but
  199              *  lets exercise it anyhow.
  200              */
  201             (void)setns(fd, 0);
  202             (void)close(fd);
  203         }
  204     }
  205 #endif
  206 die:
  207     return &nowt;
  208 }
  209 
  210 /*
  211  *  stress_pthread()
  212  *  stress by creating pthreads
  213  */
  214 static int stress_pthread(const args_t *args)
  215 {
  216     bool ok = true;
  217     bool locked = false;
  218     bool try_unlock = true;
  219     uint64_t limited = 0, attempted = 0;
  220     uint64_t pthread_max = DEFAULT_PTHREAD;
  221     int ret;
  222     pthread_args_t pargs = { args, NULL };
  223 
  224 #if defined(SIGUSR2)
  225     if (stress_sighandler(args->name, SIGUSR2, SIG_IGN, NULL) < 0)
  226         return EXIT_FAILURE;
  227 #endif
  228 
  229     if (!get_setting("pthread-max", &pthread_max)) {
  230         if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
  231             pthread_max = MAX_PTHREAD;
  232         if (g_opt_flags & OPT_FLAGS_MINIMIZE)
  233             pthread_max = MIN_PTHREAD;
  234     }
  235 
  236     ret = pthread_cond_init(&cond, NULL);
  237     if (ret) {
  238         pr_fail("%s: pthread_cond_init failed, errno=%d (%s)",
  239             args->name, ret, strerror(ret));
  240         return EXIT_FAILURE;
  241     }
  242     ret = shim_pthread_spin_init(&spinlock, SHIM_PTHREAD_PROCESS_SHARED);
  243     if (ret) {
  244         pr_fail("%s: pthread_spin_init failed, errno=%d (%s)",
  245             args->name, ret, strerror(ret));
  246         return EXIT_FAILURE;
  247     }
  248     ret = pthread_mutex_init(&mutex, NULL);
  249     if (ret) {
  250         pr_fail("%s: pthread_mutex_init failed, errno=%d (%s)",
  251             args->name, ret, strerror(ret));
  252         return EXIT_FAILURE;
  253     }
  254 
  255     (void)sigfillset(&set);
  256     do {
  257         uint64_t i, j;
  258 
  259         thread_terminate = false;
  260         pthread_count = 0;
  261 
  262         (void)memset(&pthreads, 0, sizeof(pthreads));
  263 
  264         for (i = 0; (i < pthread_max) && (!args->max_ops || get_counter(args) < args->max_ops); i++) {
  265             pargs.data = (void *)&pthreads[i];
  266 
  267             ret = pthread_create(&pthreads[i].pthread, NULL,
  268                 stress_pthread_func, (void *)&pargs);
  269             if (ret) {
  270                 /* Out of resources, don't try any more */
  271                 if (ret == EAGAIN) {
  272                     limited++;
  273                     break;
  274                 }
  275                 /* Something really unexpected */
  276                 pr_fail("%s: pthread_create failed, errno=%d (%s)",
  277                     args->name, ret, strerror(ret));
  278                 ok = false;
  279                 break;
  280             }
  281             inc_counter(args);
  282             if (!g_keep_stressing_flag)
  283                 break;
  284         }
  285         attempted++;
  286 
  287         /*
  288          *  Wait until they are all started or
  289          *  we get bored waiting..
  290          */
  291         for (j = 0; j < 1000; j++) {
  292             bool all_running = false;
  293 
  294             if (!locked) {
  295                 ret = pthread_mutex_lock(&mutex);
  296                 if (ret) {
  297                     pr_fail("%s: pthread_mutex_lock failed (parent), errno=%d (%s)",
  298                         args->name, ret, strerror(ret));
  299                     ok = false;
  300                     goto reap;
  301                 }
  302                 locked = true;
  303             }
  304             all_running = (pthread_count == i);
  305 
  306             if (locked) {
  307                 ret = pthread_mutex_unlock(&mutex);
  308                 if (ret) {
  309                     pr_fail("%s: pthread_mutex_unlock failed (parent), errno=%d (%s)",
  310                         args->name, ret, strerror(ret));
  311                     ok = false;
  312                     /* We failed to unlock, so don't try again on reap */
  313                     try_unlock = false;
  314                     goto reap;
  315                 }
  316                 locked = false;
  317             }
  318 
  319             if (all_running)
  320                 break;
  321         }
  322 
  323         if (!locked) {
  324             ret = pthread_mutex_lock(&mutex);
  325             if (ret) {
  326                 pr_fail("%s: pthread_mutex_lock failed (parent), errno=%d (%s)",
  327                     args->name, ret, strerror(ret));
  328                 ok = false;
  329                 goto reap;
  330             }
  331             locked = true;
  332         }
  333 reap:
  334 
  335 #if defined(HAVE_TGKILL) && defined(SIGUSR2)
  336         for (j = 0; j < i; j++) {
  337             if (pthreads[j].tid)
  338                 (void)syscall(__NR_tgkill, args->pid, pthreads[j].tid, SIGUSR2);
  339         }
  340 #endif
  341         thread_terminate = true;
  342         ret = pthread_cond_broadcast(&cond);
  343         if (ret) {
  344             pr_fail("%s: pthread_cond_broadcast failed (parent), errno=%d (%s)",
  345                 args->name, ret, strerror(ret));
  346             ok = false;
  347             /* fall through and unlock */
  348         }
  349         if (locked && try_unlock) {
  350             ret = pthread_mutex_unlock(&mutex);
  351             if (ret) {
  352                 pr_fail("%s: pthread_mutex_unlock failed (parent), errno=%d (%s)",
  353                     args->name, ret, strerror(ret));
  354                 ok = false;
  355             } else {
  356                 locked = false;
  357             }
  358         }
  359         for (j = 0; j < i; j++) {
  360             ret = pthread_join(pthreads[j].pthread, NULL);
  361             if ((ret) && (ret != ESRCH)) {
  362                 pr_fail("%s: pthread_join failed (parent), errno=%d (%s)",
  363                     args->name, ret, strerror(ret));
  364                 ok = false;
  365             }
  366         }
  367     } while (ok && keep_stressing());
  368 
  369     if (limited) {
  370         pr_inf("%s: %.2f%% of iterations could not reach "
  371             "requested %" PRIu64 " threads (instance %"
  372             PRIu32 ")\n",
  373             args->name,
  374             100.0 * (double)limited / (double)attempted,
  375             pthread_max, args->instance);
  376     }
  377 
  378     (void)pthread_cond_destroy(&cond);
  379     (void)pthread_mutex_destroy(&mutex);
  380     (void)shim_pthread_spin_destroy(&spinlock);
  381 
  382     return EXIT_SUCCESS;
  383 }
  384 
/* Stressor registration: pthread support available */
stressor_info_t stress_pthread_info = {
    .stressor = stress_pthread,
    .class = CLASS_SCHEDULER | CLASS_OS,
    .opt_set_funcs = opt_set_funcs,
    .help = help
};
  391 #else
/* Stressor registration: no pthread library, report not implemented */
stressor_info_t stress_pthread_info = {
    .stressor = stress_not_implemented,
    .class = CLASS_SCHEDULER | CLASS_OS,
    .opt_set_funcs = opt_set_funcs,
    .help = help
};
  398 #endif