"Fossies" - the Fresh Open Source Software Archive

Member "stress-ng-0.09.56/stress-pthread.c" (15 Mar 2019, 9640 Bytes) of package /linux/privat/stress-ng-0.09.56.tar.xz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "stress-pthread.c" see the Fossies "Dox" file reference documentation and the last Fossies "Diffs" side-by-side code changes report: 0.09.52_vs_0.09.54.

/*
 * Copyright (C) 2013-2019 Canonical, Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * This code is a complete clean re-write of the stress tool by
 * Colin Ian King <colin.king@canonical.com> and attempts to be
 * backwardly compatible with the stress tool by Amos Waterland
 * <apw@rossby.metr.ou.edu> but has more stress tests and more
 * functionality.
 *
 */
#include "stress-ng.h"

#if defined(HAVE_LIB_PTHREAD)

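/* per-pthread book keeping: the pthread handle and its kernel thread id */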
typedef struct {
    pthread_t pthread;
    pid_t     tid;
} pthread_info_t;

static pthread_cond_t cond;
static pthread_mutex_t mutex;
static shim_pthread_spinlock_t spinlock;
static bool thread_terminate;
static uint64_t pthread_count;
static sigset_t set;
static pthread_info_t pthreads[MAX_PTHREAD];

#endif

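/*
 *  stress_set_pthread_max()
 *  set the maximum number of pthreads allowed (--pthread-max)
 */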
int stress_set_pthread_max(const char *opt)
{
    uint64_t pthread_max;

    pthread_max = get_uint64(opt);
    check_range("pthread-max", pthread_max,
        MIN_PTHREAD, MAX_PTHREAD);
    return set_setting("pthread-max", TYPE_ID_UINT64, &pthread_max);
}

#if defined(HAVE_LIB_PTHREAD)

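/*
 *  Thin wrappers around the raw get_robust_list()/set_robust_list()
 *  system calls; libc does not normally provide wrappers for these,
 *  so they are invoked via syscall().
 */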
#if defined(HAVE_GET_ROBUST_LIST) && defined(HAVE_LINUX_FUTEX_H)
static inline long sys_get_robust_list(int pid, struct robust_list_head **head_ptr, size_t *len_ptr)
{
    return syscall(__NR_get_robust_list, pid, head_ptr, len_ptr);
}
#endif

#if defined(HAVE_SET_ROBUST_LIST) && defined(HAVE_LINUX_FUTEX_H)
static inline long sys_set_robust_list(struct robust_list_head *head, size_t len)
{
    return syscall(__NR_set_robust_list, head, len);
}
#endif

/*
 *  stress_pthread_func()
 *  pthread that waits until the controlling thread tells it to exit
 */
static void *stress_pthread_func(void *parg)
{
    uint8_t stack[SIGSTKSZ + STACK_ALIGNMENT];
    static void *nowt = NULL;
    int ret;
    const pid_t tid = shim_gettid();
#if defined(HAVE_GET_ROBUST_LIST) && defined(HAVE_LINUX_FUTEX_H)
    struct robust_list_head *head;
    size_t len;
#endif
    const args_t *args = ((pthread_args_t *)parg)->args;

    /*
     *  Block all signals, let controlling thread
     *  handle these
     */
#if !defined(__APPLE__) && !defined(__DragonFly__)
    (void)sigprocmask(SIG_BLOCK, &set, NULL);
#endif

    /*
     *  According to POSIX.1 a thread should have
     *  a distinct alternative signal stack.
     *  However, we block signals in this thread
     *  so this is probably unnecessary.
     */
    (void)memset(stack, 0, sizeof(stack));
    if (stress_sigaltstack(stack, SIGSTKSZ) < 0)
        goto die;

#if defined(HAVE_GETTID)
    {
        pthread_info_t *pi = ((pthread_args_t *)parg)->data;
        pi->tid = shim_gettid();
    }
#endif

#if defined(HAVE_GET_ROBUST_LIST) && defined(HAVE_LINUX_FUTEX_H)
    /*
     *  Check that get_robust_list() works OK
     */
    if (sys_get_robust_list(0, &head, &len) < 0) {
        if (errno != ENOSYS) {
            pr_fail("%s: get_robust_list failed, tid=%d, errno=%d (%s)",
                args->name, tid, errno, strerror(errno));
            goto die;
        }
    } else {
#if defined(HAVE_SET_ROBUST_LIST) && defined(HAVE_LINUX_FUTEX_H)
        if (sys_set_robust_list(head, len) < 0) {
            if (errno != ENOSYS) {
                pr_fail("%s: set_robust_list failed, tid=%d, errno=%d (%s)",
                    args->name, tid, errno, strerror(errno));
                goto die;
            }
        }
#endif
    }
#endif

    /*
     *  Bump count of running threads
     */
    ret = shim_pthread_spin_lock(&spinlock);
    if (ret) {
        pr_fail("%s: pthread_spin_lock failed, tid=%d, errno=%d (%s)",
            args->name, (int)tid, ret, strerror(ret));
        goto die;
    }
    pthread_count++;
    ret = shim_pthread_spin_unlock(&spinlock);
    if (ret) {
        pr_fail("%s: pthread_spin_unlock failed, tid=%d, errno=%d (%s)",
            args->name, (int)tid, ret, strerror(ret));
        goto die;
    }

    if (thread_terminate)
        goto die;

    /*
     *  Wait for controlling thread to
     *  indicate it is time to die
     */
    ret = pthread_mutex_lock(&mutex);
    if (ret) {
        pr_fail("%s: pthread_mutex_lock failed, tid=%d, errno=%d (%s)",
            args->name, (int)tid, ret, strerror(ret));
        goto die;
    }
    while (!thread_terminate) {
        ret = pthread_cond_wait(&cond, &mutex);
        if (ret) {
            pr_fail("%s: pthread_cond_wait failed, tid=%d, errno=%d (%s)",
                args->name, (int)tid, ret, strerror(ret));
            break;
        }
        (void)shim_sched_yield();
    }
    ret = pthread_mutex_unlock(&mutex);
    if (ret)
        pr_fail("%s: pthread_mutex_unlock failed, tid=%d, errno=%d (%s)",
            args->name, (int)tid, ret, strerror(ret));

#if defined(HAVE_SETNS)
    {
        int fd;

        fd = open("/proc/self/ns/uts", O_RDONLY);
        if (fd >= 0) {
            /*
             *  Capabilities have been dropped
             *  so this will always fail, but
             *  let's exercise it anyhow.
             */
            (void)setns(fd, 0);
            (void)close(fd);
        }
    }
#endif
die:
    return &nowt;
}

/*
 *  stress_pthread()
 *  stress by creating pthreads
 */
static int stress_pthread(const args_t *args)
{
    bool ok = true;
    bool locked = false;
    bool try_unlock = true;
    uint64_t limited = 0, attempted = 0;
    uint64_t pthread_max = DEFAULT_PTHREAD;
    int ret;
    pthread_args_t pargs = { args, NULL };

#if defined(SIGUSR2)
    if (stress_sighandler(args->name, SIGUSR2, SIG_IGN, NULL) < 0)
        return EXIT_FAILURE;
#endif

    if (!get_setting("pthread-max", &pthread_max)) {
        if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
            pthread_max = MAX_PTHREAD;
        if (g_opt_flags & OPT_FLAGS_MINIMIZE)
            pthread_max = MIN_PTHREAD;
    }

    ret = pthread_cond_init(&cond, NULL);
    if (ret) {
        pr_fail("%s pthread_cond_init failed, errno=%d (%s)",
            args->name, ret, strerror(ret));
        return EXIT_FAILURE;
    }
    ret = shim_pthread_spin_init(&spinlock, SHIM_PTHREAD_PROCESS_SHARED);
    if (ret) {
        pr_fail("%s pthread_spin_init failed, errno=%d (%s)",
            args->name, ret, strerror(ret));
        return EXIT_FAILURE;
    }
    ret = pthread_mutex_init(&mutex, NULL);
    if (ret) {
        pr_fail("%s pthread_mutex_init failed, errno=%d (%s)",
            args->name, ret, strerror(ret));
        return EXIT_FAILURE;
    }

    (void)sigfillset(&set);
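    /*
     *  Main stressor loop: create up to pthread_max pthreads,
     *  wait for them all to start (or give up waiting), then
     *  tell them to terminate and reap them.
     */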
    do {
        uint64_t i, j;

        thread_terminate = false;
        pthread_count = 0;

        (void)memset(&pthreads, 0, sizeof(pthreads));

        for (i = 0; (i < pthread_max) && (!args->max_ops || get_counter(args) < args->max_ops); i++) {
            pargs.data = (void *)&pthreads[i];

            ret = pthread_create(&pthreads[i].pthread, NULL,
                stress_pthread_func, (void *)&pargs);
            if (ret) {
                /* Out of resources, don't try any more */
                if (ret == EAGAIN) {
                    limited++;
                    break;
                }
                /* Something really unexpected */
                pr_fail("%s pthread_create failed, errno=%d (%s)",
                    args->name, ret, strerror(ret));
                ok = false;
                break;
            }
            inc_counter(args);
            if (!g_keep_stressing_flag)
                break;
        }
        attempted++;

        /*
         *  Wait until they are all started or
         *  we get bored waiting..
         */
        for (j = 0; j < 1000; j++) {
            bool all_running = false;

            if (!locked) {
                ret = pthread_mutex_lock(&mutex);
                if (ret) {
                    pr_fail("%s pthread_mutex_lock failed (parent), errno=%d (%s)",
                        args->name, ret, strerror(ret));
                    ok = false;
                    goto reap;
                }
                locked = true;
            }
            all_running = (pthread_count == i);

            if (locked) {
                ret = pthread_mutex_unlock(&mutex);
                if (ret) {
                    pr_fail("%s pthread_mutex_unlock failed (parent), errno=%d (%s)",
                        args->name, ret, strerror(ret));
                    ok = false;
                    /* We failed to unlock, so don't try again on reap */
                    try_unlock = false;
                    goto reap;
                }
                locked = false;
            }

            if (all_running)
                break;
        }

        if (!locked) {
            ret = pthread_mutex_lock(&mutex);
            if (ret) {
                pr_fail("%s pthread_mutex_lock failed (parent), errno=%d (%s)",
                    args->name, ret, strerror(ret));
                ok = false;
                goto reap;
            }
            locked = true;
        }
reap:

#if defined(HAVE_TGKILL) && defined(SIGUSR2)
        for (j = 0; j < i; j++) {
            if (pthreads[j].tid)
                (void)syscall(__NR_tgkill, args->pid, pthreads[j].tid, SIGUSR2);
        }
#endif
        thread_terminate = true;
        ret = pthread_cond_broadcast(&cond);
        if (ret) {
            pr_fail("%s pthread_cond_broadcast failed (parent), errno=%d (%s)",
                args->name, ret, strerror(ret));
            ok = false;
            /* fall through and unlock */
        }
        if (locked && try_unlock) {
            ret = pthread_mutex_unlock(&mutex);
            if (ret) {
                pr_fail("%s pthread_mutex_unlock failed (parent), errno=%d (%s)",
                    args->name, ret, strerror(ret));
                ok = false;
            } else {
                locked = false;
            }
        }
        for (j = 0; j < i; j++) {
            ret = pthread_join(pthreads[j].pthread, NULL);
            if ((ret) && (ret != ESRCH)) {
                pr_fail("%s pthread_join failed (parent), errno=%d (%s)",
                    args->name, ret, strerror(ret));
                ok = false;
            }
        }
    } while (ok && keep_stressing());

    if (limited) {
        pr_inf("%s: %.2f%% of iterations could not reach "
            "requested %" PRIu64 " threads (instance %"
            PRIu32 ")\n",
            args->name,
            100.0 * (double)limited / (double)attempted,
            pthread_max, args->instance);
    }

    (void)pthread_cond_destroy(&cond);
    (void)pthread_mutex_destroy(&mutex);
    (void)shim_pthread_spin_destroy(&spinlock);

    return EXIT_SUCCESS;
}

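/*
 *  stress_pthread_info:
 *  stressor entry point and class registration
 */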
stressor_info_t stress_pthread_info = {
    .stressor = stress_pthread,
    .class = CLASS_SCHEDULER | CLASS_OS
};
#else
stressor_info_t stress_pthread_info = {
    .stressor = stress_not_implemented,
    .class = CLASS_SCHEDULER | CLASS_OS
};
#endif