"Fossies" - the Fresh Open Source Software Archive

Member "bind-9.17.5/lib/isc/task.c" (4 Sep 2020, 52071 Bytes) of package /linux/misc/dns/bind9/9.17.5/bind-9.17.5.tar.xz:



    1 /*
    2  * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
    3  *
    4  * This Source Code Form is subject to the terms of the Mozilla Public
    5  * License, v. 2.0. If a copy of the MPL was not distributed with this
    6  * file, You can obtain one at http://mozilla.org/MPL/2.0/.
    7  *
    8  * See the COPYRIGHT file distributed with this work for additional
    9  * information regarding copyright ownership.
   10  */
   11 
   12 /*! \file */
   13 
   14 /*
   15  * XXXRTH  Need to document the states a task can be in, and the rules
   16  * for changing states.
   17  */
   18 
   19 #include <stdbool.h>
   20 
   21 #include <isc/app.h>
   22 #include <isc/atomic.h>
   23 #include <isc/condition.h>
   24 #include <isc/event.h>
   25 #include <isc/magic.h>
   26 #include <isc/mem.h>
   27 #include <isc/once.h>
   28 #include <isc/platform.h>
   29 #include <isc/print.h>
   30 #include <isc/random.h>
   31 #include <isc/refcount.h>
   32 #include <isc/string.h>
   33 #include <isc/task.h>
   34 #include <isc/thread.h>
   35 #include <isc/time.h>
   36 #include <isc/util.h>
   37 
   38 #ifdef HAVE_LIBXML2
   39 #include <libxml/xmlwriter.h>
   40 #define ISC_XMLCHAR (const xmlChar *)
   41 #endif /* HAVE_LIBXML2 */
   42 
   43 #ifdef HAVE_JSON_C
   44 #include <json_object.h>
   45 #endif /* HAVE_JSON_C */
   46 
   47 #ifdef OPENSSL_LEAKS
   48 #include <openssl/err.h>
   49 #endif /* ifdef OPENSSL_LEAKS */
   50 
   51 /*
    52  * The task manager is built around the concept of doing as little
    53  * locking as possible.  Each thread has its own queue of tasks to run:
    54  * a task in the running state stays on the runner it is currently on,
    55  * while an idle task can be woken up on a specific runner with
    56  * isc_task_sendto - which helps with CPU data locality.
    57  *
    58  * To even out the load, some tasks (from task pools) are bound to
    59  * specific queues using isc_task_create_bound.  This way load balancing
    60  * between CPUs/queues happens at a higher layer.
   61  */
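
/*
 * A minimal sketch of the two mechanisms described above (the names
 * 'mgr', 'mytask' and 'myevent' are illustrative, not part of this file):
 *
 *     // Bound at creation time: the task always runs on queue 2.
 *     isc_task_create_bound(mgr, 0, &mytask, 2);
 *
 *     // Unbound task, but this event is steered to queue 2 with
 *     // isc_task_sendto() so related work keeps its CPU data locality.
 *     isc_task_sendto(mytask, &myevent, 2);
 */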
   62 
   63 #ifdef ISC_TASK_TRACE
   64 #define XTRACE(m)                                                            \
   65     fprintf(stderr, "task %p thread %lu: %s\n", task, isc_thread_self(), \
   66         (m))
   67 #define XTTRACE(t, m) \
   68     fprintf(stderr, "task %p thread %lu: %s\n", (t), isc_thread_self(), (m))
   69 #define XTHREADTRACE(m) \
   70     fprintf(stderr, "thread %lu: %s\n", isc_thread_self(), (m))
   71 #else /* ifdef ISC_TASK_TRACE */
   72 #define XTRACE(m)
   73 #define XTTRACE(t, m)
   74 #define XTHREADTRACE(m)
   75 #endif /* ifdef ISC_TASK_TRACE */
   76 
   77 /***
   78  *** Types.
   79  ***/
   80 
   81 typedef enum {
   82     task_state_idle,    /* not doing anything, events queue empty */
   83     task_state_ready,   /* waiting in worker's queue */
   84     task_state_paused,  /* not running, paused */
   85     task_state_pausing, /* running, waiting to be paused */
   86     task_state_running, /* actively processing events */
   87     task_state_done     /* shutting down, no events or references */
   88 } task_state_t;
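
/*
 * A rough summary of the lifecycle, as implemented by task_send(),
 * task_shutdown() and dispatch() below (a sketch, not an exhaustive
 * transition table):
 *
 *     idle    -> ready    an event or shutdown is posted to an idle task
 *     ready   -> running  a worker pops the task off its ready queue
 *     running -> idle     the event queue drains, task not yet finished
 *     running -> ready    the quantum expires with events still queued
 *     running -> done     no references remain and shutdown was posted
 *     pausing -> paused   the pause takes effect once the task stops running
 */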
   89 
   90 #if defined(HAVE_LIBXML2) || defined(HAVE_JSON_C)
   91 static const char *statenames[] = {
   92     "idle", "ready", "paused", "pausing", "running", "done",
   93 };
   94 #endif /* if defined(HAVE_LIBXML2) || defined(HAVE_JSON_C) */
   95 
   96 #define TASK_MAGIC    ISC_MAGIC('T', 'A', 'S', 'K')
   97 #define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
   98 
   99 typedef struct isc__task isc__task_t;
  100 typedef struct isc__taskmgr isc__taskmgr_t;
  101 typedef struct isc__taskqueue isc__taskqueue_t;
  102 
  103 struct isc__task {
  104     /* Not locked. */
  105     isc_task_t common;
  106     isc__taskmgr_t *manager;
  107     isc_mutex_t lock;
  108     /* Locked by task lock. */
  109     task_state_t state;
  110     int pause_cnt;
  111     isc_refcount_t references;
  112     isc_eventlist_t events;
  113     isc_eventlist_t on_shutdown;
  114     unsigned int nevents;
  115     unsigned int quantum;
  116     isc_stdtime_t now;
  117     isc_time_t tnow;
  118     char name[16];
  119     void *tag;
  120     unsigned int threadid;
  121     bool bound;
  122     /* Protected by atomics */
  123     atomic_uint_fast32_t flags;
  124     /* Locked by task manager lock. */
  125     LINK(isc__task_t) link;
  126     LINK(isc__task_t) ready_link;
  127     LINK(isc__task_t) ready_priority_link;
  128 };
  129 
  130 #define TASK_F_SHUTTINGDOWN 0x01
  131 #define TASK_F_PRIVILEGED   0x02
  132 
  133 #define TASK_SHUTTINGDOWN(t) \
  134     ((atomic_load_acquire(&(t)->flags) & TASK_F_SHUTTINGDOWN) != 0)
  135 #define TASK_PRIVILEGED(t) \
  136     ((atomic_load_acquire(&(t)->flags) & TASK_F_PRIVILEGED) != 0)
  137 
  138 #define TASK_FLAG_SET(t, f) atomic_fetch_or_release(&(t)->flags, (f))
  139 #define TASK_FLAG_CLR(t, f) atomic_fetch_and_release(&(t)->flags, ~(f))
  140 
  141 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
  142 #define VALID_MANAGER(m)   ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
  143 
  144 typedef ISC_LIST(isc__task_t) isc__tasklist_t;
  145 
  146 struct isc__taskqueue {
  147     /* Everything locked by lock */
  148     isc_mutex_t lock;
  149     isc__tasklist_t ready_tasks;
  150     isc__tasklist_t ready_priority_tasks;
  151     isc_condition_t work_available;
  152     isc_thread_t thread;
  153     unsigned int threadid;
  154     isc__taskmgr_t *manager;
  155 };
  156 
  157 struct isc__taskmgr {
  158     /* Not locked. */
  159     isc_taskmgr_t common;
  160     isc_mem_t *mctx;
  161     isc_mutex_t lock;
  162     isc_mutex_t halt_lock;
  163     isc_condition_t halt_cond;
  164     unsigned int workers;
  165     atomic_uint_fast32_t tasks_running;
  166     atomic_uint_fast32_t tasks_ready;
  167     atomic_uint_fast32_t curq;
  168     atomic_uint_fast32_t tasks_count;
  169     isc__taskqueue_t *queues;
  170     isc_nm_t *nm;
  171 
  172     /* Locked by task manager lock. */
  173     unsigned int default_quantum;
  174     LIST(isc__task_t) tasks;
  175     atomic_uint_fast32_t mode;
  176     atomic_bool pause_req;
  177     atomic_bool exclusive_req;
  178     atomic_bool exiting;
  179 
  180     /* Locked by halt_lock */
  181     unsigned int halted;
  182 
  183     /*
  184      * Multiple threads can read/write 'excl' at the same time, so we need
  185      * to protect the access.  We can't use 'lock' since isc_task_detach()
  186      * will try to acquire it.
  187      */
  188     isc_mutex_t excl_lock;
  189     isc__task_t *excl;
  190 };
  191 
  192 void
  193 isc__taskmgr_pause(isc_taskmgr_t *manager0);
  194 void
  195 isc__taskmgr_resume(isc_taskmgr_t *manager0);
  196 
  197 #define DEFAULT_DEFAULT_QUANTUM 25
  198 #define FINISHED(m)                              \
  199     (atomic_load_relaxed(&((m)->exiting)) && \
  200      atomic_load(&(m)->tasks_count) == 0)
  201 
  202 /*%
  203  * The following are intended for internal use (indicated by "isc__"
  204  * prefix) but are not declared as static, allowing direct access from
  205  * unit tests etc.
  206  */
  207 
  208 bool
  209 isc_task_purgeevent(isc_task_t *task0, isc_event_t *event);
  210 void
  211 isc_taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0);
  212 isc_result_t
  213 isc_taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp);
  214 static inline bool
  215 empty_readyq(isc__taskmgr_t *manager, int c);
  216 
  217 static inline isc__task_t *
  218 pop_readyq(isc__taskmgr_t *manager, int c);
  219 
  220 static inline void
  221 push_readyq(isc__taskmgr_t *manager, isc__task_t *task, int c);
  222 
  223 static inline void
  224 wake_all_queues(isc__taskmgr_t *manager);
  225 
  226 /***
  227  *** Tasks.
  228  ***/
  229 
  230 static inline void
  231 wake_all_queues(isc__taskmgr_t *manager) {
  232     for (unsigned int i = 0; i < manager->workers; i++) {
  233         LOCK(&manager->queues[i].lock);
  234         BROADCAST(&manager->queues[i].work_available);
  235         UNLOCK(&manager->queues[i].lock);
  236     }
  237 }
  238 
  239 static void
  240 task_finished(isc__task_t *task) {
  241     isc__taskmgr_t *manager = task->manager;
  242     REQUIRE(EMPTY(task->events));
  243     REQUIRE(task->nevents == 0);
  244     REQUIRE(EMPTY(task->on_shutdown));
  245     REQUIRE(task->state == task_state_done);
  246 
  247     XTRACE("task_finished");
  248 
  249     isc_refcount_destroy(&task->references);
  250 
  251     LOCK(&manager->lock);
  252     UNLINK(manager->tasks, task, link);
  253     atomic_fetch_sub(&manager->tasks_count, 1);
  254     UNLOCK(&manager->lock);
  255     if (FINISHED(manager)) {
  256         /*
  257          * All tasks have completed and the
  258          * task manager is exiting.  Wake up
  259          * any idle worker threads so they
  260          * can exit.
  261          */
  262         wake_all_queues(manager);
  263     }
  264     isc_mutex_destroy(&task->lock);
  265     task->common.impmagic = 0;
  266     task->common.magic = 0;
  267     isc_mem_put(manager->mctx, task, sizeof(*task));
  268 }
  269 
  270 isc_result_t
  271 isc_task_create(isc_taskmgr_t *manager0, unsigned int quantum,
  272         isc_task_t **taskp) {
  273     return (isc_task_create_bound(manager0, quantum, taskp, -1));
  274 }
  275 
  276 isc_result_t
  277 isc_task_create_bound(isc_taskmgr_t *manager0, unsigned int quantum,
  278               isc_task_t **taskp, int threadid) {
  279     isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
  280     isc__task_t *task;
  281     bool exiting;
  282 
  283     REQUIRE(VALID_MANAGER(manager));
  284     REQUIRE(taskp != NULL && *taskp == NULL);
  285 
  286     task = isc_mem_get(manager->mctx, sizeof(*task));
  287     XTRACE("isc_task_create");
  288     task->manager = manager;
  289 
  290     if (threadid == -1) {
  291         /*
   292          * The task is not pinned to a queue; its threadid will be
   293          * chosen when the first event is sent to it - either
   294          * randomly or as specified by isc_task_sendto.
  295          */
  296         task->bound = false;
  297         task->threadid = 0;
  298     } else {
  299         /*
   300          * The task is pinned to a queue and will always be run
   301          * by a specific thread.
  302          */
  303         task->bound = true;
  304         task->threadid = threadid % manager->workers;
  305     }
  306 
  307     isc_mutex_init(&task->lock);
  308     task->state = task_state_idle;
  309     task->pause_cnt = 0;
  310 
  311     isc_refcount_init(&task->references, 1);
  312     INIT_LIST(task->events);
  313     INIT_LIST(task->on_shutdown);
  314     task->nevents = 0;
  315     task->quantum = (quantum > 0) ? quantum : manager->default_quantum;
  316     atomic_init(&task->flags, 0);
  317     task->now = 0;
  318     isc_time_settoepoch(&task->tnow);
  319     memset(task->name, 0, sizeof(task->name));
  320     task->tag = NULL;
  321     INIT_LINK(task, link);
  322     INIT_LINK(task, ready_link);
  323     INIT_LINK(task, ready_priority_link);
  324 
  325     exiting = false;
  326     LOCK(&manager->lock);
  327     if (!atomic_load_relaxed(&manager->exiting)) {
  328         APPEND(manager->tasks, task, link);
  329         atomic_fetch_add(&manager->tasks_count, 1);
  330     } else {
  331         exiting = true;
  332     }
  333     UNLOCK(&manager->lock);
  334 
  335     if (exiting) {
  336         isc_mutex_destroy(&task->lock);
  337         isc_mem_put(manager->mctx, task, sizeof(*task));
  338         return (ISC_R_SHUTTINGDOWN);
  339     }
  340 
  341     task->common.magic = ISCAPI_TASK_MAGIC;
  342     task->common.impmagic = TASK_MAGIC;
  343     *taskp = (isc_task_t *)task;
  344 
  345     return (ISC_R_SUCCESS);
  346 }
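
/*
 * A minimal usage sketch, assuming an existing task manager 'taskmgr'
 * (the variable names are illustrative):
 *
 *     isc_task_t *task = NULL;
 *     isc_result_t result;
 *
 *     // Quantum 0 means "use the manager's default quantum".
 *     result = isc_task_create(taskmgr, 0, &task);
 *     if (result == ISC_R_SUCCESS) {
 *         isc_task_setname(task, "worker", NULL);
 *         // ... send events to the task ...
 *         isc_task_detach(&task);
 *     }
 */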
  347 
  348 void
  349 isc_task_attach(isc_task_t *source0, isc_task_t **targetp) {
  350     isc__task_t *source = (isc__task_t *)source0;
  351 
  352     /*
  353      * Attach *targetp to source.
  354      */
  355 
  356     REQUIRE(VALID_TASK(source));
  357     REQUIRE(targetp != NULL && *targetp == NULL);
  358 
  359     XTTRACE(source, "isc_task_attach");
  360 
  361     isc_refcount_increment(&source->references);
  362 
  363     *targetp = (isc_task_t *)source;
  364 }
  365 
  366 static inline bool
  367 task_shutdown(isc__task_t *task) {
  368     bool was_idle = false;
  369     isc_event_t *event, *prev;
  370 
  371     /*
  372      * Caller must be holding the task's lock.
  373      */
  374 
  375     XTRACE("task_shutdown");
  376 
  377     if (!TASK_SHUTTINGDOWN(task)) {
  378         XTRACE("shutting down");
  379         TASK_FLAG_SET(task, TASK_F_SHUTTINGDOWN);
  380         if (task->state == task_state_idle) {
  381             INSIST(EMPTY(task->events));
  382             task->state = task_state_ready;
  383             was_idle = true;
  384         }
  385         INSIST(task->state == task_state_ready ||
  386                task->state == task_state_paused ||
  387                task->state == task_state_pausing ||
  388                task->state == task_state_running);
  389 
  390         /*
  391          * Note that we post shutdown events LIFO.
  392          */
  393         for (event = TAIL(task->on_shutdown); event != NULL;
  394              event = prev) {
  395             prev = PREV(event, ev_link);
  396             DEQUEUE(task->on_shutdown, event, ev_link);
  397             ENQUEUE(task->events, event, ev_link);
  398             task->nevents++;
  399         }
  400     }
  401 
  402     return (was_idle);
  403 }
  404 
  405 /*
  406  * Moves a task onto the appropriate run queue.
  407  *
  408  * Caller must NOT hold queue lock.
  409  */
  410 static inline void
  411 task_ready(isc__task_t *task) {
  412     isc__taskmgr_t *manager = task->manager;
  413     bool has_privilege = isc_task_privilege((isc_task_t *)task);
  414 
  415     REQUIRE(VALID_MANAGER(manager));
  416 
  417     XTRACE("task_ready");
  418     LOCK(&manager->queues[task->threadid].lock);
  419     push_readyq(manager, task, task->threadid);
  420     if (atomic_load(&manager->mode) == isc_taskmgrmode_normal ||
  421         has_privilege) {
  422         SIGNAL(&manager->queues[task->threadid].work_available);
  423     }
  424     UNLOCK(&manager->queues[task->threadid].lock);
  425 }
  426 
  427 static inline bool
  428 task_detach(isc__task_t *task) {
  429     /*
  430      * Caller must be holding the task lock.
  431      */
  432 
  433     XTRACE("detach");
  434 
  435     if (isc_refcount_decrement(&task->references) == 1 &&
  436         task->state == task_state_idle)
  437     {
  438         INSIST(EMPTY(task->events));
  439         /*
  440          * There are no references to this task, and no
  441          * pending events.  We could try to optimize and
  442          * either initiate shutdown or clean up the task,
  443          * depending on its state, but it's easier to just
  444          * make the task ready and allow run() or the event
  445          * loop to deal with shutting down and termination.
  446          */
  447         task->state = task_state_ready;
  448         return (true);
  449     }
  450 
  451     return (false);
  452 }
  453 
  454 void
  455 isc_task_detach(isc_task_t **taskp) {
  456     isc__task_t *task;
  457     bool was_idle;
  458 
  459     /*
  460      * Detach *taskp from its task.
  461      */
  462 
  463     REQUIRE(taskp != NULL);
  464     task = (isc__task_t *)*taskp;
  465     REQUIRE(VALID_TASK(task));
  466 
  467     XTRACE("isc_task_detach");
  468 
  469     LOCK(&task->lock);
  470     was_idle = task_detach(task);
  471     UNLOCK(&task->lock);
  472 
  473     if (was_idle) {
  474         task_ready(task);
  475     }
  476 
  477     *taskp = NULL;
  478 }
  479 
  480 static inline bool
  481 task_send(isc__task_t *task, isc_event_t **eventp, int c) {
  482     bool was_idle = false;
  483     isc_event_t *event;
  484 
  485     /*
  486      * Caller must be holding the task lock.
  487      */
  488 
  489     REQUIRE(eventp != NULL);
  490     event = *eventp;
  491     *eventp = NULL;
  492     REQUIRE(event != NULL);
  493     REQUIRE(event->ev_type > 0);
  494     REQUIRE(task->state != task_state_done);
  495     REQUIRE(!ISC_LINK_LINKED(event, ev_ratelink));
  496 
  497     XTRACE("task_send");
  498 
  499     if (task->state == task_state_idle) {
  500         was_idle = true;
  501         task->threadid = c;
  502         INSIST(EMPTY(task->events));
  503         task->state = task_state_ready;
  504     }
  505     INSIST(task->state == task_state_ready ||
  506            task->state == task_state_running ||
  507            task->state == task_state_paused ||
  508            task->state == task_state_pausing);
  509     ENQUEUE(task->events, event, ev_link);
  510     task->nevents++;
  511 
  512     return (was_idle);
  513 }
  514 
  515 void
  516 isc_task_send(isc_task_t *task0, isc_event_t **eventp) {
  517     isc_task_sendto(task0, eventp, -1);
  518 }
  519 
  520 void
  521 isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
  522     isc_task_sendtoanddetach(taskp, eventp, -1);
  523 }
  524 
  525 void
  526 isc_task_sendto(isc_task_t *task0, isc_event_t **eventp, int c) {
  527     isc__task_t *task = (isc__task_t *)task0;
  528     bool was_idle;
  529 
  530     /*
  531      * Send '*event' to 'task'.
  532      */
  533 
  534     REQUIRE(VALID_TASK(task));
  535     XTRACE("isc_task_send");
  536 
  537     /*
  538      * We're trying hard to hold locks for as short a time as possible.
  539      * We're also trying to hold as few locks as possible.  This is why
  540      * some processing is deferred until after the lock is released.
  541      */
  542     LOCK(&task->lock);
  543     /* If task is bound ignore provided cpu. */
  544     if (task->bound) {
  545         c = task->threadid;
  546     } else if (c < 0) {
  547         c = atomic_fetch_add_explicit(&task->manager->curq, 1,
  548                           memory_order_relaxed);
  549     }
  550     c %= task->manager->workers;
  551     was_idle = task_send(task, eventp, c);
  552     UNLOCK(&task->lock);
  553 
  554     if (was_idle) {
  555         /*
  556          * We need to add this task to the ready queue.
  557          *
  558          * We've waited until now to do it because making a task
  559          * ready requires locking the manager.  If we tried to do
  560          * this while holding the task lock, we could deadlock.
  561          *
  562          * We've changed the state to ready, so no one else will
  563          * be trying to add this task to the ready queue.  The
  564          * only way to leave the ready state is by executing the
  565          * task.  It thus doesn't matter if events are added,
  566          * removed, or a shutdown is started in the interval
  567          * between the time we released the task lock, and the time
  568          * we add the task to the ready queue.
  569          */
  570         task_ready(task);
  571     }
  572 }
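
/*
 * A minimal sketch of posting an event, assuming a task 'task', a memory
 * context 'mctx' and an application-defined event type MYAPP_EVENT_WORK
 * (all illustrative names, not part of this file):
 *
 *     static void
 *     work_action(isc_task_t *task, isc_event_t *event) {
 *         UNUSED(task);
 *         // ... handle the work carried by 'event' ...
 *         isc_event_free(&event);
 *     }
 *
 *     isc_event_t *event = isc_event_allocate(mctx, NULL, MYAPP_EVENT_WORK,
 *                                             work_action, NULL,
 *                                             sizeof(*event));
 *     isc_task_send(task, &event);  // or isc_task_sendto(task, &event, c)
 *     // 'event' is NULL on return; ownership has passed to the task.
 */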
  573 
  574 void
  575 isc_task_sendtoanddetach(isc_task_t **taskp, isc_event_t **eventp, int c) {
  576     bool idle1, idle2;
  577     isc__task_t *task;
  578 
  579     /*
  580      * Send '*event' to '*taskp' and then detach '*taskp' from its
  581      * task.
  582      */
  583 
  584     REQUIRE(taskp != NULL);
  585     task = (isc__task_t *)*taskp;
  586     REQUIRE(VALID_TASK(task));
  587     XTRACE("isc_task_sendanddetach");
  588 
  589     LOCK(&task->lock);
  590     if (task->bound) {
  591         c = task->threadid;
  592     } else if (c < 0) {
  593         c = atomic_fetch_add_explicit(&task->manager->curq, 1,
  594                           memory_order_relaxed);
  595     }
  596     c %= task->manager->workers;
  597     idle1 = task_send(task, eventp, c);
  598     idle2 = task_detach(task);
  599     UNLOCK(&task->lock);
  600 
  601     /*
   602      * If idle1 is true, idle2 cannot also be true: we hold the task lock
   603      * the whole time, so the task cannot switch from ready back to
   604      * idle between the two calls.
  605      */
  606     INSIST(!(idle1 && idle2));
  607 
  608     if (idle1 || idle2) {
  609         task_ready(task);
  610     }
  611 
  612     *taskp = NULL;
  613 }
  614 
  615 #define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
  616 
  617 static unsigned int
  618 dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
  619            isc_eventtype_t last, void *tag, isc_eventlist_t *events,
  620            bool purging) {
  621     isc_event_t *event, *next_event;
  622     unsigned int count = 0;
  623 
  624     REQUIRE(VALID_TASK(task));
  625     REQUIRE(last >= first);
  626 
  627     XTRACE("dequeue_events");
  628 
  629     /*
  630      * Events matching 'sender', whose type is >= first and <= last, and
  631      * whose tag is 'tag' will be dequeued.  If 'purging', matching events
  632      * which are marked as unpurgable will not be dequeued.
  633      *
  634      * sender == NULL means "any sender", and tag == NULL means "any tag".
  635      */
  636 
  637     LOCK(&task->lock);
  638 
  639     for (event = HEAD(task->events); event != NULL; event = next_event) {
  640         next_event = NEXT(event, ev_link);
  641         if (event->ev_type >= first && event->ev_type <= last &&
  642             (sender == NULL || event->ev_sender == sender) &&
  643             (tag == NULL || event->ev_tag == tag) &&
  644             (!purging || PURGE_OK(event)))
  645         {
  646             DEQUEUE(task->events, event, ev_link);
  647             task->nevents--;
  648             ENQUEUE(*events, event, ev_link);
  649             count++;
  650         }
  651     }
  652 
  653     UNLOCK(&task->lock);
  654 
  655     return (count);
  656 }
  657 
  658 unsigned int
  659 isc_task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
  660             isc_eventtype_t last, void *tag) {
  661     isc__task_t *task = (isc__task_t *)task0;
  662     unsigned int count;
  663     isc_eventlist_t events;
  664     isc_event_t *event, *next_event;
  665     REQUIRE(VALID_TASK(task));
  666 
  667     /*
  668      * Purge events from a task's event queue.
  669      */
  670 
  671     XTRACE("isc_task_purgerange");
  672 
  673     ISC_LIST_INIT(events);
  674 
  675     count = dequeue_events(task, sender, first, last, tag, &events, true);
  676 
  677     for (event = HEAD(events); event != NULL; event = next_event) {
  678         next_event = NEXT(event, ev_link);
  679         ISC_LIST_UNLINK(events, event, ev_link);
  680         isc_event_free(&event);
  681     }
  682 
  683     /*
  684      * Note that purging never changes the state of the task.
  685      */
  686 
  687     return (count);
  688 }
  689 
  690 unsigned int
  691 isc_task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
  692            void *tag) {
  693     /*
  694      * Purge events from a task's event queue.
  695      */
  696     REQUIRE(VALID_TASK(task));
  697 
  698     XTRACE("isc_task_purge");
  699 
  700     return (isc_task_purgerange(task, sender, type, type, tag));
  701 }
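
/*
 * A small usage sketch: drop all pending events of one type from one
 * sender before tearing that sender down ('obj' and MYAPP_EVENT_WORK are
 * illustrative names):
 *
 *     unsigned int n = isc_task_purge(task, obj, MYAPP_EVENT_WORK, NULL);
 *     // The n matching events were dequeued and freed; events marked
 *     // ISC_EVENTATTR_NOPURGE are left on the queue.
 */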
  702 
  703 bool
  704 isc_task_purgeevent(isc_task_t *task0, isc_event_t *event) {
  705     isc__task_t *task = (isc__task_t *)task0;
  706     isc_event_t *curr_event, *next_event;
  707 
  708     /*
  709      * Purge 'event' from a task's event queue.
  710      *
  711      * XXXRTH:  WARNING:  This method may be removed before beta.
  712      */
  713 
  714     REQUIRE(VALID_TASK(task));
  715 
  716     /*
  717      * If 'event' is on the task's event queue, it will be purged,
  718      * unless it is marked as unpurgeable.  'event' does not have to be
  719      * on the task's event queue; in fact, it can even be an invalid
  720      * pointer.  Purging only occurs if the event is actually on the task's
  721      * event queue.
  722      *
  723      * Purging never changes the state of the task.
  724      */
  725 
  726     LOCK(&task->lock);
  727     for (curr_event = HEAD(task->events); curr_event != NULL;
  728          curr_event = next_event)
  729     {
  730         next_event = NEXT(curr_event, ev_link);
  731         if (curr_event == event && PURGE_OK(event)) {
  732             DEQUEUE(task->events, curr_event, ev_link);
  733             task->nevents--;
  734             break;
  735         }
  736     }
  737     UNLOCK(&task->lock);
  738 
  739     if (curr_event == NULL) {
  740         return (false);
  741     }
  742 
  743     isc_event_free(&curr_event);
  744 
  745     return (true);
  746 }
  747 
  748 unsigned int
  749 isc_task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
  750              isc_eventtype_t last, void *tag, isc_eventlist_t *events) {
  751     /*
  752      * Remove events from a task's event queue.
  753      */
  754     REQUIRE(VALID_TASK(task));
  755 
  756     XTRACE("isc_task_unsendrange");
  757 
  758     return (dequeue_events((isc__task_t *)task, sender, first, last, tag,
  759                    events, false));
  760 }
  761 
  762 unsigned int
  763 isc_task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type, void *tag,
  764         isc_eventlist_t *events) {
  765     /*
  766      * Remove events from a task's event queue.
  767      */
  768 
  769     XTRACE("isc_task_unsend");
  770 
  771     return (dequeue_events((isc__task_t *)task, sender, type, type, tag,
  772                    events, false));
  773 }
  774 
  775 isc_result_t
  776 isc_task_onshutdown(isc_task_t *task0, isc_taskaction_t action, void *arg) {
  777     isc__task_t *task = (isc__task_t *)task0;
  778     bool disallowed = false;
  779     isc_result_t result = ISC_R_SUCCESS;
  780     isc_event_t *event;
  781 
  782     /*
  783      * Send a shutdown event with action 'action' and argument 'arg' when
  784      * 'task' is shutdown.
  785      */
  786 
  787     REQUIRE(VALID_TASK(task));
  788     REQUIRE(action != NULL);
  789 
  790     event = isc_event_allocate(task->manager->mctx, NULL,
  791                    ISC_TASKEVENT_SHUTDOWN, action, arg,
  792                    sizeof(*event));
  793 
  794     if (TASK_SHUTTINGDOWN(task)) {
  795         disallowed = true;
  796         result = ISC_R_SHUTTINGDOWN;
  797     } else {
  798         LOCK(&task->lock);
  799         ENQUEUE(task->on_shutdown, event, ev_link);
  800         UNLOCK(&task->lock);
  801     }
  802 
  803     if (disallowed) {
  804         isc_mem_put(task->manager->mctx, event, sizeof(*event));
  805     }
  806 
  807     return (result);
  808 }
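
/*
 * A minimal sketch of registering a shutdown action (the names are
 * illustrative):
 *
 *     static void
 *     shutdown_action(isc_task_t *task, isc_event_t *event) {
 *         // Runs when the task shuts down; release per-task state here.
 *         UNUSED(task);
 *         isc_event_free(&event);
 *     }
 *
 *     result = isc_task_onshutdown(task, shutdown_action, mydata);
 *     // Returns ISC_R_SHUTTINGDOWN if shutdown has already started.
 */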
  809 
  810 void
  811 isc_task_shutdown(isc_task_t *task0) {
  812     isc__task_t *task = (isc__task_t *)task0;
  813     bool was_idle;
  814 
  815     /*
  816      * Shutdown 'task'.
  817      */
  818 
  819     REQUIRE(VALID_TASK(task));
  820 
  821     LOCK(&task->lock);
  822     was_idle = task_shutdown(task);
  823     UNLOCK(&task->lock);
  824 
  825     if (was_idle) {
  826         task_ready(task);
  827     }
  828 }
  829 
  830 void
  831 isc_task_destroy(isc_task_t **taskp) {
  832     /*
  833      * Destroy '*taskp'.
  834      */
  835 
  836     REQUIRE(taskp != NULL);
  837 
  838     isc_task_shutdown(*taskp);
  839     isc_task_detach(taskp);
  840 }
  841 
  842 void
  843 isc_task_setname(isc_task_t *task0, const char *name, void *tag) {
  844     isc__task_t *task = (isc__task_t *)task0;
  845 
  846     /*
  847      * Name 'task'.
  848      */
  849 
  850     REQUIRE(VALID_TASK(task));
  851 
  852     LOCK(&task->lock);
  853     strlcpy(task->name, name, sizeof(task->name));
  854     task->tag = tag;
  855     UNLOCK(&task->lock);
  856 }
  857 
  858 const char *
  859 isc_task_getname(isc_task_t *task0) {
  860     isc__task_t *task = (isc__task_t *)task0;
  861 
  862     REQUIRE(VALID_TASK(task));
  863 
  864     return (task->name);
  865 }
  866 
  867 void *
  868 isc_task_gettag(isc_task_t *task0) {
  869     isc__task_t *task = (isc__task_t *)task0;
  870 
  871     REQUIRE(VALID_TASK(task));
  872 
  873     return (task->tag);
  874 }
  875 
  876 void
  877 isc_task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
  878     isc__task_t *task = (isc__task_t *)task0;
  879 
  880     REQUIRE(VALID_TASK(task));
  881     REQUIRE(t != NULL);
  882 
  883     LOCK(&task->lock);
  884     *t = task->now;
  885     UNLOCK(&task->lock);
  886 }
  887 
  888 void
  889 isc_task_getcurrenttimex(isc_task_t *task0, isc_time_t *t) {
  890     isc__task_t *task = (isc__task_t *)task0;
  891 
  892     REQUIRE(VALID_TASK(task));
  893     REQUIRE(t != NULL);
  894 
  895     LOCK(&task->lock);
  896     *t = task->tnow;
  897     UNLOCK(&task->lock);
  898 }
  899 
  900 /***
  901  *** Task Manager.
  902  ***/
  903 
  904 /*
   905  * Return true if the current ready list for the manager is empty.  The
   906  * current list is either ready_tasks or ready_priority_tasks, depending
   907  * on whether the manager is in normal or privileged execution mode.
   908  *
   909  * Caller must hold the appropriate task queue lock.
  910  */
  911 static inline bool
  912 empty_readyq(isc__taskmgr_t *manager, int c) {
  913     isc__tasklist_t queue;
  914 
  915     if (atomic_load_relaxed(&manager->mode) == isc_taskmgrmode_normal) {
  916         queue = manager->queues[c].ready_tasks;
  917     } else {
  918         queue = manager->queues[c].ready_priority_tasks;
  919     }
  920     return (EMPTY(queue));
  921 }
  922 
  923 /*
  924  * Dequeue and return a pointer to the first task on the current ready
  925  * list for the manager.
  926  * If the task is privileged, dequeue it from the other ready list
  927  * as well.
  928  *
   929  * Caller must hold the appropriate task queue lock.
  930  */
  931 static inline isc__task_t *
  932 pop_readyq(isc__taskmgr_t *manager, int c) {
  933     isc__task_t *task;
  934 
  935     if (atomic_load_relaxed(&manager->mode) == isc_taskmgrmode_normal) {
  936         task = HEAD(manager->queues[c].ready_tasks);
  937     } else {
  938         task = HEAD(manager->queues[c].ready_priority_tasks);
  939     }
  940 
  941     if (task != NULL) {
  942         DEQUEUE(manager->queues[c].ready_tasks, task, ready_link);
  943         if (ISC_LINK_LINKED(task, ready_priority_link)) {
  944             DEQUEUE(manager->queues[c].ready_priority_tasks, task,
  945                 ready_priority_link);
  946         }
  947     }
  948 
  949     return (task);
  950 }
  951 
  952 /*
  953  * Push 'task' onto the ready_tasks queue.  If 'task' has the privilege
  954  * flag set, then also push it onto the ready_priority_tasks queue.
  955  *
  956  * Caller must hold the task queue lock.
  957  */
  958 static inline void
  959 push_readyq(isc__taskmgr_t *manager, isc__task_t *task, int c) {
  960     if (ISC_LINK_LINKED(task, ready_link)) {
  961         return;
  962     }
  963     ENQUEUE(manager->queues[c].ready_tasks, task, ready_link);
  964     if (TASK_PRIVILEGED(task)) {
  965         ENQUEUE(manager->queues[c].ready_priority_tasks, task,
  966             ready_priority_link);
  967     }
  968     atomic_fetch_add_explicit(&manager->tasks_ready, 1,
  969                   memory_order_acquire);
  970 }
  971 
  972 static void
  973 dispatch(isc__taskmgr_t *manager, unsigned int threadid) {
  974     isc__task_t *task;
  975 
  976     REQUIRE(VALID_MANAGER(manager));
  977 
  978     /* Wait for everything to initialize */
  979     LOCK(&manager->lock);
  980     UNLOCK(&manager->lock);
  981 
  982     /*
  983      * Again we're trying to hold the lock for as short a time as possible
  984      * and to do as little locking and unlocking as possible.
  985      *
  986      * In both while loops, the appropriate lock must be held before the
  987      * while body starts.  Code which acquired the lock at the top of
  988      * the loop would be more readable, but would result in a lot of
  989      * extra locking.  Compare:
  990      *
  991      * Straightforward:
  992      *
  993      *  LOCK();
  994      *  ...
  995      *  UNLOCK();
  996      *  while (expression) {
  997      *      LOCK();
  998      *      ...
  999      *      UNLOCK();
 1000      *
 1001      *          Unlocked part here...
 1002      *
 1003      *      LOCK();
 1004      *      ...
 1005      *      UNLOCK();
 1006      *  }
 1007      *
 1008      * Note how if the loop continues we unlock and then immediately lock.
 1009      * For N iterations of the loop, this code does 2N+1 locks and 2N+1
 1010      * unlocks.  Also note that the lock is not held when the while
 1011      * condition is tested, which may or may not be important, depending
 1012      * on the expression.
 1013      *
 1014      * As written:
 1015      *
 1016      *  LOCK();
 1017      *  while (expression) {
 1018      *      ...
 1019      *      UNLOCK();
 1020      *
 1021      *          Unlocked part here...
 1022      *
 1023      *      LOCK();
 1024      *      ...
 1025      *  }
 1026      *  UNLOCK();
 1027      *
 1028      * For N iterations of the loop, this code does N+1 locks and N+1
 1029      * unlocks.  The while expression is always protected by the lock.
 1030      */
 1031     LOCK(&manager->queues[threadid].lock);
 1032 
 1033     while (!FINISHED(manager)) {
 1034         /*
 1035          * For reasons similar to those given in the comment in
 1036          * isc_task_send() above, it is safe for us to dequeue
 1037          * the task while only holding the manager lock, and then
 1038          * change the task to running state while only holding the
 1039          * task lock.
 1040          *
 1041          * If a pause has been requested, don't do any work
 1042          * until it's been released.
 1043          */
 1044         while ((empty_readyq(manager, threadid) &&
 1045             !atomic_load_relaxed(&manager->pause_req) &&
 1046             !atomic_load_relaxed(&manager->exclusive_req)) &&
 1047                !FINISHED(manager))
 1048         {
 1049             XTHREADTRACE("wait");
 1050             XTHREADTRACE(atomic_load_relaxed(&manager->pause_req)
 1051                          ? "paused"
 1052                          : "notpaused");
 1053             XTHREADTRACE(
 1054                 atomic_load_relaxed(&manager->exclusive_req)
 1055                     ? "excreq"
 1056                     : "notexcreq");
 1057             WAIT(&manager->queues[threadid].work_available,
 1058                  &manager->queues[threadid].lock);
 1059             XTHREADTRACE("awake");
 1060         }
 1061         XTHREADTRACE("working");
 1062 
 1063         if (atomic_load_relaxed(&manager->pause_req) ||
 1064             atomic_load_relaxed(&manager->exclusive_req))
 1065         {
 1066             UNLOCK(&manager->queues[threadid].lock);
 1067             XTHREADTRACE("halting");
 1068 
 1069             /*
 1070              * Switching to exclusive mode is done as a
  1071              * two-phase lock: checking whether we have to
  1072              * switch is done without taking any locks on
  1073              * pause_req and exclusive_req, to save time - the
  1074              * worst that can happen is that we launch one
  1075              * more task and the exclusive task is postponed
  1076              * a bit.
 1077              *
 1078              * Broadcasting on halt_cond seems suboptimal, but
 1079              * exclusive tasks are rare enough that we don't
 1080              * care.
 1081              */
 1082             LOCK(&manager->halt_lock);
 1083             manager->halted++;
 1084             BROADCAST(&manager->halt_cond);
 1085             while (atomic_load_relaxed(&manager->pause_req) ||
 1086                    atomic_load_relaxed(&manager->exclusive_req))
 1087             {
 1088                 WAIT(&manager->halt_cond, &manager->halt_lock);
 1089             }
 1090             manager->halted--;
 1091             SIGNAL(&manager->halt_cond);
 1092             UNLOCK(&manager->halt_lock);
 1093 
 1094             LOCK(&manager->queues[threadid].lock);
 1095             /* Restart the loop after */
 1096             continue;
 1097         }
 1098 
 1099         task = pop_readyq(manager, threadid);
 1100         if (task != NULL) {
 1101             unsigned int dispatch_count = 0;
 1102             bool done = false;
 1103             bool requeue = false;
 1104             bool finished = false;
 1105             isc_event_t *event;
 1106 
 1107             INSIST(VALID_TASK(task));
 1108 
 1109             /*
 1110              * Note we only unlock the queue lock if we actually
 1111              * have a task to do.  We must reacquire the queue
 1112              * lock before exiting the 'if (task != NULL)' block.
 1113              */
 1114             UNLOCK(&manager->queues[threadid].lock);
 1115             RUNTIME_CHECK(atomic_fetch_sub_explicit(
 1116                           &manager->tasks_ready, 1,
 1117                           memory_order_release) > 0);
 1118             atomic_fetch_add_explicit(&manager->tasks_running, 1,
 1119                           memory_order_acquire);
 1120 
 1121             LOCK(&task->lock);
 1122             /*
  1123              * It is possible that the task we popped is paused -
  1124              * it might have been paused in the meantime, and we
  1125              * never hold both the queue and task locks at once
  1126              * (to avoid deadlocks) - so just bail in that case.
 1127              */
 1128             if (task->state != task_state_ready) {
 1129                 UNLOCK(&task->lock);
 1130                 LOCK(&manager->queues[threadid].lock);
 1131                 continue;
 1132             }
 1133             INSIST(task->state == task_state_ready);
 1134             task->state = task_state_running;
 1135             XTRACE("running");
 1136             XTRACE(task->name);
 1137             TIME_NOW(&task->tnow);
 1138             task->now = isc_time_seconds(&task->tnow);
 1139             do {
 1140                 if (!EMPTY(task->events)) {
 1141                     event = HEAD(task->events);
 1142                     DEQUEUE(task->events, event, ev_link);
 1143                     task->nevents--;
 1144 
 1145                     /*
 1146                      * Execute the event action.
 1147                      */
 1148                     XTRACE("execute action");
 1149                     XTRACE(task->name);
 1150                     if (event->ev_action != NULL) {
 1151                         UNLOCK(&task->lock);
 1152                         (event->ev_action)(
 1153                             (isc_task_t *)task,
 1154                             event);
 1155                         LOCK(&task->lock);
 1156                     }
 1157                     XTRACE("execution complete");
 1158                     dispatch_count++;
 1159                 }
 1160 
 1161                 if (isc_refcount_current(&task->references) ==
 1162                         0 &&
 1163                     EMPTY(task->events) &&
 1164                     !TASK_SHUTTINGDOWN(task))
 1165                 {
 1166                     bool was_idle;
 1167 
 1168                     /*
 1169                      * There are no references and no
 1170                      * pending events for this task,
 1171                      * which means it will not become
 1172                      * runnable again via an external
 1173                      * action (such as sending an event
 1174                      * or detaching).
 1175                      *
 1176                      * We initiate shutdown to prevent
 1177                      * it from becoming a zombie.
 1178                      *
 1179                      * We do this here instead of in
 1180                      * the "if EMPTY(task->events)" block
 1181                      * below because:
 1182                      *
 1183                      *  If we post no shutdown events,
 1184                      *  we want the task to finish.
 1185                      *
 1186                      *  If we did post shutdown events,
  1187                      *  we still want the task's
 1188                      *  quantum to be applied.
 1189                      */
 1190                     was_idle = task_shutdown(task);
 1191                     INSIST(!was_idle);
 1192                 }
 1193 
 1194                 if (EMPTY(task->events)) {
 1195                     /*
 1196                      * Nothing else to do for this task
 1197                      * right now.
 1198                      */
 1199                     XTRACE("empty");
 1200                     if (isc_refcount_current(
 1201                             &task->references) == 0 &&
 1202                         TASK_SHUTTINGDOWN(task)) {
 1203                         /*
 1204                          * The task is done.
 1205                          */
 1206                         XTRACE("done");
 1207                         finished = true;
 1208                         task->state = task_state_done;
 1209                     } else {
 1210                         if (task->state ==
 1211                             task_state_running) {
 1212                             task->state =
 1213                                 task_state_idle;
 1214                         } else if (task->state ==
 1215                                task_state_pausing) {
 1216                             task->state =
 1217                                 task_state_paused;
 1218                         }
 1219                     }
 1220                     done = true;
 1221                 } else if (task->state == task_state_pausing) {
 1222                     /*
 1223                      * We got a pause request on this task,
 1224                      * stop working on it and switch the
 1225                      * state to paused.
 1226                      */
 1227                     XTRACE("pausing");
 1228                     task->state = task_state_paused;
 1229                     done = true;
 1230                 } else if (dispatch_count >= task->quantum) {
 1231                     /*
 1232                      * Our quantum has expired, but
 1233                      * there is more work to be done.
 1234                      * We'll requeue it to the ready
 1235                      * queue later.
 1236                      *
 1237                      * We don't check quantum until
 1238                      * dispatching at least one event,
 1239                      * so the minimum quantum is one.
 1240                      */
 1241                     XTRACE("quantum");
 1242                     task->state = task_state_ready;
 1243                     requeue = true;
 1244                     done = true;
 1245                 }
 1246             } while (!done);
 1247             UNLOCK(&task->lock);
 1248 
 1249             if (finished) {
 1250                 task_finished(task);
 1251             }
 1252 
 1253             RUNTIME_CHECK(atomic_fetch_sub_explicit(
 1254                           &manager->tasks_running, 1,
 1255                           memory_order_release) > 0);
 1256             LOCK(&manager->queues[threadid].lock);
 1257             if (requeue) {
 1258                 /*
 1259                  * We know we're awake, so we don't have
  1260                  * to wake up any sleeping threads if the
 1261                  * ready queue is empty before we requeue.
 1262                  *
 1263                  * A possible optimization if the queue is
 1264                  * empty is to 'goto' the 'if (task != NULL)'
 1265                  * block, avoiding the ENQUEUE of the task
 1266                  * and the subsequent immediate DEQUEUE
 1267                  * (since it is the only executable task).
 1268                  * We don't do this because then we'd be
 1269                  * skipping the exit_requested check.  The
 1270                  * cost of ENQUEUE is low anyway, especially
 1271                  * when you consider that we'd have to do
 1272                  * an extra EMPTY check to see if we could
 1273                  * do the optimization.  If the ready queue
 1274                  * were usually nonempty, the 'optimization'
 1275                  * might even hurt rather than help.
 1276                  */
 1277                 push_readyq(manager, task, threadid);
 1278             }
 1279         }
 1280 
 1281         /*
 1282          * If we are in privileged execution mode and there are no
 1283          * tasks remaining on the current ready queue, then
 1284          * we're stuck.  Automatically drop privileges at that
 1285          * point and continue with the regular ready queue.
 1286          */
 1287         if (atomic_load_relaxed(&manager->mode) !=
 1288                 isc_taskmgrmode_normal &&
 1289             atomic_load_explicit(&manager->tasks_running,
 1290                      memory_order_acquire) == 0)
 1291         {
 1292             UNLOCK(&manager->queues[threadid].lock);
 1293             LOCK(&manager->lock);
 1294             /*
 1295              * Check once again, under lock. Mode can only
 1296              * change from privileged to normal anyway, and
 1297              * if we enter this loop twice at the same time
 1298              * we'll end up in a deadlock over queue locks.
 1299              *
 1300              */
 1301             if (atomic_load(&manager->mode) !=
 1302                     isc_taskmgrmode_normal &&
 1303                 atomic_load_explicit(&manager->tasks_running,
 1304                          memory_order_acquire) == 0)
 1305             {
 1306                 bool empty = true;
 1307                 unsigned int i;
 1308                 for (i = 0; i < manager->workers && empty; i++)
 1309                 {
 1310                     LOCK(&manager->queues[i].lock);
 1311                     empty &= empty_readyq(manager, i);
 1312                     UNLOCK(&manager->queues[i].lock);
 1313                 }
 1314                 if (empty) {
 1315                     atomic_store(&manager->mode,
 1316                              isc_taskmgrmode_normal);
 1317                     wake_all_queues(manager);
 1318                 }
 1319             }
 1320             UNLOCK(&manager->lock);
 1321             LOCK(&manager->queues[threadid].lock);
 1322         }
 1323     }
 1324     UNLOCK(&manager->queues[threadid].lock);
 1325     /*
  1326      * There might be other dispatchers waiting on empty queues;
  1327      * wake them up so they can see we are finished and exit.
 1328      */
 1329     wake_all_queues(manager);
 1330 }
 1331 
 1332 static isc_threadresult_t
 1333 #ifdef _WIN32
 1334     WINAPI
 1335 #endif /* ifdef _WIN32 */
 1336     run(void *queuep) {
 1337     isc__taskqueue_t *tq = queuep;
 1338     isc__taskmgr_t *manager = tq->manager;
 1339     int threadid = tq->threadid;
 1340     isc_thread_setaffinity(threadid);
 1341 
 1342     XTHREADTRACE("starting");
 1343 
 1344     dispatch(manager, threadid);
 1345 
 1346     XTHREADTRACE("exiting");
 1347 
 1348 #ifdef OPENSSL_LEAKS
 1349     ERR_remove_state(0);
 1350 #endif /* ifdef OPENSSL_LEAKS */
 1351 
 1352     return ((isc_threadresult_t)0);
 1353 }
 1354 
 1355 static void
 1356 manager_free(isc__taskmgr_t *manager) {
 1357     for (unsigned int i = 0; i < manager->workers; i++) {
 1358         isc_mutex_destroy(&manager->queues[i].lock);
 1359         isc_condition_destroy(&manager->queues[i].work_available);
 1360     }
 1361     isc_mutex_destroy(&manager->lock);
 1362     isc_mutex_destroy(&manager->excl_lock);
 1363     isc_mutex_destroy(&manager->halt_lock);
 1364     isc_condition_destroy(&manager->halt_cond);
 1365     isc_mem_put(manager->mctx, manager->queues,
 1366             manager->workers * sizeof(isc__taskqueue_t));
 1367     manager->common.impmagic = 0;
 1368     manager->common.magic = 0;
 1369     isc_mem_putanddetach(&manager->mctx, manager, sizeof(*manager));
 1370 }
 1371 
 1372 isc_result_t
 1373 isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
 1374            unsigned int default_quantum, isc_nm_t *nm,
 1375            isc_taskmgr_t **managerp) {
 1376     unsigned int i;
 1377     isc__taskmgr_t *manager;
 1378 
 1379     /*
 1380      * Create a new task manager.
 1381      */
 1382 
 1383     REQUIRE(workers > 0);
 1384     REQUIRE(managerp != NULL && *managerp == NULL);
 1385 
 1386     manager = isc_mem_get(mctx, sizeof(*manager));
 1387     *manager = (isc__taskmgr_t){ .common.impmagic = TASK_MANAGER_MAGIC,
 1388                      .common.magic = ISCAPI_TASKMGR_MAGIC };
 1389 
 1390     atomic_store(&manager->mode, isc_taskmgrmode_normal);
 1391     isc_mutex_init(&manager->lock);
 1392     isc_mutex_init(&manager->excl_lock);
 1393 
 1394     isc_mutex_init(&manager->halt_lock);
 1395     isc_condition_init(&manager->halt_cond);
 1396 
 1397     manager->workers = workers;
 1398 
 1399     if (default_quantum == 0) {
 1400         default_quantum = DEFAULT_DEFAULT_QUANTUM;
 1401     }
 1402     manager->default_quantum = default_quantum;
 1403 
 1404     if (nm != NULL) {
 1405         isc_nm_attach(nm, &manager->nm);
 1406     }
 1407 
 1408     INIT_LIST(manager->tasks);
 1409     atomic_store(&manager->tasks_count, 0);
 1410     manager->queues = isc_mem_get(mctx, workers * sizeof(isc__taskqueue_t));
 1411     RUNTIME_CHECK(manager->queues != NULL);
 1412 
 1413     atomic_init(&manager->tasks_running, 0);
 1414     atomic_init(&manager->tasks_ready, 0);
 1415     atomic_init(&manager->curq, 0);
 1416     atomic_init(&manager->exiting, false);
 1417     atomic_store_relaxed(&manager->exclusive_req, false);
 1418     atomic_store_relaxed(&manager->pause_req, false);
 1419 
 1420     isc_mem_attach(mctx, &manager->mctx);
 1421 
 1422     LOCK(&manager->lock);
 1423     /*
 1424      * Start workers.
 1425      */
 1426     for (i = 0; i < workers; i++) {
 1427         INIT_LIST(manager->queues[i].ready_tasks);
 1428         INIT_LIST(manager->queues[i].ready_priority_tasks);
 1429         isc_mutex_init(&manager->queues[i].lock);
 1430         isc_condition_init(&manager->queues[i].work_available);
 1431 
 1432         manager->queues[i].manager = manager;
 1433         manager->queues[i].threadid = i;
 1434         isc_thread_create(run, &manager->queues[i],
 1435                   &manager->queues[i].thread);
 1436         char name[21];
 1437         snprintf(name, sizeof(name), "isc-worker%04u", i);
 1438         isc_thread_setname(manager->queues[i].thread, name);
 1439     }
 1440     UNLOCK(&manager->lock);
 1441 
 1442     isc_thread_setconcurrency(workers);
 1443 
 1444     *managerp = (isc_taskmgr_t *)manager;
 1445 
 1446     return (ISC_R_SUCCESS);
 1447 }
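
/*
 * A minimal setup/teardown sketch, assuming a memory context 'mctx' and
 * no network manager (the names are illustrative):
 *
 *     isc_taskmgr_t *taskmgr = NULL;
 *
 *     RUNTIME_CHECK(isc_taskmgr_create(mctx, 4, 0, NULL, &taskmgr) ==
 *                   ISC_R_SUCCESS);
 *     // ... create tasks and send events ...
 *     isc_taskmgr_destroy(&taskmgr);  // posts shutdown and joins workers
 */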
 1448 
 1449 void
 1450 isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
 1451     isc__taskmgr_t *manager;
 1452     isc__task_t *task;
 1453     unsigned int i;
 1454     bool exiting;
 1455 
 1456     /*
 1457      * Destroy '*managerp'.
 1458      */
 1459 
 1460     REQUIRE(managerp != NULL);
 1461     manager = (isc__taskmgr_t *)*managerp;
 1462     REQUIRE(VALID_MANAGER(manager));
 1463 
 1464     XTHREADTRACE("isc_taskmgr_destroy");
 1465     /*
 1466      * Only one non-worker thread may ever call this routine.
 1467      * If a worker thread wants to initiate shutdown of the
 1468      * task manager, it should ask some non-worker thread to call
 1469      * isc_taskmgr_destroy(), e.g. by signalling a condition variable
 1470      * that the startup thread is sleeping on.
 1471      */
 1472 
 1473     /*
 1474      * Detach the exclusive task before acquiring the manager lock
 1475      */
 1476     LOCK(&manager->excl_lock);
 1477     if (manager->excl != NULL) {
 1478         isc_task_detach((isc_task_t **)&manager->excl);
 1479     }
 1480     UNLOCK(&manager->excl_lock);
 1481 
 1482     /*
 1483      * Unlike elsewhere, we're going to hold this lock a long time.
 1484      * We need to do so, because otherwise the list of tasks could
 1485      * change while we were traversing it.
 1486      *
 1487      * This is also the only function where we will hold both the
 1488      * task manager lock and a task lock at the same time.
 1489      */
 1490 
 1491     LOCK(&manager->lock);
 1492 
 1493     /*
 1494      * Make sure we only get called once.
 1495      */
 1496     exiting = false;
 1497 
 1498     INSIST(!!atomic_compare_exchange_strong(&manager->exiting, &exiting,
 1499                         true));
 1500 
 1501     /*
 1502      * If privileged mode was on, turn it off.
 1503      */
 1504     atomic_store(&manager->mode, isc_taskmgrmode_normal);
 1505 
 1506     /*
 1507      * Post shutdown event(s) to every task (if they haven't already been
  1508      * posted). To make things easier, post idle tasks to worker 0.
 1509      */
 1510     LOCK(&manager->queues[0].lock);
 1511     for (task = HEAD(manager->tasks); task != NULL; task = NEXT(task, link))
 1512     {
 1513         LOCK(&task->lock);
 1514         if (task_shutdown(task)) {
 1515             task->threadid = 0;
 1516             push_readyq(manager, task, 0);
 1517         }
 1518         UNLOCK(&task->lock);
 1519     }
 1520     UNLOCK(&manager->queues[0].lock);
 1521 
 1522     /*
 1523      * Wake up any sleeping workers.  This ensures we get work done if
 1524      * there's work left to do, and if there are already no tasks left
 1525      * it will cause the workers to see manager->exiting.
 1526      */
 1527     wake_all_queues(manager);
 1528     UNLOCK(&manager->lock);
 1529 
 1530     /*
 1531      * Wait for all the worker threads to exit.
 1532      */
 1533     for (i = 0; i < manager->workers; i++) {
 1534         isc_thread_join(manager->queues[i].thread, NULL);
 1535     }
 1536 
 1537     /*
 1538      * Detach from the network manager if it was set.
 1539      */
 1540     if (manager->nm != NULL) {
 1541         isc_nm_detach(&manager->nm);
 1542     }
 1543 
 1544     manager_free(manager);
 1545 
 1546     *managerp = NULL;
 1547 }
 1548 
 1549 void
 1550 isc_taskmgr_setprivilegedmode(isc_taskmgr_t *manager0) {
 1551     isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
 1552 
 1553     atomic_store(&manager->mode, isc_taskmgrmode_privileged);
 1554 }
 1555 
 1556 isc_taskmgrmode_t
 1557 isc_taskmgr_mode(isc_taskmgr_t *manager0) {
 1558     isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
 1559     return (atomic_load(&manager->mode));
 1560 }
 1561 
 1562 void
 1563 isc__taskmgr_pause(isc_taskmgr_t *manager0) {
 1564     isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
 1565 
 1566     LOCK(&manager->halt_lock);
 1567     while (atomic_load_relaxed(&manager->exclusive_req) ||
 1568            atomic_load_relaxed(&manager->pause_req))
 1569     {
 1570         UNLOCK(&manager->halt_lock);
 1571         /* This is ugly but pause is used EXCLUSIVELY in tests */
 1572         isc_thread_yield();
 1573         LOCK(&manager->halt_lock);
 1574     }
 1575 
 1576     atomic_store_relaxed(&manager->pause_req, true);
 1577     while (manager->halted < manager->workers) {
 1578         wake_all_queues(manager);
 1579         WAIT(&manager->halt_cond, &manager->halt_lock);
 1580     }
 1581     UNLOCK(&manager->halt_lock);
 1582 }
 1583 
 1584 void
 1585 isc__taskmgr_resume(isc_taskmgr_t *manager0) {
 1586     isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
 1587     LOCK(&manager->halt_lock);
 1588     if (atomic_load(&manager->pause_req)) {
 1589         atomic_store(&manager->pause_req, false);
 1590         while (manager->halted > 0) {
 1591             BROADCAST(&manager->halt_cond);
 1592             WAIT(&manager->halt_cond, &manager->halt_lock);
 1593         }
 1594     }
 1595     UNLOCK(&manager->halt_lock);
 1596 }
 1597 
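      /*
       * The "exclusive" task is the one allowed to call
       * isc_task_beginexclusive() (see below).  isc_taskmgr_setexcltask()
       * attaches a new reference to 'task0', dropping any previously
       * designated task; isc_taskmgr_excltask() returns an attached
       * reference to it, or ISC_R_NOTFOUND if none has been set.
       *
       * Hypothetical usage sketch ('taskmgr' and 'task' are caller-owned):
       *
       *     isc_taskmgr_setexcltask(taskmgr, task);
       *     ...
       *     isc_task_t *excl = NULL;
       *     if (isc_taskmgr_excltask(taskmgr, &excl) == ISC_R_SUCCESS) {
       *             ... send the event that needs exclusive access ...
       *             isc_task_detach(&excl);
       *     }
       */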
 1598 void
 1599 isc_taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0) {
 1600     isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
 1601     isc__task_t *task = (isc__task_t *)task0;
 1602 
 1603     REQUIRE(VALID_MANAGER(mgr));
 1604     REQUIRE(VALID_TASK(task));
 1605     LOCK(&mgr->excl_lock);
 1606     if (mgr->excl != NULL) {
 1607         isc_task_detach((isc_task_t **)&mgr->excl);
 1608     }
 1609     isc_task_attach(task0, (isc_task_t **)&mgr->excl);
 1610     UNLOCK(&mgr->excl_lock);
 1611 }
 1612 
 1613 isc_result_t
 1614 isc_taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp) {
 1615     isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
 1616     isc_result_t result = ISC_R_SUCCESS;
 1617 
 1618     REQUIRE(VALID_MANAGER(mgr));
 1619     REQUIRE(taskp != NULL && *taskp == NULL);
 1620 
 1621     LOCK(&mgr->excl_lock);
 1622     if (mgr->excl != NULL) {
 1623         isc_task_attach((isc_task_t *)mgr->excl, taskp);
 1624     } else {
 1625         result = ISC_R_NOTFOUND;
 1626     }
 1627     UNLOCK(&mgr->excl_lock);
 1628 
 1629     return (result);
 1630 }
 1631 
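      /*
       * isc_task_beginexclusive() must be called from the event action of
       * a running task, normally the manager's designated exclusive task
       * (the REQUIRE below also tolerates a NULL exclusive task while the
       * manager is exiting).  It halts all other workers and pauses the
       * network manager, giving the caller sole control until
       * isc_task_endexclusive() wakes everything up again; ISC_R_LOCKBUSY
       * is returned if another exclusive or pause request is pending.
       *
       * Sketch of the usual pattern inside an event handler (illustrative
       * only):
       *
       *     result = isc_task_beginexclusive(task);
       *     if (result == ISC_R_SUCCESS) {
       *             ... safely reconfigure shared state ...
       *             isc_task_endexclusive(task);
       *     }
       */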
 1632 isc_result_t
 1633 isc_task_beginexclusive(isc_task_t *task0) {
 1634     isc__task_t *task = (isc__task_t *)task0;
 1635     isc__taskmgr_t *manager;
 1636 
 1637     REQUIRE(VALID_TASK(task));
 1638 
 1639     manager = task->manager;
 1640 
 1641     REQUIRE(task->state == task_state_running);
 1642 
 1643     LOCK(&manager->excl_lock);
 1644     REQUIRE(task == task->manager->excl ||
 1645         (atomic_load_relaxed(&task->manager->exiting) &&
 1646          task->manager->excl == NULL));
 1647     UNLOCK(&manager->excl_lock);
 1648 
 1649     if (atomic_load_relaxed(&manager->exclusive_req) ||
 1650         atomic_load_relaxed(&manager->pause_req))
 1651     {
 1652         return (ISC_R_LOCKBUSY);
 1653     }
 1654 
 1655     LOCK(&manager->halt_lock);
 1656     INSIST(!atomic_load_relaxed(&manager->exclusive_req) &&
 1657            !atomic_load_relaxed(&manager->pause_req));
 1658     atomic_store_relaxed(&manager->exclusive_req, true);
 1659     while (manager->halted + 1 < manager->workers) {
 1660         wake_all_queues(manager);
 1661         WAIT(&manager->halt_cond, &manager->halt_lock);
 1662     }
 1663     UNLOCK(&manager->halt_lock);
 1664     if (manager->nm != NULL) {
 1665         isc_nm_pause(manager->nm);
 1666     }
 1667     return (ISC_R_SUCCESS);
 1668 }
 1669 
 1670 void
 1671 isc_task_endexclusive(isc_task_t *task0) {
 1672     isc__task_t *task = (isc__task_t *)task0;
 1673     isc__taskmgr_t *manager;
 1674 
 1675     REQUIRE(VALID_TASK(task));
 1676     REQUIRE(task->state == task_state_running);
 1677     manager = task->manager;
 1678 
 1679     if (manager->nm != NULL) {
 1680         isc_nm_resume(manager->nm);
 1681     }
 1682     LOCK(&manager->halt_lock);
 1683     REQUIRE(atomic_load_relaxed(&manager->exclusive_req));
 1684     atomic_store_relaxed(&manager->exclusive_req, false);
 1685     while (manager->halted > 0) {
 1686         BROADCAST(&manager->halt_cond);
 1687         WAIT(&manager->halt_cond, &manager->halt_lock);
 1688     }
 1689     UNLOCK(&manager->halt_lock);
 1690 }
 1691 
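      /*
       * Per-task pausing is reference counted: each isc_task_pause()
       * bumps task->pause_cnt and each isc_task_unpause() drops it, and
       * only the outermost pair changes the task state.  A running task
       * is first marked task_state_pausing; an idle or ready task is
       * paused immediately.  On the final unpause the task is requeued
       * if events are waiting for it.
       *
       * Illustrative nesting sketch ('task' is a caller-owned handle):
       *
       *     isc_task_pause(task);     ... pause takes effect, count is 1 ...
       *     isc_task_pause(task);     ... still paused, count is 2 ...
       *     isc_task_unpause(task);   ... still paused, count is 1 ...
       *     isc_task_unpause(task);   ... running or requeued again ...
       */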
 1692 void
 1693 isc_task_pause(isc_task_t *task0) {
 1694     REQUIRE(ISCAPI_TASK_VALID(task0));
 1695     isc__task_t *task = (isc__task_t *)task0;
 1696 
 1697     LOCK(&task->lock);
 1698     task->pause_cnt++;
 1699     if (task->pause_cnt > 1) {
 1700         /*
 1701          * Someone has already paused this task; we only needed to
 1702          * increase the number of pausing clients.
 1703          */
 1704         UNLOCK(&task->lock);
 1705         return;
 1706     }
 1707 
 1708     INSIST(task->state == task_state_idle ||
 1709            task->state == task_state_ready ||
 1710            task->state == task_state_running);
 1711     if (task->state == task_state_running) {
 1712         task->state = task_state_pausing;
 1713     } else {
 1714         task->state = task_state_paused;
 1715     }
 1716     UNLOCK(&task->lock);
 1717 }
 1718 
 1719 void
 1720 isc_task_unpause(isc_task_t *task0) {
 1721     isc__task_t *task = (isc__task_t *)task0;
 1722     bool was_idle = false;
 1723 
 1724     REQUIRE(ISCAPI_TASK_VALID(task0));
 1725 
 1726     LOCK(&task->lock);
 1727     task->pause_cnt--;
 1728     INSIST(task->pause_cnt >= 0);
 1729     if (task->pause_cnt > 0) {
 1730         UNLOCK(&task->lock);
 1731         return;
 1732     }
 1733 
 1734     INSIST(task->state == task_state_paused ||
 1735            task->state == task_state_pausing);
 1736     /* A pausing task is still running, so we can't reschedule it. */
 1737     if (task->state == task_state_pausing) {
 1738         task->state = task_state_running;
 1739     } else {
 1740         task->state = task_state_idle;
 1741     }
 1742     if (task->state == task_state_idle && !EMPTY(task->events)) {
 1743         task->state = task_state_ready;
 1744         was_idle = true;
 1745     }
 1746     UNLOCK(&task->lock);
 1747 
 1748     if (was_idle) {
 1749         task_ready(task);
 1750     }
 1751 }
 1752 
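      /*
       * isc_task_setprivilege() sets or clears TASK_F_PRIVILEGED with a
       * compare-and-swap loop, returning early if the flag already has
       * the requested value, and then adds the task to (or removes it
       * from) its worker's priority ready queue if it is currently
       * sitting on the ordinary ready queue, so privileged-mode
       * dispatching sees the change.
       */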
 1753 void
 1754 isc_task_setprivilege(isc_task_t *task0, bool priv) {
 1755     REQUIRE(ISCAPI_TASK_VALID(task0));
 1756     isc__task_t *task = (isc__task_t *)task0;
 1757     isc__taskmgr_t *manager = task->manager;
 1758     uint_fast32_t oldflags, newflags;
 1759 
 1760     oldflags = atomic_load_acquire(&task->flags);
 1761     do {
 1762         if (priv) {
 1763             newflags = oldflags | TASK_F_PRIVILEGED;
 1764         } else {
 1765             newflags = oldflags & ~TASK_F_PRIVILEGED;
 1766         }
 1767         if (newflags == oldflags) {
 1768             return;
 1769         }
 1770     } while (!atomic_compare_exchange_weak_acq_rel(&task->flags, &oldflags,
 1771                                newflags));
 1772 
 1773     LOCK(&manager->queues[task->threadid].lock);
 1774     if (priv && ISC_LINK_LINKED(task, ready_link)) {
 1775         ENQUEUE(manager->queues[task->threadid].ready_priority_tasks,
 1776             task, ready_priority_link);
 1777     } else if (!priv && ISC_LINK_LINKED(task, ready_priority_link)) {
 1778         DEQUEUE(manager->queues[task->threadid].ready_priority_tasks,
 1779             task, ready_priority_link);
 1780     }
 1781     UNLOCK(&manager->queues[task->threadid].lock);
 1782 }
 1783 
 1784 bool
 1785 isc_task_privilege(isc_task_t *task0) {
 1786     isc__task_t *task = (isc__task_t *)task0;
 1787     REQUIRE(VALID_TASK(task));
 1788 
 1789     return (TASK_PRIVILEGED(task));
 1790 }
 1791 
 1792 bool
 1793 isc_task_exiting(isc_task_t *t) {
 1794     isc__task_t *task = (isc__task_t *)t;
 1795     REQUIRE(VALID_TASK(task));
 1796 
 1797     return (TASK_SHUTTINGDOWN(task));
 1798 }
 1799 
 1800 #ifdef HAVE_LIBXML2
 1801 #define TRY0(a)                     \
 1802     do {                        \
 1803         xmlrc = (a);        \
 1804         if (xmlrc < 0)      \
 1805             goto error; \
 1806     } while (0)
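      /*
       * Render the task manager state as XML under the caller's
       * xmlwriter.  The element order follows the code below; the output
       * is roughly (values sketched, not literal):
       *
       *     <thread-model>
       *       <type>threaded</type>
       *       <worker-threads>N</worker-threads>
       *       <default-quantum>N</default-quantum>
       *       <tasks-count>N</tasks-count>
       *       <tasks-running>N</tasks-running>
       *       <tasks-ready>N</tasks-ready>
       *     </thread-model>
       *     <tasks>
       *       <task>
       *         <name>...</name> (only if the task has a name)
       *         <references>N</references> <id>0x...</id>
       *         <state>...</state> <quantum>N</quantum> <events>N</events>
       *       </task>
       *       ...
       *     </tasks>
       *
       * On a libxml2 error the negative return code from the failing call
       * is returned, after releasing any task lock still held.
       */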
 1807 int
 1808 isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, void *writer0) {
 1809     isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
 1810     isc__task_t *task = NULL;
 1811     int xmlrc;
 1812     xmlTextWriterPtr writer = (xmlTextWriterPtr)writer0;
 1813 
 1814     LOCK(&mgr->lock);
 1815 
 1816     /*
 1817      * Write out the thread model and some overall details about the
 1818      * task manager.
 1819      */
 1820     TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model"));
 1821     TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type"));
 1822     TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded"));
 1823     TRY0(xmlTextWriterEndElement(writer)); /* type */
 1824 
 1825     TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads"));
 1826     TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->workers));
 1827     TRY0(xmlTextWriterEndElement(writer)); /* worker-threads */
 1828 
 1829     TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum"));
 1830     TRY0(xmlTextWriterWriteFormatString(writer, "%d",
 1831                         mgr->default_quantum));
 1832     TRY0(xmlTextWriterEndElement(writer)); /* default-quantum */
 1833 
 1834     TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-count"));
 1835     TRY0(xmlTextWriterWriteFormatString(
 1836         writer, "%d", (int)atomic_load_relaxed(&mgr->tasks_count)));
 1837     TRY0(xmlTextWriterEndElement(writer)); /* tasks-count */
 1838 
 1839     TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running"));
 1840     TRY0(xmlTextWriterWriteFormatString(
 1841         writer, "%d", (int)atomic_load_relaxed(&mgr->tasks_running)));
 1842     TRY0(xmlTextWriterEndElement(writer)); /* tasks-running */
 1843 
 1844     TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-ready"));
 1845     TRY0(xmlTextWriterWriteFormatString(
 1846         writer, "%d", (int)atomic_load_relaxed(&mgr->tasks_ready)));
 1847     TRY0(xmlTextWriterEndElement(writer)); /* tasks-ready */
 1848 
 1849     TRY0(xmlTextWriterEndElement(writer)); /* thread-model */
 1850 
 1851     TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks"));
 1852     task = ISC_LIST_HEAD(mgr->tasks);
 1853     while (task != NULL) {
 1854         LOCK(&task->lock);
 1855         TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "task"));
 1856 
 1857         if (task->name[0] != 0) {
 1858             TRY0(xmlTextWriterStartElement(writer,
 1859                                ISC_XMLCHAR "name"));
 1860             TRY0(xmlTextWriterWriteFormatString(writer, "%s",
 1861                                 task->name));
 1862             TRY0(xmlTextWriterEndElement(writer)); /* name */
 1863         }
 1864 
 1865         TRY0(xmlTextWriterStartElement(writer,
 1866                                        ISC_XMLCHAR "references"));
 1867         TRY0(xmlTextWriterWriteFormatString(
 1868             writer, "%" PRIuFAST32,
 1869             isc_refcount_current(&task->references)));
 1870         TRY0(xmlTextWriterEndElement(writer)); /* references */
 1871 
 1872         TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "id"));
 1873         TRY0(xmlTextWriterWriteFormatString(writer, "%p", task));
 1874         TRY0(xmlTextWriterEndElement(writer)); /* id */
 1875 
 1876         TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "state"));
 1877         TRY0(xmlTextWriterWriteFormatString(writer, "%s",
 1878                             statenames[task->state]));
 1879         TRY0(xmlTextWriterEndElement(writer)); /* state */
 1880 
 1881         TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum"));
 1882         TRY0(xmlTextWriterWriteFormatString(writer, "%d",
 1883                             task->quantum));
 1884         TRY0(xmlTextWriterEndElement(writer)); /* quantum */
 1885 
 1886         TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "events"));
 1887         TRY0(xmlTextWriterWriteFormatString(writer, "%d",
 1888                             task->nevents));
 1889         TRY0(xmlTextWriterEndElement(writer)); /* events */
 1890 
 1891         TRY0(xmlTextWriterEndElement(writer));
 1892 
 1893         UNLOCK(&task->lock);
 1894         task = ISC_LIST_NEXT(task, link);
 1895     }
 1896     TRY0(xmlTextWriterEndElement(writer)); /* tasks */
 1897 
 1898 error:
 1899     if (task != NULL) {
 1900         UNLOCK(&task->lock);
 1901     }
 1902     UNLOCK(&mgr->lock);
 1903 
 1904     return (xmlrc);
 1905 }
 1906 #endif /* HAVE_LIBXML2 */
 1907 
 1908 #ifdef HAVE_JSON_C
 1909 #define CHECKMEM(m)                              \
 1910     do {                                     \
 1911         if (m == NULL) {                 \
 1912             result = ISC_R_NOMEMORY; \
 1913             goto error;              \
 1914         }                                \
 1915     } while (0)
 1916 
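      /*
       * JSON counterpart of the XML renderer above: fills the supplied
       * 'tasks' object with "thread-model", "worker-threads",
       * "default-quantum", "tasks-count", "tasks-running" and
       * "tasks-ready" members plus a "tasks" array of per-task objects
       * ("id", optional "name", "references", "state", "quantum",
       * "events").  On allocation failure ISC_R_NOMEMORY is returned and
       * the partially built array is released.
       */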
 1917 isc_result_t
 1918 isc_taskmgr_renderjson(isc_taskmgr_t *mgr0, void *tasks0) {
 1919     isc_result_t result = ISC_R_SUCCESS;
 1920     isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
 1921     isc__task_t *task = NULL;
 1922     json_object *obj = NULL, *array = NULL, *taskobj = NULL;
 1923     json_object *tasks = (json_object *)tasks0;
 1924 
 1925     LOCK(&mgr->lock);
 1926 
 1927     /*
 1928      * Write out the thread model and some overall details about the
 1929      * task manager.
 1930      */
 1931     obj = json_object_new_string("threaded");
 1932     CHECKMEM(obj);
 1933     json_object_object_add(tasks, "thread-model", obj);
 1934 
 1935     obj = json_object_new_int(mgr->workers);
 1936     CHECKMEM(obj);
 1937     json_object_object_add(tasks, "worker-threads", obj);
 1938 
 1939     obj = json_object_new_int(mgr->default_quantum);
 1940     CHECKMEM(obj);
 1941     json_object_object_add(tasks, "default-quantum", obj);
 1942 
 1943     obj = json_object_new_int(atomic_load_relaxed(&mgr->tasks_count));
 1944     CHECKMEM(obj);
 1945     json_object_object_add(tasks, "tasks-count", obj);
 1946 
 1947     obj = json_object_new_int(atomic_load_relaxed(&mgr->tasks_running));
 1948     CHECKMEM(obj);
 1949     json_object_object_add(tasks, "tasks-running", obj);
 1950 
 1951     obj = json_object_new_int(atomic_load_relaxed(&mgr->tasks_ready));
 1952     CHECKMEM(obj);
 1953     json_object_object_add(tasks, "tasks-ready", obj);
 1954 
 1955     array = json_object_new_array();
 1956     CHECKMEM(array);
 1957 
 1958     for (task = ISC_LIST_HEAD(mgr->tasks); task != NULL;
 1959          task = ISC_LIST_NEXT(task, link))
 1960     {
 1961         char buf[255];
 1962 
 1963         LOCK(&task->lock);
 1964 
 1965         taskobj = json_object_new_object();
 1966         CHECKMEM(taskobj);
 1967         json_object_array_add(array, taskobj);
 1968 
 1969         snprintf(buf, sizeof(buf), "%p", task);
 1970         obj = json_object_new_string(buf);
 1971         CHECKMEM(obj);
 1972         json_object_object_add(taskobj, "id", obj);
 1973 
 1974         if (task->name[0] != 0) {
 1975             obj = json_object_new_string(task->name);
 1976             CHECKMEM(obj);
 1977             json_object_object_add(taskobj, "name", obj);
 1978         }
 1979 
 1980         obj = json_object_new_int(
 1981             isc_refcount_current(&task->references));
 1982         CHECKMEM(obj);
 1983         json_object_object_add(taskobj, "references", obj);
 1984 
 1985         obj = json_object_new_string(statenames[task->state]);
 1986         CHECKMEM(obj);
 1987         json_object_object_add(taskobj, "state", obj);
 1988 
 1989         obj = json_object_new_int(task->quantum);
 1990         CHECKMEM(obj);
 1991         json_object_object_add(taskobj, "quantum", obj);
 1992 
 1993         obj = json_object_new_int(task->nevents);
 1994         CHECKMEM(obj);
 1995         json_object_object_add(taskobj, "events", obj);
 1996 
 1997         UNLOCK(&task->lock);
 1998     }
 1999 
 2000     json_object_object_add(tasks, "tasks", array);
 2001     array = NULL;
 2002     result = ISC_R_SUCCESS;
 2003 
 2004 error:
 2005     if (array != NULL) {
 2006         json_object_put(array);
 2007     }
 2008 
 2009     if (task != NULL) {
 2010         UNLOCK(&task->lock);
 2011     }
 2012     UNLOCK(&mgr->lock);
 2013 
 2014     return (result);
 2015 }
 2016 #endif /* ifdef HAVE_JSON_C */
 2017 
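      /*
       * isc_taskmgr_createinctx() is a thin wrapper that creates a task
       * manager with no network manager attached: it simply forwards to
       * isc_taskmgr_create() with a NULL 'nm' argument.
       *
       * Minimal sketch; 'mctx' is a hypothetical memory context, and the
       * assumption that passing 0 selects the default quantum is not
       * verified here:
       *
       *     isc_taskmgr_t *taskmgr = NULL;
       *     result = isc_taskmgr_createinctx(mctx, 4, 0, &taskmgr);
       */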
 2018 isc_result_t
 2019 isc_taskmgr_createinctx(isc_mem_t *mctx, unsigned int workers,
 2020             unsigned int default_quantum,
 2021             isc_taskmgr_t **managerp) {
 2022     isc_result_t result;
 2023 
 2024     result = isc_taskmgr_create(mctx, workers, default_quantum, NULL,
 2025                     managerp);
 2026 
 2027     return (result);
 2028 }