"Fossies" - the Fresh Open Source Software Archive

Member "rrdtool-1.7.2/src/rrd_hw.c" (27 May 2019, 19904 Bytes) of package /linux/misc/rrdtool-1.7.2.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "rrd_hw.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 1.7.1_vs_1.7.2.

/*****************************************************************************
 * RRDtool 1.7.2 Copyright by Tobi Oetiker
 *****************************************************************************
 * rrd_hw.c : Support for Holt-Winters Smoothing/ Aberrant Behavior Detection
 *****************************************************************************
 * Initial version by Jake Brutlag, WebTV Networks, 5/1/00
 *****************************************************************************/

#include <stdlib.h>

#include "rrd_tool.h"
#include "rrd_hw.h"
#include "rrd_hw_math.h"
#include "rrd_hw_update.h"

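/* Convenience accessor: a Holt-Winters RRA records, in its parameter array,
 * the index of the RRA it depends on (e.g. SEASONAL depends on HWPREDICT
 * or MHWPREDICT). */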
#define hw_dep_idx(rrd, rra_idx) rrd->rra_def[rra_idx].par[RRA_dependent_rra_idx].u_cnt

/* #define DEBUG */

/* private functions */
static unsigned long MyMod(
    signed long val,
    unsigned long mod);

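/* Read the seasonal coefficients of the row at cur_row + offset (wrapping
 * modulo row_cnt) into *seasonal_coef, allocating the buffer first if it is
 * NULL. Returns 0 on success, -1 on allocation, seek, or read failure. */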
int lookup_seasonal(
    rrd_t *rrd,
    unsigned long rra_idx,
    unsigned long rra_start,
    rrd_file_t *rrd_file,
    unsigned long offset,
    rrd_value_t **seasonal_coef)
{
    unsigned long pos_tmp;

    /* rra_ptr[].cur_row points to the rra row to be written; this function
     * reads cur_row + offset */
    unsigned long row_idx = rrd->rra_ptr[rra_idx].cur_row + offset;

    /* handle wrap around */
    if (row_idx >= rrd->rra_def[rra_idx].row_cnt)
        row_idx = row_idx % (rrd->rra_def[rra_idx].row_cnt);

    /* rra_start points to the appropriate rra block in the file;
     * compute the offset of the desired row within that block */
    pos_tmp =
        rra_start +
        (row_idx) * (rrd->stat_head->ds_cnt) * sizeof(rrd_value_t);

    /* allocate memory if need be */
    if (*seasonal_coef == NULL)
        *seasonal_coef =
            (rrd_value_t *) malloc((rrd->stat_head->ds_cnt) *
                                   sizeof(rrd_value_t));
    if (*seasonal_coef == NULL) {
        rrd_set_error("memory allocation failure: seasonal coef");
        return -1;
    }

    if (!rrd_seek(rrd_file, pos_tmp, SEEK_SET)) {
        if (rrd_read
            (rrd_file, *seasonal_coef,
             sizeof(rrd_value_t) * rrd->stat_head->ds_cnt)
            == (ssize_t) (sizeof(rrd_value_t) * rrd->stat_head->ds_cnt)) {
            /* success! we can safely ignore the rule requiring a seek
             * operation between read and write, because this read moves the
             * file pointer to somewhere in the file other than the next
             * write location. */
            return 0;
        } else {
            rrd_set_error("read operation failed in lookup_seasonal(): %lu\n",
                          pos_tmp);
        }
    } else {
        rrd_set_error("seek operation failed in lookup_seasonal(): %lu\n",
                      pos_tmp);
    }

    return -1;
}

/* For the specified CDP prep area and the FAILURES RRA,
 * erase all history of past violations.
 */
void erase_violations(
    rrd_t *rrd,
    unsigned long cdp_idx,
    unsigned long rra_idx)
{
    unsigned short i;
    char     *violations_array;

    /* check that rra_idx is a CF_FAILURES array */
    if (rrd_cf_conv(rrd->rra_def[rra_idx].cf_nam) != CF_FAILURES) {
#ifdef DEBUG
        fprintf(stderr, "erase_violations called for non-FAILURES RRA: %s\n",
                rrd->rra_def[rra_idx].cf_nam);
#endif
        return;
    }
#ifdef DEBUG
    fprintf(stderr, "scratch buffer before erase:\n");
    for (i = 0; i < MAX_CDP_PAR_EN; i++) {
        fprintf(stderr, "%lu ", rrd->cdp_prep[cdp_idx].scratch[i].u_cnt);
    }
    fprintf(stderr, "\n");
#endif

    /* WARNING: an array of longs on disk is treated as an array of chars
     * in memory. */
    violations_array = (char *) ((void *) rrd->cdp_prep[cdp_idx].scratch);
    /* erase everything in the part of the CDP scratch array that will be
     * used to store violations for the current window */
    for (i = rrd->rra_def[rra_idx].par[RRA_window_len].u_cnt; i > 0; i--) {
        violations_array[i - 1] = 0;
    }
#ifdef DEBUG
    fprintf(stderr, "scratch buffer after erase:\n");
    for (i = 0; i < MAX_CDP_PAR_EN; i++) {
        fprintf(stderr, "%lu ", rrd->cdp_prep[cdp_idx].scratch[i].u_cnt);
    }
    fprintf(stderr, "\n");
#endif
}

/* Smooth a periodic array with a moving average: equal weights and
 * length = 5% of the period. */
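/* For illustration: with a period of 288 rows (one day of 5-minute steps)
 * and the default 5% window, offset = floor(0.05 / 2 * 288) = 7, so each
 * row is replaced by the average of the 2*7 + 1 = 15 rows centered on it. */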
int apply_smoother(
    rrd_t *rrd,
    unsigned long rra_idx,
    unsigned long rra_start,
    rrd_file_t *rrd_file)
{
    unsigned long i, j, k;
    unsigned long totalbytes;
    rrd_value_t *rrd_values;
    unsigned long row_length = rrd->stat_head->ds_cnt;
    unsigned long row_count = rrd->rra_def[rra_idx].row_cnt;
    unsigned long offset;
    FIFOqueue **buffers;
    rrd_value_t *working_average;
    rrd_value_t *rrd_values_cpy;
    rrd_value_t *baseline;

    /* version >= 4 files store the smoothing window fraction per RRA;
     * older files use the fixed 5% default */
    if (atoi(rrd->stat_head->version) >= 4) {
        offset = floor(rrd->rra_def[rra_idx].
                       par[RRA_seasonal_smoothing_window].
                       u_val / 2 * row_count);
    } else {
        offset = floor(0.05 / 2 * row_count);
    }

    if (offset == 0)
        return 0;       /* no smoothing */

    /* allocate memory */
    totalbytes = sizeof(rrd_value_t) * row_length * row_count;
    rrd_values = (rrd_value_t *) malloc(totalbytes);
    if (rrd_values == NULL) {
        rrd_set_error("apply smoother: memory allocation failure");
        return -1;
    }

    /* rra_start is at the beginning of this rra */
    if (rrd_seek(rrd_file, rra_start, SEEK_SET)) {
        rrd_set_error("seek to rra %lu failed", rra_start);
        free(rrd_values);
        return -1;
    }

    /* could read all data in a single block, but we need to
     * check for NA values */
    for (i = 0; i < row_count; ++i) {
        for (j = 0; j < row_length; ++j) {
            if (rrd_read
                (rrd_file, &(rrd_values[i * row_length + j]),
                 sizeof(rrd_value_t) * 1)
                != (ssize_t) (sizeof(rrd_value_t) * 1)) {
                rrd_set_error("reading value failed: %s",
                              rrd_strerror(errno));
                free(rrd_values);
                return -1;
            }
            if (isnan(rrd_values[i * row_length + j])) {
                /* can't apply smoothing, still uninitialized values */
#ifdef DEBUG
                fprintf(stderr,
                        "apply_smoother: NA detected in seasonal array: %ld %ld\n",
                        i, j);
#endif
                free(rrd_values);
                return 0;
            }
        }
    }

    /* allocate queues, one for each data source */
    buffers = (FIFOqueue **) malloc(sizeof(FIFOqueue *) * row_length);
    if (buffers == NULL) {
        rrd_set_error("apply smoother: memory allocation failure");
        free(rrd_values);
        return -1;
    }
    for (i = 0; i < row_length; ++i) {
        queue_alloc(&(buffers[i]), 2 * offset + 1);
    }
    /* need working average initialized to 0 */
    working_average = (rrd_value_t *) calloc(row_length, sizeof(rrd_value_t));
    baseline = (rrd_value_t *) calloc(row_length, sizeof(rrd_value_t));

    /* compute sums of the first 2*offset terms */
    for (i = 0; i < 2 * offset; ++i) {
        /* i - offset is negative for the first offset iterations (it wraps
         * in unsigned arithmetic); MyMod maps it back into [0, row_count) */
        k = MyMod(i - offset, row_count);
        for (j = 0; j < row_length; ++j) {
            queue_push(buffers[j], rrd_values[k * row_length + j]);
            working_average[j] += rrd_values[k * row_length + j];
        }
    }

    /* as we work through the values we must be careful not to apply the
     * smoothing twice after wrapping around, so work from a copy of
     * rrd_values */

    rrd_values_cpy = (rrd_value_t *) calloc(row_length * row_count, sizeof(rrd_value_t));
    memcpy(rrd_values_cpy, rrd_values, sizeof(rrd_value_t) * row_length * row_count);

    /* compute moving averages */
    for (i = offset; i < row_count + offset; ++i) {
        for (j = 0; j < row_length; ++j) {
            k = MyMod(i, row_count);
            /* add a term to the sum */
            working_average[j] += rrd_values_cpy[k * row_length + j];
            queue_push(buffers[j], rrd_values_cpy[k * row_length + j]);

            /* reset k to be the center of the window */
            k = MyMod(i - offset, row_count);
            /* overwrite the rrd_values entry; the old value is already
             * saved in buffers */
            rrd_values[k * row_length + j] =
                working_average[j] / (2 * offset + 1);
            baseline[j] += rrd_values[k * row_length + j];

            /* remove a term from the sum */
            working_average[j] -= queue_pop(buffers[j]);
        }
    }

    for (i = 0; i < row_length; ++i) {
        queue_dealloc(buffers[i]);
        baseline[i] /= row_count;
    }
    free(rrd_values_cpy);
    free(buffers);
    free(working_average);

    if (rrd_cf_conv(rrd->rra_def[rra_idx].cf_nam) == CF_SEASONAL) {
        rrd_value_t (*init_seasonality) (rrd_value_t seasonal_coef,
                                         rrd_value_t intercept);

        switch (rrd_cf_conv(rrd->rra_def[hw_dep_idx(rrd, rra_idx)].cf_nam)) {
        case CF_HWPREDICT:
            init_seasonality = hw_additive_init_seasonality;
            break;
        case CF_MHWPREDICT:
            init_seasonality = hw_multiplicative_init_seasonality;
            break;
        default:
            rrd_set_error("apply smoother: SEASONAL rra doesn't have "
                          "valid dependency: %s",
                          rrd->rra_def[hw_dep_idx(rrd, rra_idx)].cf_nam);
            free(rrd_values);
            free(baseline);
            return -1;
        }

        for (j = 0; j < row_length; ++j) {
            for (i = 0; i < row_count; ++i) {
                rrd_values[i * row_length + j] =
                    init_seasonality(rrd_values[i * row_length + j],
                                     baseline[j]);
            }
            /* update the baseline coefficient;
             * first, compute the cdp_index. */
            offset = hw_dep_idx(rrd, rra_idx) * row_length + j;
            (rrd->cdp_prep[offset]).scratch[CDP_hw_intercept].u_val +=
                baseline[j];
        }
/* if we are not running on mmap, let's write stuff to disk now */
#ifndef HAVE_MMAP
        /* flush cdp to disk */
        if (rrd_seek(rrd_file, sizeof(stat_head_t) +
                     rrd->stat_head->ds_cnt * sizeof(ds_def_t) +
                     rrd->stat_head->rra_cnt * sizeof(rra_def_t) +
                     sizeof(live_head_t) +
                     rrd->stat_head->ds_cnt * sizeof(pdp_prep_t), SEEK_SET)) {
            rrd_set_error("apply_smoother: seek to cdp_prep failed");
            free(rrd_values);
            free(baseline);
            return -1;
        }
        if (rrd_write(rrd_file, rrd->cdp_prep,
                      sizeof(cdp_prep_t) *
                      (rrd->stat_head->rra_cnt) * rrd->stat_head->ds_cnt)
            != (ssize_t) (sizeof(cdp_prep_t) * (rrd->stat_head->rra_cnt) *
                          (rrd->stat_head->ds_cnt))) {
            rrd_set_error("apply_smoother: cdp_prep write failed");
            free(rrd_values);
            free(baseline);
            return -1;
        }
#endif

    }

    /* endif CF_SEASONAL */
    /* flush updated values to disk */
    if (rrd_seek(rrd_file, rra_start, SEEK_SET)) {
        rrd_set_error("apply_smoother: seek to pos %lu failed", rra_start);
        free(rrd_values);
        free(baseline);
        return -1;
    }
    /* write as a single block */
    if (rrd_write
        (rrd_file, rrd_values, sizeof(rrd_value_t) * row_length * row_count)
        != (ssize_t) (sizeof(rrd_value_t) * row_length * row_count)) {
        rrd_set_error("apply_smoother: write failed to %lu", rra_start);
        free(rrd_values);
        free(baseline);
        return -1;
    }

    free(rrd_values);
    free(baseline);
    return 0;
}

/* Reset aberrant behavior model coefficients, including intercept, slope,
 * seasonal, and seasonal deviation for the specified data source. */
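/* The offsets computed below mirror the on-disk layout of an RRD file:
 * stat_head, ds_def[], rra_def[], live_head, pdp_prep[], cdp_prep[],
 * rra_ptr[], and finally the RRA data blocks. */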
void reset_aberrant_coefficients(
    rrd_t *rrd,
    rrd_file_t *rrd_file,
    unsigned long ds_idx)
{
    unsigned long cdp_idx, rra_idx, i;
    unsigned long cdp_start, rra_start;
    rrd_value_t nan_buffer = DNAN;

    /* compute the offset for the cdp area */
    cdp_start = sizeof(stat_head_t) +
        rrd->stat_head->ds_cnt * sizeof(ds_def_t) +
        rrd->stat_head->rra_cnt * sizeof(rra_def_t) +
        sizeof(live_head_t) + rrd->stat_head->ds_cnt * sizeof(pdp_prep_t);
    /* compute the offset for the first rra */
    rra_start = cdp_start +
        (rrd->stat_head->ds_cnt) * (rrd->stat_head->rra_cnt) *
        sizeof(cdp_prep_t) + rrd->stat_head->rra_cnt * sizeof(rra_ptr_t);

    /* loop over the RRAs */
    for (rra_idx = 0; rra_idx < rrd->stat_head->rra_cnt; rra_idx++) {
        cdp_idx = rra_idx * (rrd->stat_head->ds_cnt) + ds_idx;
        switch (rrd_cf_conv(rrd->rra_def[rra_idx].cf_nam)) {
        case CF_HWPREDICT:
        case CF_MHWPREDICT:
            init_hwpredict_cdp(&(rrd->cdp_prep[cdp_idx]));
            break;
        case CF_SEASONAL:
        case CF_DEVSEASONAL:
            /* don't use init_seasonal because it will reset burn-in, which
             * means different data sources will be calling for the smoother
             * at different times. */
            rrd->cdp_prep[cdp_idx].scratch[CDP_hw_seasonal].u_val = DNAN;
            rrd->cdp_prep[cdp_idx].scratch[CDP_hw_last_seasonal].u_val = DNAN;
            /* move to first entry of data source for this rra */
            rrd_seek(rrd_file, rra_start + ds_idx * sizeof(rrd_value_t),
                     SEEK_SET);
            /* entries for the same data source are not contiguous,
             * temporal entries are contiguous */
            for (i = 0; i < rrd->rra_def[rra_idx].row_cnt; ++i) {
                if (rrd_write(rrd_file, &nan_buffer, sizeof(rrd_value_t) * 1)
                    != sizeof(rrd_value_t) * 1) {
                    rrd_set_error
                        ("reset_aberrant_coefficients: write failed data source %lu rra %s",
                         ds_idx, rrd->rra_def[rra_idx].cf_nam);
                    return;
                }
                rrd_seek(rrd_file, (rrd->stat_head->ds_cnt - 1) *
                         sizeof(rrd_value_t), SEEK_CUR);
            }
            break;
        case CF_FAILURES:
            erase_violations(rrd, cdp_idx, rra_idx);
            break;
        default:
            break;
        }
        /* move offset to the next rra */
        rra_start += rrd->rra_def[rra_idx].row_cnt * rrd->stat_head->ds_cnt *
            sizeof(rrd_value_t);
    }
    rrd_seek(rrd_file, cdp_start, SEEK_SET);
    if (rrd_write(rrd_file, rrd->cdp_prep,
                  sizeof(cdp_prep_t) *
                  (rrd->stat_head->rra_cnt) * rrd->stat_head->ds_cnt)
        != (ssize_t) (sizeof(cdp_prep_t) * (rrd->stat_head->rra_cnt) *
                      (rrd->stat_head->ds_cnt))) {
        rrd_set_error("reset_aberrant_coefficients: cdp_prep write failed");
    }
}

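/* Mark the Holt-Winters intercept and slope as not yet seeded and prime the
 * null counters at 1. */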
void init_hwpredict_cdp(
    cdp_prep_t *cdp)
{
    cdp->scratch[CDP_hw_intercept].u_val = DNAN;
    cdp->scratch[CDP_hw_last_intercept].u_val = DNAN;
    cdp->scratch[CDP_hw_slope].u_val = DNAN;
    cdp->scratch[CDP_hw_last_slope].u_val = DNAN;
    cdp->scratch[CDP_null_count].u_cnt = 1;
    cdp->scratch[CDP_last_null_count].u_cnt = 1;
}

void init_seasonal_cdp(
    cdp_prep_t *cdp)
{
    cdp->scratch[CDP_hw_seasonal].u_val = DNAN;
    cdp->scratch[CDP_hw_last_seasonal].u_val = DNAN;
    cdp->scratch[CDP_init_seasonal].u_cnt = 1;
}

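/* Dispatch a primary data point to the appropriate aberrant-behavior update
 * routine based on the RRA's consolidation function, selecting the additive
 * or multiplicative Holt-Winters variant from the dependent RRA's CF. */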
int update_aberrant_CF(
    rrd_t *rrd,
    rrd_value_t pdp_val,
    enum cf_en current_cf,
    unsigned long cdp_idx,
    unsigned long rra_idx,
    unsigned long ds_idx,
    unsigned short CDP_scratch_idx,
    rrd_value_t *seasonal_coef)
{
    static hw_functions_t hw_multiplicative_functions = {
        hw_multiplicative_calculate_prediction,
        hw_multiplicative_calculate_intercept,
        hw_calculate_slope,
        hw_multiplicative_calculate_seasonality,
        hw_multiplicative_init_seasonality,
        hw_calculate_seasonal_deviation,
        hw_init_seasonal_deviation,
        1.0             /* identity value */
    };

    static hw_functions_t hw_additive_functions = {
        hw_additive_calculate_prediction,
        hw_additive_calculate_intercept,
        hw_calculate_slope,
        hw_additive_calculate_seasonality,
        hw_additive_init_seasonality,
        hw_calculate_seasonal_deviation,
        hw_init_seasonal_deviation,
        0.0             /* identity value */
    };

    rrd->cdp_prep[cdp_idx].scratch[CDP_scratch_idx].u_val = pdp_val;
    switch (current_cf) {
    case CF_HWPREDICT:
        return update_hwpredict(rrd, cdp_idx, rra_idx, ds_idx,
                                CDP_scratch_idx, &hw_additive_functions);
    case CF_MHWPREDICT:
        return update_hwpredict(rrd, cdp_idx, rra_idx, ds_idx,
                                CDP_scratch_idx,
                                &hw_multiplicative_functions);
    case CF_DEVPREDICT:
        return update_devpredict(rrd, cdp_idx, rra_idx, ds_idx,
                                 CDP_scratch_idx);
    case CF_SEASONAL:
        switch (rrd_cf_conv(rrd->rra_def[hw_dep_idx(rrd, rra_idx)].cf_nam)) {
        case CF_HWPREDICT:
            return update_seasonal(rrd, cdp_idx, rra_idx, ds_idx,
                                   CDP_scratch_idx, seasonal_coef,
                                   &hw_additive_functions);
        case CF_MHWPREDICT:
            return update_seasonal(rrd, cdp_idx, rra_idx, ds_idx,
                                   CDP_scratch_idx, seasonal_coef,
                                   &hw_multiplicative_functions);
        default:
            return -1;
        }
    case CF_DEVSEASONAL:
        switch (rrd_cf_conv(rrd->rra_def[hw_dep_idx(rrd, rra_idx)].cf_nam)) {
        case CF_HWPREDICT:
            return update_devseasonal(rrd, cdp_idx, rra_idx, ds_idx,
                                      CDP_scratch_idx, seasonal_coef,
                                      &hw_additive_functions);
        case CF_MHWPREDICT:
            return update_devseasonal(rrd, cdp_idx, rra_idx, ds_idx,
                                      CDP_scratch_idx, seasonal_coef,
                                      &hw_multiplicative_functions);
        default:
            return -1;
        }
    case CF_FAILURES:
        /* a FAILURES RRA depends on a DEVSEASONAL RRA, so follow the
         * dependency chain twice to reach the HWPREDICT or MHWPREDICT RRA */
        switch (rrd_cf_conv
                (rrd->rra_def[hw_dep_idx(rrd, hw_dep_idx(rrd, rra_idx))].
                 cf_nam)) {
        case CF_HWPREDICT:
            return update_failures(rrd, cdp_idx, rra_idx, ds_idx,
                                   CDP_scratch_idx, &hw_additive_functions);
        case CF_MHWPREDICT:
            return update_failures(rrd, cdp_idx, rra_idx, ds_idx,
                                   CDP_scratch_idx,
                                   &hw_multiplicative_functions);
        default:
            return -1;
        }
    case CF_AVERAGE:
    default:
        return 0;
    }
    return -1;
}

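/* Compute val mod "mod", mapping negative values into [0, mod).
 * For illustration: MyMod(-3, 10) == 7, whereas -3 % 10 is -3 in C. */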
static unsigned long MyMod(
    signed long val,
    unsigned long mod)
{
    unsigned long new_val;

    if (val < 0)
        new_val = ((unsigned long) labs(val)) % mod;
    else
        new_val = (val % mod);

    /* when |val| is an exact multiple of mod, new_val is 0 and must not
     * be reflected to mod, which would be an out-of-range result */
    if (val < 0 && new_val > 0)
        return (mod - new_val);
    else
        return (new_val);
}

/* a standard fixed-capacity FIFO queue implementation.
 * No overflow checking is performed. */
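/* Representation: head is the next index to pop and tail the next index to
 * push; both wrap modulo capacity, and head is normalized lazily in
 * queue_pop() and queue_isempty(). */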
int queue_alloc(
    FIFOqueue **q,
    int capacity)
{
    *q = (FIFOqueue *) malloc(sizeof(FIFOqueue));
    if (*q == NULL)
        return -1;
    (*q)->queue = (rrd_value_t *) malloc(sizeof(rrd_value_t) * capacity);
    if ((*q)->queue == NULL) {
        free(*q);
        return -1;
    }
    (*q)->capacity = capacity;
    (*q)->head = capacity;
    (*q)->tail = 0;
    return 0;
}

int queue_isempty(
    FIFOqueue *q)
{
    return (q->head % q->capacity == q->tail);
}

void queue_push(
    FIFOqueue *q,
    rrd_value_t value)
{
    q->queue[(q->tail)++] = value;
    q->tail = q->tail % q->capacity;
}

rrd_value_t queue_pop(
    FIFOqueue *q)
{
    q->head = q->head % q->capacity;
    return q->queue[(q->head)++];
}

void queue_dealloc(
    FIFOqueue *q)
{
    free(q->queue);
    free(q);
}