"Fossies" - the Fresh Open Source Software Archive

Member "haproxy-2.0.9/src/compression.c" (15 Nov 2019, 20949 Bytes) of package /linux/misc/haproxy-2.0.9.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "compression.c" see the Fossies "Dox" file reference documentation.

/*
 * HTTP compression.
 *
 * Copyright 2012 Exceliance, David Du Colombier <dducolombier@exceliance.fr>
 *                            William Lallemand <wlallemand@exceliance.fr>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <stdio.h>

#if defined(USE_SLZ)
#include <slz.h>
#elif defined(USE_ZLIB)
/* Note: the crappy zlib and openssl libs both define the "free_func" type.
 * That's a very clever idea to use such a generic name in general purpose
 * libraries, really... The zlib one is easier to redefine than openssl's,
 * so let's only fix this one.
 */
#define free_func zlib_free_func
#include <zlib.h>
#undef free_func
#endif /* USE_ZLIB */

#include <common/cfgparse.h>
#include <common/compat.h>
#include <common/hathreads.h>
#include <common/initcall.h>
#include <common/memory.h>

#include <types/global.h>
#include <types/compression.h>

#include <proto/acl.h>
#include <proto/compression.h>
#include <proto/freq_ctr.h>
#include <proto/stream.h>


#if defined(USE_ZLIB)
__decl_spinlock(comp_pool_lock);
#endif

#ifdef USE_ZLIB

static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size);
static void free_zlib(void *opaque, void *ptr);

/* zlib allocation pools */
static struct pool_head *zlib_pool_deflate_state = NULL;
static struct pool_head *zlib_pool_window = NULL;
static struct pool_head *zlib_pool_prev = NULL;
static struct pool_head *zlib_pool_head = NULL;
static struct pool_head *zlib_pool_pending_buf = NULL;

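/* total amount of memory currently allocated by zlib, in bytes; it is checked
 * against global.maxzlibmem before each new allocation.
 */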
long zlib_used_memory = 0;

static int global_tune_zlibmemlevel = 8;            /* zlib memlevel */
static int global_tune_zlibwindowsize = MAX_WBITS;  /* zlib window size */

#endif

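/* CPU idle percentage threshold: when ti->idle_pct falls below it, the
 * compression level is lowered (see the checks in *_flush_or_finish()).
 */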
unsigned int compress_min_idle = 0;

static int identity_init(struct comp_ctx **comp_ctx, int level);
static int identity_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out);
static int identity_flush(struct comp_ctx *comp_ctx, struct buffer *out);
static int identity_finish(struct comp_ctx *comp_ctx, struct buffer *out);
static int identity_end(struct comp_ctx **comp_ctx);

#if defined(USE_SLZ)

static int rfc1950_init(struct comp_ctx **comp_ctx, int level);
static int rfc1951_init(struct comp_ctx **comp_ctx, int level);
static int rfc1952_init(struct comp_ctx **comp_ctx, int level);
static int rfc195x_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out);
static int rfc195x_flush(struct comp_ctx *comp_ctx, struct buffer *out);
static int rfc195x_finish(struct comp_ctx *comp_ctx, struct buffer *out);
static int rfc195x_end(struct comp_ctx **comp_ctx);

#elif defined(USE_ZLIB)

static int gzip_init(struct comp_ctx **comp_ctx, int level);
static int raw_def_init(struct comp_ctx **comp_ctx, int level);
static int deflate_init(struct comp_ctx **comp_ctx, int level);
static int deflate_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out);
static int deflate_flush(struct comp_ctx *comp_ctx, struct buffer *out);
static int deflate_finish(struct comp_ctx *comp_ctx, struct buffer *out);
static int deflate_end(struct comp_ctx **comp_ctx);

#endif /* USE_ZLIB */


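/* Table of supported compression algorithms. Each entry holds the name used
 * in the configuration and its length, the name advertised in HTTP headers
 * (ua_name) and its length, and the init/add_data/flush/finish/end callbacks.
 * The list is terminated by a NULL entry.
 */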
const struct comp_algo comp_algos[] =
{
    { "identity",     8, "identity", 8, identity_init, identity_add_data, identity_flush, identity_finish, identity_end },
#if defined(USE_SLZ)
    { "deflate",      7, "deflate",  7, rfc1950_init,  rfc195x_add_data,  rfc195x_flush,  rfc195x_finish,  rfc195x_end },
    { "raw-deflate", 11, "deflate",  7, rfc1951_init,  rfc195x_add_data,  rfc195x_flush,  rfc195x_finish,  rfc195x_end },
    { "gzip",         4, "gzip",     4, rfc1952_init,  rfc195x_add_data,  rfc195x_flush,  rfc195x_finish,  rfc195x_end },
#elif defined(USE_ZLIB)
    { "deflate",      7, "deflate",  7, deflate_init,  deflate_add_data,  deflate_flush,  deflate_finish,  deflate_end },
    { "raw-deflate", 11, "deflate",  7, raw_def_init,  deflate_add_data,  deflate_flush,  deflate_finish,  deflate_end },
    { "gzip",         4, "gzip",     4, gzip_init,     deflate_add_data,  deflate_flush,  deflate_finish,  deflate_end },
#endif /* USE_ZLIB */
    { NULL,           0, NULL,       0, NULL,          NULL,              NULL,           NULL,            NULL }
};

/*
 * Add a content-type to the configuration
 */
int comp_append_type(struct comp *comp, const char *type)
{
    struct comp_type *comp_type;

    comp_type = calloc(1, sizeof(*comp_type));
    comp_type->name_len = strlen(type);
    comp_type->name = strdup(type);
    comp_type->next = comp->types;
    comp->types = comp_type;
    return 0;
}

/*
 * Add an algorithm to the configuration
 */
int comp_append_algo(struct comp *comp, const char *algo)
{
    struct comp_algo *comp_algo;
    int i;

    for (i = 0; comp_algos[i].cfg_name; i++) {
        if (!strcmp(algo, comp_algos[i].cfg_name)) {
            comp_algo = calloc(1, sizeof(*comp_algo));
            memmove(comp_algo, &comp_algos[i], sizeof(struct comp_algo));
            comp_algo->next = comp->algos;
            comp->algos = comp_algo;
            return 0;
        }
    }
    return -1;
}

#if defined(USE_ZLIB) || defined(USE_SLZ)
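/* pool used to allocate the comp_ctx structures */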
DECLARE_STATIC_POOL(pool_comp_ctx, "comp_ctx", sizeof(struct comp_ctx));

/*
 * Alloc the comp_ctx
 */
static inline int init_comp_ctx(struct comp_ctx **comp_ctx)
{
#ifdef USE_ZLIB
    z_stream *strm;

    if (global.maxzlibmem > 0 && (global.maxzlibmem - zlib_used_memory) < sizeof(struct comp_ctx))
        return -1;
#endif

    *comp_ctx = pool_alloc(pool_comp_ctx);
    if (*comp_ctx == NULL)
        return -1;
#if defined(USE_SLZ)
    (*comp_ctx)->direct_ptr = NULL;
    (*comp_ctx)->direct_len = 0;
    (*comp_ctx)->queued = BUF_NULL;
#elif defined(USE_ZLIB)
    _HA_ATOMIC_ADD(&zlib_used_memory, sizeof(struct comp_ctx));
    __ha_barrier_atomic_store();

    strm = &(*comp_ctx)->strm;
    strm->zalloc = alloc_zlib;
    strm->zfree = free_zlib;
    strm->opaque = *comp_ctx;
#endif
    return 0;
}

/*
 * Dealloc the comp_ctx
 */
static inline int deinit_comp_ctx(struct comp_ctx **comp_ctx)
{
    if (!*comp_ctx)
        return 0;

    pool_free(pool_comp_ctx, *comp_ctx);
    *comp_ctx = NULL;

#ifdef USE_ZLIB
    _HA_ATOMIC_SUB(&zlib_used_memory, sizeof(struct comp_ctx));
    __ha_barrier_atomic_store();
#endif
    return 0;
}
#endif


/****************************
 **** Identity algorithm ****
 ****************************/

/*
 * Init the identity algorithm
 */
static int identity_init(struct comp_ctx **comp_ctx, int level)
{
    return 0;
}

/*
 * Process data
 *   Return size of consumed data or -1 on error
 */
static int identity_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out)
{
    char *out_data = b_tail(out);
    int out_len = b_room(out);

    if (out_len < in_len)
        return -1;

    memcpy(out_data, in_data, in_len);

    b_add(out, in_len);

    return in_len;
}

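/* the identity algorithm keeps no internal state, so flush/finish have nothing
 * left to emit.
 */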
static int identity_flush(struct comp_ctx *comp_ctx, struct buffer *out)
{
    return 0;
}

static int identity_finish(struct comp_ctx *comp_ctx, struct buffer *out)
{
    return 0;
}

/*
 * Deinit the algorithm
 */
static int identity_end(struct comp_ctx **comp_ctx)
{
    return 0;
}


#ifdef USE_SLZ

/* SLZ's gzip format (RFC1952). Returns < 0 on error. */
static int rfc1952_init(struct comp_ctx **comp_ctx, int level)
{
    if (init_comp_ctx(comp_ctx) < 0)
        return -1;

    (*comp_ctx)->cur_lvl = !!level;
    return slz_rfc1952_init(&(*comp_ctx)->strm, !!level);
}

/* SLZ's raw deflate format (RFC1951). Returns < 0 on error. */
static int rfc1951_init(struct comp_ctx **comp_ctx, int level)
{
    if (init_comp_ctx(comp_ctx) < 0)
        return -1;

    (*comp_ctx)->cur_lvl = !!level;
    return slz_rfc1951_init(&(*comp_ctx)->strm, !!level);
}

/* SLZ's zlib format (RFC1950). Returns < 0 on error. */
static int rfc1950_init(struct comp_ctx **comp_ctx, int level)
{
    if (init_comp_ctx(comp_ctx) < 0)
        return -1;

    (*comp_ctx)->cur_lvl = !!level;
    return slz_rfc1950_init(&(*comp_ctx)->strm, !!level);
}

/* Return the size of consumed data or -1. The output buffer is unused at this
 * point; we only keep a reference to the input data, or a copy of it if the
 * reference is already in use.
 */
static int rfc195x_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out)
{
    static THREAD_LOCAL struct buffer tmpbuf = BUF_NULL;

    if (in_len <= 0)
        return 0;

    if (comp_ctx->direct_ptr && b_is_null(&comp_ctx->queued)) {
        /* data already being pointed to, we're in front of fragmented
         * data and need a buffer now. We reuse the same buffer, as it's
         * not used out of the scope of a series of add_data()*, end().
         */
        if (unlikely(!tmpbuf.size)) {
            /* this is the first time we need the compression buffer */
            if (b_alloc(&tmpbuf) == NULL)
                return -1; /* no memory */
        }
        b_reset(&tmpbuf);
        memcpy(b_tail(&tmpbuf), comp_ctx->direct_ptr, comp_ctx->direct_len);
        b_add(&tmpbuf, comp_ctx->direct_len);
        comp_ctx->direct_ptr = NULL;
        comp_ctx->direct_len = 0;
        comp_ctx->queued = tmpbuf;
        /* fall through to the buffered-copy path below */
    }

    if (!b_is_null(&comp_ctx->queued)) {
        /* data already pending */
        memcpy(b_tail(&comp_ctx->queued), in_data, in_len);
        b_add(&comp_ctx->queued, in_len);
        return in_len;
    }

    comp_ctx->direct_ptr = in_data;
    comp_ctx->direct_len = in_len;
    return in_len;
}

/* Compresses the data accumulated using add_data(), and optionally sends the
 * format-specific trailer if <finish> is non-zero. <out> is expected to have a
 * large enough free non-wrapping space as verified by http_comp_buffer_init().
 * The number of bytes emitted is reported.
 */
static int rfc195x_flush_or_finish(struct comp_ctx *comp_ctx, struct buffer *out, int finish)
{
    struct slz_stream *strm = &comp_ctx->strm;
    const char *in_ptr;
    int in_len;
    int out_len;

    in_ptr = comp_ctx->direct_ptr;
    in_len = comp_ctx->direct_len;

    if (!b_is_null(&comp_ctx->queued)) {
        in_ptr = b_head(&comp_ctx->queued);
        in_len = b_data(&comp_ctx->queued);
    }

    out_len = b_data(out);

    if (in_ptr)
        b_add(out, slz_encode(strm, b_tail(out), in_ptr, in_len, !finish));

    if (finish)
        b_add(out, slz_finish(strm, b_tail(out)));

    out_len = b_data(out) - out_len;

    /* very important, we must wipe the data we've just flushed */
    comp_ctx->direct_len = 0;
    comp_ctx->direct_ptr = NULL;
    comp_ctx->queued     = BUF_NULL;

    /* Check the compression rate limit and CPU usage to adjust the level */
    if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) ||    /* rate */
       (ti->idle_pct < compress_min_idle)) {                                                              /* idle */
        if (comp_ctx->cur_lvl > 0)
            strm->level = --comp_ctx->cur_lvl;
    }
    else if (comp_ctx->cur_lvl < global.tune.comp_maxlevel && comp_ctx->cur_lvl < 1) {
        strm->level = ++comp_ctx->cur_lvl;
    }

    /* and that's all */
    return out_len;
}

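/* flushes the data queued so far without emitting the end-of-stream trailer */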
static int rfc195x_flush(struct comp_ctx *comp_ctx, struct buffer *out)
{
    return rfc195x_flush_or_finish(comp_ctx, out, 0);
}

static int rfc195x_finish(struct comp_ctx *comp_ctx, struct buffer *out)
{
    return rfc195x_flush_or_finish(comp_ctx, out, 1);
}

/* we just need to free the comp_ctx here, nothing else was allocated */
static int rfc195x_end(struct comp_ctx **comp_ctx)
{
    deinit_comp_ctx(comp_ctx);
    return 0;
}

#elif defined(USE_ZLIB)  /* ! USE_SLZ */

/*
 * This is a tricky allocation function used by zlib, based on the
 * allocation order in deflateInit2().
 */
static void *alloc_zlib(void *opaque, unsigned int items, unsigned int size)
{
    struct comp_ctx *ctx = opaque;
    static THREAD_LOCAL char round = 0; /* order in deflateInit2 */
    void *buf = NULL;
    struct pool_head *pool = NULL;

    if (global.maxzlibmem > 0 && (global.maxzlibmem - zlib_used_memory) < (long)(items * size))
        goto end;

    switch (round) {
        case 0:
            if (zlib_pool_deflate_state == NULL) {
                HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
                if (zlib_pool_deflate_state == NULL)
                    zlib_pool_deflate_state = create_pool("zlib_state", size * items, MEM_F_SHARED);
                HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
            }
            pool = zlib_pool_deflate_state;
            ctx->zlib_deflate_state = buf = pool_alloc(pool);
        break;

        case 1:
            if (zlib_pool_window == NULL) {
                HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
                if (zlib_pool_window == NULL)
                    zlib_pool_window = create_pool("zlib_window", size * items, MEM_F_SHARED);
                HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
            }
            pool = zlib_pool_window;
            ctx->zlib_window = buf = pool_alloc(pool);
        break;

        case 2:
            if (zlib_pool_prev == NULL) {
                HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
                if (zlib_pool_prev == NULL)
                    zlib_pool_prev = create_pool("zlib_prev", size * items, MEM_F_SHARED);
                HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
            }
            pool = zlib_pool_prev;
            ctx->zlib_prev = buf = pool_alloc(pool);
        break;

        case 3:
            if (zlib_pool_head == NULL) {
                HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
                if (zlib_pool_head == NULL)
                    zlib_pool_head = create_pool("zlib_head", size * items, MEM_F_SHARED);
                HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
            }
            pool = zlib_pool_head;
            ctx->zlib_head = buf = pool_alloc(pool);
        break;

        case 4:
            if (zlib_pool_pending_buf == NULL) {
                HA_SPIN_LOCK(COMP_POOL_LOCK, &comp_pool_lock);
                if (zlib_pool_pending_buf == NULL)
                    zlib_pool_pending_buf = create_pool("zlib_pending_buf", size * items, MEM_F_SHARED);
                HA_SPIN_UNLOCK(COMP_POOL_LOCK, &comp_pool_lock);
            }
            pool = zlib_pool_pending_buf;
            ctx->zlib_pending_buf = buf = pool_alloc(pool);
        break;
    }
    if (buf != NULL) {
        _HA_ATOMIC_ADD(&zlib_used_memory, pool->size);
        __ha_barrier_atomic_store();
    }

end:

    /* deflateInit2() first allocates and checks the deflate_state, then if
     * it succeeds, it allocates the 4 other areas at once and checks them
     * at the end. So we want to correctly count the rounds depending on when
     * zlib is supposed to abort.
     */
    if (buf || round)
        round = (round + 1) % 5;
    return buf;
}

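/* Returns a buffer previously allocated by alloc_zlib() to its pool and
 * updates the zlib memory accounting accordingly.
 */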
static void free_zlib(void *opaque, void *ptr)
{
    struct comp_ctx *ctx = opaque;
    struct pool_head *pool = NULL;

    if (ptr == ctx->zlib_window)
        pool = zlib_pool_window;
    else if (ptr == ctx->zlib_deflate_state)
        pool = zlib_pool_deflate_state;
    else if (ptr == ctx->zlib_prev)
        pool = zlib_pool_prev;
    else if (ptr == ctx->zlib_head)
        pool = zlib_pool_head;
    else if (ptr == ctx->zlib_pending_buf)
        pool = zlib_pool_pending_buf;

    pool_free(pool, ptr);
    _HA_ATOMIC_SUB(&zlib_used_memory, pool->size);
    __ha_barrier_atomic_store();
}

/**************************
****  gzip algorithm   ****
***************************/
static int gzip_init(struct comp_ctx **comp_ctx, int level)
{
    z_stream *strm;

    if (init_comp_ctx(comp_ctx) < 0)
        return -1;

    strm = &(*comp_ctx)->strm;

    if (deflateInit2(strm, level, Z_DEFLATED, global_tune_zlibwindowsize + 16, global_tune_zlibmemlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
        deinit_comp_ctx(comp_ctx);
        return -1;
    }

    (*comp_ctx)->cur_lvl = level;

    return 0;
}

/* Raw deflate algorithm */
static int raw_def_init(struct comp_ctx **comp_ctx, int level)
{
    z_stream *strm;

    if (init_comp_ctx(comp_ctx) < 0)
        return -1;

    strm = &(*comp_ctx)->strm;

    if (deflateInit2(strm, level, Z_DEFLATED, -global_tune_zlibwindowsize, global_tune_zlibmemlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
        deinit_comp_ctx(comp_ctx);
        return -1;
    }

    (*comp_ctx)->cur_lvl = level;
    return 0;
}

/**************************
**** Deflate algorithm ****
***************************/

static int deflate_init(struct comp_ctx **comp_ctx, int level)
{
    z_stream *strm;

    if (init_comp_ctx(comp_ctx) < 0)
        return -1;

    strm = &(*comp_ctx)->strm;

    if (deflateInit2(strm, level, Z_DEFLATED, global_tune_zlibwindowsize, global_tune_zlibmemlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
        deinit_comp_ctx(comp_ctx);
        return -1;
    }

    (*comp_ctx)->cur_lvl = level;

    return 0;
}

/* Return the size of consumed data or -1 on error */
static int deflate_add_data(struct comp_ctx *comp_ctx, const char *in_data, int in_len, struct buffer *out)
{
    int ret;
    z_stream *strm = &comp_ctx->strm;
    char *out_data = b_tail(out);
    int out_len = b_room(out);

    if (in_len <= 0)
        return 0;

    if (out_len <= 0)
        return -1;

    strm->next_in = (unsigned char *)in_data;
    strm->avail_in = in_len;
    strm->next_out = (unsigned char *)out_data;
    strm->avail_out = out_len;

    ret = deflate(strm, Z_NO_FLUSH);
    if (ret != Z_OK)
        return -1;

    /* deflate updated avail_out; account for the bytes it produced */
    b_add(out, out_len - strm->avail_out);

    return in_len - strm->avail_in;
}

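/* Flushes (Z_SYNC_FLUSH) or terminates (Z_FINISH) the zlib stream into <out>,
 * then adjusts the compression level according to the rate limit and CPU
 * usage. Returns the number of bytes emitted or -1 on error.
 */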
static int deflate_flush_or_finish(struct comp_ctx *comp_ctx, struct buffer *out, int flag)
{
    int ret;
    int out_len = 0;
    z_stream *strm = &comp_ctx->strm;

    strm->next_in = NULL;
    strm->avail_in = 0;
    strm->next_out = (unsigned char *)b_tail(out);
    strm->avail_out = b_room(out);

    ret = deflate(strm, flag);
    if (ret != Z_OK && ret != Z_STREAM_END)
        return -1;

    out_len = b_room(out) - strm->avail_out;
    b_add(out, out_len);

    /* compression rate limit and CPU usage check */
    if ((global.comp_rate_lim > 0 && (read_freq_ctr(&global.comp_bps_out) > global.comp_rate_lim)) ||    /* rate */
       (ti->idle_pct < compress_min_idle)) {                                                              /* idle */
        /* decrease level */
        if (comp_ctx->cur_lvl > 0) {
            comp_ctx->cur_lvl--;
            deflateParams(&comp_ctx->strm, comp_ctx->cur_lvl, Z_DEFAULT_STRATEGY);
        }

    } else if (comp_ctx->cur_lvl < global.tune.comp_maxlevel) {
        /* increase level */
        comp_ctx->cur_lvl++;
        deflateParams(&comp_ctx->strm, comp_ctx->cur_lvl, Z_DEFAULT_STRATEGY);
    }

    return out_len;
}

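/* emits pending compressed data with Z_SYNC_FLUSH without ending the stream */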
static int deflate_flush(struct comp_ctx *comp_ctx, struct buffer *out)
{
    return deflate_flush_or_finish(comp_ctx, out, Z_SYNC_FLUSH);
}

static int deflate_finish(struct comp_ctx *comp_ctx, struct buffer *out)
{
    return deflate_flush_or_finish(comp_ctx, out, Z_FINISH);
}

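/* Releases the zlib stream then the comp_ctx. Returns the deflateEnd() status. */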
static int deflate_end(struct comp_ctx **comp_ctx)
{
    z_stream *strm = &(*comp_ctx)->strm;
    int ret;

    ret = deflateEnd(strm);

    deinit_comp_ctx(comp_ctx);

    return ret;
}

/* config parser for global "tune.zlib.memlevel" */
static int zlib_parse_global_memlevel(char **args, int section_type, struct proxy *curpx,
                                      struct proxy *defpx, const char *file, int line,
                                      char **err)
{
    if (too_many_args(1, args, err, NULL))
        return -1;

    if (*(args[1]) == 0) {
        memprintf(err, "'%s' expects a numeric value between 1 and 9.", args[0]);
        return -1;
    }

    global_tune_zlibmemlevel = atoi(args[1]);
    if (global_tune_zlibmemlevel < 1 || global_tune_zlibmemlevel > 9) {
        memprintf(err, "'%s' expects a numeric value between 1 and 9.", args[0]);
        return -1;
    }
    return 0;
}


/* config parser for global "tune.zlib.windowsize" */
static int zlib_parse_global_windowsize(char **args, int section_type, struct proxy *curpx,
                                        struct proxy *defpx, const char *file, int line,
                                        char **err)
{
    if (too_many_args(1, args, err, NULL))
        return -1;

    if (*(args[1]) == 0) {
        memprintf(err, "'%s' expects a numeric value between 8 and 15.", args[0]);
        return -1;
    }

    global_tune_zlibwindowsize = atoi(args[1]);
    if (global_tune_zlibwindowsize < 8 || global_tune_zlibwindowsize > 15) {
        memprintf(err, "'%s' expects a numeric value between 8 and 15.", args[0]);
        return -1;
    }
    return 0;
}

#endif /* USE_ZLIB */


/* config keyword parsers */
static struct cfg_kw_list cfg_kws = {ILH, {
#ifdef USE_ZLIB
    { CFG_GLOBAL, "tune.zlib.memlevel",   zlib_parse_global_memlevel },
    { CFG_GLOBAL, "tune.zlib.windowsize", zlib_parse_global_windowsize },
#endif
    { 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);

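/* constructor: prepares SLZ's static tables and/or the default zlib memory
 * limit at startup.
 */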
__attribute__((constructor))
static void __comp_fetch_init(void)
{
#ifdef USE_SLZ
    slz_make_crc_table();
    slz_prepare_dist_table();
#endif

#if defined(USE_ZLIB) && defined(DEFAULT_MAXZLIBMEM)
    global.maxzlibmem = DEFAULT_MAXZLIBMEM * 1024U * 1024U;
#endif
}

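/* Reports the compression library this binary was built with and the list of
 * supported algorithms in the build options output.
 */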
static void comp_register_build_opts(void)
{
    char *ptr = NULL;
    int i;

#ifdef USE_ZLIB
    memprintf(&ptr, "Built with zlib version : " ZLIB_VERSION);
    memprintf(&ptr, "%s\nRunning on zlib version : %s", ptr, zlibVersion());
#elif defined(USE_SLZ)
    memprintf(&ptr, "Built with libslz for stateless compression.");
#else
    memprintf(&ptr, "Built without compression support (neither USE_ZLIB nor USE_SLZ are set).");
#endif
    memprintf(&ptr, "%s\nCompression algorithms supported :", ptr);

    for (i = 0; comp_algos[i].cfg_name; i++)
        memprintf(&ptr, "%s%s %s(\"%s\")", ptr, (i == 0 ? "" : ","), comp_algos[i].cfg_name, comp_algos[i].ua_name);

    if (i == 0)
        memprintf(&ptr, "%s none", ptr);

    hap_register_build_opts(ptr, 1);
}

INITCALL0(STG_REGISTER, comp_register_build_opts);