"Fossies" - the Fresh Open Source Software Archive

Member "haproxy-2.0.9/src/memory.c" (15 Nov 2019, 18404 Bytes) of package /linux/misc/haproxy-2.0.9.tar.gz:


/*
 * Memory management functions.
 *
 * Copyright 2000-2007 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <errno.h>

#include <types/applet.h>
#include <types/cli.h>
#include <types/global.h>
#include <types/stats.h>

#include <common/cfgparse.h>
#include <common/config.h>
#include <common/debug.h>
#include <common/hathreads.h>
#include <common/initcall.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>

#include <types/activity.h>

#include <proto/applet.h>
#include <proto/cli.h>
#include <proto/channel.h>
#include <proto/log.h>
#include <proto/stream_interface.h>
#include <proto/stats.h>

/* These are the most common pools, expected to be initialized first. They
 * are allocated from an array, which allows mapping them to an index.
 */
struct pool_head pool_base_start[MAX_BASE_POOLS] = { };
unsigned int pool_base_count = 0;

/* These are initialized per-thread on startup by init_pools() */
struct pool_cache_head pool_cache[MAX_THREADS][MAX_BASE_POOLS];
static struct list pool_lru_head[MAX_THREADS];           /* oldest objects   */
THREAD_LOCAL size_t pool_cache_bytes = 0;                /* total cache size */
THREAD_LOCAL size_t pool_cache_count = 0;                /* #cache objects   */

static struct list pools = LIST_HEAD_INIT(pools);
int mem_poison_byte = -1;

#ifdef DEBUG_FAIL_ALLOC
static int mem_fail_rate = 0;
static int mem_should_fail(const struct pool_head *);
#endif

/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates a new one. NULL is returned if no memory
 * is available for a new creation. Two flags are supported:
 *   - MEM_F_SHARED to indicate that the pool may be shared with other users
 *   - MEM_F_EXACT to indicate that the size must not be rounded up
 */
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
{
    struct pool_head *pool;
    struct pool_head *entry;
    struct list *start;
    unsigned int align;
    int thr, idx;

    /* We need to store a (void *) at the end of the chunks. Since we know
     * that the malloc() function will never return such a small size,
     * let's round the size up to something slightly bigger, in order to
     * ease merging of entries. Note that the rounding is a power of two.
     * This extra (void *) is not accounted for in the size computation
     * so that the visible parts outside are not affected.
     *
     * Note: for the LRU cache, we need to store 2 doubly-linked lists.
     */

    if (!(flags & MEM_F_EXACT)) {
        align = 4 * sizeof(void *); // 2 lists = 4 pointers min
        size  = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
    }
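    /* Worked example (sketch, assuming 8-byte pointers and, for simplicity,
     * POOL_EXTRA == 0): align is 32, so a requested size of 33 becomes
     * ((33 + 0 + 31) & -32) - 0 = 64. Pools whose sizes round to the same
     * value can then be merged below when MEM_F_SHARED is set.
     */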

    /* TODO: thread: we do not lock pool list for now because all pools are
     * created during HAProxy startup (so before threads creation) */
    start = &pools;
    pool = NULL;

    list_for_each_entry(entry, &pools, list) {
        if (entry->size == size) {
            /* either we can share this place and we take it, or
             * we look for a sharable one or for the next position
             * before which we will insert a new one.
             */
            if (flags & entry->flags & MEM_F_SHARED) {
                /* we can share this one */
                pool = entry;
                DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
                break;
            }
        }
        else if (entry->size > size) {
            /* insert before this one */
            start = &entry->list;
            break;
        }
    }

    if (!pool) {
        if (pool_base_count < MAX_BASE_POOLS)
            pool = &pool_base_start[pool_base_count++];

        if (!pool) {
            /* look for a freed entry */
            for (entry = pool_base_start; entry != pool_base_start + MAX_BASE_POOLS; entry++) {
                if (!entry->size) {
                    pool = entry;
                    break;
                }
            }
        }

        if (!pool)
            pool = calloc(1, sizeof(*pool));

        if (!pool)
            return NULL;
        if (name)
            strlcpy2(pool->name, name, sizeof(pool->name));
        pool->size = size;
        pool->flags = flags;
        LIST_ADDQ(start, &pool->list);

        /* update per-thread pool cache if necessary */
        idx = pool_get_index(pool);
        if (idx >= 0) {
            for (thr = 0; thr < MAX_THREADS; thr++)
                pool_cache[thr][idx].size = size;
        }
    }
    pool->users++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
    HA_SPIN_INIT(&pool->lock);
#endif
    return pool;
}
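
/* Usage sketch (illustrative only; struct my_obj and pool_head_my_obj are
 * hypothetical names, not part of this file):
 *
 *     static struct pool_head *pool_head_my_obj;
 *
 *     pool_head_my_obj = create_pool("my_obj", sizeof(struct my_obj),
 *                                    MEM_F_SHARED);
 *     if (!pool_head_my_obj)
 *         return 0; // allocation failure
 *
 * With MEM_F_SHARED the returned pool may be an already existing one of the
 * same rounded size; passing MEM_F_EXACT prevents the size from being
 * rounded up at all.
 */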

#ifdef CONFIG_HAP_LOCKLESS_POOLS
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
    void *ptr = NULL, **free_list;
    int failed = 0;
    int size = pool->size;
    int limit = pool->limit;
    int allocated = pool->allocated, allocated_orig = allocated;

    /* stop point */
    avail += pool->used;

    while (1) {
        if (limit && allocated >= limit) {
            _HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
            activity[tid].pool_fail++;
            return NULL;
        }

        ptr = malloc(size + POOL_EXTRA);
        if (!ptr) {
            _HA_ATOMIC_ADD(&pool->failed, 1);
            if (failed) {
                activity[tid].pool_fail++;
                return NULL;
            }
            failed++;
            pool_gc(pool);
            continue;
        }
        if (++allocated > avail)
            break;

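        /* Not the last allocated object: push it onto the shared lockless
         * free list. The CAS below only succeeds if pool->free_list still
         * equals the head we linked the new object to; otherwise <free_list>
         * is refreshed and the push is retried, so concurrent pushers and
         * poppers cannot lose entries.
         */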
        free_list = pool->free_list;
        do {
            *POOL_LINK(pool, ptr) = free_list;
            __ha_barrier_store();
        } while (_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr) == 0);
    }
    __ha_barrier_atomic_store();

    _HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
    _HA_ATOMIC_ADD(&pool->used, 1);

#ifdef DEBUG_MEMORY_POOLS
    /* keep track of where the element was allocated from */
    *POOL_LINK(pool, ptr) = (void *)pool;
#endif
    return ptr;
}
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
    void *ptr;

    ptr = __pool_refill_alloc(pool, avail);
    return ptr;
}
/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
    void **next, *temp;
    int removed = 0;

    if (!pool)
        return;
    do {
        next = pool->free_list;
    } while (!_HA_ATOMIC_CAS(&pool->free_list, &next, NULL));
    __ha_barrier_atomic_store();
    while (next) {
        temp = next;
        next = *POOL_LINK(pool, temp);
        removed++;
        free(temp);
    }
    pool->free_list = next;
    _HA_ATOMIC_SUB(&pool->allocated, removed);
    /* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It takes care of avoiding
 * recursion because it may be called from a signal handler.
 *
 * <pool_ctx> is unused
 */
void pool_gc(struct pool_head *pool_ctx)
{
    static int recurse;
    int cur_recurse = 0;
    struct pool_head *entry;

    if (recurse || !_HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
        return;

    list_for_each_entry(entry, &pools, list) {
        while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
            struct pool_free_list cmp, new;

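            /* Snapshot the list head together with its sequence number,
             * then try to replace the head with the next element using a
             * double-word CAS; the sequence counter protects the operation
             * against ABA on the free list head.
             */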
            cmp.seq = entry->seq;
            __ha_barrier_load();
            cmp.free_list = entry->free_list;
            __ha_barrier_load();
            if (cmp.free_list == NULL)
                break;
            new.free_list = *POOL_LINK(entry, cmp.free_list);
            new.seq = cmp.seq + 1;
            if (HA_ATOMIC_DWCAS(&entry->free_list, &cmp, &new) == 0)
                continue;
            free(cmp.free_list);
            _HA_ATOMIC_SUB(&entry->allocated, 1);
        }
    }

    _HA_ATOMIC_STORE(&recurse, 0);
}

/* frees an object to the local cache, possibly pushing oldest objects to the
 * global pool. Must not be called directly.
 */
void __pool_put_to_cache(struct pool_head *pool, void *ptr, ssize_t idx)
{
    struct pool_cache_item *item = (struct pool_cache_item *)ptr;
    struct pool_cache_head *ph = &pool_cache[tid][idx];

    LIST_ADD(&ph->list, &item->by_pool);
    LIST_ADD(&pool_lru_head[tid], &item->by_lru);
    ph->count++;
    pool_cache_count++;
    pool_cache_bytes += ph->size;

    if (pool_cache_bytes <= CONFIG_HAP_POOL_CACHE_SIZE)
        return;

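    /* The local cache exceeded its byte budget: release this thread's oldest
     * cached objects back to their shared pools until usage falls below 7/8
     * of the limit, so that a single free does not evict on every call.
     */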
    do {
        item = LIST_PREV(&pool_lru_head[tid], struct pool_cache_item *, by_lru);
        /* note: by definition we remove oldest objects so they also are the
         * oldest in their own pools, thus their next is the pool's head.
         */
        ph = LIST_NEXT(&item->by_pool, struct pool_cache_head *, list);
        LIST_DEL(&item->by_pool);
        LIST_DEL(&item->by_lru);
        ph->count--;
        pool_cache_count--;
        pool_cache_bytes -= ph->size;
        __pool_free(pool_base_start + (ph - pool_cache[tid]), item);
    } while (pool_cache_bytes > CONFIG_HAP_POOL_CACHE_SIZE * 7 / 8);
}

#else /* CONFIG_HAP_LOCKLESS_POOLS */

/* Allocates new entries for pool <pool> until there are at least <avail> + 1
 * available, then returns the last one for immediate use, so that at least
 * <avail> are left available in the pool upon return. NULL is returned if the
 * last entry could not be allocated. It's important to note that at least one
 * allocation is always performed even if there are enough entries in the pool.
 * A call to the garbage collector is performed at most once in case malloc()
 * returns an error, before returning NULL.
 */
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
    void *ptr = NULL;
    int failed = 0;

#ifdef DEBUG_FAIL_ALLOC
    if (mem_should_fail(pool))
        return NULL;
#endif
    /* stop point */
    avail += pool->used;

    while (1) {
        if (pool->limit && pool->allocated >= pool->limit) {
            activity[tid].pool_fail++;
            return NULL;
        }

        ptr = pool_alloc_area(pool->size + POOL_EXTRA);
        if (!ptr) {
            pool->failed++;
            if (failed) {
                activity[tid].pool_fail++;
                return NULL;
            }
            failed++;
            pool_gc(pool);
            continue;
        }
        if (++pool->allocated > avail)
            break;

        *POOL_LINK(pool, ptr) = (void *)pool->free_list;
        pool->free_list = ptr;
    }
    pool->used++;
#ifdef DEBUG_MEMORY_POOLS
    /* keep track of where the element was allocated from */
    *POOL_LINK(pool, ptr) = (void *)pool;
#endif
    return ptr;
}
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
    void *ptr;

    HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
    ptr = __pool_refill_alloc(pool, avail);
    HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
    return ptr;
}
/*
 * This function frees whatever can be freed in pool <pool>.
 */
void pool_flush(struct pool_head *pool)
{
    void *temp, *next;
    if (!pool)
        return;

    HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
    next = pool->free_list;
    while (next) {
        temp = next;
        next = *POOL_LINK(pool, temp);
        pool->allocated--;
        pool_free_area(temp, pool->size + POOL_EXTRA);
    }
    pool->free_list = next;
    HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
    /* here, we should have pool->allocated == pool->used */
}

/*
 * This function frees whatever can be freed in all pools, but respecting
 * the minimum thresholds imposed by owners. It takes care of avoiding
 * recursion because it may be called from a signal handler.
 *
 * <pool_ctx> is used when pool_gc is called to release resources to allocate
 * an element in __pool_refill_alloc. It is important because <pool_ctx> is
 * already locked, so we need to skip the lock here.
 */
void pool_gc(struct pool_head *pool_ctx)
{
    static int recurse;
    int cur_recurse = 0;
    struct pool_head *entry;

    if (recurse || !_HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
        return;

    list_for_each_entry(entry, &pools, list) {
        void *temp, *next;
        //qfprintf(stderr, "Flushing pool %s\n", entry->name);
        if (entry != pool_ctx)
            HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
        next = entry->free_list;
        while (next &&
               (int)(entry->allocated - entry->used) > (int)entry->minavail) {
            temp = next;
            next = *POOL_LINK(entry, temp);
            entry->allocated--;
            pool_free_area(temp, entry->size + POOL_EXTRA);
        }
        entry->free_list = next;
        if (entry != pool_ctx)
            HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
    }

    _HA_ATOMIC_STORE(&recurse, 0);
}
#endif

/*
 * This function destroys a pool by freeing it completely, unless it's still
 * in use. This should be called only under extreme circumstances. It returns
 * NULL if the resulting pool is empty, which eases clearing of the old
 * pointer; otherwise it returns the pool.
 */
void *pool_destroy(struct pool_head *pool)
{
    if (pool) {
        pool_flush(pool);
        if (pool->used)
            return pool;
        pool->users--;
        if (!pool->users) {
            LIST_DEL(&pool->list);
#ifndef CONFIG_HAP_LOCKLESS_POOLS
            HA_SPIN_DESTROY(&pool->lock);
#endif
            if ((pool - pool_base_start) < MAX_BASE_POOLS)
                memset(pool, 0, sizeof(*pool));
            else
                free(pool);
        }
    }
    return NULL;
}
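
/* Call-site sketch (pool_head_my_obj is a hypothetical name): since the
 * function returns NULL unless objects are still in use, the usual idiom is
 * to clear the caller's pointer with the return value:
 *
 *     pool_head_my_obj = pool_destroy(pool_head_my_obj);
 */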

/* This destroys all pools on exit. It is *not* thread safe. */
void pool_destroy_all()
{
    struct pool_head *entry, *back;

    list_for_each_entry_safe(entry, back, &pools, list)
        pool_destroy(entry);
}

/* This function dumps memory usage information into the trash buffer. */
void dump_pools_to_trash()
{
    struct pool_head *entry;
    unsigned long allocated, used;
    int nbpools;

    allocated = used = nbpools = 0;
    chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
    list_for_each_entry(entry, &pools, list) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
        HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
        chunk_appendf(&trash, "  - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users, @%p=%02d%s\n",
             entry->name, entry->size, entry->allocated,
             entry->size * entry->allocated, entry->used, entry->failed,
             entry->users, entry, (int)pool_get_index(entry),
             (entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");

        allocated += entry->allocated * entry->size;
        used += entry->used * entry->size;
        nbpools++;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
        HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
#endif
    }
    chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
         nbpools, allocated, used);
}
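
/* Illustrative output of the dump above (the figures below are made up; only
 * the layout follows the format strings used in dump_pools_to_trash()):
 *
 *   Dumping pools usage. Use SIGQUIT to flush them.
 *     - Pool buffer (16384 bytes) : 66 allocated (1081344 bytes), 12 used, 0 failures, 1 users, @0x...=03 [SHARED]
 *   Total: 1 pools, 1081344 bytes allocated, 196608 used.
 */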

/* Dump statistics on pools usage. */
void dump_pools(void)
{
    dump_pools_to_trash();
    qfprintf(stderr, "%s", trash.area);
}

/* This function returns the total number of failed pool allocations */
int pool_total_failures()
{
    struct pool_head *entry;
    int failed = 0;

    list_for_each_entry(entry, &pools, list)
        failed += entry->failed;
    return failed;
}

/* This function returns the total amount of memory allocated in pools (in bytes) */
unsigned long pool_total_allocated()
{
    struct pool_head *entry;
    unsigned long allocated = 0;

    list_for_each_entry(entry, &pools, list)
        allocated += entry->allocated * entry->size;
    return allocated;
}

/* This function returns the total amount of memory used in pools (in bytes) */
unsigned long pool_total_used()
{
    struct pool_head *entry;
    unsigned long used = 0;

    list_for_each_entry(entry, &pools, list)
        used += entry->used * entry->size;
    return used;
}

/* This function dumps memory usage information onto the stream interface's
 * read buffer. It returns 0 as long as it does not complete, non-zero upon
 * completion. No state is used.
 */
static int cli_io_handler_dump_pools(struct appctx *appctx)
{
    struct stream_interface *si = appctx->owner;

    dump_pools_to_trash();
    if (ci_putchk(si_ic(si), &trash) == -1) {
        si_rx_room_blk(si);
        return 0;
    }
    return 1;
}

/* callback used to create early pool <name> of size <size> and store the
 * resulting pointer into <ptr>. If the allocation fails, it quits after
 * emitting an error message.
 */
void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
{
    *ptr = create_pool(name, size, MEM_F_SHARED);
    if (!*ptr) {
        ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n",
             name, size, strerror(errno));
        exit(1);
    }
}
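
/* Registration sketch: callers typically do not invoke this directly but
 * schedule it through an initcall so the pool exists before first use,
 * along the lines of (pool_head_my_obj and struct my_obj are hypothetical):
 *
 *     INITCALL3(STG_POOL, create_pool_callback,
 *               &pool_head_my_obj, "my_obj", sizeof(struct my_obj));
 */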

/* Initializes all per-thread arrays on startup */
static void init_pools()
{
    int thr, idx;

    for (thr = 0; thr < MAX_THREADS; thr++) {
        for (idx = 0; idx < MAX_BASE_POOLS; idx++) {
            LIST_INIT(&pool_cache[thr][idx].list);
            pool_cache[thr][idx].size = 0;
        }
        LIST_INIT(&pool_lru_head[thr]);
    }
}

INITCALL0(STG_PREPARE, init_pools);

/* register cli keywords */
static struct cli_kw_list cli_kws = {{ },{
    { { "show", "pools",  NULL }, "show pools     : report information about the memory pools usage", NULL, cli_io_handler_dump_pools },
    {{},}
}};

INITCALL1(STG_REGISTER, cli_register_kw, &cli_kws);
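
/* The keyword registered above can be issued at runtime on the stats socket,
 * e.g. (the socket path depends on the local "stats socket" configuration):
 *
 *     echo "show pools" | socat stdio /var/run/haproxy.sock
 */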

#ifdef DEBUG_FAIL_ALLOC
#define MEM_FAIL_MAX_CHAR 32
#define MEM_FAIL_MAX_STR 128
static int mem_fail_cur_idx;
static char mem_fail_str[MEM_FAIL_MAX_CHAR * MEM_FAIL_MAX_STR];
__decl_hathreads(static HA_SPINLOCK_T mem_fail_lock);

int mem_should_fail(const struct pool_head *pool)
{
    int ret = 0;
    int n;

    if (mem_fail_rate > 0 && !(global.mode & MODE_STARTING)) {
        int randnb = random() % 100;

        if (mem_fail_rate > randnb)
            ret = 1;
        else
            ret = 0;
    }
    HA_SPIN_LOCK(OTHER_LOCK, &mem_fail_lock);
    n = snprintf(&mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR],
        MEM_FAIL_MAX_CHAR - 2,
        "%d %.18s %d %d", mem_fail_cur_idx, pool->name, ret, tid);
    while (n < MEM_FAIL_MAX_CHAR - 1)
        mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n++] = ' ';
    if (mem_fail_cur_idx < MEM_FAIL_MAX_STR - 1)
        mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n] = '\n';
    else
        mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR + n] = 0;
    mem_fail_cur_idx++;
    if (mem_fail_cur_idx == MEM_FAIL_MAX_STR)
        mem_fail_cur_idx = 0;
    HA_SPIN_UNLOCK(OTHER_LOCK, &mem_fail_lock);
    return ret;
}

/* config parser for global "tune.fail-alloc" */
static int mem_parse_global_fail_alloc(char **args, int section_type, struct proxy *curpx,
                                       struct proxy *defpx, const char *file, int line,
                                       char **err)
{
    if (too_many_args(1, args, err, NULL))
        return -1;
    mem_fail_rate = atoi(args[1]);
    if (mem_fail_rate < 0 || mem_fail_rate > 100) {
        memprintf(err, "'%s' expects a numeric value between 0 and 100.", args[0]);
        return -1;
    }
    return 0;
}
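
/* Configuration sketch: in a build compiled with DEBUG_FAIL_ALLOC, the
 * keyword parsed above takes a failure probability in percent, e.g.:
 *
 *     global
 *         tune.fail-alloc 10
 */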
#endif

/* register global config keywords */
static struct cfg_kw_list mem_cfg_kws = {ILH, {
#ifdef DEBUG_FAIL_ALLOC
    { CFG_GLOBAL, "tune.fail-alloc", mem_parse_global_fail_alloc },
#endif
    { 0, NULL, NULL }
}};

INITCALL1(STG_REGISTER, cfg_register_keywords, &mem_cfg_kws);

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */