"Fossies" - the Fresh Open Source Software Archive

Member "links-1.04/cache.c" (4 Jan 2017, 9650 Bytes) of package /linux/www/links-1.04.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) C and C++ source code syntax highlighting (style: standard) with prefixed line numbers and code folding option. Alternatively you can here view or download the uninterpreted source code file. For more information about "cache.c" see the Fossies "Dox" file reference documentation and the latest Fossies "Diffs" side-by-side code changes report: 1.03_vs_1.04.

#include "links.h"

struct list_head cache = {&cache, &cache};

my_uintptr_t cache_size = 0;

int cache_count = 0;

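/* Report cache statistics selected by 'type': total cached bytes (CI_BYTES),
   number of entries (CI_FILES), locked entries (CI_LOCKED), entries still
   being loaded (CI_LOADING), or the address of the cache list itself
   (CI_LIST). */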
my_uintptr_t cache_info(int type)
{
    my_uintptr_t i = 0;
    struct cache_entry *ce;
    switch (type) {
        case CI_BYTES:
            return cache_size;
        case CI_FILES:
            foreach(ce, cache) i++;
            return i;
        case CI_LOCKED:
            foreach(ce, cache) i += !!ce->refcount;
            return i;
        case CI_LOADING:
            foreach(ce, cache) i += is_entry_used(ce);
            return i;
        case CI_LIST:
            return (my_uintptr_t) &cache;
        default:
            internal("cache_info: bad request");
    }
    return 0;
}

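/* If 'url' has the form "proxy://host/real-url", return a pointer to the
   real URL after the proxy part; otherwise return 'url' unchanged. */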
unsigned char *extract_proxy(unsigned char *url)
{
    char *a;
    if (strlen(url) < 8 || casecmp(url, "proxy://", 8)) return url;
    if (!(a = strchr(url + 8, '/'))) return url;
    return a + 1;
}

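/* Look up 'url' (with any proxy prefix stripped) in the cache. On a hit the
   entry is locked (refcount incremented), moved to the head of the list so
   recently used entries stay in front, and returned through *f; returns 0 on
   a hit, -1 on a miss. */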
int find_in_cache(unsigned char *url, struct cache_entry **f)
{
    struct cache_entry *e;
    url = extract_proxy(url);
    foreach(e, cache) if (!strcmp(e->url, url)) {
        e->refcount++;
        del_from_list(e);
        add_to_list(cache, e);
        *f = e;
        return 0;
    }
    return -1;
}

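/* Return a cache entry for 'url' through *f, reusing an existing one when
   possible; otherwise trim memory via shrink_memory() and create a new,
   empty, locked entry (refcount 1). Always returns 0. */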
int get_cache_entry(unsigned char *url, struct cache_entry **f)
{
    struct cache_entry *e;
    if (!find_in_cache(url, f)) return 0;
    shrink_memory(0);
    url = extract_proxy(url);
    e = mem_alloc(sizeof(struct cache_entry));
    memset(e, 0, sizeof(struct cache_entry));
    e->url = mem_alloc(strlen(url) + 1);
    strcpy(e->url, url);
    e->length = 0;
    e->incomplete = 1;
    e->data_size = 0;
    init_list(e->frag);
    e->count = cache_count++;
    e->refcount = 1;
    add_to_list(cache, e);
    *f = e;
    return 0;
}

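/* Adjust the entry's data_size and the global cache_size by the same
   (possibly negative) amount; relies on a variable 'e' naming the current
   cache entry being in scope. */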
#define sf(x) e->data_size += (x), cache_size += (x)

int page_size = 4096;

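/* Round a fragment's payload size up so that the payload, the struct
   fragment header and an extra 64 bytes (presumably slack for allocator
   overhead) together end just below a multiple of page_size. */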
#define C_ALIGN(x) ((((x) + sizeof(struct fragment) + 64) | (page_size - 1)) - sizeof(struct fragment) - 64)

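/* Store 'length' bytes of 'data' at byte 'offset' of entry 'e'. Overlapping
   or adjacent fragments are extended and merged; if the new data disagrees
   with bytes already cached, the entry is truncated right after the new
   data. Returns 1 when the entry has grown past its previous maximum
   length, 0 otherwise. */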
int add_fragment(struct cache_entry *e, off_t offset, unsigned char *data, off_t length)
{
    struct fragment *f;
    struct fragment *nf;
    int trunc = 0;
    volatile off_t ca;
    if (!length) return 0;
    e->incomplete = 1;
    if ((off_t)(0UL + offset + length) < 0 || (off_t)(0UL + offset + length) < offset) overalloc();
    if ((off_t)(0UL + offset + (off_t)C_ALIGN(length)) < 0 || (off_t)(0UL + offset + (off_t)C_ALIGN(length)) < offset) overalloc();
    if (e->length < offset + length) e->length = offset + length;
    e->count = cache_count++;
    if (!list_empty(e->frag)) {
        f = e->frag.prev;
        if (f->offset + f->length == offset) goto have_f;
    }
    foreach(f, e->frag) {
have_f:
        if (f->offset > offset) break;
        if (f->offset <= offset && f->offset+f->length >= offset) {
            if (offset+length > f->offset+f->length) {
                if (memcmp(f->data+offset-f->offset, data, f->offset+f->length-offset)) trunc = 1;
                if (offset-f->offset+length <= f->real_length) {
                    sf((offset+length) - (f->offset+f->length));
                    f->length = offset-f->offset+length;
                } else {
                    sf(-(f->offset+f->length-offset));
                    f->length = offset-f->offset;
                    f = f->next;
                    break;
                }
            } else {
                if (memcmp(f->data+offset-f->offset, data, length)) trunc = 1;
            }
            memcpy(f->data+offset-f->offset, data, length);
            goto ch_o;
        }
    }
/* Intel C 9 has a bug and miscompiles this statement (< 0 test is true) */
    /*if (C_ALIGN(length) > MAXINT - sizeof(struct fragment) || C_ALIGN(length) < 0) overalloc();*/
    ca = C_ALIGN(length);
    if (ca > MAXINT - (int)sizeof(struct fragment) || ca < 0) overalloc();
    nf = mem_alloc(sizeof(struct fragment) + ca);
    sf(length);
    nf->offset = offset;
    nf->length = length;
    nf->real_length = C_ALIGN(length);
    memcpy(nf->data, data, length);
    add_at_pos(f->prev, nf);
    f = nf;
    ch_o:
    while ((void *)f->next != &e->frag && f->offset+f->length > f->next->offset) {
        if (f->offset+f->length < f->next->offset+f->next->length) {
            nf = mem_realloc(f, sizeof(struct fragment)+f->next->offset-f->offset+f->next->length);
            nf->prev->next = nf;
            nf->next->prev = nf;
            f = nf;
            if (memcmp(f->data+f->next->offset-f->offset, f->next->data, f->offset+f->length-f->next->offset)) trunc = 1;
            memcpy(f->data+f->length, f->next->data+f->offset+f->length-f->next->offset, f->next->offset+f->next->length-f->offset-f->length);
            sf(f->next->offset+f->next->length-f->offset-f->length);
            f->length = f->real_length = f->next->offset+f->next->length-f->offset;
        } else {
            if (memcmp(f->data+f->next->offset-f->offset, f->next->data, f->next->length)) trunc = 1;
        }
        nf = f->next;
        del_from_list(nf);
        sf(-nf->length);
        mem_free(nf);
    }
    if (trunc) truncate_entry(e, offset + length, 0);
    /*{
        foreach(f, e->frag) fprintf(stderr, "%d, %d, %d\n", f->offset, f->length, f->real_length);
        debug("a-");
    }*/
    if (e->length > e->max_length) {
        e->max_length = e->length;
        return 1;
    }
    return 0;
}

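/* Collapse the leading run of contiguous fragments (starting at offset 0)
   into a single fragment, so the beginning of the document can be read as
   one continuous block. */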
void defrag_entry(struct cache_entry *e)
{
    struct fragment *f, *g, *h, *n, *x;
    off_t l;
    if (list_empty(e->frag)) return;
    f = e->frag.next;
    if (f->offset) return;
    for (g = f->next; g != (void *)&e->frag && g->offset <= g->prev->offset+g->prev->length; g = g->next) if (g->offset < g->prev->offset+g->prev->length) {
        internal("fragments overlay");
        return;
    }
    if (g == f->next && f->length == f->real_length) return;
    for (l = 0, h = f; h != g; h = h->next) {
        if ((off_t)(0UL + l + h->length) < 0 || (off_t)(0UL + l + h->length) < l) overalloc();
        l += h->length;
    }
    if (l > MAXINT - (int)sizeof(struct fragment)) overalloc();
    n = mem_alloc(sizeof(struct fragment) + l);
    n->offset = 0;
    n->length = l;
    n->real_length = l;
    /*{
        struct fragment *f;
        foreach(f, e->frag) fprintf(stderr, "%d, %d, %d\n", f->offset, f->length, f->real_length);
        debug("d1-");
    }*/
    for (l = 0, h = f; h != g; h = h->next) {
        memcpy(n->data + l, h->data, h->length);
        l += h->length;
        x = h;
        h = h->prev;
        del_from_list(x);
        mem_free(x);
    }
    add_to_list(e->frag, n);
    /*{
        foreach(f, e->frag) fprintf(stderr, "%d, %d, %d\n", f->offset, f->length, f->real_length);
        debug("d-");
    }*/
}

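/* Discard all cached data of 'e' at or beyond offset 'off'. A fragment
   straddling 'off' is shortened; with 'final' set its allocation is also
   shrunk to the new length. */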
void truncate_entry(struct cache_entry *e, off_t off, int final)
{
    int modified = 0;
    struct fragment *f, *g;
    if (e->length > off) e->length = off, e->incomplete = 1;
    foreach(f, e->frag) {
        if (f->offset >= off) {
            del:
            while ((void *)f != &e->frag) {
                modified = 1;
                sf(-f->length);
                g = f->next;
                del_from_list(f);
                mem_free(f);
                f = g;
            }
            goto ret;
        }
        if (f->offset + f->length > off) {
            modified = 1;
            sf(-(f->offset + f->length - off));
            f->length = off - f->offset;
            if (final) {
                g = mem_realloc(f, sizeof(struct fragment) + f->length);
                g->next->prev = g;
                g->prev->next = g;
                f = g;
                f->real_length = f->length;
            }
            f = f->next;
            goto del;
        }
    }
    ret:
    if (modified) {
        e->count = cache_count++;
    }
}

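/* Release cached data of 'e' below offset 'off': fragments lying entirely
   below it are freed and a fragment straddling it is trimmed from the
   front. */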
void free_entry_to(struct cache_entry *e, off_t off)
{
    struct fragment *f, *g;
    e->incomplete = 1;
    foreach(f, e->frag) {
        if (f->offset + f->length <= off) {
            sf(-f->length);
            g = f;
            f = f->prev;
            del_from_list(g);
            mem_free(g);
        } else if (f->offset < off) {
            sf(f->offset - off);
            memmove(f->data, f->data + (off - f->offset), f->length -= off - f->offset);
            f->offset = off;
        } else break;
    }
}

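/* Free all cached data of 'e' (its fragment list and Last-Modified value),
   fix up the global size accounting and mark the entry incomplete; the
   entry itself remains in the cache. */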
void delete_entry_content(struct cache_entry *e)
{
    e->count = cache_count++;
    free_list(e->frag);
    e->length = 0;
    e->incomplete = 1;
    if (cache_size < (my_uintptr_t)e->data_size) {
        internal("cache_size underflow: %lu, %lu", (unsigned long)cache_size, (unsigned long)e->data_size);
    }
    cache_size -= e->data_size;
    e->data_size = 0;
    if (e->last_modified) {
        mem_free(e->last_modified);
        e->last_modified = NULL;
    }
}

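/* Unlink 'e' from the cache and free it together with everything it owns
   (URL, header, Last-Modified, redirect and SSL info). Calling this on a
   locked or still-loading entry is an internal error. */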
void delete_cache_entry(struct cache_entry *e)
{
    if (e->refcount) internal("deleting locked cache entry");
#ifdef DEBUG
    if (is_entry_used(e)) internal("deleting loading cache entry");
#endif
    delete_entry_content(e);
    del_from_list(e);
    mem_free(e->url);
    if (e->head) mem_free(e->head);
    if (e->last_modified) mem_free(e->last_modified);
    if (e->redirect) mem_free(e->redirect);
#ifdef HAVE_SSL
    if (e->ssl_info) mem_free(e->ssl_info);
#endif
    mem_free(e);
}

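/* Trim the cache. With u == 0, walk entries from least to most recently
   used and free unlocked, non-loading ones until the remaining size fits
   under memory_cache_size scaled by MEMORY_CACHE_GC_PERCENT; with u != 0,
   free every entry that is not locked or loading. Also sanity-checks the
   global cache_size against the sum of the per-entry sizes. */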
void garbage_collection(int u)
{
    struct cache_entry *e, *f;
    my_uintptr_t ncs = cache_size;
    my_uintptr_t ccs = 0;
    if (!u && cache_size <= (my_uintptr_t)memory_cache_size) return;
    foreach(e, cache) {
        if (e->refcount || is_entry_used(e)) {
            if (ncs < (my_uintptr_t)e->data_size) {
                internal("cache_size underflow: %lu, %lu", (unsigned long)ncs, (unsigned long)e->data_size);
            }
            ncs -= e->data_size;
        }
        ccs += e->data_size;
    }
    if (ccs != cache_size) internal("cache size badly computed: %lu != %lu", (unsigned long)cache_size, (unsigned long)ccs), cache_size = ccs;
    if (!u && ncs <= (my_uintptr_t)memory_cache_size) return;
    for (e = cache.prev; (void *)e != &cache; e = e->prev) {
        if (!u && (longlong)ncs <= (longlong)memory_cache_size * MEMORY_CACHE_GC_PERCENT) goto g;
        if (e->refcount || is_entry_used(e)) {
            e->tgc = 0;
            continue;
        }
        e->tgc = 1;
        if (ncs < (my_uintptr_t)e->data_size) {
            internal("cache_size underflow: %lu, %lu", (unsigned long)ncs, (unsigned long)e->data_size);
        }
        ncs -= e->data_size;
    }
    if (ncs) internal("cache_size(%lu) is larger than size of all objects(%lu)", (unsigned long)cache_size, (unsigned long)(cache_size - ncs));
    g:
    if ((void *)(e = e->next) == &cache) return;
    if (!u) for (f = e; (void *)f != &cache; f = f->next) {
        if (f->data_size && (longlong)ncs + f->data_size <= (longlong)memory_cache_size * MEMORY_CACHE_GC_PERCENT) {
            ncs += f->data_size;
            f->tgc = 0;
        }
    }
    for (f = e; (void *)f != &cache;) {
        f = f->next;
        if (f->prev->tgc) delete_cache_entry(f->prev);
    }
}

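/* Query the system page size once at startup (when getpagesize() is
   available) and use it for fragment alignment, keeping the 4096-byte
   default if the reported value looks implausible. */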
void init_cache(void)
{
#ifdef HAVE_GETPAGESIZE
    int getpg;
    EINTRLOOP(getpg, getpagesize());
    if (getpg > 0 && getpg < 0x10000 && !(getpg & (getpg - 1))) page_size = getpg;
#endif
}