"Fossies" - the Fresh Open Source Software Archive

Member "btrfs-progs-v5.4.1/extent_io.c" (9 Jan 2020, 24234 Bytes) of package /linux/misc/btrfs-progs-v5.4.1.tar.xz:



/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdbool.h>
#include "kerncompat.h"
#include "extent_io.h"
#include "kernel-lib/list.h"
#include "ctree.h"
#include "volumes.h"
#include "common/utils.h"
#include "common/internal.h"

void extent_io_tree_init(struct extent_io_tree *tree)
{
    cache_tree_init(&tree->state);
    cache_tree_init(&tree->cache);
    INIT_LIST_HEAD(&tree->lru);
    tree->cache_size = 0;
    tree->max_cache_size = (u64)total_memory() / 4;
}

void extent_io_tree_init_cache_max(struct extent_io_tree *tree,
                   u64 max_cache_size)
{
    extent_io_tree_init(tree);
    tree->max_cache_size = max_cache_size;
}

static struct extent_state *alloc_extent_state(void)
{
    struct extent_state *state;

    state = malloc(sizeof(*state));
    if (!state)
        return NULL;
    state->cache_node.objectid = 0;
    state->refs = 1;
    state->state = 0;
    state->xprivate = 0;
    return state;
}

static void btrfs_free_extent_state(struct extent_state *state)
{
    state->refs--;
    BUG_ON(state->refs < 0);
    if (state->refs == 0)
        free(state);
}

static void free_extent_state_func(struct cache_extent *cache)
{
    struct extent_state *es;

    es = container_of(cache, struct extent_state, cache_node);
    btrfs_free_extent_state(es);
}

static void free_extent_buffer_final(struct extent_buffer *eb);
void extent_io_tree_cleanup(struct extent_io_tree *tree)
{
    struct extent_buffer *eb;

    while (!list_empty(&tree->lru)) {
        eb = list_entry(tree->lru.next, struct extent_buffer, lru);
        if (eb->refs) {
            fprintf(stderr,
                "extent buffer leak: start %llu len %u\n",
                (unsigned long long)eb->start, eb->len);
            free_extent_buffer_nocache(eb);
        } else {
            free_extent_buffer_final(eb);
        }
    }

    cache_tree_free_extents(&tree->state, free_extent_state_func);
}

static inline void update_extent_state(struct extent_state *state)
{
    state->cache_node.start = state->start;
    state->cache_node.size = state->end + 1 - state->start;
}
/*
 * Utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS in their state field
 * are not merged.
 */
static int merge_state(struct extent_io_tree *tree,
               struct extent_state *state)
{
    struct extent_state *other;
    struct cache_extent *other_node;

    if (state->state & EXTENT_IOBITS)
        return 0;

    other_node = prev_cache_extent(&state->cache_node);
    if (other_node) {
        other = container_of(other_node, struct extent_state,
                     cache_node);
        if (other->end == state->start - 1 &&
            other->state == state->state) {
            state->start = other->start;
            update_extent_state(state);
            remove_cache_extent(&tree->state, &other->cache_node);
            btrfs_free_extent_state(other);
        }
    }
    other_node = next_cache_extent(&state->cache_node);
    if (other_node) {
        other = container_of(other_node, struct extent_state,
                     cache_node);
        if (other->start == state->end + 1 &&
            other->state == state->state) {
            other->start = state->start;
            update_extent_state(other);
            remove_cache_extent(&tree->state, &state->cache_node);
            btrfs_free_extent_state(state);
        }
    }
    return 0;
}
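
/*
 * A minimal sketch, compiled out, of the merge behaviour documented above:
 * setting the same bits on two touching ranges via set_extent_bits() (defined
 * below, declared in extent_io.h) leaves a single merged extent_state behind.
 * The function name is illustrative only.
 */
#if 0
static void merge_state_sketch(void)
{
    struct extent_io_tree tree;
    u64 found_start = 0;
    u64 found_end = 0;

    extent_io_tree_init(&tree);
    set_extent_bits(&tree, 0, 4095, EXTENT_DIRTY);
    set_extent_bits(&tree, 4096, 8191, EXTENT_DIRTY);
    /* merge_state() collapsed the two states into one [0, 8191] */
    find_first_extent_bit(&tree, 0, &found_start, &found_end, EXTENT_DIRTY);
    BUG_ON(found_start != 0 || found_end != 8191);
    extent_io_tree_cleanup(&tree);
}
#endif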

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 */
static int insert_state(struct extent_io_tree *tree,
            struct extent_state *state, u64 start, u64 end,
            int bits)
{
    int ret;

    BUG_ON(end < start);
    state->state |= bits;
    state->start = start;
    state->end = end;
    update_extent_state(state);
    ret = insert_cache_extent(&tree->state, &state->cache_node);
    BUG_ON(ret);
    merge_state(tree, state);
    return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created lower half ([orig->start,
 * split - 1]), while 'orig' keeps [split, orig->end].  'split' indicates
 * an offset inside 'orig' where it should be split.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
               struct extent_state *prealloc, u64 split)
{
    int ret;

    prealloc->start = orig->start;
    prealloc->end = split - 1;
    prealloc->state = orig->state;
    update_extent_state(prealloc);
    orig->start = split;
    update_extent_state(orig);
    ret = insert_cache_extent(&tree->state, &prealloc->cache_node);
    BUG_ON(ret);
    return 0;
}

/*
 * Clear some bits in a single extent state struct.  Returns the subset of
 * 'bits' that was actually set.  The state is freed if no bits remain,
 * otherwise a merge with its neighbours is attempted.
 */
static int clear_state_bit(struct extent_io_tree *tree,
                struct extent_state *state, int bits)
{
    int ret = state->state & bits;

    state->state &= ~bits;
    if (state->state == 0) {
        remove_cache_extent(&tree->state, &state->cache_node);
        btrfs_free_extent_state(state);
    } else {
        merge_state(tree, state);
    }
    return ret;
}
/*
 * extent_buffer_bitmap_set - set an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to set
 */
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
                              unsigned long pos, unsigned long len)
{
    u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
    const unsigned int size = pos + len;
    int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
    u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

    while (len >= bits_to_set) {
        *p |= mask_to_set;
        len -= bits_to_set;
        bits_to_set = BITS_PER_BYTE;
        mask_to_set = ~0;
        p++;
    }
    if (len) {
        mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
        *p |= mask_to_set;
    }
}

/*
 * extent_buffer_bitmap_clear - clear an area of a bitmap
 * @eb: the extent buffer
 * @start: offset of the bitmap item in the extent buffer
 * @pos: bit number of the first bit
 * @len: number of bits to clear
 */
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
                                unsigned long pos, unsigned long len)
{
    u8 *p = (u8 *)eb->data + start + BIT_BYTE(pos);
    const unsigned int size = pos + len;
    int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
    u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

    while (len >= bits_to_clear) {
        *p &= ~mask_to_clear;
        len -= bits_to_clear;
        bits_to_clear = BITS_PER_BYTE;
        mask_to_clear = ~0;
        p++;
    }
    if (len) {
        mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
        *p &= ~mask_to_clear;
    }
}
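
/*
 * A minimal sketch, compiled out, of the two bitmap helpers above.  It
 * borrows the ad-hoc eb allocation pattern that write_data_to_disk() uses
 * further down; the name bitmap_sketch is illustrative only.
 */
#if 0
static void bitmap_sketch(void)
{
    const unsigned long nbytes = 64; /* a 512-bit bitmap */
    struct extent_buffer *eb;

    eb = malloc(sizeof(struct extent_buffer) + nbytes);
    if (!eb)
        return;
    memset(eb, 0, sizeof(struct extent_buffer) + nbytes);
    eb->len = nbytes;

    /* set bits 5..68: a partial first byte, whole bytes, a partial tail */
    extent_buffer_bitmap_set(eb, 0, 5, 64);
    /* clear bits 8..15 again: exactly one whole byte */
    extent_buffer_bitmap_clear(eb, 0, 8, 8);

    BUG_ON(!extent_buffer_test_bit(eb, 0, 5));
    BUG_ON(extent_buffer_test_bit(eb, 0, 9));
    free(eb);
}
#endif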

/*
 * clear some bits on a range in the tree.
 */
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
    struct extent_state *state;
    struct extent_state *prealloc = NULL;
    struct cache_extent *node;
    u64 last_end;
    int err;
    int set = 0;

again:
    if (!prealloc) {
        prealloc = alloc_extent_state();
        if (!prealloc)
            return -ENOMEM;
    }

    /*
     * this search will find the extents that end after
     * our range starts
     */
    node = search_cache_extent(&tree->state, start);
    if (!node)
        goto out;
    state = container_of(node, struct extent_state, cache_node);
    if (state->start > end)
        goto out;
    last_end = state->end;

    /*
     *     | ---- desired range ---- |
     *  | state | or
     *  | ------------- state -------------- |
     *
     * We need to split the extent we found, and may flip
     * bits on second half.
     *
     * If the extent we found extends past our range, we
     * just split and search again.  It'll get split again
     * the next time though.
     *
     * If the extent we found is inside our range, we clear
     * the desired bit on it.
     */
    if (state->start < start) {
        err = split_state(tree, state, prealloc, start);
        BUG_ON(err == -EEXIST);
        prealloc = NULL;
        if (err)
            goto out;
        if (state->end <= end) {
            set |= clear_state_bit(tree, state, bits);
            if (last_end == (u64)-1)
                goto out;
            start = last_end + 1;
        } else {
            start = state->start;
        }
        goto search_again;
    }
    /*
     * | ---- desired range ---- |
     *                        | state |
     * We need to split the extent, and clear the bit
     * on the first half
     */
    if (state->start <= end && state->end > end) {
        err = split_state(tree, state, prealloc, end + 1);
        BUG_ON(err == -EEXIST);

        set |= clear_state_bit(tree, prealloc, bits);
        prealloc = NULL;
        goto out;
    }

    start = state->end + 1;
    set |= clear_state_bit(tree, state, bits);
    if (last_end == (u64)-1)
        goto out;
    start = last_end + 1;
    goto search_again;
out:
    if (prealloc)
        btrfs_free_extent_state(prealloc);
    return set;

search_again:
    if (start > end)
        goto out;
    goto again;
}
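
/*
 * A minimal sketch, compiled out, of the split cases drawn above: clearing
 * the middle of a dirty range splits [0, 8191] around [2048, 4095] and
 * leaves two dirty states behind.  The function name is illustrative only.
 */
#if 0
static void clear_middle_sketch(void)
{
    struct extent_io_tree tree;

    extent_io_tree_init(&tree);
    set_extent_bits(&tree, 0, 8191, EXTENT_DIRTY);
    clear_extent_bits(&tree, 2048, 4095, EXTENT_DIRTY);

    /* filled == 1: every byte of the queried range must carry the bit */
    BUG_ON(!test_range_bit(&tree, 0, 2047, EXTENT_DIRTY, 1));
    BUG_ON(!test_range_bit(&tree, 4096, 8191, EXTENT_DIRTY, 1));
    /* filled == 0: any overlap would count, and none is left here */
    BUG_ON(test_range_bit(&tree, 2048, 4095, EXTENT_DIRTY, 0));
    extent_io_tree_cleanup(&tree);
}
#endif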

/*
 * set some bits on a range in the tree.
 */
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
    struct extent_state *state;
    struct extent_state *prealloc = NULL;
    struct cache_extent *node;
    int err = 0;
    u64 last_start;
    u64 last_end;
again:
    if (!prealloc) {
        prealloc = alloc_extent_state();
        if (!prealloc)
            return -ENOMEM;
    }

    /*
     * this search will find the extents that end after
     * our range starts
     */
    node = search_cache_extent(&tree->state, start);
    if (!node) {
        err = insert_state(tree, prealloc, start, end, bits);
        BUG_ON(err == -EEXIST);
        prealloc = NULL;
        goto out;
    }

    state = container_of(node, struct extent_state, cache_node);
    last_start = state->start;
    last_end = state->end;

    /*
     * | ---- desired range ---- |
     * | state |
     *
     * Just set the bits on what we found and keep going
     */
    if (state->start == start && state->end <= end) {
        state->state |= bits;
        merge_state(tree, state);
        if (last_end == (u64)-1)
            goto out;
        start = last_end + 1;
        goto search_again;
    }
    /*
     *     | ---- desired range ---- |
     * | state |
     *   or
     * | ------------- state -------------- |
     *
     * We need to split the extent we found, and may flip bits on
     * second half.
     *
     * If the extent we found extends past our
     * range, we just split and search again.  It'll get split
     * again the next time though.
     *
     * If the extent we found is inside our range, we set the
     * desired bit on it.
     */
    if (state->start < start) {
        err = split_state(tree, state, prealloc, start);
        BUG_ON(err == -EEXIST);
        prealloc = NULL;
        if (err)
            goto out;
        if (state->end <= end) {
            state->state |= bits;
            start = state->end + 1;
            merge_state(tree, state);
            if (last_end == (u64)-1)
                goto out;
            start = last_end + 1;
        } else {
            start = state->start;
        }
        goto search_again;
    }
    /*
     * | ---- desired range ---- |
     *     | state | or               | state |
     *
     * There's a hole, we need to insert something in it and
     * ignore the extent we found.
     */
    if (state->start > start) {
        u64 this_end;
        if (end < last_start)
            this_end = end;
        else
            this_end = last_start - 1;
        err = insert_state(tree, prealloc, start, this_end,
                bits);
        BUG_ON(err == -EEXIST);
        prealloc = NULL;
        if (err)
            goto out;
        start = this_end + 1;
        goto search_again;
    }
    /*
     * | ---- desired range ---- |
     * | ---------- state ---------- |
     * We need to split the extent, and set the bit
     * on the first half
     */
    err = split_state(tree, state, prealloc, end + 1);
    BUG_ON(err == -EEXIST);

    state->state |= bits;
    merge_state(tree, prealloc);
    prealloc = NULL;
out:
    if (prealloc)
        btrfs_free_extent_state(prealloc);
    return err;
search_again:
    if (start > end)
        goto out;
    goto again;
}
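
/*
 * A minimal sketch, compiled out, of the "hole" case in the diagram above:
 * a covering set over two disjoint dirty ranges fills the gap with
 * insert_state() and ends up fully merged.  The name is illustrative only.
 */
#if 0
static void fill_hole_sketch(void)
{
    struct extent_io_tree tree;
    u64 found_start = 0;
    u64 found_end = 0;

    extent_io_tree_init(&tree);
    set_extent_bits(&tree, 0, 1023, EXTENT_DIRTY);
    set_extent_bits(&tree, 4096, 5119, EXTENT_DIRTY);
    /* covers the hole [1024, 4095]; everything merges into [0, 5119] */
    set_extent_bits(&tree, 0, 5119, EXTENT_DIRTY);
    find_first_extent_bit(&tree, 0, &found_start, &found_end, EXTENT_DIRTY);
    BUG_ON(found_start != 0 || found_end != 5119);
    extent_io_tree_cleanup(&tree);
}
#endif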

int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
    return set_extent_bits(tree, start, end, EXTENT_DIRTY);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end)
{
    return clear_extent_bits(tree, start, end, EXTENT_DIRTY);
}

int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
              u64 *start_ret, u64 *end_ret, int bits)
{
    struct cache_extent *node;
    struct extent_state *state;
    int ret = 1;

    /*
     * this search will find all the extents that end after
     * our range starts.
     */
    node = search_cache_extent(&tree->state, start);
    if (!node)
        goto out;

    while (1) {
        state = container_of(node, struct extent_state, cache_node);
        if (state->end >= start && (state->state & bits)) {
            *start_ret = state->start;
            *end_ret = state->end;
            ret = 0;
            break;
        }
        node = next_cache_extent(node);
        if (!node)
            break;
    }
out:
    return ret;
}
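
/*
 * A minimal sketch, compiled out, of the usual iteration pattern on top of
 * find_first_extent_bit(): advance past each returned range until the lookup
 * fails (non-zero return).  The function name is illustrative only.
 */
#if 0
static void walk_dirty_ranges_sketch(struct extent_io_tree *tree)
{
    u64 start = 0;
    u64 found_start;
    u64 found_end;

    while (!find_first_extent_bit(tree, start, &found_start, &found_end,
                      EXTENT_DIRTY)) {
        printf("dirty range [%llu, %llu]\n",
               (unsigned long long)found_start,
               (unsigned long long)found_end);
        if (found_end == (u64)-1)
            break;
        start = found_end + 1;
    }
}
#endif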

/*
 * Check whether 'bits' are set on a range: if 'filled' is non-zero, the
 * whole range must be covered without gaps and every covering state must
 * carry at least one of 'bits'; otherwise a single overlapping state with
 * one of 'bits' set is enough.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
           int bits, int filled)
{
    struct extent_state *state = NULL;
    struct cache_extent *node;
    int bitset = 0;

    node = search_cache_extent(&tree->state, start);
    while (node && start <= end) {
        state = container_of(node, struct extent_state, cache_node);

        if (filled && state->start > start) {
            bitset = 0;
            break;
        }
        if (state->start > end)
            break;
        if (state->state & bits) {
            bitset = 1;
            if (!filled)
                break;
        } else if (filled) {
            bitset = 0;
            break;
        }
        start = state->end + 1;
        if (start > end)
            break;
        node = next_cache_extent(node);
        if (!node) {
            if (filled)
                bitset = 0;
            break;
        }
    }
    return bitset;
}

int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
    struct cache_extent *node;
    struct extent_state *state;
    int ret = 0;

    node = search_cache_extent(&tree->state, start);
    if (!node) {
        ret = -ENOENT;
        goto out;
    }
    state = container_of(node, struct extent_state, cache_node);
    if (state->start != start) {
        ret = -ENOENT;
        goto out;
    }
    state->xprivate = private;
out:
    return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
    struct cache_extent *node;
    struct extent_state *state;
    int ret = 0;

    node = search_cache_extent(&tree->state, start);
    if (!node) {
        ret = -ENOENT;
        goto out;
    }
    state = container_of(node, struct extent_state, cache_node);
    if (state->start != start) {
        ret = -ENOENT;
        goto out;
    }
    *private = state->xprivate;
out:
    return ret;
}
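
/*
 * A minimal sketch, compiled out, of the xprivate accessors above: the u64
 * is attached to the state whose start matches exactly, so a lookup anywhere
 * else inside the range fails with -ENOENT.  The name is illustrative only.
 */
#if 0
static void state_private_sketch(void)
{
    struct extent_io_tree tree;
    u64 val = 0;

    extent_io_tree_init(&tree);
    set_extent_bits(&tree, 4096, 8191, EXTENT_DIRTY);
    BUG_ON(set_state_private(&tree, 4096, 0xdeadbeef));
    BUG_ON(get_state_private(&tree, 4096, &val));
    BUG_ON(val != 0xdeadbeef);
    /* 4097 lies inside the state but is not its start */
    BUG_ON(get_state_private(&tree, 4097, &val) != -ENOENT);
    extent_io_tree_cleanup(&tree);
}
#endif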

static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *info,
                           u64 bytenr, u32 blocksize)
{
    struct extent_buffer *eb;

    eb = calloc(1, sizeof(struct extent_buffer) + blocksize);
    if (!eb)
        return NULL;

    eb->start = bytenr;
    eb->len = blocksize;
    eb->refs = 1;
    eb->flags = 0;
    eb->fd = -1;
    eb->dev_bytenr = (u64)-1;
    eb->cache_node.start = bytenr;
    eb->cache_node.size = blocksize;
    eb->fs_info = info;
    eb->tree = &info->extent_cache;
    INIT_LIST_HEAD(&eb->recow);
    INIT_LIST_HEAD(&eb->lru);

    return eb;
}

struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
{
    struct extent_buffer *new;

    new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
    if (!new)
        return NULL;
    /* cloned eb is not linked into fs_info->extent_cache */
    new->tree = NULL;

    copy_extent_buffer(new, src, 0, 0, src->len);
    new->flags |= EXTENT_BUFFER_DUMMY;

    return new;
}

static void free_extent_buffer_final(struct extent_buffer *eb)
{
    struct extent_io_tree *tree = eb->tree;

    BUG_ON(eb->refs);
    BUG_ON(tree && tree->cache_size < eb->len);
    list_del_init(&eb->lru);
    if (!(eb->flags & EXTENT_BUFFER_DUMMY)) {
        remove_cache_extent(&tree->cache, &eb->cache_node);
        tree->cache_size -= eb->len;
    }
    free(eb);
}

static void free_extent_buffer_internal(struct extent_buffer *eb, bool free_now)
{
    if (!eb || IS_ERR(eb))
        return;

    eb->refs--;
    BUG_ON(eb->refs < 0);
    if (eb->refs == 0) {
        if (eb->flags & EXTENT_DIRTY) {
            warning(
            "dirty eb leak (aborted trans): start %llu len %u",
                eb->start, eb->len);
        }
        list_del_init(&eb->recow);
        if (eb->flags & EXTENT_BUFFER_DUMMY || free_now)
            free_extent_buffer_final(eb);
        /* otherwise the zero-ref eb stays cached until the LRU trims it */
    }
}

void free_extent_buffer(struct extent_buffer *eb)
{
    free_extent_buffer_internal(eb, false);
}

void free_extent_buffer_nocache(struct extent_buffer *eb)
{
    free_extent_buffer_internal(eb, true);
}

struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
                     u64 bytenr, u32 blocksize)
{
    struct extent_buffer *eb = NULL;
    struct cache_extent *cache;

    cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
    if (cache && cache->start == bytenr &&
        cache->size == blocksize) {
        eb = container_of(cache, struct extent_buffer, cache_node);
        list_move_tail(&eb->lru, &tree->lru);
        eb->refs++;
    }
    return eb;
}

struct extent_buffer *find_first_extent_buffer(struct extent_io_tree *tree,
                           u64 start)
{
    struct extent_buffer *eb = NULL;
    struct cache_extent *cache;

    cache = search_cache_extent(&tree->cache, start);
    if (cache) {
        eb = container_of(cache, struct extent_buffer, cache_node);
        list_move_tail(&eb->lru, &tree->lru);
        eb->refs++;
    }
    return eb;
}

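/*
 * Walk the LRU list from its least recently used end, freeing buffers whose
 * reference count has dropped to zero, until the cache is back under 90% of
 * max_cache_size.  Buffers still holding references are skipped.
 */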
static void trim_extent_buffer_cache(struct extent_io_tree *tree)
{
    struct extent_buffer *eb, *tmp;

    list_for_each_entry_safe(eb, tmp, &tree->lru, lru) {
        if (eb->refs == 0)
            free_extent_buffer_final(eb);
        if (tree->cache_size <= ((tree->max_cache_size * 9) / 10))
            break;
    }
}

struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                      u64 bytenr, u32 blocksize)
{
    struct extent_buffer *eb;
    struct extent_io_tree *tree = &fs_info->extent_cache;
    struct cache_extent *cache;

    cache = lookup_cache_extent(&tree->cache, bytenr, blocksize);
    if (cache && cache->start == bytenr &&
        cache->size == blocksize) {
        eb = container_of(cache, struct extent_buffer, cache_node);
        list_move_tail(&eb->lru, &tree->lru);
        eb->refs++;
    } else {
        int ret;

        if (cache) {
            eb = container_of(cache, struct extent_buffer,
                      cache_node);
            free_extent_buffer(eb);
        }
        eb = __alloc_extent_buffer(fs_info, bytenr, blocksize);
        if (!eb)
            return NULL;
        ret = insert_cache_extent(&tree->cache, &eb->cache_node);
        if (ret) {
            free(eb);
            return NULL;
        }
        list_add_tail(&eb->lru, &tree->lru);
        tree->cache_size += blocksize;
        if (tree->cache_size >= tree->max_cache_size)
            trim_extent_buffer_cache(tree);
    }
    return eb;
}

/*
 * Allocate a dummy extent buffer which won't be inserted into the extent
 * buffer cache.
 *
 * This mostly allows super block reads and writes to use the existing eb
 * infrastructure without polluting the eb cache.
 *
 * This is especially important to avoid injecting an eb with start == SZ_64K,
 * as a fuzzed image could have an invalid tree bytenr that covers the super
 * block range and causes a ref count underflow.
 */
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
                        u64 bytenr, u32 blocksize)
{
    struct extent_buffer *ret;

    ret = __alloc_extent_buffer(fs_info, bytenr, blocksize);
    if (!ret)
        return NULL;

    ret->tree = NULL;
    ret->flags |= EXTENT_BUFFER_DUMMY;

    return ret;
}
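
/*
 * A minimal sketch, compiled out, of the super block I/O pattern the comment
 * above alludes to: a dummy eb never enters the cache, so reading the super
 * block range cannot collide with a bogus tree block at the same bytenr.
 * BTRFS_SUPER_INFO_OFFSET and BTRFS_SUPER_INFO_SIZE are assumed to come from
 * the usual btrfs-progs headers; the function name is illustrative only.
 */
#if 0
static int read_super_copy_sketch(struct btrfs_fs_info *fs_info, int fd)
{
    struct extent_buffer *eb;
    int ret;

    eb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET,
                       BTRFS_SUPER_INFO_SIZE);
    if (!eb)
        return -ENOMEM;
    eb->fd = fd;
    eb->dev_bytenr = BTRFS_SUPER_INFO_OFFSET;
    ret = read_extent_from_disk(eb, 0, BTRFS_SUPER_INFO_SIZE);
    free_extent_buffer(eb);
    return ret;
}
#endif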

int read_extent_from_disk(struct extent_buffer *eb,
              unsigned long offset, unsigned long len)
{
    int ret;

    ret = pread(eb->fd, eb->data + offset, len, eb->dev_bytenr);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }
    if (ret != len) {
        ret = -EIO;
        goto out;
    }
    ret = 0;
out:
    return ret;
}

int write_extent_to_disk(struct extent_buffer *eb)
{
    int ret;

    ret = pwrite(eb->fd, eb->data, eb->len, eb->dev_bytenr);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }
    if (ret != eb->len) {
        ret = -EIO;
        goto out;
    }
    ret = 0;
out:
    return ret;
}

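/*
 * Read a logical range: map each chunk of it to a physical location with
 * btrfs_map_block() (honouring the requested mirror), pread() from the first
 * stripe of the mapping, and repeat until all bytes have been read.
 */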
int read_data_from_disk(struct btrfs_fs_info *info, void *buf, u64 offset,
            u64 bytes, int mirror)
{
    struct btrfs_multi_bio *multi = NULL;
    struct btrfs_device *device;
    u64 bytes_left = bytes;
    u64 read_len;
    u64 total_read = 0;
    int ret;

    while (bytes_left) {
        read_len = bytes_left;
        ret = btrfs_map_block(info, READ, offset, &read_len, &multi,
                      mirror, NULL);
        if (ret) {
            fprintf(stderr, "Couldn't map the block %Lu\n",
                offset);
            return -EIO;
        }
        device = multi->stripes[0].dev;

        read_len = min(bytes_left, read_len);
        if (device->fd <= 0) {
            kfree(multi);
            return -EIO;
        }

        ret = pread(device->fd, buf + total_read, read_len,
                multi->stripes[0].physical);
        kfree(multi);
        if (ret < 0) {
            fprintf(stderr, "Error reading %Lu, %d\n", offset,
                ret);
            return ret;
        }
        if (ret != read_len) {
            fprintf(stderr, "Short read for %Lu, read %d, "
                "read_len %Lu\n", offset, ret, read_len);
            return -EIO;
        }

        bytes_left -= read_len;
        offset += read_len;
        total_read += read_len;
    }

    return 0;
}

int write_data_to_disk(struct btrfs_fs_info *info, void *buf, u64 offset,
              u64 bytes, int mirror)
{
    struct btrfs_multi_bio *multi = NULL;
    struct btrfs_device *device;
    u64 bytes_left = bytes;
    u64 this_len;
    u64 total_write = 0;
    u64 *raid_map = NULL;
    u64 dev_bytenr;
    int dev_nr;
    int ret = 0;

    while (bytes_left > 0) {
        this_len = bytes_left;
        dev_nr = 0;

        ret = btrfs_map_block(info, WRITE, offset, &this_len, &multi,
                      mirror, &raid_map);
        if (ret) {
            fprintf(stderr, "Couldn't map the block %Lu\n",
                offset);
            return -EIO;
        }

        if (raid_map) {
            struct extent_buffer *eb;
            u64 stripe_len = this_len;

            this_len = min(this_len, bytes_left);
            this_len = min(this_len, (u64)info->nodesize);

            eb = malloc(sizeof(struct extent_buffer) + this_len);
            if (!eb) {
                fprintf(stderr, "cannot allocate memory for eb\n");
                ret = -ENOMEM;
                goto out;
            }

            memset(eb, 0, sizeof(struct extent_buffer) + this_len);
            eb->start = offset;
            eb->len = this_len;

            memcpy(eb->data, buf + total_write, this_len);
            ret = write_raid56_with_parity(info, eb, multi,
                               stripe_len, raid_map);
            BUG_ON(ret);

            free(eb);
            kfree(raid_map);
            raid_map = NULL;
        } else while (dev_nr < multi->num_stripes) {
            device = multi->stripes[dev_nr].dev;
            if (device->fd <= 0) {
                kfree(multi);
                return -EIO;
            }

            dev_bytenr = multi->stripes[dev_nr].physical;
            this_len = min(this_len, bytes_left);
            dev_nr++;

            ret = pwrite(device->fd, buf + total_write, this_len,
                     dev_bytenr);
            if (ret != this_len) {
                if (ret < 0) {
                    fprintf(stderr,
                        "Error writing to device: errno %d\n",
                        errno);
                    ret = -errno;
                    kfree(multi);
                    return ret;
                } else {
                    fprintf(stderr, "Short write\n");
                    kfree(multi);
                    return -EIO;
                }
            }
        }

        BUG_ON(bytes_left < this_len);

        bytes_left -= this_len;
        offset += this_len;
        total_write += this_len;

        kfree(multi);
        multi = NULL;
    }
    return 0;

out:
    kfree(raid_map);
    return ret;
}

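/*
 * Dirtying a buffer takes an extra reference (extent_buffer_get()) and marks
 * its range EXTENT_DIRTY in the owning tree; clear_extent_buffer_dirty()
 * undoes both, so a dirty eb cannot be trimmed out of the cache before it
 * has been written back.
 */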
int set_extent_buffer_dirty(struct extent_buffer *eb)
{
    struct extent_io_tree *tree = eb->tree;

    if (!(eb->flags & EXTENT_DIRTY)) {
        eb->flags |= EXTENT_DIRTY;
        set_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
        extent_buffer_get(eb);
    }
    return 0;
}

int clear_extent_buffer_dirty(struct extent_buffer *eb)
{
    struct extent_io_tree *tree = eb->tree;

    if (eb->flags & EXTENT_DIRTY) {
        eb->flags &= ~EXTENT_DIRTY;
        clear_extent_dirty(tree, eb->start, eb->start + eb->len - 1);
        free_extent_buffer(eb);
    }
    return 0;
}

int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
             unsigned long start, unsigned long len)
{
    return memcmp(eb->data + start, ptrv, len);
}

void read_extent_buffer(const struct extent_buffer *eb, void *dst,
            unsigned long start, unsigned long len)
{
    memcpy(dst, eb->data + start, len);
}

void write_extent_buffer(struct extent_buffer *eb, const void *src,
             unsigned long start, unsigned long len)
{
    memcpy(eb->data + start, src, len);
}

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
            unsigned long dst_offset, unsigned long src_offset,
            unsigned long len)
{
    memcpy(dst->data + dst_offset, src->data + src_offset, len);
}

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
               unsigned long src_offset, unsigned long len)
{
    memmove(dst->data + dst_offset, dst->data + src_offset, len);
}

void memset_extent_buffer(struct extent_buffer *eb, char c,
              unsigned long start, unsigned long len)
{
    memset(eb->data + start, c, len);
}

int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
               unsigned long nr)
{
    return le_test_bit(nr, (u8 *)eb->data + start);
}