Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--  fs/btrfs/extent_io.c  762
1 file changed, 478 insertions(+), 284 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ff43802a7c88..af0359dcf337 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -59,7 +59,7 @@ void btrfs_leak_debug_check(void)
while (!list_empty(&states)) {
state = list_entry(states.next, struct extent_state, leak_list);
- printk(KERN_ERR "btrfs state leak: start %llu end %llu "
+ printk(KERN_ERR "BTRFS: state leak: start %llu end %llu "
"state %lu in tree %p refs %d\n",
state->start, state->end, state->state, state->tree,
atomic_read(&state->refs));
@@ -69,7 +69,7 @@ void btrfs_leak_debug_check(void)
while (!list_empty(&buffers)) {
eb = list_entry(buffers.next, struct extent_buffer, leak_list);
- printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
+ printk(KERN_ERR "BTRFS: buffer leak start %llu len %lu "
"refs %d\n",
eb->start, eb->len, atomic_read(&eb->refs));
list_del(&eb->leak_list);
@@ -77,16 +77,22 @@ void btrfs_leak_debug_check(void)
}
}
-#define btrfs_debug_check_extent_io_range(inode, start, end) \
- __btrfs_debug_check_extent_io_range(__func__, (inode), (start), (end))
+#define btrfs_debug_check_extent_io_range(tree, start, end) \
+ __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
- struct inode *inode, u64 start, u64 end)
+ struct extent_io_tree *tree, u64 start, u64 end)
{
- u64 isize = i_size_read(inode);
+ struct inode *inode;
+ u64 isize;
+ if (!tree->mapping)
+ return;
+
+ inode = tree->mapping->host;
+ isize = i_size_read(inode);
if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
printk_ratelimited(KERN_DEBUG
- "btrfs: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
+ "BTRFS: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
caller, btrfs_ino(inode), isize, start, end);
}
}
@@ -124,6 +130,8 @@ static noinline void flush_write_bio(void *data);
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
+ if (!tree->mapping)
+ return NULL;
return btrfs_sb(tree->mapping->host->i_sb);
}
@@ -186,11 +194,9 @@ void extent_io_tree_init(struct extent_io_tree *tree,
struct address_space *mapping)
{
tree->state = RB_ROOT;
- INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
tree->ops = NULL;
tree->dirty_bytes = 0;
spin_lock_init(&tree->lock);
- spin_lock_init(&tree->buffer_lock);
tree->mapping = mapping;
}
@@ -223,13 +229,24 @@ void free_extent_state(struct extent_state *state)
}
}
-static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
- struct rb_node *node)
+static struct rb_node *tree_insert(struct rb_root *root,
+ struct rb_node *search_start,
+ u64 offset,
+ struct rb_node *node,
+ struct rb_node ***p_in,
+ struct rb_node **parent_in)
{
- struct rb_node **p = &root->rb_node;
+ struct rb_node **p;
struct rb_node *parent = NULL;
struct tree_entry *entry;
+ if (p_in && parent_in) {
+ p = *p_in;
+ parent = *parent_in;
+ goto do_insert;
+ }
+
+ p = search_start ? &search_start : &root->rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct tree_entry, rb_node);
@@ -242,35 +259,43 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
return parent;
}
+do_insert:
rb_link_node(node, parent, p);
rb_insert_color(node, root);
return NULL;
}
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
- struct rb_node **prev_ret,
- struct rb_node **next_ret)
+ struct rb_node **prev_ret,
+ struct rb_node **next_ret,
+ struct rb_node ***p_ret,
+ struct rb_node **parent_ret)
{
struct rb_root *root = &tree->state;
- struct rb_node *n = root->rb_node;
+ struct rb_node **n = &root->rb_node;
struct rb_node *prev = NULL;
struct rb_node *orig_prev = NULL;
struct tree_entry *entry;
struct tree_entry *prev_entry = NULL;
- while (n) {
- entry = rb_entry(n, struct tree_entry, rb_node);
- prev = n;
+ while (*n) {
+ prev = *n;
+ entry = rb_entry(prev, struct tree_entry, rb_node);
prev_entry = entry;
if (offset < entry->start)
- n = n->rb_left;
+ n = &(*n)->rb_left;
else if (offset > entry->end)
- n = n->rb_right;
+ n = &(*n)->rb_right;
else
- return n;
+ return *n;
}
+ if (p_ret)
+ *p_ret = n;
+ if (parent_ret)
+ *parent_ret = prev;
+
if (prev_ret) {
orig_prev = prev;
while (prev && offset > prev_entry->end) {
@@ -292,18 +317,27 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
return NULL;
}
-static inline struct rb_node *tree_search(struct extent_io_tree *tree,
- u64 offset)
+static inline struct rb_node *
+tree_search_for_insert(struct extent_io_tree *tree,
+ u64 offset,
+ struct rb_node ***p_ret,
+ struct rb_node **parent_ret)
{
struct rb_node *prev = NULL;
struct rb_node *ret;
- ret = __etree_search(tree, offset, &prev, NULL);
+ ret = __etree_search(tree, offset, &prev, NULL, p_ret, parent_ret);
if (!ret)
return prev;
return ret;
}
+static inline struct rb_node *tree_search(struct extent_io_tree *tree,
+ u64 offset)
+{
+ return tree_search_for_insert(tree, offset, NULL, NULL);
+}
+
static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
struct extent_state *other)
{
@@ -385,23 +419,25 @@ static void set_state_bits(struct extent_io_tree *tree,
*/
static int insert_state(struct extent_io_tree *tree,
struct extent_state *state, u64 start, u64 end,
+ struct rb_node ***p,
+ struct rb_node **parent,
unsigned long *bits)
{
struct rb_node *node;
if (end < start)
- WARN(1, KERN_ERR "btrfs end < start %llu %llu\n",
+ WARN(1, KERN_ERR "BTRFS: end < start %llu %llu\n",
end, start);
state->start = start;
state->end = end;
set_state_bits(tree, state, bits);
- node = tree_insert(&tree->state, end, &state->rb_node);
+ node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent);
if (node) {
struct extent_state *found;
found = rb_entry(node, struct extent_state, rb_node);
- printk(KERN_ERR "btrfs found node %llu %llu on insert of "
+ printk(KERN_ERR "BTRFS: found node %llu %llu on insert of "
"%llu %llu\n",
found->start, found->end, start, end);
return -EEXIST;
@@ -444,7 +480,8 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
prealloc->state = orig->state;
orig->start = split;
- node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
+ node = tree_insert(&tree->state, &orig->rb_node, prealloc->end,
+ &prealloc->rb_node, NULL, NULL);
if (node) {
free_extent_state(prealloc);
return -EEXIST;
@@ -542,7 +579,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
int err;
int clear = 0;
- btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+ btrfs_debug_check_extent_io_range(tree, start, end);
if (bits & EXTENT_DELALLOC)
bits |= EXTENT_NORESERVE;
@@ -702,7 +739,7 @@ static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state *state;
struct rb_node *node;
- btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+ btrfs_debug_check_extent_io_range(tree, start, end);
spin_lock(&tree->lock);
again:
@@ -712,6 +749,7 @@ again:
* our range starts
*/
node = tree_search(tree, start);
+process_node:
if (!node)
break;
@@ -732,7 +770,10 @@ again:
if (start > end)
break;
- cond_resched_lock(&tree->lock);
+ if (!cond_resched_lock(&tree->lock)) {
+ node = rb_next(node);
+ goto process_node;
+ }
}
out:
spin_unlock(&tree->lock);
@@ -783,11 +824,13 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state *state;
struct extent_state *prealloc = NULL;
struct rb_node *node;
+ struct rb_node **p;
+ struct rb_node *parent;
int err = 0;
u64 last_start;
u64 last_end;
- btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+ btrfs_debug_check_extent_io_range(tree, start, end);
bits |= EXTENT_FIRST_DELALLOC;
again:
@@ -809,14 +852,16 @@ again:
* this search will find all the extents that end after
* our range starts.
*/
- node = tree_search(tree, start);
+ node = tree_search_for_insert(tree, start, &p, &parent);
if (!node) {
prealloc = alloc_extent_state_atomic(prealloc);
BUG_ON(!prealloc);
- err = insert_state(tree, prealloc, start, end, &bits);
+ err = insert_state(tree, prealloc, start, end,
+ &p, &parent, &bits);
if (err)
extent_io_tree_panic(tree, err);
+ cache_state(prealloc, cached_state);
prealloc = NULL;
goto out;
}
@@ -919,7 +964,7 @@ hit_next:
* the later extent.
*/
err = insert_state(tree, prealloc, start, this_end,
- &bits);
+ NULL, NULL, &bits);
if (err)
extent_io_tree_panic(tree, err);
@@ -1005,11 +1050,13 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state *state;
struct extent_state *prealloc = NULL;
struct rb_node *node;
+ struct rb_node **p;
+ struct rb_node *parent;
int err = 0;
u64 last_start;
u64 last_end;
- btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+ btrfs_debug_check_extent_io_range(tree, start, end);
again:
if (!prealloc && (mask & __GFP_WAIT)) {
@@ -1032,17 +1079,19 @@ again:
* this search will find all the extents that end after
* our range starts.
*/
- node = tree_search(tree, start);
+ node = tree_search_for_insert(tree, start, &p, &parent);
if (!node) {
prealloc = alloc_extent_state_atomic(prealloc);
if (!prealloc) {
err = -ENOMEM;
goto out;
}
- err = insert_state(tree, prealloc, start, end, &bits);
- prealloc = NULL;
+ err = insert_state(tree, prealloc, start, end,
+ &p, &parent, &bits);
if (err)
extent_io_tree_panic(tree, err);
+ cache_state(prealloc, cached_state);
+ prealloc = NULL;
goto out;
}
state = rb_entry(node, struct extent_state, rb_node);
@@ -1135,7 +1184,7 @@ hit_next:
* the later extent.
*/
err = insert_state(tree, prealloc, start, this_end,
- &bits);
+ NULL, NULL, &bits);
if (err)
extent_io_tree_panic(tree, err);
cache_state(prealloc, cached_state);
@@ -1644,6 +1693,7 @@ again:
* shortening the size of the delalloc range we're searching
*/
free_extent_state(cached_state);
+ cached_state = NULL;
if (!loops) {
max_bytes = PAGE_CACHE_SIZE;
loops = 1;
@@ -1984,7 +2034,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio)
return -EIO;
- bio->bi_size = 0;
+ bio->bi_iter.bi_size = 0;
map_length = length;
ret = btrfs_map_block(fs_info, WRITE, logical,
@@ -1995,7 +2045,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
}
BUG_ON(mirror_num != bbio->mirror_num);
sector = bbio->stripes[mirror_num-1].physical >> 9;
- bio->bi_sector = sector;
+ bio->bi_iter.bi_sector = sector;
dev = bbio->stripes[mirror_num-1].dev;
kfree(bbio);
if (!dev || !dev->bdev || !dev->writeable) {
@@ -2012,9 +2062,10 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
return -EIO;
}
- printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
- "(dev %s sector %llu)\n", page->mapping->host->i_ino,
- start, rcu_str_deref(dev->name), sector);
+ printk_ratelimited_in_rcu(KERN_INFO
+ "BTRFS: read error corrected: ino %lu off %llu "
+ "(dev %s sector %llu)\n", page->mapping->host->i_ino,
+ start, rcu_str_deref(dev->name), sector);
bio_put(bio);
return 0;
@@ -2156,7 +2207,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
return -EIO;
}
- if (em->start > start || em->start + em->len < start) {
+ if (em->start > start || em->start + em->len <= start) {
free_extent_map(em);
em = NULL;
}
@@ -2268,9 +2319,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
return -EIO;
}
bio->bi_end_io = failed_bio->bi_end_io;
- bio->bi_sector = failrec->logical >> 9;
+ bio->bi_iter.bi_sector = failrec->logical >> 9;
bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
- bio->bi_size = 0;
+ bio->bi_iter.bi_size = 0;
btrfs_failed_bio = btrfs_io_bio(failed_bio);
if (btrfs_failed_bio->csum) {
@@ -2303,7 +2354,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
{
int uptodate = (err == 0);
struct extent_io_tree *tree;
- int ret;
+ int ret = 0;
tree = &BTRFS_I(page->mapping->host)->io_tree;
@@ -2317,6 +2368,8 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
if (!uptodate) {
ClearPageUptodate(page);
SetPageError(page);
+ ret = ret < 0 ? ret : -EIO;
+ mapping_set_error(page->mapping, ret);
}
return 0;
}
@@ -2332,37 +2385,39 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
*/
static void end_bio_extent_writepage(struct bio *bio, int err)
{
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct extent_io_tree *tree;
+ struct bio_vec *bvec;
u64 start;
u64 end;
+ int i;
- do {
+ bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
/* We always issue full-page reads, but if some block
* in a page fails to read, blk_update_request() will
* advance bv_offset and adjust bv_len to compensate.
* Print a warning for nonzero offsets, and an error
* if they don't add up to a full page. */
- if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
- printk("%s page write in btrfs with offset %u and length %u\n",
- bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
- ? KERN_ERR "partial" : KERN_INFO "incomplete",
- bvec->bv_offset, bvec->bv_len);
+ if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
+ if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+ btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
+ "partial page write in btrfs with offset %u and length %u",
+ bvec->bv_offset, bvec->bv_len);
+ else
+ btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
+ "incomplete page write in btrfs with offset %u and "
+ "length %u",
+ bvec->bv_offset, bvec->bv_len);
+ }
start = page_offset(page);
end = start + bvec->bv_offset + bvec->bv_len - 1;
- if (--bvec >= bio->bi_io_vec)
- prefetchw(&bvec->bv_page->flags);
-
if (end_extent_writepage(page, err, start, end))
continue;
end_page_writeback(page);
- } while (bvec >= bio->bi_io_vec);
+ }
bio_put(bio);
}
@@ -2392,9 +2447,8 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
*/
static void end_bio_extent_readpage(struct bio *bio, int err)
{
+ struct bio_vec *bvec;
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct bio_vec *bvec = bio->bi_io_vec;
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
struct extent_io_tree *tree;
u64 offset = 0;
@@ -2405,16 +2459,17 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
u64 extent_len = 0;
int mirror;
int ret;
+ int i;
if (err)
uptodate = 0;
- do {
+ bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
- "mirror=%lu\n", (u64)bio->bi_sector, err,
+ "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
io_bio->mirror_num);
tree = &BTRFS_I(inode)->io_tree;
@@ -2423,19 +2478,22 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
* advance bv_offset and adjust bv_len to compensate.
* Print a warning for nonzero offsets, and an error
* if they don't add up to a full page. */
- if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
- printk("%s page read in btrfs with offset %u and length %u\n",
- bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
- ? KERN_ERR "partial" : KERN_INFO "incomplete",
- bvec->bv_offset, bvec->bv_len);
+ if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
+ if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+ btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
+ "partial page read in btrfs with offset %u and length %u",
+ bvec->bv_offset, bvec->bv_len);
+ else
+ btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
+ "incomplete page read in btrfs with offset %u and "
+ "length %u",
+ bvec->bv_offset, bvec->bv_len);
+ }
start = page_offset(page);
end = start + bvec->bv_offset + bvec->bv_len - 1;
len = bvec->bv_len;
- if (++bvec <= bvec_end)
- prefetchw(&bvec->bv_page->flags);
-
mirror = io_bio->mirror_num;
if (likely(uptodate && tree->ops &&
tree->ops->readpage_end_io_hook)) {
@@ -2474,6 +2532,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
test_bit(BIO_UPTODATE, &bio->bi_flags);
if (err)
uptodate = 0;
+ offset += len;
continue;
}
}
@@ -2516,7 +2575,7 @@ readpage_ok:
extent_start = start;
extent_len = end + 1 - start;
}
- } while (bvec <= bvec_end);
+ }
if (extent_len)
endio_readpage_release_extent(tree, extent_start, extent_len,
@@ -2547,9 +2606,8 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
}
if (bio) {
- bio->bi_size = 0;
bio->bi_bdev = bdev;
- bio->bi_sector = first_sector;
+ bio->bi_iter.bi_sector = first_sector;
btrfs_bio = btrfs_io_bio(bio);
btrfs_bio->csum = NULL;
btrfs_bio->csum_allocated = NULL;
@@ -2643,7 +2701,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
if (bio_ret && *bio_ret) {
bio = *bio_ret;
if (old_compressed)
- contig = bio->bi_sector == sector;
+ contig = bio->bi_iter.bi_sector == sector;
else
contig = bio_end_sector(bio) == sector;
@@ -2710,7 +2768,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
if (em_cached && *em_cached) {
em = *em_cached;
- if (em->in_tree && start >= em->start &&
+ if (extent_map_in_tree(em) && start >= em->start &&
start < extent_map_end(em)) {
atomic_inc(&em->refs);
return em;
@@ -3044,143 +3102,130 @@ static noinline void update_nr_written(struct page *page,
}
/*
- * the writepage semantics are similar to regular writepage. extent
- * records are inserted to lock ranges in the tree, and as dirty areas
- * are found, they are marked writeback. Then the lock bits are removed
- * and the end_io handler clears the writeback ranges
+ * helper for __extent_writepage, doing all of the delayed allocation setup.
+ *
+ * This returns 1 if our fill_delalloc function did all the work required
+ * to write the page (copy into inline extent). In this case the IO has
+ * been started and the page is already unlocked.
+ *
+ * This returns 0 if all went well (page still locked)
+ * This returns < 0 if there were errors (page still locked)
*/
-static int __extent_writepage(struct page *page, struct writeback_control *wbc,
- void *data)
+static noinline_for_stack int writepage_delalloc(struct inode *inode,
+ struct page *page, struct writeback_control *wbc,
+ struct extent_page_data *epd,
+ u64 delalloc_start,
+ unsigned long *nr_written)
+{
+ struct extent_io_tree *tree = epd->tree;
+ u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1;
+ u64 nr_delalloc;
+ u64 delalloc_to_write = 0;
+ u64 delalloc_end = 0;
+ int ret;
+ int page_started = 0;
+
+ if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
+ return 0;
+
+ while (delalloc_end < page_end) {
+ nr_delalloc = find_lock_delalloc_range(inode, tree,
+ page,
+ &delalloc_start,
+ &delalloc_end,
+ 128 * 1024 * 1024);
+ if (nr_delalloc == 0) {
+ delalloc_start = delalloc_end + 1;
+ continue;
+ }
+ ret = tree->ops->fill_delalloc(inode, page,
+ delalloc_start,
+ delalloc_end,
+ &page_started,
+ nr_written);
+ /* File system has been set read-only */
+ if (ret) {
+ SetPageError(page);
+ /* fill_delalloc should be return < 0 for error
+ * but just in case, we use > 0 here meaning the
+ * IO is started, so we don't want to return > 0
+ * unless things are going well.
+ */
+ ret = ret < 0 ? ret : -EIO;
+ goto done;
+ }
+ /*
+ * delalloc_end is already one less than the total
+ * length, so we don't subtract one from
+ * PAGE_CACHE_SIZE
+ */
+ delalloc_to_write += (delalloc_end - delalloc_start +
+ PAGE_CACHE_SIZE) >>
+ PAGE_CACHE_SHIFT;
+ delalloc_start = delalloc_end + 1;
+ }
+ if (wbc->nr_to_write < delalloc_to_write) {
+ int thresh = 8192;
+
+ if (delalloc_to_write < thresh * 2)
+ thresh = delalloc_to_write;
+ wbc->nr_to_write = min_t(u64, delalloc_to_write,
+ thresh);
+ }
+
+ /* did the fill delalloc function already unlock and start
+ * the IO?
+ */
+ if (page_started) {
+ /*
+ * we've unlocked the page, so we can't update
+ * the mapping's writeback index, just update
+ * nr_to_write.
+ */
+ wbc->nr_to_write -= *nr_written;
+ return 1;
+ }
+
+ ret = 0;
+
+done:
+ return ret;
+}
+
+/*
+ * helper for __extent_writepage. This calls the writepage start hooks,
+ * and does the loop to map the page into extents and bios.
+ *
+ * We return 1 if the IO is started and the page is unlocked,
+ * 0 if all went well (page still locked)
+ * < 0 if there were errors (page still locked)
+ */
+static noinline_for_stack int __extent_writepage_io(struct inode *inode,
+ struct page *page,
+ struct writeback_control *wbc,
+ struct extent_page_data *epd,
+ loff_t i_size,
+ unsigned long nr_written,
+ int write_flags, int *nr_ret)
{
- struct inode *inode = page->mapping->host;
- struct extent_page_data *epd = data;
struct extent_io_tree *tree = epd->tree;
u64 start = page_offset(page);
- u64 delalloc_start;
u64 page_end = start + PAGE_CACHE_SIZE - 1;
u64 end;
u64 cur = start;
u64 extent_offset;
- u64 last_byte = i_size_read(inode);
u64 block_start;
u64 iosize;
sector_t sector;
struct extent_state *cached_state = NULL;
struct extent_map *em;
struct block_device *bdev;
- int ret;
- int nr = 0;
size_t pg_offset = 0;
size_t blocksize;
- loff_t i_size = i_size_read(inode);
- unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
- u64 nr_delalloc;
- u64 delalloc_end;
- int page_started;
- int compressed;
- int write_flags;
- unsigned long nr_written = 0;
- bool fill_delalloc = true;
-
- if (wbc->sync_mode == WB_SYNC_ALL)
- write_flags = WRITE_SYNC;
- else
- write_flags = WRITE;
-
- trace___extent_writepage(page, inode, wbc);
-
- WARN_ON(!PageLocked(page));
-
- ClearPageError(page);
-
- pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
- if (page->index > end_index ||
- (page->index == end_index && !pg_offset)) {
- page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
- unlock_page(page);
- return 0;
- }
-
- if (page->index == end_index) {
- char *userpage;
-
- userpage = kmap_atomic(page);
- memset(userpage + pg_offset, 0,
- PAGE_CACHE_SIZE - pg_offset);
- kunmap_atomic(userpage);
- flush_dcache_page(page);
- }
- pg_offset = 0;
-
- set_page_extent_mapped(page);
-
- if (!tree->ops || !tree->ops->fill_delalloc)
- fill_delalloc = false;
-
- delalloc_start = start;
- delalloc_end = 0;
- page_started = 0;
- if (!epd->extent_locked && fill_delalloc) {
- u64 delalloc_to_write = 0;
- /*
- * make sure the wbc mapping index is at least updated
- * to this page.
- */
- update_nr_written(page, wbc, 0);
-
- while (delalloc_end < page_end) {
- nr_delalloc = find_lock_delalloc_range(inode, tree,
- page,
- &delalloc_start,
- &delalloc_end,
- 128 * 1024 * 1024);
- if (nr_delalloc == 0) {
- delalloc_start = delalloc_end + 1;
- continue;
- }
- ret = tree->ops->fill_delalloc(inode, page,
- delalloc_start,
- delalloc_end,
- &page_started,
- &nr_written);
- /* File system has been set read-only */
- if (ret) {
- SetPageError(page);
- goto done;
- }
- /*
- * delalloc_end is already one less than the total
- * length, so we don't subtract one from
- * PAGE_CACHE_SIZE
- */
- delalloc_to_write += (delalloc_end - delalloc_start +
- PAGE_CACHE_SIZE) >>
- PAGE_CACHE_SHIFT;
- delalloc_start = delalloc_end + 1;
- }
- if (wbc->nr_to_write < delalloc_to_write) {
- int thresh = 8192;
-
- if (delalloc_to_write < thresh * 2)
- thresh = delalloc_to_write;
- wbc->nr_to_write = min_t(u64, delalloc_to_write,
- thresh);
- }
+ int ret = 0;
+ int nr = 0;
+ bool compressed;
- /* did the fill delalloc function already unlock and start
- * the IO?
- */
- if (page_started) {
- ret = 0;
- /*
- * we've unlocked the page, so we can't update
- * the mapping's writeback index, just update
- * nr_to_write.
- */
- wbc->nr_to_write -= nr_written;
- goto done_unlocked;
- }
- }
if (tree->ops && tree->ops->writepage_start_hook) {
ret = tree->ops->writepage_start_hook(page, start,
page_end);
@@ -3190,9 +3235,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
wbc->pages_skipped++;
else
redirty_page_for_writepage(wbc, page);
+
update_nr_written(page, wbc, nr_written);
unlock_page(page);
- ret = 0;
+ ret = 1;
goto done_unlocked;
}
}
@@ -3204,7 +3250,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
update_nr_written(page, wbc, nr_written + 1);
end = page_end;
- if (last_byte <= start) {
+ if (i_size <= start) {
if (tree->ops && tree->ops->writepage_end_io_hook)
tree->ops->writepage_end_io_hook(page, start,
page_end, NULL, 1);
@@ -3214,7 +3260,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
blocksize = inode->i_sb->s_blocksize;
while (cur <= end) {
- if (cur >= last_byte) {
+ u64 em_end;
+ if (cur >= i_size) {
if (tree->ops && tree->ops->writepage_end_io_hook)
tree->ops->writepage_end_io_hook(page, cur,
page_end, NULL, 1);
@@ -3224,13 +3271,15 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
end - cur + 1, 1);
if (IS_ERR_OR_NULL(em)) {
SetPageError(page);
+ ret = PTR_ERR_OR_ZERO(em);
break;
}
extent_offset = cur - em->start;
- BUG_ON(extent_map_end(em) <= cur);
+ em_end = extent_map_end(em);
+ BUG_ON(em_end <= cur);
BUG_ON(end < cur);
- iosize = min(extent_map_end(em) - cur, end - cur + 1);
+ iosize = min(em_end - cur, end - cur + 1);
iosize = ALIGN(iosize, blocksize);
sector = (em->block_start + extent_offset) >> 9;
bdev = em->bdev;
@@ -3266,13 +3315,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
pg_offset += iosize;
continue;
}
- /* leave this out until we have a page_mkwrite call */
- if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
- EXTENT_DIRTY, 0, NULL)) {
- cur = cur + iosize;
- pg_offset += iosize;
- continue;
- }
if (tree->ops && tree->ops->writepage_io_hook) {
ret = tree->ops->writepage_io_hook(page, cur,
@@ -3283,12 +3325,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
if (ret) {
SetPageError(page);
} else {
- unsigned long max_nr = end_index + 1;
+ unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1;
set_range_writeback(tree, cur, cur + iosize - 1);
if (!PageWriteback(page)) {
- printk(KERN_ERR "btrfs warning page %lu not "
- "writeback, cur %llu end %llu\n",
+ btrfs_err(BTRFS_I(inode)->root->fs_info,
+ "page %lu not writeback, cur %llu end %llu",
page->index, cur, end);
}
@@ -3305,35 +3347,107 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
nr++;
}
done:
- if (nr == 0) {
- /* make sure the mapping tag for page dirty gets cleared */
- set_page_writeback(page);
- end_page_writeback(page);
- }
- unlock_page(page);
+ *nr_ret = nr;
done_unlocked:
/* drop our reference on any cached states */
free_extent_state(cached_state);
- return 0;
+ return ret;
}
-static int eb_wait(void *word)
+/*
+ * the writepage semantics are similar to regular writepage. extent
+ * records are inserted to lock ranges in the tree, and as dirty areas
+ * are found, they are marked writeback. Then the lock bits are removed
+ * and the end_io handler clears the writeback ranges
+ */
+static int __extent_writepage(struct page *page, struct writeback_control *wbc,
+ void *data)
{
- io_schedule();
+ struct inode *inode = page->mapping->host;
+ struct extent_page_data *epd = data;
+ u64 start = page_offset(page);
+ u64 page_end = start + PAGE_CACHE_SIZE - 1;
+ int ret;
+ int nr = 0;
+ size_t pg_offset = 0;
+ loff_t i_size = i_size_read(inode);
+ unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
+ int write_flags;
+ unsigned long nr_written = 0;
+
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ write_flags = WRITE_SYNC;
+ else
+ write_flags = WRITE;
+
+ trace___extent_writepage(page, inode, wbc);
+
+ WARN_ON(!PageLocked(page));
+
+ ClearPageError(page);
+
+ pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
+ if (page->index > end_index ||
+ (page->index == end_index && !pg_offset)) {
+ page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ unlock_page(page);
+ return 0;
+ }
+
+ if (page->index == end_index) {
+ char *userpage;
+
+ userpage = kmap_atomic(page);
+ memset(userpage + pg_offset, 0,
+ PAGE_CACHE_SIZE - pg_offset);
+ kunmap_atomic(userpage);
+ flush_dcache_page(page);
+ }
+
+ pg_offset = 0;
+
+ set_page_extent_mapped(page);
+
+ ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
+ if (ret == 1)
+ goto done_unlocked;
+ if (ret)
+ goto done;
+
+ ret = __extent_writepage_io(inode, page, wbc, epd,
+ i_size, nr_written, write_flags, &nr);
+ if (ret == 1)
+ goto done_unlocked;
+
+done:
+ if (nr == 0) {
+ /* make sure the mapping tag for page dirty gets cleared */
+ set_page_writeback(page);
+ end_page_writeback(page);
+ }
+ if (PageError(page)) {
+ ret = ret < 0 ? ret : -EIO;
+ end_extent_writepage(page, ret, start, page_end);
+ }
+ unlock_page(page);
+ return ret;
+
+done_unlocked:
return 0;
}
void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
{
- wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
- TASK_UNINTERRUPTIBLE);
+ wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
+ TASK_UNINTERRUPTIBLE);
}
-static int lock_extent_buffer_for_io(struct extent_buffer *eb,
- struct btrfs_fs_info *fs_info,
- struct extent_page_data *epd)
+static noinline_for_stack int
+lock_extent_buffer_for_io(struct extent_buffer *eb,
+ struct btrfs_fs_info *fs_info,
+ struct extent_page_data *epd)
{
unsigned long i, num_pages;
int flush = 0;
@@ -3404,26 +3518,24 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
static void end_extent_buffer_writeback(struct extent_buffer *eb)
{
clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
- smp_mb__after_clear_bit();
+ smp_mb__after_atomic();
wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
}
static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
{
- int uptodate = err == 0;
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct bio_vec *bvec;
struct extent_buffer *eb;
- int done;
+ int i, done;
- do {
+ bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
- bvec--;
eb = (struct extent_buffer *)page->private;
BUG_ON(!eb);
done = atomic_dec_and_test(&eb->io_pages);
- if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
+ if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
ClearPageUptodate(page);
SetPageError(page);
@@ -3435,18 +3547,18 @@ static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
continue;
end_extent_buffer_writeback(eb);
- } while (bvec >= bio->bi_io_vec);
+ }
bio_put(bio);
-
}
-static int write_one_eb(struct extent_buffer *eb,
+static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct btrfs_fs_info *fs_info,
struct writeback_control *wbc,
struct extent_page_data *epd)
{
struct block_device *bdev = fs_info->fs_devices->latest_bdev;
+ struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
u64 offset = eb->start;
unsigned long i, num_pages;
unsigned long bio_flags = 0;
@@ -3464,7 +3576,7 @@ static int write_one_eb(struct extent_buffer *eb,
clear_page_dirty_for_io(p);
set_page_writeback(p);
- ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
+ ret = submit_extent_page(rw, tree, p, offset >> 9,
PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
-1, end_bio_extent_buffer_writepage,
0, epd->bio_flags, bio_flags);
@@ -3638,6 +3750,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
struct inode *inode = mapping->host;
int ret = 0;
int done = 0;
+ int err = 0;
int nr_to_write_done = 0;
struct pagevec pvec;
int nr_pages;
@@ -3724,8 +3837,8 @@ retry:
unlock_page(page);
ret = 0;
}
- if (ret)
- done = 1;
+ if (!err && ret < 0)
+ err = ret;
/*
* the filesystem may choose to bump up nr_to_write.
@@ -3737,7 +3850,7 @@ retry:
pagevec_release(&pvec);
cond_resched();
}
- if (!scanned && !done) {
+ if (!scanned && !done && !err) {
/*
* We hit the last page and there is more work to be done: wrap
* back to the start of the file
@@ -3747,7 +3860,7 @@ retry:
goto retry;
}
btrfs_add_delayed_iput(inode);
- return ret;
+ return err;
}
static void flush_epd_write_bio(struct extent_page_data *epd)
@@ -4082,12 +4195,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
- struct btrfs_file_extent_item *item;
int end = 0;
u64 em_start = 0;
u64 em_len = 0;
u64 em_end = 0;
- unsigned long emflags;
if (len == 0)
return -EINVAL;
@@ -4097,8 +4208,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return -ENOMEM;
path->leave_spinning = 1;
- start = ALIGN(start, BTRFS_I(inode)->root->sectorsize);
- len = ALIGN(len, BTRFS_I(inode)->root->sectorsize);
+ start = round_down(start, BTRFS_I(inode)->root->sectorsize);
+ len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
/*
* lookup the last file extent. We're not using i_size here
@@ -4112,8 +4223,6 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
WARN_ON(!ret);
path->slots[0]--;
- item = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_file_extent_item);
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
found_type = btrfs_key_type(&found_key);
@@ -4181,7 +4290,6 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
offset_in_extent = em_start - em->start;
em_end = extent_map_end(em);
em_len = em_end - em_start;
- emflags = em->flags;
disko = 0;
flags = 0;
@@ -4263,7 +4371,7 @@ static void __free_extent_buffer(struct extent_buffer *eb)
kmem_cache_free(extent_buffer_cache, eb);
}
-static int extent_buffer_under_io(struct extent_buffer *eb)
+int extent_buffer_under_io(struct extent_buffer *eb)
{
return (atomic_read(&eb->io_pages) ||
test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
@@ -4333,10 +4441,9 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
__free_extent_buffer(eb);
}
-static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
- u64 start,
- unsigned long len,
- gfp_t mask)
+static struct extent_buffer *
+__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
+ unsigned long len, gfp_t mask)
{
struct extent_buffer *eb = NULL;
@@ -4345,7 +4452,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
return NULL;
eb->start = start;
eb->len = len;
- eb->tree = tree;
+ eb->fs_info = fs_info;
eb->bflags = 0;
rwlock_init(&eb->lock);
atomic_set(&eb->write_locks, 0);
@@ -4464,7 +4571,8 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
spin_unlock(&eb->refs_lock);
}
-static void mark_extent_buffer_accessed(struct extent_buffer *eb)
+static void mark_extent_buffer_accessed(struct extent_buffer *eb,
+ struct page *accessed)
{
unsigned long num_pages, i;
@@ -4473,20 +4581,22 @@ static void mark_extent_buffer_accessed(struct extent_buffer *eb)
num_pages = num_extent_pages(eb->start, eb->len);
for (i = 0; i < num_pages; i++) {
struct page *p = extent_buffer_page(eb, i);
- mark_page_accessed(p);
+ if (p != accessed)
+ mark_page_accessed(p);
}
}
-struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
- u64 start)
+struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
+ u64 start)
{
struct extent_buffer *eb;
rcu_read_lock();
- eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
+ eb = radix_tree_lookup(&fs_info->buffer_radix,
+ start >> PAGE_CACHE_SHIFT);
if (eb && atomic_inc_not_zero(&eb->refs)) {
rcu_read_unlock();
- mark_extent_buffer_accessed(eb);
+ mark_extent_buffer_accessed(eb, NULL);
return eb;
}
rcu_read_unlock();
@@ -4494,7 +4604,54 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
return NULL;
}
-struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
+ u64 start, unsigned long len)
+{
+ struct extent_buffer *eb, *exists = NULL;
+ int ret;
+
+ eb = find_extent_buffer(fs_info, start);
+ if (eb)
+ return eb;
+ eb = alloc_dummy_extent_buffer(start, len);
+ if (!eb)
+ return NULL;
+ eb->fs_info = fs_info;
+again:
+ ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
+ if (ret)
+ goto free_eb;
+ spin_lock(&fs_info->buffer_lock);
+ ret = radix_tree_insert(&fs_info->buffer_radix,
+ start >> PAGE_CACHE_SHIFT, eb);
+ spin_unlock(&fs_info->buffer_lock);
+ radix_tree_preload_end();
+ if (ret == -EEXIST) {
+ exists = find_extent_buffer(fs_info, start);
+ if (exists)
+ goto free_eb;
+ else
+ goto again;
+ }
+ check_buffer_tree_ref(eb);
+ set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
+
+ /*
+ * We will free dummy extent buffer's if they come into
+ * free_extent_buffer with a ref count of 2, but if we are using this we
+ * want the buffers to stay in memory until we're done with them, so
+ * bump the ref count again.
+ */
+ atomic_inc(&eb->refs);
+ return eb;
+free_eb:
+ btrfs_release_extent_buffer(eb);
+ return exists;
+}
+#endif
+
+struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, unsigned long len)
{
unsigned long num_pages = num_extent_pages(start, len);
@@ -4503,16 +4660,15 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
struct extent_buffer *eb;
struct extent_buffer *exists = NULL;
struct page *p;
- struct address_space *mapping = tree->mapping;
+ struct address_space *mapping = fs_info->btree_inode->i_mapping;
int uptodate = 1;
int ret;
-
- eb = find_extent_buffer(tree, start);
+ eb = find_extent_buffer(fs_info, start);
if (eb)
return eb;
- eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
+ eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
if (!eb)
return NULL;
@@ -4535,7 +4691,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
spin_unlock(&mapping->private_lock);
unlock_page(p);
page_cache_release(p);
- mark_extent_buffer_accessed(exists);
+ mark_extent_buffer_accessed(exists, p);
goto free_eb;
}
@@ -4550,7 +4706,6 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
attach_extent_buffer_page(eb, p);
spin_unlock(&mapping->private_lock);
WARN_ON(PageDirty(p));
- mark_page_accessed(p);
eb->pages[i] = p;
if (!PageUptodate(p))
uptodate = 0;
@@ -4567,12 +4722,13 @@ again:
if (ret)
goto free_eb;
- spin_lock(&tree->buffer_lock);
- ret = radix_tree_insert(&tree->buffer, start >> PAGE_CACHE_SHIFT, eb);
- spin_unlock(&tree->buffer_lock);
+ spin_lock(&fs_info->buffer_lock);
+ ret = radix_tree_insert(&fs_info->buffer_radix,
+ start >> PAGE_CACHE_SHIFT, eb);
+ spin_unlock(&fs_info->buffer_lock);
radix_tree_preload_end();
if (ret == -EEXIST) {
- exists = find_extent_buffer(tree, start);
+ exists = find_extent_buffer(fs_info, start);
if (exists)
goto free_eb;
else
@@ -4580,6 +4736,7 @@ again:
}
/* add one reference for the tree */
check_buffer_tree_ref(eb);
+ set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
/*
* there is a race where release page may have
@@ -4623,17 +4780,17 @@ static int release_extent_buffer(struct extent_buffer *eb)
{
WARN_ON(atomic_read(&eb->refs) == 0);
if (atomic_dec_and_test(&eb->refs)) {
- if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
- spin_unlock(&eb->refs_lock);
- } else {
- struct extent_io_tree *tree = eb->tree;
+ if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
+ struct btrfs_fs_info *fs_info = eb->fs_info;
spin_unlock(&eb->refs_lock);
- spin_lock(&tree->buffer_lock);
- radix_tree_delete(&tree->buffer,
+ spin_lock(&fs_info->buffer_lock);
+ radix_tree_delete(&fs_info->buffer_radix,
eb->start >> PAGE_CACHE_SHIFT);
- spin_unlock(&tree->buffer_lock);
+ spin_unlock(&fs_info->buffer_lock);
+ } else {
+ spin_unlock(&eb->refs_lock);
}
/* Should be safe to release our pages at this point */
@@ -4906,6 +5063,43 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
}
}
+int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
+ unsigned long start,
+ unsigned long len)
+{
+ size_t cur;
+ size_t offset;
+ struct page *page;
+ char *kaddr;
+ char __user *dst = (char __user *)dstv;
+ size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+ unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+ int ret = 0;
+
+ WARN_ON(start > eb->len);
+ WARN_ON(start + len > eb->start + eb->len);
+
+ offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+
+ while (len > 0) {
+ page = extent_buffer_page(eb, i);
+
+ cur = min(len, (PAGE_CACHE_SIZE - offset));
+ kaddr = page_address(page);
+ if (copy_to_user(dst, kaddr + offset, cur)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ dst += cur;
+ len -= cur;
+ offset = 0;
+ i++;
+ }
+
+ return ret;
+}
+
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
unsigned long min_len, char **map,
unsigned long *map_start,
@@ -5112,12 +5306,12 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_i;
if (src_offset + len > dst->len) {
- printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+ printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
"len %lu dst len %lu\n", src_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset + len > dst->len) {
- printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+ printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
"len %lu dst len %lu\n", dst_offset, len, dst->len);
BUG_ON(1);
}
@@ -5159,12 +5353,12 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_i;
if (src_offset + len > dst->len) {
- printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+ printk(KERN_ERR "BTRFS: memmove bogus src_offset %lu move "
"len %lu len %lu\n", src_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset + len > dst->len) {
- printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+ printk(KERN_ERR "BTRFS: memmove bogus dst_offset %lu move "
"len %lu len %lu\n", dst_offset, len, dst->len);
BUG_ON(1);
}
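
A note on the rb-tree changes above: tree_search_for_insert() now hands back the parent node and link slot discovered during a failed lookup, and insert_state()/tree_insert() reuse that hint so the common miss-then-insert path walks the tree only once. The following is a minimal, self-contained user-space sketch of that caching pattern on a plain (unbalanced) binary search tree; the names and the simplified tree are illustrative only and are not the kernel rb-tree API.

/*
 * Sketch of the "remember the insertion point" pattern from the patch:
 * a lookup that misses records the parent and the child slot where the
 * new node belongs, so the follow-up insert can link it directly.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long long key;
	struct node *left, *right;
};

/* Search for key; on a miss, cache the parent and link slot for insertion. */
static struct node *search_for_insert(struct node **rootp, unsigned long long key,
				      struct node ***slot_ret,
				      struct node **parent_ret)
{
	struct node **slot = rootp;
	struct node *parent = NULL;

	while (*slot) {
		parent = *slot;
		if (key < parent->key)
			slot = &parent->left;
		else if (key > parent->key)
			slot = &parent->right;
		else
			return parent;		/* hit: nothing to insert */
	}
	*slot_ret = slot;			/* miss: remember where to link */
	*parent_ret = parent;
	return NULL;
}

/* Link a new node using the cached hint; no second walk from the root. */
static struct node *insert_with_hint(struct node **slot, struct node *parent,
				     unsigned long long key)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return NULL;
	n->key = key;
	(void)parent;	/* a real rb-tree would rb_link_node(n, parent, slot) */
	*slot = n;
	return n;
}

int main(void)
{
	struct node *root = NULL;
	struct node **slot;
	struct node *parent, *found;
	unsigned long long keys[] = { 40, 20, 60, 50 };

	for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		found = search_for_insert(&root, keys[i], &slot, &parent);
		if (!found)
			insert_with_hint(slot, parent, keys[i]);
	}
	found = search_for_insert(&root, 50, &slot, &parent);
	printf("lookup 50 -> %s\n", found ? "hit" : "miss");
	return 0;	/* nodes intentionally leaked; this is only a sketch */
}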