Diffstat:
 fs/btrfs/tree-log.c | 1338 ++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 1074 insertions(+), 264 deletions(-)
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8abeae4224f9..2c6791493637 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -18,13 +18,17 @@
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/list_sort.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
+#include "backref.h"
#include "compat.h"
#include "tree-log.h"
+#include "hash.h"
/* magic values for the inode_only field in btrfs_log_inode:
*
@@ -146,7 +150,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
root->log_multiple_pids = true;
}
- root->log_batch++;
+ atomic_inc(&root->log_batch);
atomic_inc(&root->log_writers);
mutex_unlock(&root->log_mutex);
return 0;
@@ -165,7 +169,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
err = ret;
}
mutex_unlock(&root->fs_info->tree_log_mutex);
- root->log_batch++;
+ atomic_inc(&root->log_batch);
atomic_inc(&root->log_writers);
mutex_unlock(&root->log_mutex);
return err;
@@ -274,18 +278,31 @@ static int process_one_buffer(struct btrfs_root *log,
struct extent_buffer *eb,
struct walk_control *wc, u64 gen)
{
+ int ret = 0;
+
+ /*
+ * If this fs is mixed then we need to be able to process the leaves to
+ * pin down any logged extents, so we have to read the block.
+ */
+ if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
+ ret = btrfs_read_buffer(eb, gen);
+ if (ret)
+ return ret;
+ }
+
if (wc->pin)
- btrfs_pin_extent_for_log_replay(wc->trans,
- log->fs_info->extent_root,
- eb->start, eb->len);
+ ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
+ eb->start, eb->len);
- if (btrfs_buffer_uptodate(eb, gen, 0)) {
+ if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
+ if (wc->pin && btrfs_header_level(eb) == 0)
+ ret = btrfs_exclude_logged_extents(log, eb);
if (wc->write)
btrfs_write_tree_block(eb);
if (wc->wait)
btrfs_wait_tree_block_writeback(eb);
}
- return 0;
+ return ret;
}
/*
@@ -315,6 +332,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
unsigned long src_ptr;
unsigned long dst_ptr;
int overwrite_root = 0;
+ bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
overwrite_root = 1;
@@ -324,6 +342,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
/* look for the key in the destination tree */
ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+
if (ret == 0) {
char *src_copy;
char *dst_copy;
@@ -365,6 +386,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
return 0;
}
+ /*
+ * We need to load the old nbytes into the inode so when we
+ * replay the extents we've logged we get the right nbytes.
+ */
+ if (inode_item) {
+ struct btrfs_inode_item *item;
+ u64 nbytes;
+
+ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_item);
+ nbytes = btrfs_inode_nbytes(path->nodes[0], item);
+ item = btrfs_item_ptr(eb, slot,
+ struct btrfs_inode_item);
+ btrfs_set_inode_nbytes(eb, item, nbytes);
+ }
+ } else if (inode_item) {
+ struct btrfs_inode_item *item;
+
+ /*
+ * New inode, set nbytes to 0 so that the nbytes comes out
+ * properly when we replay the extents.
+ */
+ item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
+ btrfs_set_inode_nbytes(eb, item, 0);
}
insert:
btrfs_release_path(path);
@@ -378,9 +423,9 @@ insert:
found_size = btrfs_item_size_nr(path->nodes[0],
path->slots[0]);
if (found_size > item_size)
- btrfs_truncate_item(trans, root, path, item_size, 1);
+ btrfs_truncate_item(root, path, item_size, 1);
else if (found_size < item_size)
- btrfs_extend_item(trans, root, path,
+ btrfs_extend_item(root, path,
item_size - found_size);
} else if (ret) {
return ret;
@@ -482,11 +527,9 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_key *key)
{
int found_type;
- u64 mask = root->sectorsize - 1;
u64 extent_end;
- u64 alloc_hint;
u64 start = key->offset;
- u64 saved_nbytes;
+ u64 nbytes = 0;
struct btrfs_file_extent_item *item;
struct inode *inode = NULL;
unsigned long size;
@@ -496,11 +539,20 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
found_type = btrfs_file_extent_type(eb, item);
if (found_type == BTRFS_FILE_EXTENT_REG ||
- found_type == BTRFS_FILE_EXTENT_PREALLOC)
- extent_end = start + btrfs_file_extent_num_bytes(eb, item);
- else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+ found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ nbytes = btrfs_file_extent_num_bytes(eb, item);
+ extent_end = start + nbytes;
+
+ /*
+ * We don't add to the inodes nbytes if we are prealloc or a
+ * hole.
+ */
+ if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
+ nbytes = 0;
+ } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size = btrfs_file_extent_inline_len(eb, item);
- extent_end = (start + size + mask) & ~mask;
+ nbytes = btrfs_file_extent_ram_bytes(eb, item);
+ extent_end = ALIGN(start + size, root->sectorsize);
} else {
ret = 0;
goto out;
@@ -548,11 +600,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
- saved_nbytes = inode_get_bytes(inode);
/* drop any overlapping extents */
- ret = btrfs_drop_extents(trans, inode, start, extent_end,
- &alloc_hint, 1);
- BUG_ON(ret);
+ ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
+ if (ret)
+ goto out;
if (found_type == BTRFS_FILE_EXTENT_REG ||
found_type == BTRFS_FILE_EXTENT_PREALLOC) {
@@ -562,7 +613,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
ret = btrfs_insert_empty_item(trans, root, path, key,
sizeof(*item));
- BUG_ON(ret);
+ if (ret)
+ goto out;
dest_offset = btrfs_item_ptr_offset(path->nodes[0],
path->slots[0]);
copy_extent_buffer(path->nodes[0], eb, dest_offset,
@@ -588,7 +640,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
ins.objectid, ins.offset,
0, root->root_key.objectid,
key->objectid, offset, 0);
- BUG_ON(ret);
+ if (ret)
+ goto out;
} else {
/*
* insert the extent pointer in the extent
@@ -597,7 +650,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
ret = btrfs_alloc_logged_file_extent(trans,
root, root->root_key.objectid,
key->objectid, offset, &ins);
- BUG_ON(ret);
+ if (ret)
+ goto out;
}
btrfs_release_path(path);
@@ -614,30 +668,34 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
ret = btrfs_lookup_csums_range(root->log_root,
csum_start, csum_end - 1,
&ordered_sums, 0);
- BUG_ON(ret);
+ if (ret)
+ goto out;
while (!list_empty(&ordered_sums)) {
struct btrfs_ordered_sum *sums;
sums = list_entry(ordered_sums.next,
struct btrfs_ordered_sum,
list);
- ret = btrfs_csum_file_blocks(trans,
+ if (!ret)
+ ret = btrfs_csum_file_blocks(trans,
root->fs_info->csum_root,
sums);
- BUG_ON(ret);
list_del(&sums->list);
kfree(sums);
}
+ if (ret)
+ goto out;
} else {
btrfs_release_path(path);
}
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
/* inline extents are easy, we just overwrite them */
ret = overwrite_item(trans, root, path, eb, slot, key);
- BUG_ON(ret);
+ if (ret)
+ goto out;
}
- inode_set_bytes(inode, saved_nbytes);
- btrfs_update_inode(trans, root, inode);
+ inode_add_bytes(inode, nbytes);
+ ret = btrfs_update_inode(trans, root, inode);
out:
if (inode)
iput(inode);
@@ -678,20 +736,21 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
inode = read_one_inode(root, location.objectid);
if (!inode) {
- kfree(name);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
ret = link_to_fixup_dir(trans, root, path, location.objectid);
- BUG_ON(ret);
+ if (ret)
+ goto out;
ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
- BUG_ON(ret);
+ if (ret)
+ goto out;
+ btrfs_run_delayed_items(trans, root);
+out:
kfree(name);
-
iput(inode);
-
- btrfs_run_delayed_items(trans, root);
return ret;
}
@@ -744,6 +803,7 @@ out:
*/
static noinline int backref_in_log(struct btrfs_root *log,
struct btrfs_key *key,
+ u64 ref_objectid,
char *name, int namelen)
{
struct btrfs_path *path;
@@ -764,8 +824,17 @@ static noinline int backref_in_log(struct btrfs_root *log,
if (ret != 0)
goto out;
- item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
+
+ if (key->type == BTRFS_INODE_EXTREF_KEY) {
+ if (btrfs_find_name_in_ext_backref(path, ref_objectid,
+ name, namelen, NULL))
+ match = 1;
+
+ goto out;
+ }
+
+ item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
ref = (struct btrfs_inode_ref *)ptr;
@@ -786,91 +855,42 @@ out:
return match;
}
-
-/*
- * replay one inode back reference item found in the log tree.
- * eb, slot and key refer to the buffer and key found in the log tree.
- * root is the destination we are replaying into, and path is for temp
- * use by this function. (it should be released on return).
- */
-static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
- struct btrfs_root *log,
struct btrfs_path *path,
- struct extent_buffer *eb, int slot,
- struct btrfs_key *key)
+ struct btrfs_root *log_root,
+ struct inode *dir, struct inode *inode,
+ struct extent_buffer *eb,
+ u64 inode_objectid, u64 parent_objectid,
+ u64 ref_index, char *name, int namelen,
+ int *search_done)
{
- struct btrfs_inode_ref *ref;
- struct btrfs_dir_item *di;
- struct inode *dir;
- struct inode *inode;
- unsigned long ref_ptr;
- unsigned long ref_end;
- char *name;
- int namelen;
int ret;
- int search_done = 0;
-
- /*
- * it is possible that we didn't log all the parent directories
- * for a given inode. If we don't find the dir, just don't
- * copy the back ref in. The link count fixup code will take
- * care of the rest
- */
- dir = read_one_inode(root, key->offset);
- if (!dir)
- return -ENOENT;
-
- inode = read_one_inode(root, key->objectid);
- if (!inode) {
- iput(dir);
- return -EIO;
- }
-
- ref_ptr = btrfs_item_ptr_offset(eb, slot);
- ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
+ char *victim_name;
+ int victim_name_len;
+ struct extent_buffer *leaf;
+ struct btrfs_dir_item *di;
+ struct btrfs_key search_key;
+ struct btrfs_inode_extref *extref;
again:
- ref = (struct btrfs_inode_ref *)ref_ptr;
-
- namelen = btrfs_inode_ref_name_len(eb, ref);
- name = kmalloc(namelen, GFP_NOFS);
- BUG_ON(!name);
-
- read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
-
- /* if we already have a perfect match, we're done */
- if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
- btrfs_inode_ref_index(eb, ref),
- name, namelen)) {
- goto out;
- }
-
- /*
- * look for a conflicting back reference in the metadata.
- * if we find one we have to unlink that name of the file
- * before we add our new link. Later on, we overwrite any
- * existing back reference, and we don't want to create
- * dangling pointers in the directory.
- */
-
- if (search_done)
- goto insert;
-
- ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
+ /* Search old style refs */
+ search_key.objectid = inode_objectid;
+ search_key.type = BTRFS_INODE_REF_KEY;
+ search_key.offset = parent_objectid;
+ ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
if (ret == 0) {
- char *victim_name;
- int victim_name_len;
struct btrfs_inode_ref *victim_ref;
unsigned long ptr;
unsigned long ptr_end;
- struct extent_buffer *leaf = path->nodes[0];
+
+ leaf = path->nodes[0];
/* are we trying to overwrite a back ref for the root directory
* if so, just jump out, we're done
*/
- if (key->objectid == key->offset)
- goto out_nowrite;
+ if (search_key.objectid == search_key.offset)
+ return 1;
/* check all the names in this back reference to see
* if they are in the log. if so, we allow them to stay
@@ -883,13 +903,16 @@ again:
victim_name_len = btrfs_inode_ref_name_len(leaf,
victim_ref);
victim_name = kmalloc(victim_name_len, GFP_NOFS);
- BUG_ON(!victim_name);
+ if (!victim_name)
+ return -ENOMEM;
read_extent_buffer(leaf, victim_name,
(unsigned long)(victim_ref + 1),
victim_name_len);
- if (!backref_in_log(log, key, victim_name,
+ if (!backref_in_log(log_root, &search_key,
+ parent_objectid,
+ victim_name,
victim_name_len)) {
btrfs_inc_nlink(inode);
btrfs_release_path(path);
@@ -897,28 +920,102 @@ again:
ret = btrfs_unlink_inode(trans, root, dir,
inode, victim_name,
victim_name_len);
+ kfree(victim_name);
+ if (ret)
+ return ret;
btrfs_run_delayed_items(trans, root);
+ *search_done = 1;
+ goto again;
}
kfree(victim_name);
+
ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
}
- BUG_ON(ret);
/*
* NOTE: we have searched root tree and checked the
* coresponding ref, it does not need to check again.
*/
- search_done = 1;
+ *search_done = 1;
+ }
+ btrfs_release_path(path);
+
+ /* Same search but for extended refs */
+ extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
+ inode_objectid, parent_objectid, 0,
+ 0);
+ if (!IS_ERR_OR_NULL(extref)) {
+ u32 item_size;
+ u32 cur_offset = 0;
+ unsigned long base;
+ struct inode *victim_parent;
+
+ leaf = path->nodes[0];
+
+ item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ base = btrfs_item_ptr_offset(leaf, path->slots[0]);
+
+ while (cur_offset < item_size) {
+ extref = (struct btrfs_inode_extref *)base + cur_offset;
+
+ victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
+
+ if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
+ goto next;
+
+ victim_name = kmalloc(victim_name_len, GFP_NOFS);
+ if (!victim_name)
+ return -ENOMEM;
+ read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
+ victim_name_len);
+
+ search_key.objectid = inode_objectid;
+ search_key.type = BTRFS_INODE_EXTREF_KEY;
+ search_key.offset = btrfs_extref_hash(parent_objectid,
+ victim_name,
+ victim_name_len);
+ ret = 0;
+ if (!backref_in_log(log_root, &search_key,
+ parent_objectid, victim_name,
+ victim_name_len)) {
+ ret = -ENOENT;
+ victim_parent = read_one_inode(root,
+ parent_objectid);
+ if (victim_parent) {
+ btrfs_inc_nlink(inode);
+ btrfs_release_path(path);
+
+ ret = btrfs_unlink_inode(trans, root,
+ victim_parent,
+ inode,
+ victim_name,
+ victim_name_len);
+ btrfs_run_delayed_items(trans, root);
+ }
+ iput(victim_parent);
+ kfree(victim_name);
+ if (ret)
+ return ret;
+ *search_done = 1;
+ goto again;
+ }
+ kfree(victim_name);
+ if (ret)
+ return ret;
+next:
+ cur_offset += victim_name_len + sizeof(*extref);
+ }
+ *search_done = 1;
}
btrfs_release_path(path);
/* look for a conflicting sequence number */
di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
- btrfs_inode_ref_index(eb, ref),
- name, namelen, 0);
+ ref_index, name, namelen, 0);
if (di && !IS_ERR(di)) {
ret = drop_one_dir_item(trans, root, path, dir, di);
- BUG_ON(ret);
+ if (ret)
+ return ret;
}
btrfs_release_path(path);
@@ -927,33 +1024,184 @@ again:
name, namelen, 0);
if (di && !IS_ERR(di)) {
ret = drop_one_dir_item(trans, root, path, dir, di);
- BUG_ON(ret);
+ if (ret)
+ return ret;
}
btrfs_release_path(path);
-insert:
- /* insert our name */
- ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
- btrfs_inode_ref_index(eb, ref));
- BUG_ON(ret);
+ return 0;
+}
- btrfs_update_inode(trans, root, inode);
+static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
+ u32 *namelen, char **name, u64 *index,
+ u64 *parent_objectid)
+{
+ struct btrfs_inode_extref *extref;
-out:
- ref_ptr = (unsigned long)(ref + 1) + namelen;
- kfree(name);
- if (ref_ptr < ref_end)
- goto again;
+ extref = (struct btrfs_inode_extref *)ref_ptr;
+
+ *namelen = btrfs_inode_extref_name_len(eb, extref);
+ *name = kmalloc(*namelen, GFP_NOFS);
+ if (*name == NULL)
+ return -ENOMEM;
+
+ read_extent_buffer(eb, *name, (unsigned long)&extref->name,
+ *namelen);
+
+ *index = btrfs_inode_extref_index(eb, extref);
+ if (parent_objectid)
+ *parent_objectid = btrfs_inode_extref_parent(eb, extref);
+
+ return 0;
+}
+
+static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
+ u32 *namelen, char **name, u64 *index)
+{
+ struct btrfs_inode_ref *ref;
+
+ ref = (struct btrfs_inode_ref *)ref_ptr;
+
+ *namelen = btrfs_inode_ref_name_len(eb, ref);
+ *name = kmalloc(*namelen, GFP_NOFS);
+ if (*name == NULL)
+ return -ENOMEM;
+
+ read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
+
+ *index = btrfs_inode_ref_index(eb, ref);
+
+ return 0;
+}
+
+/*
+ * replay one inode back reference item found in the log tree.
+ * eb, slot and key refer to the buffer and key found in the log tree.
+ * root is the destination we are replaying into, and path is for temp
+ * use by this function. (it should be released on return).
+ */
+static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_root *log,
+ struct btrfs_path *path,
+ struct extent_buffer *eb, int slot,
+ struct btrfs_key *key)
+{
+ struct inode *dir;
+ struct inode *inode;
+ unsigned long ref_ptr;
+ unsigned long ref_end;
+ char *name;
+ int namelen;
+ int ret;
+ int search_done = 0;
+ int log_ref_ver = 0;
+ u64 parent_objectid;
+ u64 inode_objectid;
+ u64 ref_index = 0;
+ int ref_struct_size;
+
+ ref_ptr = btrfs_item_ptr_offset(eb, slot);
+ ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
+
+ if (key->type == BTRFS_INODE_EXTREF_KEY) {
+ struct btrfs_inode_extref *r;
+
+ ref_struct_size = sizeof(struct btrfs_inode_extref);
+ log_ref_ver = 1;
+ r = (struct btrfs_inode_extref *)ref_ptr;
+ parent_objectid = btrfs_inode_extref_parent(eb, r);
+ } else {
+ ref_struct_size = sizeof(struct btrfs_inode_ref);
+ parent_objectid = key->offset;
+ }
+ inode_objectid = key->objectid;
+
+ /*
+ * it is possible that we didn't log all the parent directories
+ * for a given inode. If we don't find the dir, just don't
+ * copy the back ref in. The link count fixup code will take
+ * care of the rest
+ */
+ dir = read_one_inode(root, parent_objectid);
+ if (!dir)
+ return -ENOENT;
+
+ inode = read_one_inode(root, inode_objectid);
+ if (!inode) {
+ iput(dir);
+ return -EIO;
+ }
+
+ while (ref_ptr < ref_end) {
+ if (log_ref_ver) {
+ ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
+ &ref_index, &parent_objectid);
+ /*
+ * parent object can change from one array
+ * item to another.
+ */
+ if (!dir)
+ dir = read_one_inode(root, parent_objectid);
+ if (!dir)
+ return -ENOENT;
+ } else {
+ ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
+ &ref_index);
+ }
+ if (ret)
+ return ret;
+
+ /* if we already have a perfect match, we're done */
+ if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
+ ref_index, name, namelen)) {
+ /*
+ * look for a conflicting back reference in the
+ * metadata. if we find one we have to unlink that name
+ * of the file before we add our new link. Later on, we
+ * overwrite any existing back reference, and we don't
+ * want to create dangling pointers in the directory.
+ */
+
+ if (!search_done) {
+ ret = __add_inode_ref(trans, root, path, log,
+ dir, inode, eb,
+ inode_objectid,
+ parent_objectid,
+ ref_index, name, namelen,
+ &search_done);
+ if (ret == 1) {
+ ret = 0;
+ goto out;
+ }
+ if (ret)
+ goto out;
+ }
+
+ /* insert our name */
+ ret = btrfs_add_link(trans, dir, inode, name, namelen,
+ 0, ref_index);
+ if (ret)
+ goto out;
+
+ btrfs_update_inode(trans, root, inode);
+ }
+
+ ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
+ kfree(name);
+ if (log_ref_ver) {
+ iput(dir);
+ dir = NULL;
+ }
+ }
/* finally write the back reference in the inode */
ret = overwrite_item(trans, root, path, eb, slot, key);
- BUG_ON(ret);
-
-out_nowrite:
+out:
btrfs_release_path(path);
iput(dir);
iput(inode);
- return 0;
+ return ret;
}
static int insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -966,25 +1214,55 @@ static int insert_orphan_item(struct btrfs_trans_handle *trans,
return ret;
}
+static int count_inode_extrefs(struct btrfs_root *root,
+ struct inode *inode, struct btrfs_path *path)
+{
+ int ret = 0;
+ int name_len;
+ unsigned int nlink = 0;
+ u32 item_size;
+ u32 cur_offset = 0;
+ u64 inode_objectid = btrfs_ino(inode);
+ u64 offset = 0;
+ unsigned long ptr;
+ struct btrfs_inode_extref *extref;
+ struct extent_buffer *leaf;
-/*
- * There are a few corners where the link count of the file can't
- * be properly maintained during replay. So, instead of adding
- * lots of complexity to the log code, we just scan the backrefs
- * for any file that has been through replay.
- *
- * The scan will update the link count on the inode to reflect the
- * number of back refs found. If it goes down to zero, the iput
- * will free the inode.
- */
-static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *inode)
+ while (1) {
+ ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
+ &extref, &offset);
+ if (ret)
+ break;
+
+ leaf = path->nodes[0];
+ item_size = btrfs_item_size_nr(leaf, path->slots[0]);
+ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+
+ while (cur_offset < item_size) {
+ extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
+ name_len = btrfs_inode_extref_name_len(leaf, extref);
+
+ nlink++;
+
+ cur_offset += name_len + sizeof(*extref);
+ }
+
+ offset++;
+ btrfs_release_path(path);
+ }
+ btrfs_release_path(path);
+
+ if (ret < 0)
+ return ret;
+ return nlink;
+}
+
+static int count_inode_refs(struct btrfs_root *root,
+ struct inode *inode, struct btrfs_path *path)
{
- struct btrfs_path *path;
int ret;
struct btrfs_key key;
- u64 nlink = 0;
+ unsigned int nlink = 0;
unsigned long ptr;
unsigned long ptr_end;
int name_len;
@@ -994,10 +1272,6 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
key.type = BTRFS_INODE_REF_KEY;
key.offset = (u64)-1;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
@@ -1031,6 +1305,50 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
}
btrfs_release_path(path);
+
+ return nlink;
+}
+
+/*
+ * There are a few corners where the link count of the file can't
+ * be properly maintained during replay. So, instead of adding
+ * lots of complexity to the log code, we just scan the backrefs
+ * for any file that has been through replay.
+ *
+ * The scan will update the link count on the inode to reflect the
+ * number of back refs found. If it goes down to zero, the iput
+ * will free the inode.
+ */
+static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode)
+{
+ struct btrfs_path *path;
+ int ret;
+ u64 nlink = 0;
+ u64 ino = btrfs_ino(inode);
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ ret = count_inode_refs(root, inode, path);
+ if (ret < 0)
+ goto out;
+
+ nlink = ret;
+
+ ret = count_inode_extrefs(root, inode, path);
+ if (ret == -ENOENT)
+ ret = 0;
+
+ if (ret < 0)
+ goto out;
+
+ nlink += ret;
+
+ ret = 0;
+
if (nlink != inode->i_nlink) {
set_nlink(inode, nlink);
btrfs_update_inode(trans, root, inode);
@@ -1041,14 +1359,15 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (S_ISDIR(inode->i_mode)) {
ret = replay_dir_deletes(trans, root, NULL, path,
ino, 1);
- BUG_ON(ret);
+ if (ret)
+ goto out;
}
ret = insert_orphan_item(trans, root, ino);
- BUG_ON(ret);
}
- btrfs_free_path(path);
- return 0;
+out:
+ btrfs_free_path(path);
+ return ret;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
@@ -1088,9 +1407,9 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
return -EIO;
ret = fixup_inode_link_count(trans, root, inode);
- BUG_ON(ret);
-
iput(inode);
+ if (ret)
+ goto out;
/*
* fixup on a directory may create new entries,
@@ -1132,12 +1451,15 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
if (ret == 0) {
- btrfs_inc_nlink(inode);
- btrfs_update_inode(trans, root, inode);
+ if (!inode->i_nlink)
+ set_nlink(inode, 1);
+ else
+ btrfs_inc_nlink(inode);
+ ret = btrfs_update_inode(trans, root, inode);
} else if (ret == -EEXIST) {
ret = 0;
} else {
- BUG();
+ BUG(); /* Logic Error */
}
iput(inode);
@@ -1206,7 +1528,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
struct inode *dir;
u8 log_type;
int exists;
- int ret;
+ int ret = 0;
dir = read_one_inode(root, key->objectid);
if (!dir)
@@ -1238,7 +1560,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
key->offset, name,
name_len, 1);
} else {
- BUG();
+ /* Corruption */
+ ret = -EINVAL;
+ goto out;
}
if (IS_ERR_OR_NULL(dst_di)) {
/* we need a sequence number to insert, so we only
@@ -1266,7 +1590,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
goto out;
ret = drop_one_dir_item(trans, root, path, dir, dst_di);
- BUG_ON(ret);
+ if (ret)
+ goto out;
if (key->type == BTRFS_DIR_INDEX_KEY)
goto insert;
@@ -1274,14 +1599,15 @@ out:
btrfs_release_path(path);
kfree(name);
iput(dir);
- return 0;
+ return ret;
insert:
btrfs_release_path(path);
ret = insert_one_name(trans, root, path, key->objectid, key->offset,
name, name_len, log_type, &log_key);
-
- BUG_ON(ret && ret != -ENOENT);
+ if (ret && ret != -ENOENT)
+ goto out;
+ ret = 0;
goto out;
}
@@ -1312,7 +1638,8 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
- BUG_ON(ret);
+ if (ret)
+ return ret;
ptr = (unsigned long)(di + 1);
ptr += name_len;
}
@@ -1473,16 +1800,21 @@ again:
ret = link_to_fixup_dir(trans, root,
path, location.objectid);
- BUG_ON(ret);
+ if (ret) {
+ kfree(name);
+ iput(inode);
+ goto out;
+ }
+
btrfs_inc_nlink(inode);
ret = btrfs_unlink_inode(trans, root, dir, inode,
name, name_len);
- BUG_ON(ret);
-
- btrfs_run_delayed_items(trans, root);
-
+ if (!ret)
+ btrfs_run_delayed_items(trans, root);
kfree(name);
iput(inode);
+ if (ret)
+ goto out;
/* there might still be more names under this key
* check and repeat if required
@@ -1586,7 +1918,8 @@ again:
ret = check_item_in_log(trans, root, log, path,
log_path, dir,
&found_key);
- BUG_ON(ret);
+ if (ret)
+ goto out;
if (found_key.offset == (u64)-1)
break;
dir_key.offset = found_key.offset + 1;
@@ -1663,11 +1996,13 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
if (S_ISDIR(mode)) {
ret = replay_dir_deletes(wc->trans,
root, log, path, key.objectid, 0);
- BUG_ON(ret);
+ if (ret)
+ break;
}
ret = overwrite_item(wc->trans, root, path,
eb, i, &key);
- BUG_ON(ret);
+ if (ret)
+ break;
/* for regular files, make sure corresponding
* orhpan item exist. extents past the new EOF
@@ -1676,12 +2011,14 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
if (S_ISREG(mode)) {
ret = insert_orphan_item(wc->trans, root,
key.objectid);
- BUG_ON(ret);
+ if (ret)
+ break;
}
ret = link_to_fixup_dir(wc->trans, root,
path, key.objectid);
- BUG_ON(ret);
+ if (ret)
+ break;
}
if (wc->stage < LOG_WALK_REPLAY_ALL)
continue;
@@ -1690,24 +2027,30 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
if (key.type == BTRFS_XATTR_ITEM_KEY) {
ret = overwrite_item(wc->trans, root, path,
eb, i, &key);
- BUG_ON(ret);
- } else if (key.type == BTRFS_INODE_REF_KEY) {
+ if (ret)
+ break;
+ } else if (key.type == BTRFS_INODE_REF_KEY ||
+ key.type == BTRFS_INODE_EXTREF_KEY) {
ret = add_inode_ref(wc->trans, root, log, path,
eb, i, &key);
- BUG_ON(ret && ret != -ENOENT);
+ if (ret && ret != -ENOENT)
+ break;
+ ret = 0;
} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
ret = replay_one_extent(wc->trans, root, path,
eb, i, &key);
- BUG_ON(ret);
+ if (ret)
+ break;
} else if (key.type == BTRFS_DIR_ITEM_KEY ||
key.type == BTRFS_DIR_INDEX_KEY) {
ret = replay_one_dir_item(wc->trans, root, path,
eb, i, &key);
- BUG_ON(ret);
+ if (ret)
+ break;
}
}
btrfs_free_path(path);
- return 0;
+ return ret;
}
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
@@ -1752,8 +2095,10 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
if (*level == 1) {
ret = wc->process_func(root, next, wc, ptr_gen);
- if (ret)
+ if (ret) {
+ free_extent_buffer(next);
return ret;
+ }
path->slots[*level]++;
if (wc->free) {
@@ -1773,7 +2118,10 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(root,
bytenr, blocksize);
- BUG_ON(ret); /* -ENOMEM or logic errors */
+ if (ret) {
+ free_extent_buffer(next);
+ return ret;
+ }
}
free_extent_buffer(next);
continue;
@@ -1846,7 +2194,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
ret = btrfs_free_and_pin_reserved_extent(root,
path->nodes[*level]->start,
path->nodes[*level]->len);
- BUG_ON(ret);
+ if (ret)
+ return ret;
}
free_extent_buffer(path->nodes[*level]);
path->nodes[*level] = NULL;
@@ -1868,7 +2217,6 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
int wret;
int level;
struct btrfs_path *path;
- int i;
int orig_level;
path = btrfs_alloc_path();
@@ -1920,17 +2268,12 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(log, next->start,
next->len);
- BUG_ON(ret); /* -ENOMEM or logic errors */
+ if (ret)
+ goto out;
}
}
out:
- for (i = 0; i <= orig_level; i++) {
- if (path->nodes[i]) {
- free_extent_buffer(path->nodes[i]);
- path->nodes[i] = NULL;
- }
- }
btrfs_free_path(path);
return ret;
}
@@ -2023,8 +2366,10 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
struct btrfs_root *log = root->log_root;
struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
unsigned long log_transid = 0;
+ struct blk_plug plug;
mutex_lock(&root->log_mutex);
+ log_transid = root->log_transid;
index1 = root->log_transid % 2;
if (atomic_read(&root->log_commit[index1])) {
wait_log_commit(trans, root, root->log_transid);
@@ -2037,7 +2382,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
wait_log_commit(trans, root, root->log_transid - 1);
while (1) {
- unsigned long batch = root->log_batch;
+ int batch = atomic_read(&root->log_batch);
/* when we're on an ssd, just kick the log commit out */
if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
mutex_unlock(&root->log_mutex);
@@ -2045,18 +2390,18 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_lock(&root->log_mutex);
}
wait_for_writer(trans, root);
- if (batch == root->log_batch)
+ if (batch == atomic_read(&root->log_batch))
break;
}
/* bail out if we need to do a full commit */
if (root->fs_info->last_trans_log_full_commit == trans->transid) {
ret = -EAGAIN;
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);
goto out;
}
- log_transid = root->log_transid;
if (log_transid % 2 == 0)
mark = EXTENT_DIRTY;
else
@@ -2065,16 +2410,18 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
/* we start IO on all the marked extents here, but we don't actually
* wait for them until later.
*/
+ blk_start_plug(&plug);
ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
if (ret) {
+ blk_finish_plug(&plug);
btrfs_abort_transaction(trans, root, ret);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);
goto out;
}
btrfs_set_root_node(&log->root_item, log->node);
- root->log_batch = 0;
root->log_transid++;
log->log_transid = root->log_transid;
root->log_start_pid = 0;
@@ -2087,7 +2434,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&root->log_mutex);
mutex_lock(&log_root_tree->log_mutex);
- log_root_tree->log_batch++;
+ atomic_inc(&log_root_tree->log_batch);
atomic_inc(&log_root_tree->log_writers);
mutex_unlock(&log_root_tree->log_mutex);
@@ -2101,6 +2448,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}
if (ret) {
+ blk_finish_plug(&plug);
if (ret != -ENOSPC) {
btrfs_abort_transaction(trans, root, ret);
mutex_unlock(&log_root_tree->log_mutex);
@@ -2108,6 +2456,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}
root->fs_info->last_trans_log_full_commit = trans->transid;
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out;
@@ -2115,9 +2464,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
index2 = log_root_tree->log_transid % 2;
if (atomic_read(&log_root_tree->log_commit[index2])) {
+ blk_finish_plug(&plug);
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
wait_log_commit(trans, log_root_tree,
log_root_tree->log_transid);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = 0;
goto out;
@@ -2136,28 +2487,35 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* check the full commit flag again
*/
if (root->fs_info->last_trans_log_full_commit == trans->transid) {
+ blk_finish_plug(&plug);
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out_wake_log_root;
}
- ret = btrfs_write_and_wait_marked_extents(log_root_tree,
- &log_root_tree->dirty_log_pages,
- EXTENT_DIRTY | EXTENT_NEW);
+ ret = btrfs_write_marked_extents(log_root_tree,
+ &log_root_tree->dirty_log_pages,
+ EXTENT_DIRTY | EXTENT_NEW);
+ blk_finish_plug(&plug);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_wait_marked_extents(log_root_tree,
+ &log_root_tree->dirty_log_pages,
+ EXTENT_NEW | EXTENT_DIRTY);
+ btrfs_wait_logged_extents(log, log_transid);
btrfs_set_super_log_root(root->fs_info->super_for_commit,
log_root_tree->node->start);
btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
btrfs_header_level(log_root_tree->node));
- log_root_tree->log_batch = 0;
log_root_tree->log_transid++;
smp_mb();
@@ -2171,9 +2529,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* in and cause problems either.
*/
btrfs_scrub_pause_super(root);
- write_ctree_super(trans, root->fs_info->tree_root, 1);
+ ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
btrfs_scrub_continue_super(root);
- ret = 0;
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_wake_log_root;
+ }
mutex_lock(&root->log_mutex);
if (root->last_log_commit < log_transid)
@@ -2204,12 +2565,18 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
.process_func = process_one_buffer
};
- ret = walk_log_tree(trans, log, &wc);
- BUG_ON(ret);
+ if (trans) {
+ ret = walk_log_tree(trans, log, &wc);
+
+ /* I don't think this can happen but just in case */
+ if (ret)
+ btrfs_abort_transaction(trans, log, ret);
+ }
while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
- 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
+ 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
+ NULL);
if (ret)
break;
@@ -2217,6 +2584,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
}
+ /*
+ * We may have short-circuited the log tree with the full commit logic
+ * and left ordered extents on our list, so clear these out to keep us
+ * from leaking inodes and memory.
+ */
+ btrfs_free_logged_extents(log, 0);
+ btrfs_free_logged_extents(log, 1);
+
free_extent_buffer(log->node);
kfree(log);
}
@@ -2303,7 +2678,10 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
if (di) {
ret = btrfs_delete_one_dir_name(trans, log, path, di);
bytes_del += name_len;
- BUG_ON(ret);
+ if (ret) {
+ err = ret;
+ goto fail;
+ }
}
btrfs_release_path(path);
di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
@@ -2315,7 +2693,10 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
if (di) {
ret = btrfs_delete_one_dir_name(trans, log, path, di);
bytes_del += name_len;
- BUG_ON(ret);
+ if (ret) {
+ err = ret;
+ goto fail;
+ }
}
/* update the directory size in the log to reflect the names
@@ -2466,7 +2847,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
path->keep_locks = 1;
ret = btrfs_search_forward(root, &min_key, &max_key,
- path, 0, trans->transid);
+ path, trans->transid);
/*
* we didn't find anything from this transaction, see if there
@@ -2646,6 +3027,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
int ret;
struct btrfs_key key;
struct btrfs_key found_key;
+ int start_slot;
key.objectid = objectid;
key.type = max_key_type;
@@ -2653,7 +3035,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
while (1) {
ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
- BUG_ON(ret == 0);
+ BUG_ON(ret == 0); /* Logic error */
if (ret < 0)
break;
@@ -2667,8 +3049,18 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
if (found_key.objectid != objectid)
break;
- ret = btrfs_del_item(trans, log, path);
- if (ret)
+ found_key.offset = 0;
+ found_key.type = 0;
+ ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
+ &start_slot);
+
+ ret = btrfs_del_items(trans, log, path, start_slot,
+ path->slots[0] - start_slot + 1);
+ /*
+ * If start slot isn't 0 then we don't need to re-search, we've
+ * found the last guy with the objectid in this tree.
+ */
+ if (ret || start_slot != 0)
break;
btrfs_release_path(path);
}
@@ -2678,14 +3070,89 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
return ret;
}
+static void fill_inode_item(struct btrfs_trans_handle *trans,
+ struct extent_buffer *leaf,
+ struct btrfs_inode_item *item,
+ struct inode *inode, int log_inode_only)
+{
+ struct btrfs_map_token token;
+
+ btrfs_init_map_token(&token);
+
+ if (log_inode_only) {
+ /* set the generation to zero so the recover code
+ * can tell the difference between an logging
+ * just to say 'this inode exists' and a logging
+ * to say 'update this inode with these values'
+ */
+ btrfs_set_token_inode_generation(leaf, item, 0, &token);
+ btrfs_set_token_inode_size(leaf, item, 0, &token);
+ } else {
+ btrfs_set_token_inode_generation(leaf, item,
+ BTRFS_I(inode)->generation,
+ &token);
+ btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
+ }
+
+ btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
+ btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
+ btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
+ btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
+
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
+ inode->i_atime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
+ inode->i_atime.tv_nsec, &token);
+
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
+ inode->i_mtime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
+ inode->i_mtime.tv_nsec, &token);
+
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
+ inode->i_ctime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
+ inode->i_ctime.tv_nsec, &token);
+
+ btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
+ &token);
+
+ btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
+ btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
+ btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
+ btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
+ btrfs_set_token_inode_block_group(leaf, item, 0, &token);
+}
+
+static int log_inode_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *log, struct btrfs_path *path,
+ struct inode *inode)
+{
+ struct btrfs_inode_item *inode_item;
+ struct btrfs_key key;
+ int ret;
+
+ memcpy(&key, &BTRFS_I(inode)->location, sizeof(key));
+ ret = btrfs_insert_empty_item(trans, log, path, &key,
+ sizeof(*inode_item));
+ if (ret && ret != -EEXIST)
+ return ret;
+ inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_item);
+ fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
+ btrfs_release_path(path);
+ return 0;
+}
+
static noinline int copy_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *log,
+ struct inode *inode,
struct btrfs_path *dst_path,
struct extent_buffer *src,
int start_slot, int nr, int inode_only)
{
unsigned long src_offset;
unsigned long dst_offset;
+ struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
struct btrfs_file_extent_item *extent;
struct btrfs_inode_item *inode_item;
int ret;
@@ -2694,6 +3161,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
char *ins_data;
int i;
struct list_head ordered_sums;
+ int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
INIT_LIST_HEAD(&ordered_sums);
@@ -2722,29 +3190,23 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
src_offset = btrfs_item_ptr_offset(src, start_slot + i);
- copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
- src_offset, ins_sizes[i]);
-
- if (inode_only == LOG_INODE_EXISTS &&
- ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
+ if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
inode_item = btrfs_item_ptr(dst_path->nodes[0],
dst_path->slots[0],
struct btrfs_inode_item);
- btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0);
-
- /* set the generation to zero so the recover code
- * can tell the difference between an logging
- * just to say 'this inode exists' and a logging
- * to say 'update this inode with these values'
- */
- btrfs_set_inode_generation(dst_path->nodes[0],
- inode_item, 0);
+ fill_inode_item(trans, dst_path->nodes[0], inode_item,
+ inode, inode_only == LOG_INODE_EXISTS);
+ } else {
+ copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
+ src_offset, ins_sizes[i]);
}
+
/* take a reference on file data extents so that truncates
* or deletes of this inode don't have to relog the inode
* again
*/
- if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) {
+ if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY &&
+ !skip_csum) {
int found_type;
extent = btrfs_item_ptr(src, start_slot + i,
struct btrfs_file_extent_item);
@@ -2753,8 +3215,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
continue;
found_type = btrfs_file_extent_type(src, extent);
- if (found_type == BTRFS_FILE_EXTENT_REG ||
- found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+ if (found_type == BTRFS_FILE_EXTENT_REG) {
u64 ds, dl, cs, cl;
ds = btrfs_file_extent_disk_bytenr(src,
extent);
@@ -2777,7 +3238,11 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
log->fs_info->csum_root,
ds + cs, ds + cs + cl - 1,
&ordered_sums, 0);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_release_path(dst_path);
+ kfree(ins_data);
+ return ret;
+ }
}
}
}
@@ -2803,6 +3268,297 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
return ret;
}
+static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct extent_map *em1, *em2;
+
+ em1 = list_entry(a, struct extent_map, list);
+ em2 = list_entry(b, struct extent_map, list);
+
+ if (em1->start < em2->start)
+ return -1;
+ else if (em1->start > em2->start)
+ return 1;
+ return 0;
+}
+
+static int log_one_extent(struct btrfs_trans_handle *trans,
+ struct inode *inode, struct btrfs_root *root,
+ struct extent_map *em, struct btrfs_path *path)
+{
+ struct btrfs_root *log = root->log_root;
+ struct btrfs_file_extent_item *fi;
+ struct extent_buffer *leaf;
+ struct btrfs_ordered_extent *ordered;
+ struct list_head ordered_sums;
+ struct btrfs_map_token token;
+ struct btrfs_key key;
+ u64 mod_start = em->mod_start;
+ u64 mod_len = em->mod_len;
+ u64 csum_offset;
+ u64 csum_len;
+ u64 extent_offset = em->start - em->orig_start;
+ u64 block_len;
+ int ret;
+ int index = log->log_transid % 2;
+ bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
+
+ ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
+ em->start + em->len, NULL, 0);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&ordered_sums);
+ btrfs_init_map_token(&token);
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = em->start;
+
+ ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*fi));
+ if (ret)
+ return ret;
+ leaf = path->nodes[0];
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+
+ btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
+ &token);
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+ skip_csum = true;
+ btrfs_set_token_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_PREALLOC,
+ &token);
+ } else {
+ btrfs_set_token_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG,
+ &token);
+ if (em->block_start == 0)
+ skip_csum = true;
+ }
+
+ block_len = max(em->block_len, em->orig_block_len);
+ if (em->compress_type != BTRFS_COMPRESS_NONE) {
+ btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
+ em->block_start,
+ &token);
+ btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
+ &token);
+ } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+ btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
+ em->block_start -
+ extent_offset, &token);
+ btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
+ &token);
+ } else {
+ btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
+ btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
+ &token);
+ }
+
+ btrfs_set_token_file_extent_offset(leaf, fi,
+ em->start - em->orig_start,
+ &token);
+ btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
+ btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
+ btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
+ &token);
+ btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
+ btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
+ btrfs_mark_buffer_dirty(leaf);
+
+ btrfs_release_path(path);
+ if (ret) {
+ return ret;
+ }
+
+ if (skip_csum)
+ return 0;
+
+ if (em->compress_type) {
+ csum_offset = 0;
+ csum_len = block_len;
+ }
+
+ /*
+ * First check and see if our csums are on our outstanding ordered
+ * extents.
+ */
+again:
+ spin_lock_irq(&log->log_extents_lock[index]);
+ list_for_each_entry(ordered, &log->logged_list[index], log_list) {
+ struct btrfs_ordered_sum *sum;
+
+ if (!mod_len)
+ break;
+
+ if (ordered->inode != inode)
+ continue;
+
+ if (ordered->file_offset + ordered->len <= mod_start ||
+ mod_start + mod_len <= ordered->file_offset)
+ continue;
+
+ /*
+ * We are going to copy all the csums on this ordered extent, so
+ * go ahead and adjust mod_start and mod_len in case this
+ * ordered extent has already been logged.
+ */
+ if (ordered->file_offset > mod_start) {
+ if (ordered->file_offset + ordered->len >=
+ mod_start + mod_len)
+ mod_len = ordered->file_offset - mod_start;
+ /*
+ * If we have this case
+ *
+ * |--------- logged extent ---------|
+ * |----- ordered extent ----|
+ *
+ * Just don't mess with mod_start and mod_len, we'll
+ * just end up logging more csums than we need and it
+ * will be ok.
+ */
+ } else {
+ if (ordered->file_offset + ordered->len <
+ mod_start + mod_len) {
+ mod_len = (mod_start + mod_len) -
+ (ordered->file_offset + ordered->len);
+ mod_start = ordered->file_offset +
+ ordered->len;
+ } else {
+ mod_len = 0;
+ }
+ }
+
+ /*
+ * To keep us from looping for the above case of an ordered
+ * extent that falls inside of the logged extent.
+ */
+ if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
+ &ordered->flags))
+ continue;
+ atomic_inc(&ordered->refs);
+ spin_unlock_irq(&log->log_extents_lock[index]);
+ /*
+ * we've dropped the lock, we must either break or
+ * start over after this.
+ */
+
+ wait_event(ordered->wait, ordered->csum_bytes_left == 0);
+
+ list_for_each_entry(sum, &ordered->list, list) {
+ ret = btrfs_csum_file_blocks(trans, log, sum);
+ if (ret) {
+ btrfs_put_ordered_extent(ordered);
+ goto unlocked;
+ }
+ }
+ btrfs_put_ordered_extent(ordered);
+ goto again;
+
+ }
+ spin_unlock_irq(&log->log_extents_lock[index]);
+unlocked:
+
+ if (!mod_len || ret)
+ return ret;
+
+ csum_offset = mod_start - em->start;
+ csum_len = mod_len;
+
+ /* block start is already adjusted for the file extent offset. */
+ ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
+ em->block_start + csum_offset,
+ em->block_start + csum_offset +
+ csum_len - 1, &ordered_sums, 0);
+ if (ret)
+ return ret;
+
+ while (!list_empty(&ordered_sums)) {
+ struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
+ struct btrfs_ordered_sum,
+ list);
+ if (!ret)
+ ret = btrfs_csum_file_blocks(trans, log, sums);
+ list_del(&sums->list);
+ kfree(sums);
+ }
+
+ return ret;
+}
+
+static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct inode *inode,
+ struct btrfs_path *path)
+{
+ struct extent_map *em, *n;
+ struct list_head extents;
+ struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
+ u64 test_gen;
+ int ret = 0;
+ int num = 0;
+
+ INIT_LIST_HEAD(&extents);
+
+ write_lock(&tree->lock);
+ test_gen = root->fs_info->last_trans_committed;
+
+ list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
+ list_del_init(&em->list);
+
+ /*
+ * Just an arbitrary number, this can be really CPU intensive
+ * once we start getting a lot of extents, and really once we
+ * have a bunch of extents we just want to commit since it will
+ * be faster.
+ */
+ if (++num > 32768) {
+ list_del_init(&tree->modified_extents);
+ ret = -EFBIG;
+ goto process;
+ }
+
+ if (em->generation <= test_gen)
+ continue;
+ /* Need a ref to keep it from getting evicted from cache */
+ atomic_inc(&em->refs);
+ set_bit(EXTENT_FLAG_LOGGING, &em->flags);
+ list_add_tail(&em->list, &extents);
+ num++;
+ }
+
+ list_sort(NULL, &extents, extent_cmp);
+
+process:
+ while (!list_empty(&extents)) {
+ em = list_entry(extents.next, struct extent_map, list);
+
+ list_del_init(&em->list);
+
+ /*
+ * If we had an error we just need to delete everybody from our
+ * private list.
+ */
+ if (ret) {
+ clear_em_logging(tree, em);
+ free_extent_map(em);
+ continue;
+ }
+
+ write_unlock(&tree->lock);
+
+ ret = log_one_extent(trans, inode, root, em, path);
+ write_lock(&tree->lock);
+ clear_em_logging(tree, em);
+ free_extent_map(em);
+ }
+ WARN_ON(!list_empty(&extents));
+ write_unlock(&tree->lock);
+
+ btrfs_release_path(path);
+ return ret;
+}
+
/* log a single inode in the tree log.
* At least one parent directory for this inode must exist in the tree
* or be logged already.
@@ -2832,10 +3588,9 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
int nritems;
int ins_start_slot = 0;
int ins_nr;
+ bool fast_search = false;
u64 ino = btrfs_ino(inode);
- log = root->log_root;
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -2851,25 +3606,32 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
max_key.objectid = ino;
- /* today the code can only do partial logging of directories */
- if (!S_ISDIR(inode->i_mode))
- inode_only = LOG_INODE_ALL;
- if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode))
+ /* today the code can only do partial logging of directories */
+ if (S_ISDIR(inode->i_mode) ||
+ (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags) &&
+ inode_only == LOG_INODE_EXISTS))
max_key.type = BTRFS_XATTR_ITEM_KEY;
else
max_key.type = (u8)-1;
max_key.offset = (u64)-1;
- ret = btrfs_commit_inode_delayed_items(trans, inode);
- if (ret) {
- btrfs_free_path(path);
- btrfs_free_path(dst_path);
- return ret;
+ /* Only run delayed items if we are a dir or a new file */
+ if (S_ISDIR(inode->i_mode) ||
+ BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
+ ret = btrfs_commit_inode_delayed_items(trans, inode);
+ if (ret) {
+ btrfs_free_path(path);
+ btrfs_free_path(dst_path);
+ return ret;
+ }
}
mutex_lock(&BTRFS_I(inode)->log_mutex);
+ btrfs_get_logged_extents(log, inode);
+
/*
* a brute force approach to making sure we get the most uptodate
* copies of everything.
@@ -2881,7 +3643,30 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
max_key_type = BTRFS_XATTR_ITEM_KEY;
ret = drop_objectid_items(trans, log, path, ino, max_key_type);
} else {
- ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
+ if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags)) {
+ clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+ &BTRFS_I(inode)->runtime_flags);
+ ret = btrfs_truncate_inode_items(trans, log,
+ inode, 0, 0);
+ } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+ &BTRFS_I(inode)->runtime_flags)) {
+ if (inode_only == LOG_INODE_ALL)
+ fast_search = true;
+ max_key.type = BTRFS_XATTR_ITEM_KEY;
+ ret = drop_objectid_items(trans, log, path, ino,
+ max_key.type);
+ } else {
+ if (inode_only == LOG_INODE_ALL)
+ fast_search = true;
+ ret = log_inode_item(trans, log, dst_path, inode);
+ if (ret) {
+ err = ret;
+ goto out_unlock;
+ }
+ goto log_extents;
+ }
+
}
if (ret) {
err = ret;
@@ -2892,7 +3677,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
while (1) {
ins_nr = 0;
ret = btrfs_search_forward(root, &min_key, &max_key,
- path, 0, trans->transid);
+ path, trans->transid);
if (ret != 0)
break;
again:
@@ -2912,7 +3697,7 @@ again:
goto next_slot;
}
- ret = copy_items(trans, log, dst_path, src, ins_start_slot,
+ ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
ins_nr, inode_only);
if (ret) {
err = ret;
@@ -2930,7 +3715,7 @@ next_slot:
goto again;
}
if (ins_nr) {
- ret = copy_items(trans, log, dst_path, src,
+ ret = copy_items(trans, inode, dst_path, src,
ins_start_slot,
ins_nr, inode_only);
if (ret) {
@@ -2951,8 +3736,7 @@ next_slot:
break;
}
if (ins_nr) {
- ret = copy_items(trans, log, dst_path, src,
- ins_start_slot,
+ ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
ins_nr, inode_only);
if (ret) {
err = ret;
@@ -2960,7 +3744,25 @@ next_slot:
}
ins_nr = 0;
}
- WARN_ON(ins_nr);
+
+log_extents:
+ if (fast_search) {
+ btrfs_release_path(dst_path);
+ ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
+ if (ret) {
+ err = ret;
+ goto out_unlock;
+ }
+ } else {
+ struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
+ struct extent_map *em, *n;
+
+ write_lock(&tree->lock);
+ list_for_each_entry_safe(em, n, &tree->modified_extents, list)
+ list_del_init(&em->list);
+ write_unlock(&tree->lock);
+ }
+
if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
btrfs_release_path(path);
btrfs_release_path(dst_path);
@@ -2971,7 +3773,10 @@ next_slot:
}
}
BTRFS_I(inode)->logged_trans = trans->transid;
+ BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
out_unlock:
+ if (err)
+ btrfs_free_logged_extents(log, log->log_transid);
mutex_unlock(&BTRFS_I(inode)->log_mutex);
btrfs_free_path(path);
@@ -3052,9 +3857,9 @@ out:
* only logging is done of any parent directories that are older than
* the last committed transaction
*/
-int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
- struct dentry *parent, int exists_only)
+static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct inode *inode,
+ struct dentry *parent, int exists_only)
{
int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
struct super_block *sb;
@@ -3138,7 +3943,6 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
end_trans:
dput(old_parent);
if (ret < 0) {
- BUG_ON(ret != -ENOSPC);
root->fs_info->last_trans_log_full_commit = trans->transid;
ret = 1;
}
@@ -3230,8 +4034,7 @@ again:
if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
break;
- log = btrfs_read_fs_root_no_radix(log_root_tree,
- &found_key);
+ log = btrfs_read_fs_root(log_root_tree, &found_key);
if (IS_ERR(log)) {
ret = PTR_ERR(log);
btrfs_error(fs_info, ret,
@@ -3246,6 +4049,9 @@ again:
wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
if (IS_ERR(wc.replay_dest)) {
ret = PTR_ERR(wc.replay_dest);
+ free_extent_buffer(log->node);
+ free_extent_buffer(log->commit_root);
+ kfree(log);
btrfs_error(fs_info, ret, "Couldn't read target root "
"for tree log recovery.");
goto error;
@@ -3254,12 +4060,10 @@ again:
wc.replay_dest->log_root = log;
btrfs_record_root_in_trans(trans, wc.replay_dest);
ret = walk_log_tree(trans, log, &wc);
- BUG_ON(ret);
- if (wc.stage == LOG_WALK_REPLAY_ALL) {
+ if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
ret = fixup_inode_link_counts(trans, wc.replay_dest,
path);
- BUG_ON(ret);
}
key.offset = found_key.offset - 1;
@@ -3268,6 +4072,9 @@ again:
free_extent_buffer(log->commit_root);
kfree(log);
+ if (ret)
+ goto error;
+
if (found_key.offset == 0)
break;
}
@@ -3288,17 +4095,20 @@ again:
btrfs_free_path(path);
+ /* step 4: commit the transaction, which also unpins the blocks */
+ ret = btrfs_commit_transaction(trans, fs_info->tree_root);
+ if (ret)
+ return ret;
+
free_extent_buffer(log_root_tree->node);
log_root_tree->log_root = NULL;
fs_info->log_root_recovering = 0;
-
- /* step 4: commit the transaction, which also unpins the blocks */
- btrfs_commit_transaction(trans, fs_info->tree_root);
-
kfree(log_root_tree);
- return 0;
+ return 0;
error:
+ if (wc.trans)
+ btrfs_end_transaction(wc.trans, fs_info->tree_root);
btrfs_free_path(path);
return ret;
}