author     Miao Xie <miaox@cn.fujitsu.com>    2014-03-06 13:38:19 +0800
committer  Josef Bacik <jbacik@fb.com>        2014-03-10 15:17:22 -0400
commit     8257b2dc3c1a1057b84a589827354abdc4c767fd (patch)
tree       c8e501b32a70f731b00a26795f9063b0a5088f8d /fs/btrfs/file.c
parent     52483bc26f0e95c91e8fd07f9def588bf89664f8 (diff)
Btrfs: introduce btrfs_{start, end}_nocow_write() for each subvolume
If snapshot creation happens after a nocow write but before the dirty data is flushed, the flush fails because there is no space left. So we must track when those nocow write operations start and when they end; while there are active nocow writers, snapshot creators must wait. To implement this, introduce btrfs_{start, end}_nocow_write(), which work much like mnt_{want,drop}_write(). These two functions are used only for nocow file write operations.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
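The pattern the message describes can be illustrated with a small, self-contained userspace sketch: nocow writers register before dirtying data, and a snapshot creator blocks new writers and waits for the in-flight ones to drain. Everything below (struct subvol_writers, start_nocow_write(), end_nocow_write(), wait_for_nocow_writers(), and the mutex/condvar scheme) is an illustrative assumption; the real helpers added by this series live outside fs/btrfs/file.c and are not part of the diff shown here.

#include <pthread.h>
#include <stdbool.h>

struct subvol_writers {
	pthread_mutex_t lock;              /* protects the fields below */
	pthread_cond_t  idle;              /* signalled when writers drops to 0 */
	unsigned int    writers;           /* in-flight nocow writers */
	bool            snapshot_pending;  /* a snapshot creator is waiting */
};

/* Returns true if the caller may go ahead with a nocow write. */
static bool start_nocow_write(struct subvol_writers *s)
{
	bool ok;

	pthread_mutex_lock(&s->lock);
	ok = !s->snapshot_pending;         /* otherwise fall back to cow */
	if (ok)
		s->writers++;
	pthread_mutex_unlock(&s->lock);
	return ok;
}

static void end_nocow_write(struct subvol_writers *s)
{
	pthread_mutex_lock(&s->lock);
	if (--s->writers == 0)
		pthread_cond_broadcast(&s->idle);
	pthread_mutex_unlock(&s->lock);
}

/* Snapshot creator: refuse new nocow writers, then wait for existing ones. */
static void wait_for_nocow_writers(struct subvol_writers *s)
{
	pthread_mutex_lock(&s->lock);
	s->snapshot_pending = true;
	while (s->writers > 0)
		pthread_cond_wait(&s->idle, &s->lock);
	pthread_mutex_unlock(&s->lock);
}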
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--  fs/btrfs/file.c  21
1 file changed, 17 insertions, 4 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index d88f2dc4d045..006cf9f0e852 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1410,6 +1410,10 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
u64 num_bytes;
int ret;
+ ret = btrfs_start_nocow_write(root);
+ if (!ret)
+ return -ENOSPC;
+
lockstart = round_down(pos, root->sectorsize);
lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;
@@ -1427,11 +1431,13 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
num_bytes = lockend - lockstart + 1;
ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
- if (ret <= 0)
+ if (ret <= 0) {
ret = 0;
- else
+ btrfs_end_nocow_write(root);
+ } else {
*write_bytes = min_t(size_t, *write_bytes ,
num_bytes - pos + lockstart);
+ }
unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
@@ -1520,6 +1526,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
if (!only_release_metadata)
btrfs_free_reserved_data_space(inode,
reserve_bytes);
+ else
+ btrfs_end_nocow_write(root);
break;
}
@@ -1608,6 +1616,9 @@ again:
}
release_bytes = 0;
+ if (only_release_metadata)
+ btrfs_end_nocow_write(root);
+
if (only_release_metadata && copied > 0) {
u64 lockstart = round_down(pos, root->sectorsize);
u64 lockend = lockstart +
@@ -1634,10 +1645,12 @@ again:
kfree(pages);
if (release_bytes) {
- if (only_release_metadata)
+ if (only_release_metadata) {
+ btrfs_end_nocow_write(root);
btrfs_delalloc_release_metadata(inode, release_bytes);
- else
+ } else {
btrfs_delalloc_release_space(inode, release_bytes);
+ }
}
return num_written ? num_written : ret;
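
A usage sketch, built on the illustrative userspace helpers above (do_nocow_buffered_write() is a hypothetical caller, not a kernel function): register before dirtying nocow data and drop the registration on every exit path, mirroring how the hunks wire btrfs_{start,end}_nocow_write() into check_can_nocow() and the error/cleanup paths of __btrfs_buffered_write().

#include <errno.h>

static long do_nocow_buffered_write(struct subvol_writers *s)
{
	if (!start_nocow_write(s))
		return -ENOSPC;            /* caller falls back to the cow path */

	/* ... reserve metadata and dirty the pages without new data space ... */

	end_nocow_write(s);                /* must pair on every return path */
	return 0;
}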