Diffstat (limited to 'fs/bcachefs/io_write.c')
 fs/bcachefs/io_write.c | 119 ++++++++++++++++++++++++---------------------------
 1 file changed, 56 insertions(+), 63 deletions(-)
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 6a5da02ce266..66ee07be149e 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -33,7 +33,6 @@
#include <linux/blkdev.h>
#include <linux/moduleparam.h>
-#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
@@ -365,7 +364,7 @@ int bch2_extent_update(struct btree_trans *trans,
min(k->k.p.offset << 9, new_i_size),
i_sectors_delta, &inode) ?:
(bch2_inode_opts_get_inode(c, &inode, &opts),
- bch2_bkey_set_needs_rebalance(c, &opts, k,
+ bch2_bkey_set_needs_rebalance(trans, NULL, &opts, k,
SET_NEEDS_REBALANCE_foreground,
change_cookie)) ?:
bch2_trans_update(trans, iter, k, 0) ?:
@@ -1271,7 +1270,7 @@ static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
return bch2_extent_update_i_size_sectors(trans, iter,
min(new->k.p.offset << 9, new_i_size), 0, &inode) ?:
(bch2_inode_opts_get_inode(c, &inode, &opts),
- bch2_bkey_set_needs_rebalance(c, &opts, new,
+ bch2_bkey_set_needs_rebalance(trans, NULL, &opts, new,
SET_NEEDS_REBALANCE_foreground,
op->opts.change_cookie)) ?:
bch2_trans_update(trans, iter, new,
@@ -1323,11 +1322,25 @@ static CLOSURE_CALLBACK(bch2_nocow_write_done)
bch2_write_done(cl);
}
-struct bucket_to_lock {
- struct bpos b;
- unsigned gen;
- struct nocow_lock_bucket *l;
-};
+static bool bkey_get_dev_iorefs(struct bch_fs *c, struct bkey_ptrs_c ptrs)
+{
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE,
+ BCH_DEV_WRITE_REF_io_write);
+ if (unlikely(!ca)) {
+ bkey_for_each_ptr(ptrs, ptr2) {
+ if (ptr2 == ptr)
+ break;
+ enumerated_ref_put(&bch2_dev_have_ref(c, ptr2->dev)->io_ref[WRITE],
+ BCH_DEV_WRITE_REF_io_write);
+ }
+
+ return false;
+ }
+ }
+
+ return true;
+}
static void bch2_nocow_write(struct bch_write_op *op)
{
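
Note: bkey_get_dev_iorefs(), added above, takes a write ioref on every
device the key points to and, on the first failure, releases exactly the
refs already acquired before returning false. A minimal self-contained
sketch of that all-or-nothing acquire pattern (struct dev, dev_tryget()
and dev_put() are hypothetical stand-ins here, not bcachefs APIs):

    #include <stdbool.h>
    #include <stddef.h>

    struct dev {
            int  refs;
            bool dead;      /* device going away: refuse new refs */
    };

    static bool dev_tryget(struct dev *d)
    {
            if (d->dead)
                    return false;
            d->refs++;
            return true;
    }

    static void dev_put(struct dev *d)
    {
            d->refs--;
    }

    /* Take a ref on every device, or return false holding none. */
    static bool devs_get_all(struct dev *devs, size_t nr)
    {
            for (size_t i = 0; i < nr; i++)
                    if (!dev_tryget(&devs[i])) {
                            while (i--)     /* unwind refs taken so far */
                                    dev_put(&devs[i]);
                            return false;
                    }
            return true;
    }

    int main(void)
    {
            struct dev devs[3] = { {0}, { .dead = true }, {0} };

            /* Fails at devs[1]; the ref on devs[0] is dropped again. */
            return devs_get_all(devs, 3) ? 0 : 1;
    }
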
@@ -1335,15 +1348,14 @@ static void bch2_nocow_write(struct bch_write_op *op)
struct btree_trans *trans;
struct btree_iter iter;
struct bkey_s_c k;
- DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
+ struct bkey_ptrs_c ptrs;
u32 snapshot;
- struct bucket_to_lock *stale_at;
+ const struct bch_extent_ptr *stale_at;
int stale, ret;
if (op->flags & BCH_WRITE_move)
return;
- darray_init(&buckets);
trans = bch2_trans_get(c);
retry:
bch2_trans_begin(trans);
@@ -1358,8 +1370,6 @@ retry:
while (1) {
struct bio *bio = &op->wbio.bio;
- buckets.nr = 0;
-
ret = bch2_trans_relock(trans);
if (ret)
break;
@@ -1381,50 +1391,42 @@ retry:
break;
/* Get iorefs before dropping btree locks: */
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE,
- BCH_DEV_WRITE_REF_io_write);
- if (unlikely(!ca))
- goto err_get_ioref;
-
- struct bpos b = PTR_BUCKET_POS(ca, ptr);
- struct nocow_lock_bucket *l =
- bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
- prefetch(l);
-
- /* XXX allocating memory with btree locks held - rare */
- darray_push_gfp(&buckets, ((struct bucket_to_lock) {
- .b = b, .gen = ptr->gen, .l = l,
- }), GFP_KERNEL|__GFP_NOFAIL);
-
- if (ptr->unwritten)
- op->flags |= BCH_WRITE_convert_unwritten;
- }
+ ptrs = bch2_bkey_ptrs_c(k);
+ if (!bkey_get_dev_iorefs(c, ptrs))
+ goto out;
/* Unlock before taking nocow locks, doing IO: */
bkey_reassemble(op->insert_keys.top, k);
- bch2_trans_unlock(trans);
+ k = bkey_i_to_s_c(op->insert_keys.top);
+ ptrs = bch2_bkey_ptrs_c(k);
- bch2_cut_front(op->pos, op->insert_keys.top);
- if (op->flags & BCH_WRITE_convert_unwritten)
- bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
+ bch2_trans_unlock(trans);
- darray_for_each(buckets, i) {
- struct bch_dev *ca = bch2_dev_have_ref(c, i->b.inode);
+ bch2_bkey_nocow_lock(c, ptrs, BUCKET_NOCOW_LOCK_UPDATE);
- __bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
- bucket_to_u64(i->b),
- BUCKET_NOCOW_LOCK_UPDATE);
+ /*
+ * This could be handled better: If we're able to trylock the
+ * nocow locks with btree locks held, we know dirty pointers
+ * can't be stale
+ */
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
- int gen = bucket_gen_get(ca, i->b.offset);
- stale = gen < 0 ? gen : gen_after(gen, i->gen);
+ int gen = bucket_gen_get(ca, PTR_BUCKET_NR(ca, ptr));
+ stale = gen < 0 ? gen : gen_after(gen, ptr->gen);
if (unlikely(stale)) {
- stale_at = i;
+ stale_at = ptr;
goto err_bucket_stale;
}
+
+ if (ptr->unwritten)
+ op->flags |= BCH_WRITE_convert_unwritten;
}
+ bch2_cut_front(op->pos, op->insert_keys.top);
+ if (op->flags & BCH_WRITE_convert_unwritten)
+ bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
+
bio = &op->wbio.bio;
if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
bio = bio_split(bio, k.k->p.offset - op->pos.offset,
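
Note: with btree locks dropped and the nocow locks held, the loop above
rereads each bucket's generation and bails out to err_bucket_stale when
the bucket's gen has moved past the pointer's (gen_after()), or when
bucket_gen_get() returns a negative error. Assuming gen_after() is a
wrapping 8-bit generation comparison (a reading of its semantics, not
the bcachefs definition verbatim), the idea in isolation:

    #include <assert.h>
    #include <stdint.h>

    /* Nonzero iff gen a is newer than b, with the counter wrapping at 256. */
    static int gen_is_after(uint8_t a, uint8_t b)
    {
            return (int8_t)(a - b) > 0;
    }

    int main(void)
    {
            assert(gen_is_after(5, 3));     /* plainly newer */
            assert(!gen_is_after(3, 5));    /* plainly older */
            assert(gen_is_after(1, 255));   /* newer across the wrap */
            assert(!gen_is_after(7, 7));    /* equal: not stale */
            return 0;
    }
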
@@ -1458,8 +1460,6 @@ err:
goto retry;
bch2_trans_put(trans);
- darray_exit(&buckets);
-
if (ret) {
bch2_write_op_error(op, op->pos.offset,
"%s(): btree lookup error: %s", __func__, bch2_err_str(ret));
@@ -1484,24 +1484,11 @@ err:
continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
}
return;
-err_get_ioref:
- darray_for_each(buckets, i)
- enumerated_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref[WRITE],
- BCH_DEV_WRITE_REF_io_write);
-
- /* Fall back to COW path: */
- goto out;
err_bucket_stale:
- darray_for_each(buckets, i) {
- bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
- if (i == stale_at)
- break;
- }
-
CLASS(printbuf, buf)();
if (bch2_fs_inconsistent_on(stale < 0, c,
- "pointer to invalid bucket in nocow path on device %llu\n %s",
- stale_at->b.inode,
+ "pointer to invalid bucket in nocow path on device %u\n %s",
+ stale_at->dev,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch_err_throw(c, data_write_invalid_ptr);
} else {
@@ -1509,7 +1496,13 @@ err_bucket_stale:
ret = bch_err_throw(c, transaction_restart);
}
- goto err_get_ioref;
+ bch2_bkey_nocow_unlock(c, k, BUCKET_NOCOW_LOCK_UPDATE);
+ bkey_for_each_ptr(ptrs, ptr)
+ enumerated_ref_put(&bch2_dev_have_ref(c, ptr->dev)->io_ref[WRITE],
+ BCH_DEV_WRITE_REF_io_write);
+
+ /* Fall back to COW path: */
+ goto out;
}
static void __bch2_write(struct bch_write_op *op)
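
Note: the rewritten error path above folds the old err_get_ioref label
into err_bucket_stale: unlock the nocow locks for the whole key, drop
every device ioref, and fall back to the COW path. A generic sketch of
that reverse-order unwind; every type and helper below is a hypothetical
stub, not bcachefs code:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct ctx { int refs; int locks; bool stale; };

    static bool refs_get(struct ctx *c)    { c->refs++;  return true; }
    static void refs_put(struct ctx *c)    { c->refs--; }
    static void locks_take(struct ctx *c)  { c->locks++; }
    static void locks_drop(struct ctx *c)  { c->locks--; }
    static int  check_stale(struct ctx *c) { return c->stale ? -EAGAIN : 0; }
    static int  issue_io(struct ctx *c)    { (void)c; return 0; }

    static int nocow_write(struct ctx *c)
    {
            int ret;

            if (!refs_get(c))
                    return -EAGAIN;         /* nothing acquired yet */

            locks_take(c);

            ret = check_stale(c);
            if (ret)
                    goto err;

            /* On success the IO completion path drops locks and refs. */
            return issue_io(c);
    err:
            locks_drop(c);                  /* reverse acquisition order */
            refs_put(c);
            return ret;                     /* caller falls back to COW */
    }

    int main(void)
    {
            struct ctx fresh = {0}, stale = { .stale = true };

            printf("fresh: %d, stale: %d\n",
                   nocow_write(&fresh), nocow_write(&stale));
            return 0;
    }
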