author     Kent Overstreet <kent.overstreet@gmail.com>    2022-02-16 06:31:28 -0500
committer  Kent Overstreet <kent.overstreet@gmail.com>    2022-02-16 06:32:49 -0500
commit     de4e778a6add88e6ccf5a7730a8d343d3fb5f064 (patch)
tree       90d61ca635315abeb69a13f468ab27f2e6d7804a /libbcachefs/move.c
parent     a1d66a2a4e7c536b2b722f6b1758d36f00ae3c55 (diff)

Update bcachefs sources to a0d7001b0f bcachefs: Change bch2_dev_lookup() to not use lookup_bdev()
Diffstat (limited to 'libbcachefs/move.c')
-rw-r--r--  libbcachefs/move.c | 43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/libbcachefs/move.c b/libbcachefs/move.c
index 3e3dcec3..7ca7ce39 100644
--- a/libbcachefs/move.c
+++ b/libbcachefs/move.c
@@ -487,19 +487,22 @@ static void move_read_endio(struct bio *bio)
closure_put(&ctxt->cl);
}
-static void do_pending_writes(struct moving_context *ctxt)
+static void do_pending_writes(struct moving_context *ctxt, struct btree_trans *trans)
{
struct moving_io *io;
+ if (trans)
+ bch2_trans_unlock(trans);
+
while ((io = next_pending_write(ctxt))) {
list_del(&io->list);
closure_call(&io->cl, move_write, NULL, &ctxt->cl);
}
}
-#define move_ctxt_wait_event(_ctxt, _cond) \
+#define move_ctxt_wait_event(_ctxt, _trans, _cond) \
do { \
- do_pending_writes(_ctxt); \
+ do_pending_writes(_ctxt, _trans); \
\
if (_cond) \
break; \
@@ -507,11 +510,12 @@ do { \
next_pending_write(_ctxt) || (_cond)); \
} while (1)
-static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
+static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
+ struct btree_trans *trans)
{
unsigned sectors_pending = atomic_read(&ctxt->write_sectors);
- move_ctxt_wait_event(ctxt,
+ move_ctxt_wait_event(ctxt, trans,
!atomic_read(&ctxt->write_sectors) ||
atomic_read(&ctxt->write_sectors) != sectors_pending);
}
@@ -533,14 +537,6 @@ static int bch2_move_extent(struct btree_trans *trans,
unsigned sectors = k.k->size, pages;
int ret = -ENOMEM;
- move_ctxt_wait_event(ctxt,
- atomic_read(&ctxt->write_sectors) <
- SECTORS_IN_FLIGHT_PER_DEVICE);
-
- move_ctxt_wait_event(ctxt,
- atomic_read(&ctxt->read_sectors) <
- SECTORS_IN_FLIGHT_PER_DEVICE);
-
/* write path might have to decompress data: */
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
@@ -691,12 +687,19 @@ static int __bch2_move_data(struct bch_fs *c,
schedule_timeout(delay);
if (unlikely(freezing(current))) {
- bch2_trans_unlock(&trans);
- move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
+ move_ctxt_wait_event(ctxt, &trans, list_empty(&ctxt->reads));
try_to_freeze();
}
} while (delay);
+ move_ctxt_wait_event(ctxt, &trans,
+ atomic_read(&ctxt->write_sectors) <
+ SECTORS_IN_FLIGHT_PER_DEVICE);
+
+ move_ctxt_wait_event(ctxt, &trans,
+ atomic_read(&ctxt->read_sectors) <
+ SECTORS_IN_FLIGHT_PER_DEVICE);
+
bch2_trans_begin(&trans);
k = bch2_btree_iter_peek(&iter);
@@ -748,10 +751,12 @@ static int __bch2_move_data(struct bch_fs *c,
BUG();
}
- /* unlock before doing IO: */
+ /*
+ * The iterator gets unlocked by __bch2_read_extent - need to
+ * save a copy of @k elsewhere:
+ */
bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
- bch2_trans_unlock(&trans);
ret2 = bch2_move_extent(&trans, ctxt, wp, io_opts, btree_id, k,
data_cmd, data_opts);
@@ -761,7 +766,7 @@ static int __bch2_move_data(struct bch_fs *c,
if (ret2 == -ENOMEM) {
/* memory allocation failure, wait for some IO to finish */
- bch2_move_ctxt_wait_for_io(ctxt);
+ bch2_move_ctxt_wait_for_io(ctxt, &trans);
continue;
}
@@ -846,7 +851,7 @@ int bch2_move_data(struct bch_fs *c,
}
- move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
+ move_ctxt_wait_event(&ctxt, NULL, list_empty(&ctxt.reads));
closure_sync(&ctxt.cl);
EBUG_ON(atomic_read(&ctxt.write_sectors));
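
The patch threads a struct btree_trans * into do_pending_writes() and move_ctxt_wait_event() so btree locks are dropped inside the helper before the move path blocks on outstanding writes, instead of relying on each caller to call bch2_trans_unlock() first. Below is a minimal, self-contained sketch of that pattern; the names fake_trans, move_ctxt_stub and flush_pending are hypothetical stand-ins for illustration only, not the bcachefs API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct btree_trans: only tracks lock state. */
struct fake_trans {
	bool locked;
};

static void fake_trans_unlock(struct fake_trans *trans)
{
	trans->locked = false;
}

/* Hypothetical stand-in for struct moving_context. */
struct move_ctxt_stub {
	int pending_writes;
};

/*
 * Mirrors the shape of the patched do_pending_writes(): drop btree locks
 * first (if a transaction was passed in), then submit whatever writes are
 * queued, so other threads are never stalled behind our locks while we
 * block on IO.
 */
static void flush_pending(struct move_ctxt_stub *ctxt, struct fake_trans *trans)
{
	if (trans)
		fake_trans_unlock(trans);

	while (ctxt->pending_writes > 0) {
		ctxt->pending_writes--;
		printf("submitted one pending write\n");
	}
}

int main(void)
{
	struct fake_trans trans = { .locked = true };
	struct move_ctxt_stub ctxt = { .pending_writes = 3 };

	/*
	 * Callers no longer have to remember to unlock before waiting;
	 * passing the transaction to the helper handles it, as the patched
	 * move_ctxt_wait_event() does.
	 */
	flush_pending(&ctxt, &trans);

	printf("trans locked: %d, pending writes: %d\n",
	       trans.locked, ctxt.pending_writes);
	return 0;
}

Passing NULL for the transaction, as the final wait in bch2_move_data() does in the diff above, simply skips the unlock step.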