diff options
Diffstat (limited to 'fs/bcachefs/move.c')
-rw-r--r-- | fs/bcachefs/move.c | 92 |
1 file changed, 43 insertions, 49 deletions
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c index 54dd6fec81db..a38996f5366f 100644 --- a/fs/bcachefs/move.c +++ b/fs/bcachefs/move.c @@ -330,7 +330,7 @@ int bch2_move_extent(struct moving_context *ctxt, { struct btree_trans *trans = ctxt->trans; struct bch_fs *c = trans->c; - int ret = -ENOMEM; + int ret = 0; if (trace_io_move_enabled()) trace_io_move2(c, k, &io_opts, &data_opts); @@ -351,11 +351,10 @@ int bch2_move_extent(struct moving_context *ctxt, struct moving_io *io = allocate_dropping_locks(trans, ret, kzalloc(sizeof(struct moving_io), _gfp)); - if (!io) - goto err; - + if (!io && !ret) + ret = bch_err_throw(c, ENOMEM_move_extent); if (ret) - goto err_free; + goto err; INIT_LIST_HEAD(&io->io_list); io->write.ctxt = ctxt; @@ -366,7 +365,7 @@ int bch2_move_extent(struct moving_context *ctxt, ret = bch2_data_update_init(trans, iter, ctxt, &io->write, ctxt->wp, &io_opts, data_opts, iter->btree_id, k); if (ret) - goto err_free; + goto err; io->write.op.end_io = move_write_done; } else { @@ -380,7 +379,7 @@ int bch2_move_extent(struct moving_context *ctxt, ret = bch2_data_update_bios_init(&io->write, c, &io_opts); if (ret) - goto err_free; + goto err; } io->write.rbio.bio.bi_end_io = move_read_endio; @@ -423,9 +422,8 @@ int bch2_move_extent(struct moving_context *ctxt, BCH_READ_last_fragment, data_opts.scrub ? data_opts.read_dev : -1); return 0; -err_free: - kfree(io); err: + kfree(io); if (bch2_err_matches(ret, EROFS) || bch2_err_matches(ret, BCH_ERR_transaction_restart)) return ret; @@ -513,25 +511,22 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans, *io_opts = bch2_opts_to_inode_opts(c->opts); /* reflink btree? 
*/ - if (!extent_k.k->p.inode) - goto out; - - struct btree_iter inode_iter; - struct bkey_s_c inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, - SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot), - BTREE_ITER_cached); - int ret = bkey_err(inode_k); - if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) - return ret; + if (extent_k.k->p.inode) { + CLASS(btree_iter, inode_iter)(trans, BTREE_ID_inodes, + SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot), + BTREE_ITER_cached); + struct bkey_s_c inode_k = bch2_btree_iter_peek_slot(&inode_iter); + int ret = bkey_err(inode_k); + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + return ret; - if (!ret && bkey_is_inode(inode_k.k)) { - struct bch_inode_unpacked inode; - bch2_inode_unpack(inode_k, &inode); - bch2_inode_opts_get(io_opts, c, &inode); + if (!ret && bkey_is_inode(inode_k.k)) { + struct bch_inode_unpacked inode; + bch2_inode_unpack(inode_k, &inode); + bch2_inode_opts_get(io_opts, c, &inode); + } } - bch2_trans_iter_exit(trans, &inode_iter); - /* seem to be spinning here? 
*/ -out: + return bch2_get_update_rebalance_opts(trans, io_opts, extent_iter, extent_k); } @@ -596,14 +591,14 @@ static struct bkey_s_c bch2_lookup_indirect_extent_for_move(struct btree_trans * BTREE_ID_reflink, reflink_pos, BTREE_ITER_not_extents); - struct bkey_s_c k = bch2_btree_iter_peek(trans, iter); + struct bkey_s_c k = bch2_btree_iter_peek(iter); if (!k.k || bkey_err(k)) { - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return k; } if (bkey_lt(reflink_pos, bkey_start_pos(k.k))) { - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return bkey_s_c_null; } @@ -648,13 +643,13 @@ retry_root: BTREE_ITER_prefetch| BTREE_ITER_not_extents| BTREE_ITER_all_snapshots); - struct btree *b = bch2_btree_iter_peek_node(trans, &iter); + struct btree *b = bch2_btree_iter_peek_node(&iter); ret = PTR_ERR_OR_ZERO(b); if (ret) goto root_err; if (b != btree_node_root(c, b)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); goto retry_root; } @@ -678,7 +673,7 @@ retry_root: root_err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); goto retry_root; } @@ -698,7 +693,7 @@ root_err: bch2_trans_begin(trans); - k = bch2_btree_iter_peek(trans, &iter); + k = bch2_btree_iter_peek(&iter); if (!k.k) break; @@ -719,7 +714,7 @@ root_err: REFLINK_P_MAY_UPDATE_OPTIONS(bkey_s_c_to_reflink_p(k).v)) { struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); - bch2_trans_iter_exit(trans, &reflink_iter); + bch2_trans_iter_exit(&reflink_iter); k = bch2_lookup_indirect_extent_for_move(trans, &reflink_iter, p); ret = bkey_err(k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) @@ -783,12 +778,12 @@ next: if (ctxt->stats) atomic64_add(k.k->size, &ctxt->stats->sectors_seen); next_nondata: - if (!bch2_btree_iter_advance(trans, &iter)) + if (!bch2_btree_iter_advance(&iter)) break; } out: - bch2_trans_iter_exit(trans, &reflink_iter); - bch2_trans_iter_exit(trans, 
&iter); + bch2_trans_iter_exit(&reflink_iter); + bch2_trans_iter_exit(&iter); bch2_bkey_buf_exit(&sk, c); per_snapshot_io_opts_exit(&snapshot_io_opts); @@ -855,7 +850,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, struct bch_fs *c = trans->c; bool is_kthread = current->flags & PF_KTHREAD; struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); - struct btree_iter iter = {}, bp_iter = {}; + struct btree_iter iter = {}; struct bkey_buf sk; struct bkey_s_c k; struct bkey_buf last_flushed; @@ -880,7 +875,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, */ bch2_trans_begin(trans); - bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_start, 0); + CLASS(btree_iter, bp_iter)(trans, BTREE_ID_backpointers, bp_start, 0); ret = bch2_btree_write_buffer_tryflush(trans); if (!bch2_err_matches(ret, EROFS)) @@ -894,7 +889,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, bch2_trans_begin(trans); - k = bch2_btree_iter_peek(trans, &bp_iter); + k = bch2_btree_iter_peek(&bp_iter); ret = bkey_err(k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; @@ -938,7 +933,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, if (!bp.v->level) { ret = bch2_move_get_io_opts_one(trans, &io_opts, &iter, k); if (ret) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); continue; } } @@ -951,13 +946,13 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, pred, arg, p); if (!p) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); goto next; } if (data_opts.scrub && !bch2_dev_idx_is_online(c, data_opts.read_dev)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); ret = bch_err_throw(c, device_offline); break; } @@ -976,7 +971,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, else ret = bch2_btree_node_scrub(trans, bp.v->btree_id, bp.v->level, k, data_opts.read_dev); - bch2_trans_iter_exit(trans, &iter); + 
bch2_trans_iter_exit(&iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; @@ -991,14 +986,13 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, if (ctxt->stats) atomic64_add(sectors, &ctxt->stats->sectors_seen); next: - bch2_btree_iter_advance(trans, &bp_iter); + bch2_btree_iter_advance(&bp_iter); } while (check_mismatch_done < bucket_end) bch2_check_bucket_backpointer_mismatch(trans, ca, check_mismatch_done++, copygc, &last_flushed); err: - bch2_trans_iter_exit(trans, &bp_iter); bch2_bkey_buf_exit(&sk, c); bch2_bkey_buf_exit(&last_flushed, c); return ret; @@ -1114,7 +1108,7 @@ static int bch2_move_btree(struct bch_fs *c, retry: ret = 0; while (bch2_trans_begin(trans), - (b = bch2_btree_iter_peek_node(trans, &iter)) && + (b = bch2_btree_iter_peek_node(&iter)) && !(ret = PTR_ERR_OR_ZERO(b))) { if (kthread && kthread_should_stop()) break; @@ -1134,12 +1128,12 @@ retry: if (ret) break; next: - bch2_btree_iter_next_node(trans, &iter); + bch2_btree_iter_next_node(&iter); } if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (kthread && kthread_should_stop()) break; |