diff options
Diffstat (limited to 'fs/bcachefs/alloc_background.c')
-rw-r--r-- | fs/bcachefs/alloc_background.c | 152 |
1 file changed, 82 insertions(+), 70 deletions(-)
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index 4c1604fd80f9..1c2cd841e8a0 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -20,6 +20,7 @@ #include "enumerated_ref.h" #include "error.h" #include "lru.h" +#include "progress.h" #include "recovery.h" #include "varint.h" @@ -337,9 +338,10 @@ void bch2_alloc_v4_swab(struct bkey_s k) } static inline void __bch2_alloc_v4_to_text(struct printbuf *out, struct bch_fs *c, - unsigned dev, const struct bch_alloc_v4 *a) + struct bkey_s_c k, + const struct bch_alloc_v4 *a) { - struct bch_dev *ca = c ? bch2_dev_tryget_noerror(c, dev) : NULL; + struct bch_dev *ca = c ? bch2_dev_tryget_noerror(c, k.k->p.inode) : NULL; prt_newline(out); printbuf_indent_add(out, 2); @@ -348,11 +350,14 @@ static inline void __bch2_alloc_v4_to_text(struct printbuf *out, struct bch_fs * bch2_prt_data_type(out, a->data_type); prt_newline(out); prt_printf(out, "journal_seq_nonempty %llu\n", a->journal_seq_nonempty); - prt_printf(out, "journal_seq_empty %llu\n", a->journal_seq_empty); + if (bkey_val_bytes(k.k) > offsetof(struct bch_alloc_v4, journal_seq_empty)) + prt_printf(out, "journal_seq_empty %llu\n", a->journal_seq_empty); + prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a)); prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a)); prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors); - prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors); + if (bkey_val_bytes(k.k) > offsetof(struct bch_alloc_v4, stripe_sectors)) + prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors); prt_printf(out, "cached_sectors %u\n", a->cached_sectors); prt_printf(out, "stripe %u\n", a->stripe); prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy); @@ -372,12 +377,12 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c struct bch_alloc_v4 _a; const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a); - 
__bch2_alloc_v4_to_text(out, c, k.k->p.inode, a); + __bch2_alloc_v4_to_text(out, c, k, a); } void bch2_alloc_v4_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) { - __bch2_alloc_v4_to_text(out, c, k.k->p.inode, bkey_s_c_to_alloc_v4(k).v); + __bch2_alloc_v4_to_text(out, c, k, bkey_s_c_to_alloc_v4(k).v); } void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out) @@ -385,7 +390,7 @@ void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out) if (k.k->type == KEY_TYPE_alloc_v4) { void *src, *dst; - *out = *bkey_s_c_to_alloc_v4(k).v; + bkey_val_copy(out, bkey_s_c_to_alloc_v4(k)); src = alloc_v4_backpointers(out); SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s); @@ -482,7 +487,7 @@ bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_i goto err; return a; err: - bch2_trans_iter_exit(trans, iter); + bch2_trans_iter_exit(iter); return ERR_PTR(ret); } @@ -501,18 +506,18 @@ struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, if ((void *) k.v >= trans->mem && (void *) k.v < trans->mem + trans->mem_top) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return container_of(bkey_s_c_to_alloc_v4(k).v, struct bkey_i_alloc_v4, v); } struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k); if (IS_ERR(a)) { - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return a; } ret = bch2_trans_update_ip(trans, &iter, &a->k_i, flags, _RET_IP_); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return unlikely(ret) ? 
ERR_PTR(ret) : a; } @@ -635,7 +640,7 @@ int bch2_alloc_read(struct bch_fs *c) * bch2_check_alloc_key() which runs later: */ if (!ca) { - bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0)); + bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0)); continue; } @@ -656,17 +661,17 @@ int bch2_alloc_read(struct bch_fs *c) * bch2_check_alloc_key() which runs later: */ if (!ca) { - bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0)); + bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0)); continue; } if (k.k->p.offset < ca->mi.first_bucket) { - bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode, ca->mi.first_bucket)); + bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket)); continue; } if (k.k->p.offset >= ca->mi.nbuckets) { - bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0)); + bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0)); continue; } @@ -752,7 +757,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans, ret = bch2_btree_bit_mod_iter(trans, &iter, set); fsck_err: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -788,7 +793,7 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans, g->v.gens[offset] = gen; ret = bch2_trans_update(trans, &iter, &g->k_i, 0); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -1039,10 +1044,9 @@ invalid_bucket: * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for * extents style btrees, but works on non-extents btrees: */ -static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct btree_iter *iter, - struct bpos end, struct bkey *hole) +static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole) { - struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter); + struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); if (bkey_err(k)) return k; @@ -1053,9 +1057,9 @@ static struct bkey_s_c 
bch2_get_key_or_hole(struct btree_trans *trans, struct bt struct btree_iter iter2; struct bpos next; - bch2_trans_copy_iter(trans, &iter2, iter); + bch2_trans_copy_iter(&iter2, iter); - struct btree_path *path = btree_iter_path(trans, iter); + struct btree_path *path = btree_iter_path(iter->trans, iter); if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX)) end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p)); @@ -1065,9 +1069,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct bt * btree node min/max is a closed interval, upto takes a half * open interval: */ - k = bch2_btree_iter_peek_max(trans, &iter2, end); + k = bch2_btree_iter_peek_max(&iter2, end); next = iter2.pos; - bch2_trans_iter_exit(trans, &iter2); + bch2_trans_iter_exit(&iter2); BUG_ON(next.offset >= iter->pos.offset + U32_MAX); @@ -1107,14 +1111,13 @@ static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *buck return *ca != NULL; } -static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_trans *trans, - struct btree_iter *iter, - struct bch_dev **ca, struct bkey *hole) +static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, + struct bch_dev **ca, struct bkey *hole) { - struct bch_fs *c = trans->c; + struct bch_fs *c = iter->trans->c; struct bkey_s_c k; again: - k = bch2_get_key_or_hole(trans, iter, POS_MAX, hole); + k = bch2_get_key_or_hole(iter, POS_MAX, hole); if (bkey_err(k)) return k; @@ -1127,7 +1130,7 @@ again: if (!next_bucket(c, ca, &hole_start)) return bkey_s_c_null; - bch2_btree_iter_set_pos(trans, iter, hole_start); + bch2_btree_iter_set_pos(iter, hole_start); goto again; } @@ -1168,8 +1171,8 @@ int bch2_check_alloc_key(struct btree_trans *trans, a = bch2_alloc_to_v4(alloc_k, &a_convert); - bch2_btree_iter_set_pos(trans, discard_iter, alloc_k.k->p); - k = bch2_btree_iter_peek_slot(trans, discard_iter); + bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p); + k = 
bch2_btree_iter_peek_slot(discard_iter); ret = bkey_err(k); if (ret) return ret; @@ -1182,8 +1185,8 @@ int bch2_check_alloc_key(struct btree_trans *trans, return ret; } - bch2_btree_iter_set_pos(trans, freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a)); - k = bch2_btree_iter_peek_slot(trans, freespace_iter); + bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a)); + k = bch2_btree_iter_peek_slot(freespace_iter); ret = bkey_err(k); if (ret) return ret; @@ -1196,8 +1199,8 @@ int bch2_check_alloc_key(struct btree_trans *trans, return ret; } - bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset)); - k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter); + bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset)); + k = bch2_btree_iter_peek_slot(bucket_gens_iter); ret = bkey_err(k); if (ret) return ret; @@ -1246,9 +1249,9 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans, if (!ca->mi.freespace_initialized) return 0; - bch2_btree_iter_set_pos(trans, freespace_iter, start); + bch2_btree_iter_set_pos(freespace_iter, start); - k = bch2_btree_iter_peek_slot(trans, freespace_iter); + k = bch2_btree_iter_peek_slot(freespace_iter); ret = bkey_err(k); if (ret) return ret; @@ -1294,9 +1297,9 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans, unsigned i, gens_offset, gens_end_offset; int ret; - bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(start, &gens_offset)); + bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset)); - k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter); + k = bch2_btree_iter_peek_slot(bucket_gens_iter); ret = bkey_err(k); if (ret) return ret; @@ -1360,7 +1363,7 @@ static int bch2_recheck_discard_freespace_key(struct btree_trans *trans, struct ret = k.k->type != KEY_TYPE_set ? 
__bch2_check_discard_freespace_key(trans, &iter, &gen, FSCK_ERR_SILENT) : 0; - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -1431,8 +1434,8 @@ int __bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_i *gen = a->gen; out: fsck_err: - bch2_set_btree_iter_dontneed(trans, &alloc_iter); - bch2_trans_iter_exit(trans, &alloc_iter); + bch2_set_btree_iter_dontneed(&alloc_iter); + bch2_trans_iter_exit(&alloc_iter); return ret; delete: if (!async_repair) { @@ -1549,6 +1552,9 @@ int bch2_check_alloc_info(struct bch_fs *c) struct bkey_s_c k; int ret = 0; + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_alloc)); + CLASS(btree_trans, trans)(c); bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN, BTREE_ITER_prefetch); @@ -1564,7 +1570,7 @@ int bch2_check_alloc_info(struct bch_fs *c) bch2_trans_begin(trans); - k = bch2_get_key_or_real_bucket_hole(trans, &iter, &ca, &hole); + k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole); ret = bkey_err(k); if (ret) goto bkey_err; @@ -1572,6 +1578,8 @@ int bch2_check_alloc_info(struct bch_fs *c) if (!k.k) break; + progress_update_iter(trans, &progress, &iter); + if (k.k->type) { next = bpos_nosnap_successor(k.k->p); @@ -1602,17 +1610,17 @@ int bch2_check_alloc_info(struct bch_fs *c) if (ret) goto bkey_err; - bch2_btree_iter_set_pos(trans, &iter, next); + bch2_btree_iter_set_pos(&iter, next); bkey_err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; if (ret) break; } - bch2_trans_iter_exit(trans, &bucket_gens_iter); - bch2_trans_iter_exit(trans, &freespace_iter); - bch2_trans_iter_exit(trans, &discard_iter); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&bucket_gens_iter); + bch2_trans_iter_exit(&freespace_iter); + bch2_trans_iter_exit(&discard_iter); + bch2_trans_iter_exit(&iter); bch2_dev_put(ca); ca = NULL; @@ -1630,7 +1638,7 @@ bkey_err: BTREE_ITER_prefetch); while (1) { 
bch2_trans_begin(trans); - k = bch2_btree_iter_peek(trans, &iter); + k = bch2_btree_iter_peek(&iter); if (!k.k) break; @@ -1647,9 +1655,9 @@ bkey_err: break; } - bch2_btree_iter_set_pos(trans, &iter, bpos_nosnap_successor(iter.pos)); + bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos)); } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (ret) return ret; @@ -1673,7 +1681,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans, CLASS(printbuf, buf)(); int ret; - alloc_k = bch2_btree_iter_peek(trans, alloc_iter); + alloc_k = bch2_btree_iter_peek(alloc_iter); if (!alloc_k.k) return 0; @@ -1732,12 +1740,16 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c) bch2_bkey_buf_init(&last_flushed); bkey_init(&last_flushed.k->k); + struct progress_indicator_state progress; + bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_alloc)); + CLASS(btree_trans, trans)(c); int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc, POS_MIN, BTREE_ITER_prefetch, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed)) ?: - bch2_check_stripe_to_lru_refs(trans); + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + progress_update_iter(trans, &progress, &iter); + bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed); + }))?: bch2_check_stripe_to_lru_refs(trans); bch2_bkey_buf_exit(&last_flushed, c); return ret; @@ -1785,7 +1797,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans, { struct bch_fs *c = trans->c; struct bpos pos = need_discard_iter->pos; - struct btree_iter iter = {}; + struct btree_iter iter = { NULL }; struct bkey_s_c k; struct bkey_i_alloc_v4 *a; CLASS(printbuf, buf)(); @@ -1878,7 +1890,7 @@ fsck_err: discard_in_flight_remove(ca, iter.pos.offset); if (!ret) s->seen++; - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } @@ -1958,7 +1970,7 @@ static int bch2_do_discards_fast_one(struct btree_trans *trans, ret = 
bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true); out: fsck_err: - bch2_trans_iter_exit(trans, &need_discard_iter); + bch2_trans_iter_exit(&need_discard_iter); return ret; } @@ -2051,7 +2063,7 @@ static int invalidate_one_bp(struct btree_trans *trans, bch2_bkey_drop_device(bkey_i_to_s(n), ca->dev_idx); err: - bch2_trans_iter_exit(trans, &extent_iter); + bch2_trans_iter_exit(&extent_iter); return ret; } @@ -2152,7 +2164,7 @@ static int invalidate_one_bucket(struct btree_trans *trans, --*nr_to_invalidate; out: fsck_err: - bch2_trans_iter_exit(trans, &alloc_iter); + bch2_trans_iter_exit(&alloc_iter); return ret; } @@ -2161,9 +2173,9 @@ static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter { struct bkey_s_c k; again: - k = bch2_btree_iter_peek_max(trans, iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); + k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); if (!k.k && !*wrapped) { - bch2_btree_iter_set_pos(trans, iter, lru_pos(ca->dev_idx, 0, 0)); + bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0)); *wrapped = true; goto again; } @@ -2213,9 +2225,9 @@ restart_err: if (ret) break; - bch2_btree_iter_advance(trans, &iter); + bch2_btree_iter_advance(&iter); } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); err: bch2_bkey_buf_exit(&last_flushed, c); enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates); @@ -2281,7 +2293,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca, break; } - k = bch2_get_key_or_hole(trans, &iter, end, &hole); + k = bch2_get_key_or_hole(&iter, end, &hole); ret = bkey_err(k); if (ret) goto bkey_err; @@ -2300,7 +2312,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca, if (ret) goto bkey_err; - bch2_btree_iter_advance(trans, &iter); + bch2_btree_iter_advance(&iter); } else { struct bkey_i *freespace; @@ -2320,7 +2332,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct 
bch_dev *ca, if (ret) goto bkey_err; - bch2_btree_iter_set_pos(trans, &iter, k.k->p); + bch2_btree_iter_set_pos(&iter, k.k->p); } bkey_err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) @@ -2329,7 +2341,7 @@ bkey_err: break; } - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); if (ret < 0) { bch_err_msg(ca, ret, "initializing free space"); @@ -2433,7 +2445,7 @@ static int __bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev, ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?: bch2_trans_commit(trans, NULL, NULL, 0); out: - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_exit(&iter); return ret; } |