From e96bad2ae38808d11b4789360750fe70b2fc9ea3 Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Tue, 20 Apr 2021 20:21:12 -0400
Subject: bcachefs: Update bch2_btree_verify()

bch2_btree_verify() verifies that the btree node on disk matches what we
have in memory. This patch changes it to verify every replica, and also
fixes it for interior btree nodes - there's a mem_ptr field which is used
as a scratch space and needs to be zeroed out for comparing with what's
on disk.

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/bcachefs.h    |  12 ++---
 fs/bcachefs/btree_cache.c |  22 +--------
 fs/bcachefs/btree_cache.h |   1 +
 fs/bcachefs/debug.c       | 120 ++++++++++++++++++++++++++++++----------------
 fs/bcachefs/debug.h       |   4 --
 5 files changed, 87 insertions(+), 72 deletions(-)

diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index aade56244422..ebc941d9439c 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -259,7 +259,11 @@ do {									\
 	BCH_DEBUG_PARAM(btree_gc_rewrite_disabled,			\
 		"Disables rewriting of btree nodes during mark and sweep")\
 	BCH_DEBUG_PARAM(btree_shrinker_disabled,			\
-		"Disables the shrinker callback for the btree node cache")
+		"Disables the shrinker callback for the btree node cache")\
+	BCH_DEBUG_PARAM(verify_btree_ondisk,				\
+		"Reread btree nodes at various points to verify the "	\
+		"mergesort in the read path against modifications "	\
+		"done in memory")
 
 /* Parameters that should only be compiled in in debug mode: */
 #define BCH_DEBUG_PARAMS_DEBUG()					\
@@ -273,10 +277,6 @@ do {									\
 		"information) when iterating over keys")		\
 	BCH_DEBUG_PARAM(debug_check_btree_accounting,			\
 		"Verify btree accounting for keys within a node")	\
-	BCH_DEBUG_PARAM(verify_btree_ondisk,				\
-		"Reread btree nodes at various points to verify the "	\
-		"mergesort in the read path against modifications "	\
-		"done in memory")					\
 	BCH_DEBUG_PARAM(journal_seq_verify,				\
 		"Store the journal sequence number in the version "	\
 		"number of every btree key, and verify that btree "	\
@@ -813,11 +813,9 @@ struct bch_fs {
 	/* DEBUG JUNK */
 	struct dentry		*debug;
 	struct btree_debug	btree_debug[BTREE_ID_NR];
-#ifdef CONFIG_BCACHEFS_DEBUG
 	struct btree		*verify_data;
 	struct btree_node	*verify_ondisk;
 	struct mutex		verify_lock;
-#endif
 
 	u64			*unused_inode_hints;
 	unsigned		inode_shard_bits;
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 26e8c04f981a..edbd374cc573 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -100,7 +100,7 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c)
 	return b;
 }
 
-static struct btree *btree_node_mem_alloc(struct bch_fs *c)
+struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 {
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b = __btree_node_mem_alloc(c);
@@ -360,12 +360,10 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	flags = memalloc_nofs_save();
 	mutex_lock(&bc->lock);
 
-#ifdef CONFIG_BCACHEFS_DEBUG
 	if (c->verify_data)
 		list_move(&c->verify_data->list, &bc->live);
 
 	kvpfree(c->verify_ondisk, btree_bytes(c));
-#endif
 
 	for (i = 0; i < BTREE_ID_NR; i++)
 		if (c->btree_roots[i].b)
@@ -419,31 +417,15 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 	bch2_recalc_btree_reserve(c);
 
 	for (i = 0; i < bc->reserve; i++)
-		if (!btree_node_mem_alloc(c)) {
+		if (!__bch2_btree_node_mem_alloc(c)) {
 			ret = -ENOMEM;
 			goto out;
 		}
 
 	list_splice_init(&bc->live, &bc->freeable);
 
-#ifdef CONFIG_BCACHEFS_DEBUG
 	mutex_init(&c->verify_lock);
 
-	c->verify_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL);
-	if (!c->verify_ondisk) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	c->verify_data = btree_node_mem_alloc(c);
-	if (!c->verify_data) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	list_del_init(&c->verify_data->list);
-#endif
-
 	bc->shrink.count_objects	= bch2_btree_cache_count;
 	bc->shrink.scan_objects		= bch2_btree_cache_scan;
 	bc->shrink.seeks		= 4;
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index 4791c3b64452..c517cc029454 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -17,6 +17,7 @@ int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
 void bch2_btree_cache_cannibalize_unlock(struct bch_fs *);
 int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);
 
+struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
 struct btree *bch2_btree_node_mem_alloc(struct bch_fs *);
 
 struct btree *bch2_btree_node_get(struct bch_fs *, struct btree_iter *,
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 90364b55aa40..4215c119e0a2 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -29,40 +29,19 @@
 
 static struct dentry *bch_debug;
 
-#ifdef CONFIG_BCACHEFS_DEBUG
-
-void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
+static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
+				      struct extent_ptr_decoded pick)
 {
 	struct btree *v = c->verify_data;
-	struct btree_node *n_ondisk, *n_sorted, *n_inmemory;
-	struct bset *sorted, *inmemory;
-	struct extent_ptr_decoded pick;
-	struct bch_dev *ca;
+	struct btree_node *n_ondisk = c->verify_ondisk;
+	struct btree_node *n_sorted = c->verify_data->data;
+	struct bset *sorted, *inmemory = &b->data->keys;
+	struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
 	struct bio *bio;
+	bool failed = false;
 
-	if (c->opts.nochanges)
-		return;
-
-	btree_node_io_lock(b);
-	mutex_lock(&c->verify_lock);
-
-	n_ondisk = c->verify_ondisk;
-	n_sorted = c->verify_data->data;
-	n_inmemory = b->data;
-
-	bkey_copy(&v->key, &b->key);
-	v->written = 0;
-	v->c.level = b->c.level;
-	v->c.btree_id = b->c.btree_id;
-	bch2_btree_keys_init(v);
-
-	if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
-				       NULL, &pick) <= 0)
-		return;
-
-	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
 	if (!bch2_dev_get_ioref(ca, READ))
-		return;
+		return false;
 
 	bio = bio_alloc_bioset(GFP_NOIO,
			       buf_pages(n_sorted, btree_bytes(c)),
@@ -79,12 +58,12 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 
 	memcpy(n_ondisk, n_sorted, btree_bytes(c));
 
+	v->written = 0;
 	if (bch2_btree_node_read_done(c, ca, v, false))
-		goto out;
+		return false;
 
 	n_sorted = c->verify_data->data;
 	sorted = &n_sorted->keys;
-	inmemory = &n_inmemory->keys;
 
 	if (inmemory->u64s != sorted->u64s ||
 	    memcmp(inmemory->start,
@@ -102,8 +81,8 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 		printk(KERN_ERR "*** read back in:\n");
 		bch2_dump_bset(c, v, sorted, 0);
 
-		while (offset < b->written) {
-			if (!offset ) {
+		while (offset < v->written) {
+			if (!offset) {
 				i = &n_ondisk->keys;
 				sectors = vstruct_blocks(n_ondisk, c->block_bits) <<
 					c->block_bits;
@@ -122,25 +101,84 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 			offset += sectors;
 		}
 
-		printk(KERN_ERR "*** block %u/%u not written\n",
-		       offset >> c->block_bits, btree_blocks(c));
-
 		for (j = 0; j < le16_to_cpu(inmemory->u64s); j++)
 			if (inmemory->_data[j] != sorted->_data[j])
 				break;
 
-		printk(KERN_ERR "b->written %u\n", b->written);
-
 		console_unlock();
-		panic("verify failed at %u\n", j);
+		bch_err(c, "verify failed at key %u", j);
+
+		failed = true;
+	}
+
+	if (v->written != b->written) {
+		bch_err(c, "written wrong: expected %u, got %u",
+			b->written, v->written);
+		failed = true;
+	}
+
+	return failed;
+}
+
+void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
+{
+	struct bkey_ptrs_c ptrs;
+	struct extent_ptr_decoded p;
+	const union bch_extent_entry *entry;
+	struct btree *v;
+	struct bset *inmemory = &b->data->keys;
+	struct bkey_packed *k;
+	bool failed = false;
+
+	if (c->opts.nochanges)
+		return;
+
+	btree_node_io_lock(b);
+	mutex_lock(&c->verify_lock);
+
+	if (!c->verify_ondisk) {
+		c->verify_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL);
+		if (!c->verify_ondisk)
+			goto out;
+	}
+
+	if (!c->verify_data) {
+		c->verify_data = __bch2_btree_node_mem_alloc(c);
+		if (!c->verify_data)
+			goto out;
+
+		list_del_init(&c->verify_data->list);
+	}
+
+	BUG_ON(b->nsets != 1);
+
+	for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_next(k))
+		if (k->type == KEY_TYPE_btree_ptr_v2) {
+			struct bch_btree_ptr_v2 *v = (void *) bkeyp_val(&b->format, k);
+			v->mem_ptr = 0;
+		}
+
+	v = c->verify_data;
+	bkey_copy(&v->key, &b->key);
+	v->c.level	= b->c.level;
+	v->c.btree_id	= b->c.btree_id;
+	bch2_btree_keys_init(v);
+
+	ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key));
+	bkey_for_each_ptr_decode(&b->key.k, ptrs, p, entry)
+		failed |= bch2_btree_verify_replica(c, b, p);
+
+	if (failed) {
+		char buf[200];
+
+		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(&b->key));
+		bch2_fs_fatal_error(c, "btree node verify failed for : %s\n", buf);
 	}
 out:
 	mutex_unlock(&c->verify_lock);
 	btree_node_io_unlock(b);
 }
 
-#endif
-
 #ifdef CONFIG_DEBUG_FS
 
 /* XXX: bch_fs refcounting */
diff --git a/fs/bcachefs/debug.h b/fs/bcachefs/debug.h
index 7ac1615e9447..0b86736e5e1b 100644
--- a/fs/bcachefs/debug.h
+++ b/fs/bcachefs/debug.h
@@ -8,11 +8,7 @@ struct bio;
 struct btree;
 struct bch_fs;
 
-#ifdef CONFIG_BCACHEFS_DEBUG
 void __bch2_btree_verify(struct bch_fs *, struct btree *);
-#else
-static inline void __bch2_btree_verify(struct bch_fs *c, struct btree *b) {}
-#endif
 
 static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b)
 {
-- 
cgit v1.2.3