author     Kent Overstreet <kent.overstreet@gmail.com>   2021-12-24 00:34:48 -0500
committer  Kent Overstreet <kent.overstreet@gmail.com>   2021-12-26 23:35:13 -0500
commit     6928a301bc8b817c9153e0464c1263577b4bfd09
tree       6c62b66c99bfbc8766b9b7a8e29358cf294f1d13
parent     c763bcbf98442cf58726a75e2d970f4cdaf47cfd
bcachefs: Kill allocator short-circuit invalidate
The allocator thread invalidates buckets (increments their generation
number) before discarding them and putting them on freelists. For some
time we've had a short-circuit path that only updates the in-memory
bucket mark during the invalidate when we're not invalidating cached
data, but that path hasn't really been needed for quite a while (likely
since the btree key cache code was added).

We're deleting it now as part of deleting/converting the code that uses
the in-memory bucket array.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--   fs/bcachefs/alloc_background.c   84
1 file changed, 21 insertions, 63 deletions
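
Before the diff itself, a minimal, self-contained sketch of the control-flow change the commit message describes. Nothing here is real bcachefs code (the actual change is in bch2_invalidate_one_bucket() in the diff below); the struct, helpers, and values are made-up stand-ins, and the sketch only shows the shape of the short-circuit being removed: the old path skipped the btree update when the bucket held no cached data, the new path always issues it.

```c
/*
 * Illustrative sketch only -- simplified stand-in types, not bcachefs code.
 * It contrasts the old short-circuit invalidate with the new behaviour of
 * always going through the (stubbed) btree update path.
 */
#include <stdint.h>
#include <stdio.h>

struct bucket_sketch {
	uint32_t cached_sectors;	/* sectors of cached (clean) data */
	uint8_t  gen;			/* generation number */
};

/* stand-in for the real transactional btree update of the alloc key */
static int btree_invalidate_sketch(struct bucket_sketch *b)
{
	b->gen++;
	b->cached_sectors = 0;
	return 0;
}

/* old behaviour: bump the gen in memory only, when there's no cached data */
static int invalidate_old(struct bucket_sketch *b)
{
	if (!b->cached_sectors) {
		b->gen++;	/* in-memory short circuit, no btree update */
		return 0;
	}
	return btree_invalidate_sketch(b);
}

/* new behaviour: every invalidate goes through the btree update path */
static int invalidate_new(struct bucket_sketch *b)
{
	return btree_invalidate_sketch(b);
}

int main(void)
{
	struct bucket_sketch a = { .cached_sectors = 0, .gen = 3 };
	struct bucket_sketch b = { .cached_sectors = 0, .gen = 3 };

	invalidate_old(&a);
	invalidate_new(&b);
	printf("old path gen=%u, new path gen=%u\n", a.gen, b.gen);
	return 0;
}
```

With the short circuit gone there is a single invalidate path, which is what lets the bucket_mark/in-memory bucket array bookkeeping be dropped in the diff.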
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index ee5becb1c820..eb267e34e043 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -657,33 +657,11 @@ static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
         return nr;
 }
 
-/*
- * returns sequence number of most recent journal entry that updated this
- * bucket:
- */
-static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
-{
-        if (m.journal_seq_valid) {
-                u64 journal_seq = atomic64_read(&c->journal.seq);
-                u64 bucket_seq = journal_seq;
-
-                bucket_seq &= ~((u64) U16_MAX);
-                bucket_seq |= m.journal_seq;
-
-                if (bucket_seq > journal_seq)
-                        bucket_seq -= 1 << 16;
-
-                return bucket_seq;
-        } else {
-                return 0;
-        }
-}
-
 static int bucket_invalidate_btree(struct btree_trans *trans,
-                                   struct bch_dev *ca, u64 b)
+                                   struct bch_dev *ca, u64 b,
+                                   struct bkey_alloc_unpacked *u)
 {
         struct bch_fs *c = trans->c;
-        struct bkey_alloc_unpacked u;
         struct btree_iter iter;
         int ret;
 
@@ -697,16 +675,16 @@ static int bucket_invalidate_btree(struct btree_trans *trans,
         if (ret)
                 goto err;
 
-        u = alloc_mem_to_key(c, &iter);
+        *u = alloc_mem_to_key(c, &iter);
 
-        u.gen++;
-        u.data_type = 0;
-        u.dirty_sectors = 0;
-        u.cached_sectors = 0;
-        u.read_time = atomic64_read(&c->io_clock[READ].now);
-        u.write_time = atomic64_read(&c->io_clock[WRITE].now);
+        u->gen++;
+        u->data_type = 0;
+        u->dirty_sectors = 0;
+        u->cached_sectors = 0;
+        u->read_time = atomic64_read(&c->io_clock[READ].now);
+        u->write_time = atomic64_read(&c->io_clock[WRITE].now);
 
-        ret = bch2_alloc_write(trans, &iter, &u,
+        ret = bch2_alloc_write(trans, &iter, u,
                                BTREE_TRIGGER_BUCKET_INVALIDATE);
 err:
         bch2_trans_iter_exit(trans, &iter);
@@ -716,21 +694,23 @@ err:
 static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
                                       u64 *journal_seq, unsigned flags)
 {
-        struct bucket *g;
-        struct bucket_mark m;
+        struct bkey_alloc_unpacked u;
         size_t b;
         int ret = 0;
 
+        /*
+         * If the read-only path is trying to shut down, we can't be generating
+         * new btree updates:
+         */
+        if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags))
+                return 1;
+
         BUG_ON(!ca->alloc_heap.used ||
                !ca->alloc_heap.data[0].nr);
         b = ca->alloc_heap.data[0].bucket;
 
         /* first, put on free_inc and mark as owned by allocator: */
         percpu_down_read(&c->mark_lock);
-        g = bucket(ca, b);
-        m = READ_ONCE(g->mark);
-
-        BUG_ON(m.dirty_sectors);
 
         bch2_mark_alloc_bucket(c, ca, b, true);
 
@@ -739,37 +719,15 @@ static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
         BUG_ON(!fifo_push(&ca->free_inc, b));
         spin_unlock(&c->freelist_lock);
 
-        /*
-         * If we're not invalidating cached data, we only increment the bucket
-         * gen in memory here, the incremented gen will be updated in the btree
-         * by bch2_trans_mark_pointer():
-         */
-        if (!m.cached_sectors &&
-            !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
-                BUG_ON(m.data_type);
-                bucket_cmpxchg(g, m, m.gen++);
-                percpu_up_read(&c->mark_lock);
-                goto out;
-        }
-
         percpu_up_read(&c->mark_lock);
 
-        /*
-         * If the read-only path is trying to shut down, we can't be generating
-         * new btree updates:
-         */
-        if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
-                ret = 1;
-                goto out;
-        }
-
         ret = bch2_trans_do(c, NULL, journal_seq,
                             BTREE_INSERT_NOCHECK_RW|
                             BTREE_INSERT_NOFAIL|
                             BTREE_INSERT_JOURNAL_RESERVED|
                             flags,
-                            bucket_invalidate_btree(&trans, ca, b));
-out:
+                            bucket_invalidate_btree(&trans, ca, b, &u));
+
         if (!ret) {
                 /* remove from alloc_heap: */
                 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
@@ -785,7 +743,7 @@ out:
                  * bucket (i.e. deleting the last reference) before writing to
                  * this bucket again:
                  */
-                *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
+                *journal_seq = max(*journal_seq, u.journal_seq);
         } else {
                 size_t b2;
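
A side note on the deleted bucket_journal_seq() helper: the in-memory bucket mark only carried the low 16 bits of the journal sequence number, so the helper widened them back to a full 64-bit value against the current journal seq, stepping back one 2^16 epoch if the splice landed ahead of the journal. After this commit the full journal_seq is read straight from the unpacked alloc key (u.journal_seq) instead. The snippet below is a standalone illustration of that widening arithmetic, with a made-up function name and example values; it is not bcachefs code.

```c
/* Standalone illustration of the widening done by the removed helper;
 * widen_bucket_seq() and the values below are made up for the example. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t widen_bucket_seq(uint64_t journal_seq, uint16_t bucket_seq16)
{
	uint64_t bucket_seq = journal_seq;

	bucket_seq &= ~(uint64_t) UINT16_MAX;	/* keep the journal's high bits */
	bucket_seq |= bucket_seq16;		/* splice in the bucket's low 16 bits */

	if (bucket_seq > journal_seq)		/* landed ahead of the journal: */
		bucket_seq -= 1 << 16;		/* the bucket is one epoch behind */

	return bucket_seq;
}

int main(void)
{
	/* journal at 0x10005, bucket last touched when the low bits were 0xfff0 */
	printf("0x%" PRIx64 "\n", widen_bucket_seq(0x10005, 0xfff0));	/* prints 0xfff0 */
	return 0;
}
```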