author:    Kent Overstreet <kent.overstreet@linux.dev>  2024-11-29 21:08:00 -0500
committer: Kent Overstreet <kent.overstreet@linux.dev>  2024-11-29 21:27:09 -0500
commit:    de51418b60d7bf7d783d0ed112de00a63928c337
tree:      077e848a35906d272a78676389312af7589de97a
parent:    6829fb201072c495ce9e97850664540a0f8294f1
Update bcachefs sources to bc01863fb6ef bcachefs: bcachefs_metadata_version_disk_accounting_big_endian
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
125 files changed, 4508 insertions, 2939 deletions
diff --git a/.bcachefs_revision b/.bcachefs_revision
index 8e53d115..bbe1be9a 100644
--- a/.bcachefs_revision
+++ b/.bcachefs_revision
@@ -1 +1 @@
-1dbc147a6eb08a0137a81a1fe4eb528186594bfe
+bc01863fb6eff06f7b028e4c5cd8850d21d7f10d

diff --git a/Makefile.compiler b/Makefile.compiler
index 057305ea..e0842496 100644
--- a/Makefile.compiler
+++ b/Makefile.compiler
@@ -53,13 +53,11 @@ cc-option = $(call __cc-option, $(CC),\
 # cc-option-yn
 # Usage: flag := $(call cc-option-yn,-march=winchip-c6)
-cc-option-yn = $(call try-run,\
-	$(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
+cc-option-yn = $(if $(call cc-option,$1),y,n)
 
 # cc-disable-warning
 # Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
-cc-disable-warning = $(call try-run,\
-	$(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+cc-disable-warning = $(if $(call cc-option,-W$(strip $1)),-Wno-$(strip $1))
 
 # gcc-min-version
 # Usage: cflags-$(call gcc-min-version, 70100) += -foo
@@ -75,8 +73,11 @@ ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
 
 # __rustc-option
 # Usage: MY_RUSTFLAGS += $(call __rustc-option,$(RUSTC),$(MY_RUSTFLAGS),-Cinstrument-coverage,-Zinstrument-coverage)
+# TODO: remove RUSTC_BOOTSTRAP=1 when we raise the minimum GNU Make version to 4.4
 __rustc-option = $(call try-run,\
-	$(1) $(2) $(3) --crate-type=rlib /dev/null --out-dir=$$TMPOUT -o "$$TMP",$(3),$(4))
+	echo '#![allow(missing_docs)]#![feature(no_core)]#![no_core]' | RUSTC_BOOTSTRAP=1\
+	$(1) --sysroot=/dev/null $(filter-out --sysroot=/dev/null,$(2)) $(3)\
+	--crate-type=rlib --out-dir=$(TMPOUT) --emit=obj=- - >/dev/null,$(3),$(4))
 
 # rustc-option
 # Usage: rustflags-y += $(call rustc-option,-Cinstrument-coverage,-Zinstrument-coverage)
@@ -85,5 +86,4 @@ rustc-option = $(call __rustc-option, $(RUSTC),\
 
 # rustc-option-yn
 # Usage: flag := $(call rustc-option-yn,-Cinstrument-coverage)
-rustc-option-yn = $(call try-run,\
-	$(RUSTC) $(KBUILD_RUSTFLAGS) $(1) --crate-type=rlib /dev/null --out-dir=$$TMPOUT -o "$$TMP",y,n)
+rustc-option-yn = $(if $(call rustc-option,$1),y,n)

diff --git a/bch_bindgen/src/btree.rs b/bch_bindgen/src/btree.rs
index 33791178..1aaf3018 100644
--- a/bch_bindgen/src/btree.rs
+++ b/bch_bindgen/src/btree.rs
@@ -81,9 +81,9 @@ impl<'t> BtreeIter<'t> {
         }
     }
 
-    pub fn peek_upto<'i>(&'i mut self, end: c::bpos) -> Result<Option<BkeySC<'i>>, bch_errcode> {
+    pub fn peek_max<'i>(&'i mut self, end: c::bpos) -> Result<Option<BkeySC<'i>>, bch_errcode> {
         unsafe {
-            let k = c::bch2_btree_iter_peek_upto(&mut self.raw, end);
+            let k = c::bch2_btree_iter_peek_max(&mut self.raw, end);
             errptr_to_result_c(k.k).map(|_| {
                 if !k.k.is_null() {
                     Some(BkeySC {
@@ -99,7 +99,7 @@
     }
 
     pub fn peek(&mut self) -> Result<Option<BkeySC>, bch_errcode> {
-        self.peek_upto(SPOS_MAX)
+        self.peek_max(SPOS_MAX)
     }
 
     pub fn peek_and_restart(&mut self) -> Result<Option<BkeySC>, bch_errcode> {
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
new file mode 100644
index 00000000..40c6224a
--- /dev/null
+++ b/include/linux/fs_parser.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Filesystem parameter description and parser
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_FS_PARSER_H
+#define _LINUX_FS_PARSER_H
+
+struct constant_table {
+	const char	*name;
+	int		value;
+};
+
+extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found);
+
+extern const struct constant_table bool_names[];
+
+#endif /* _LINUX_FS_PARSER_H */
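For reference, a minimal sketch of how this interface is used. The contents of bool_names[] are an assumption here (it pairs strings such as "yes"/"no" with 0/1), and parse_bool_param() is a hypothetical caller, shown only to illustrate the lookup_constant() calling convention:

#include <linux/fs_parser.h>

/*
 * Look the string up in the table; -1 is the caller-chosen "not found"
 * sentinel passed through as the third argument.
 */
static int parse_bool_param(const char *arg)
{
	return lookup_constant(bool_names, arg, -1);
}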
"" : "s"; +} + +#endif diff --git a/include/linux/unaligned.h b/include/linux/unaligned.h new file mode 100644 index 00000000..f6d94a85 --- /dev/null +++ b/include/linux/unaligned.h @@ -0,0 +1 @@ +#include <asm/unaligned.h> diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c index c84a9157..b2d57045 100644 --- a/libbcachefs/alloc_background.c +++ b/libbcachefs/alloc_background.c @@ -198,7 +198,7 @@ static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a) } int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k); int ret = 0; @@ -213,7 +213,7 @@ fsck_err: } int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_alloc_unpacked u; int ret = 0; @@ -226,7 +226,7 @@ fsck_err: } int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_alloc_unpacked u; int ret = 0; @@ -239,7 +239,7 @@ fsck_err: } int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bch_alloc_v4 a; int ret = 0; @@ -322,7 +322,6 @@ fsck_err: void bch2_alloc_v4_swab(struct bkey_s k) { struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v; - struct bch_backpointer *bp, *bps; a->journal_seq = swab64(a->journal_seq); a->flags = swab32(a->flags); @@ -333,13 +332,6 @@ void bch2_alloc_v4_swab(struct bkey_s k) a->stripe = swab32(a->stripe); a->nr_external_backpointers = swab32(a->nr_external_backpointers); a->stripe_sectors = swab32(a->stripe_sectors); - - bps = alloc_v4_backpointers(a); - for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) { - bp->bucket_offset = swab40(bp->bucket_offset); - bp->bucket_len = swab32(bp->bucket_len); - bch2_bpos_swab(&bp->pos); - } } void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) @@ -517,7 +509,7 @@ static unsigned alloc_gen(struct bkey_s_c k, unsigned offset) } int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -664,74 +656,80 @@ int bch2_alloc_read(struct bch_fs *c) /* Free space/discard btree: */ +static int __need_discard_or_freespace_err(struct btree_trans *trans, + struct bkey_s_c alloc_k, + bool set, bool discard, bool repair) +{ + struct bch_fs *c = trans->c; + enum bch_fsck_flags flags = FSCK_CAN_IGNORE|(repair ? FSCK_CAN_FIX : 0); + enum bch_sb_error_id err_id = discard + ? BCH_FSCK_ERR_need_discard_key_wrong + : BCH_FSCK_ERR_freespace_key_wrong; + enum btree_id btree = discard ? BTREE_ID_need_discard : BTREE_ID_freespace; + struct printbuf buf = PRINTBUF; + + bch2_bkey_val_to_text(&buf, c, alloc_k); + + int ret = __bch2_fsck_err(NULL, trans, flags, err_id, + "bucket incorrectly %sset in %s btree\n" + " %s", + set ? "" : "un", + bch2_btree_id_str(btree), + buf.buf); + if (ret == -BCH_ERR_fsck_ignore || + ret == -BCH_ERR_fsck_errors_not_fixed) + ret = 0; + + printbuf_exit(&buf); + return ret; +} + +#define need_discard_or_freespace_err(...) \ + fsck_err_wrap(__need_discard_or_freespace_err(__VA_ARGS__)) + +#define need_discard_or_freespace_err_on(cond, ...) \ + (unlikely(cond) ? 
diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c
index c84a9157..b2d57045 100644
--- a/libbcachefs/alloc_background.c
+++ b/libbcachefs/alloc_background.c
@@ -198,7 +198,7 @@ static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
 }
 
 int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k,
-			   enum bch_validate_flags flags)
+			   struct bkey_validate_context from)
 {
 	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
 	int ret = 0;
@@ -213,7 +213,7 @@ fsck_err:
 }
 
 int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k,
-			   enum bch_validate_flags flags)
+			   struct bkey_validate_context from)
 {
 	struct bkey_alloc_unpacked u;
 	int ret = 0;
@@ -226,7 +226,7 @@ fsck_err:
 }
 
 int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k,
-			   enum bch_validate_flags flags)
+			   struct bkey_validate_context from)
 {
 	struct bkey_alloc_unpacked u;
 	int ret = 0;
@@ -239,7 +239,7 @@ fsck_err:
 }
 
 int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k,
-			   enum bch_validate_flags flags)
+			   struct bkey_validate_context from)
 {
 	struct bch_alloc_v4 a;
 	int ret = 0;
@@ -322,7 +322,6 @@ fsck_err:
 void bch2_alloc_v4_swab(struct bkey_s k)
 {
 	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
-	struct bch_backpointer *bp, *bps;
 
 	a->journal_seq		= swab64(a->journal_seq);
 	a->flags		= swab32(a->flags);
@@ -333,13 +332,6 @@ void bch2_alloc_v4_swab(struct bkey_s k)
 	a->stripe		= swab32(a->stripe);
 	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
 	a->stripe_sectors	= swab32(a->stripe_sectors);
-
-	bps = alloc_v4_backpointers(a);
-	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
-		bp->bucket_offset	= swab40(bp->bucket_offset);
-		bp->bucket_len		= swab32(bp->bucket_len);
-		bch2_bpos_swab(&bp->pos);
-	}
 }
 
 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
@@ -517,7 +509,7 @@ static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
 }
 
 int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k,
-			      enum bch_validate_flags flags)
+			      struct bkey_validate_context from)
 {
 	int ret = 0;
 
@@ -664,74 +656,80 @@ int bch2_alloc_read(struct bch_fs *c)
 
 /* Free space/discard btree: */
 
+static int __need_discard_or_freespace_err(struct btree_trans *trans,
+					   struct bkey_s_c alloc_k,
+					   bool set, bool discard, bool repair)
+{
+	struct bch_fs *c = trans->c;
+	enum bch_fsck_flags flags = FSCK_CAN_IGNORE|(repair ? FSCK_CAN_FIX : 0);
+	enum bch_sb_error_id err_id = discard
+		? BCH_FSCK_ERR_need_discard_key_wrong
+		: BCH_FSCK_ERR_freespace_key_wrong;
+	enum btree_id btree = discard ? BTREE_ID_need_discard : BTREE_ID_freespace;
+	struct printbuf buf = PRINTBUF;
+
+	bch2_bkey_val_to_text(&buf, c, alloc_k);
+
+	int ret = __bch2_fsck_err(NULL, trans, flags, err_id,
+				  "bucket incorrectly %sset in %s btree\n"
+				  "  %s",
+				  set ? "" : "un",
+				  bch2_btree_id_str(btree),
+				  buf.buf);
+	if (ret == -BCH_ERR_fsck_ignore ||
+	    ret == -BCH_ERR_fsck_errors_not_fixed)
+		ret = 0;
+
+	printbuf_exit(&buf);
+	return ret;
+}
+
+#define need_discard_or_freespace_err(...)		\
+	fsck_err_wrap(__need_discard_or_freespace_err(__VA_ARGS__))
+
+#define need_discard_or_freespace_err_on(cond, ...)	\
+	(unlikely(cond) ? need_discard_or_freespace_err(__VA_ARGS__) : false)
+
 static int bch2_bucket_do_index(struct btree_trans *trans,
 				struct bch_dev *ca,
 				struct bkey_s_c alloc_k,
 				const struct bch_alloc_v4 *a,
 				bool set)
 {
-	struct bch_fs *c = trans->c;
-	struct btree_iter iter;
-	struct bkey_s_c old;
-	struct bkey_i *k;
 	enum btree_id btree;
-	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
-	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
-	struct printbuf buf = PRINTBUF;
-	int ret;
+	struct bpos pos;
 
 	if (a->data_type != BCH_DATA_free &&
 	    a->data_type != BCH_DATA_need_discard)
 		return 0;
 
-	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
-	if (IS_ERR(k))
-		return PTR_ERR(k);
-
-	bkey_init(&k->k);
-	k->k.type = new_type;
-
 	switch (a->data_type) {
 	case BCH_DATA_free:
 		btree = BTREE_ID_freespace;
-		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
-		bch2_key_resize(&k->k, 1);
+		pos = alloc_freespace_pos(alloc_k.k->p, *a);
 		break;
 	case BCH_DATA_need_discard:
 		btree = BTREE_ID_need_discard;
-		k->k.p = alloc_k.k->p;
+		pos = alloc_k.k->p;
 		break;
 	default:
 		return 0;
 	}
 
-	old = bch2_bkey_get_iter(trans, &iter, btree,
-			     bkey_start_pos(&k->k),
-			     BTREE_ITER_intent);
-	ret = bkey_err(old);
+	struct btree_iter iter;
+	struct bkey_s_c old = bch2_bkey_get_iter(trans, &iter, btree, pos, BTREE_ITER_intent);
+	int ret = bkey_err(old);
 	if (ret)
 		return ret;
 
-	if (ca->mi.freespace_initialized &&
-	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
-	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
-			"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
-			"  for %s",
-			set ? "setting" : "clearing",
-			bch2_btree_id_str(btree),
-			iter.pos.inode,
-			iter.pos.offset,
-			bch2_bkey_types[old.k->type],
-			bch2_bkey_types[old_type],
-			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
-		ret = -EIO;
-		goto err;
-	}
+	need_discard_or_freespace_err_on(ca->mi.freespace_initialized &&
+					 !old.k->type != set,
+					 trans, alloc_k, set,
+					 btree == BTREE_ID_need_discard, false);
 
-	ret = bch2_trans_update(trans, &iter, k, 0);
-err:
+	ret = bch2_btree_bit_mod_iter(trans, &iter, set);
+fsck_err:
 	bch2_trans_iter_exit(trans, &iter);
-	printbuf_exit(&buf);
 	return ret;
 }
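How the new macros are meant to be read, as a sketch: the _on() form evaluates its condition first, and the underlying helper downgrades -BCH_ERR_fsck_ignore / -BCH_ERR_fsck_errors_not_fixed to 0 so callers only see hard errors. This assumes fsck_err_wrap() (defined elsewhere in the tree, not shown in this diff) converts a hard error into a branch to the caller's fsck_err label; check_one_bucket() is hypothetical:

static int check_one_bucket(struct btree_trans *trans, struct bkey_s_c alloc_k,
			    bool key_present, bool should_be_present)
{
	int ret = 0;

	/* true return: inconsistency was reported and the caller may repair */
	if (need_discard_or_freespace_err_on(key_present != should_be_present,
					     trans, alloc_k, should_be_present,
					     true, true)) {
		/* repair: flip the bit in the need_discard btree */
	}
fsck_err:
	return ret;
}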
@@ -928,37 +926,43 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 	}
 
 	if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
-		u64 journal_seq = trans->journal_res.seq;
-		u64 bucket_journal_seq = new_a->journal_seq;
+		u64 transaction_seq = trans->journal_res.seq;
 
-		if ((flags & BTREE_TRIGGER_insert) &&
-		    data_type_is_empty(old_a->data_type) !=
-		    data_type_is_empty(new_a->data_type) &&
-		    new.k->type == KEY_TYPE_alloc_v4) {
-			struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;
+		if (log_fsck_err_on(transaction_seq && new_a->journal_seq > transaction_seq,
+				    trans, alloc_key_journal_seq_in_future,
+				    "bucket journal seq in future (currently at %llu)\n%s",
+				    journal_cur_seq(&c->journal),
+				    (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf)))
+			new_a->journal_seq = transaction_seq;
 
-			/*
-			 * If the btree updates referring to a bucket weren't flushed
-			 * before the bucket became empty again, then the we don't have
-			 * to wait on a journal flush before we can reuse the bucket:
-			 */
-			v->journal_seq = bucket_journal_seq =
-				data_type_is_empty(new_a->data_type) &&
-				(journal_seq == v->journal_seq ||
-				 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
-				? 0 : journal_seq;
-		}
+		int is_empty_delta = (int) data_type_is_empty(new_a->data_type) -
+				     (int) data_type_is_empty(old_a->data_type);
 
-		if (!data_type_is_empty(old_a->data_type) &&
-		    data_type_is_empty(new_a->data_type) &&
-		    bucket_journal_seq) {
-			ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
-					c->journal.flushed_seq_ondisk,
-					new.k->p.inode, new.k->p.offset,
-					bucket_journal_seq);
-			if (bch2_fs_fatal_err_on(ret, c,
-					"setting bucket_needs_journal_commit: %s", bch2_err_str(ret)))
-				goto err;
+		/* Record journal sequence number of empty -> nonempty transition: */
+		if (is_empty_delta < 0)
+			new_a->journal_seq = max(new_a->journal_seq, transaction_seq);
+
+		/*
+		 * Bucket becomes empty: mark it as waiting for a journal flush,
+		 * unless updates since empty -> nonempty transition were never
+		 * flushed - we may need to ask the journal not to flush
+		 * intermediate sequence numbers:
+		 */
+		if (is_empty_delta > 0) {
+			if (new_a->journal_seq == transaction_seq ||
+			    bch2_journal_noflush_seq(&c->journal, new_a->journal_seq))
+				new_a->journal_seq = 0;
+			else {
+				new_a->journal_seq = transaction_seq;
+
+				ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+						c->journal.flushed_seq_ondisk,
+						new.k->p.inode, new.k->p.offset,
+						transaction_seq);
+				if (bch2_fs_fatal_err_on(ret, c,
+						"setting bucket_needs_journal_commit: %s", bch2_err_str(ret)))
+					goto err;
+			}
 		}
 
 		if (new_a->gen != old_a->gen) {
@@ -1006,6 +1010,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 		rcu_read_unlock();
 	}
 err:
+fsck_err:
 	printbuf_exit(&buf);
 	bch2_dev_put(ca);
 	return ret;
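The transition logic above reduces to one signed comparison; as a standalone sketch (plain bools instead of the alloc key types):

/*
 * < 0: bucket went empty -> nonempty (record the journal seq)
 * > 0: bucket went nonempty -> empty (may need journal-flush bookkeeping)
 *   0: no transition
 */
static int empty_delta(bool old_empty, bool new_empty)
{
	return (int) new_empty - (int) old_empty;
}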
@@ -1045,7 +1050,7 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
 		 * btree node min/max is a closed interval, upto takes a half
 		 * open interval:
 		 */
-		k = bch2_btree_iter_peek_upto(&iter2, end);
+		k = bch2_btree_iter_peek_max(&iter2, end);
 		next = iter2.pos;
 		bch2_trans_iter_exit(iter->trans, &iter2);
 
@@ -1129,7 +1134,6 @@ int bch2_check_alloc_key(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	struct bch_alloc_v4 a_convert;
 	const struct bch_alloc_v4 *a;
-	unsigned discard_key_type, freespace_key_type;
 	unsigned gens_offset;
 	struct bkey_s_c k;
 	struct printbuf buf = PRINTBUF;
@@ -1149,64 +1153,30 @@ int bch2_check_alloc_key(struct btree_trans *trans,
 
 	a = bch2_alloc_to_v4(alloc_k, &a_convert);
 
-	discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
 	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
 	k = bch2_btree_iter_peek_slot(discard_iter);
 	ret = bkey_err(k);
 	if (ret)
 		goto err;
 
-	if (fsck_err_on(k.k->type != discard_key_type,
-			trans, need_discard_key_wrong,
-			"incorrect key in need_discard btree (got %s should be %s)\n"
-			"  %s",
-			bch2_bkey_types[k.k->type],
-			bch2_bkey_types[discard_key_type],
-			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
-		struct bkey_i *update =
-			bch2_trans_kmalloc(trans, sizeof(*update));
-
-		ret = PTR_ERR_OR_ZERO(update);
-		if (ret)
-			goto err;
-
-		bkey_init(&update->k);
-		update->k.type = discard_key_type;
-		update->k.p = discard_iter->pos;
-
-		ret = bch2_trans_update(trans, discard_iter, update, 0);
+	bool is_discarded = a->data_type == BCH_DATA_need_discard;
+	if (need_discard_or_freespace_err_on(!!k.k->type != is_discarded,
+					     trans, alloc_k, !is_discarded, true, true)) {
+		ret = bch2_btree_bit_mod_iter(trans, discard_iter, is_discarded);
 		if (ret)
 			goto err;
 	}
 
-	freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
 	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
 	k = bch2_btree_iter_peek_slot(freespace_iter);
 	ret = bkey_err(k);
 	if (ret)
 		goto err;
 
-	if (fsck_err_on(k.k->type != freespace_key_type,
-			trans, freespace_key_wrong,
-			"incorrect key in freespace btree (got %s should be %s)\n"
-			"  %s",
-			bch2_bkey_types[k.k->type],
-			bch2_bkey_types[freespace_key_type],
-			(printbuf_reset(&buf),
-			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
-		struct bkey_i *update =
-			bch2_trans_kmalloc(trans, sizeof(*update));
-
-		ret = PTR_ERR_OR_ZERO(update);
-		if (ret)
-			goto err;
-
-		bkey_init(&update->k);
-		update->k.type = freespace_key_type;
-		update->k.p = freespace_iter->pos;
-		bch2_key_resize(&update->k, 1);
-
-		ret = bch2_trans_update(trans, freespace_iter, update, 0);
+	bool is_free = a->data_type == BCH_DATA_free;
+	if (need_discard_or_freespace_err_on(!!k.k->type != is_free,
+					     trans, alloc_k, !is_free, false, true)) {
+		ret = bch2_btree_bit_mod_iter(trans, freespace_iter, is_free);
 		if (ret)
 			goto err;
 	}
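Both index btrees key buckets by a packed 64-bit offset: the low 56 bits are the bucket number and, in the freespace btree, the high 8 bits carry the allocation generation ("genbits"). A standalone sketch of the packing produced by alloc_freespace_pos() and unpacked again in the checker below — helper names here are illustrative:

#include <linux/types.h>

#define FREESPACE_GENBITS_SHIFT	56

static inline u64 freespace_offset(u64 bucket, u64 genbits)
{
	return (bucket & ~(~0ULL << FREESPACE_GENBITS_SHIFT)) |
		(genbits << FREESPACE_GENBITS_SHIFT);
}

static inline u64 freespace_bucket(u64 offset)
{
	return offset & ~(~0ULL << FREESPACE_GENBITS_SHIFT);
}

/* returned still shifted, matching the masks used in the code below */
static inline u64 freespace_genbits(u64 offset)
{
	return offset & (~0ULL << FREESPACE_GENBITS_SHIFT);
}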
@@ -1368,51 +1338,87 @@ fsck_err:
 	return ret;
 }
 
-static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
-								struct btree_iter *iter)
+struct check_discard_freespace_key_async {
+	struct work_struct	work;
+	struct bch_fs		*c;
+	struct bbpos		pos;
+};
+
+static int bch2_recheck_discard_freespace_key(struct btree_trans *trans, struct bbpos pos)
+{
+	struct btree_iter iter;
+	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, pos.btree, pos.pos, 0);
+	int ret = bkey_err(k);
+	if (ret)
+		return ret;
+
+	u8 gen;
+	ret = k.k->type != KEY_TYPE_set
+		? bch2_check_discard_freespace_key(trans, &iter, &gen, false)
+		: 0;
+	bch2_trans_iter_exit(trans, &iter);
+	return ret;
+}
+
+static void check_discard_freespace_key_work(struct work_struct *work)
+{
+	struct check_discard_freespace_key_async *w =
+		container_of(work, struct check_discard_freespace_key_async, work);
+
+	bch2_trans_do(w->c, bch2_recheck_discard_freespace_key(trans, w->pos));
+	bch2_write_ref_put(w->c, BCH_WRITE_REF_check_discard_freespace_key);
+	kfree(w);
+}
+
+int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_iter *iter, u8 *gen,
+				     bool async_repair)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_iter alloc_iter;
-	struct bkey_s_c alloc_k;
-	struct bch_alloc_v4 a_convert;
-	const struct bch_alloc_v4 *a;
-	u64 genbits;
-	struct bpos pos;
 	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
 		? BCH_DATA_need_discard
 		: BCH_DATA_free;
 	struct printbuf buf = PRINTBUF;
-	int ret;
 
-	pos = iter->pos;
-	pos.offset &= ~(~0ULL << 56);
-	genbits = iter->pos.offset & (~0ULL << 56);
+	struct bpos bucket = iter->pos;
+	bucket.offset &= ~(~0ULL << 56);
+	u64 genbits = iter->pos.offset & (~0ULL << 56);
 
-	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
-	ret = bkey_err(alloc_k);
+	struct btree_iter alloc_iter;
+	struct bkey_s_c alloc_k = bch2_bkey_get_iter(trans, &alloc_iter,
+						     BTREE_ID_alloc, bucket, BTREE_ITER_cached);
+	int ret = bkey_err(alloc_k);
 	if (ret)
 		return ret;
 
-	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos),
-			trans, need_discard_freespace_key_to_invalid_dev_bucket,
-			"entry in %s btree for nonexistant dev:bucket %llu:%llu",
-			bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
-		goto delete;
+	if (!bch2_dev_bucket_exists(c, bucket)) {
+		if (fsck_err(trans, need_discard_freespace_key_to_invalid_dev_bucket,
+			     "entry in %s btree for nonexistant dev:bucket %llu:%llu",
+			     bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset))
+			goto delete;
+		ret = 1;
+		goto out;
+	}
 
-	a = bch2_alloc_to_v4(alloc_k, &a_convert);
+	struct bch_alloc_v4 a_convert;
+	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert);
+
+	if (a->data_type != state ||
+	    (state == BCH_DATA_free &&
+	     genbits != alloc_freespace_genbits(*a))) {
+		if (fsck_err(trans, need_discard_freespace_key_bad,
+			     "%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
+			     (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
+			     bch2_btree_id_str(iter->btree_id),
+			     iter->pos.inode,
+			     iter->pos.offset,
+			     a->data_type == state,
+			     genbits >> 56, alloc_freespace_genbits(*a) >> 56))
+			goto delete;
+		ret = 1;
+		goto out;
+	}
 
-	if (fsck_err_on(a->data_type != state ||
-			(state == BCH_DATA_free &&
-			 genbits != alloc_freespace_genbits(*a)),
-			trans, need_discard_freespace_key_bad,
-			"%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
-			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
-			bch2_btree_id_str(iter->btree_id),
-			iter->pos.inode,
-			iter->pos.offset,
-			a->data_type == state,
-			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
-		goto delete;
+	*gen = a->gen;
 out:
 fsck_err:
 	bch2_set_btree_iter_dontneed(&alloc_iter);
@@ -1420,11 +1426,40 @@ fsck_err:
 	printbuf_exit(&buf);
 	return ret;
 delete:
-	ret =   bch2_btree_delete_extent_at(trans, iter,
-			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
-		bch2_trans_commit(trans, NULL, NULL,
-			BCH_TRANS_COMMIT_no_enospc);
-	goto out;
+	if (!async_repair) {
+		ret =   bch2_btree_bit_mod_iter(trans, iter, false) ?:
+			bch2_trans_commit(trans, NULL, NULL,
+				BCH_TRANS_COMMIT_no_enospc) ?:
+			-BCH_ERR_transaction_restart_commit;
+		goto out;
+	} else {
+		/*
+		 * We can't repair here when called from the allocator path: the
+		 * commit will recurse back into the allocator
+		 */
+		struct check_discard_freespace_key_async *w =
+			kzalloc(sizeof(*w), GFP_KERNEL);
+		if (!w)
+			goto out;
+
+		if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_check_discard_freespace_key)) {
+			kfree(w);
+			goto out;
+		}
+
+		INIT_WORK(&w->work, check_discard_freespace_key_work);
+		w->c = c;
+		w->pos = BBPOS(iter->btree_id, iter->pos);
+		queue_work(c->write_ref_wq, &w->work);
+		goto out;
+	}
+}
+
+static int bch2_check_discard_freespace_key_fsck(struct btree_trans *trans, struct btree_iter *iter)
+{
+	u8 gen;
+	int ret = bch2_check_discard_freespace_key(trans, iter, &gen, false);
+	return ret < 0 ? ret : 0;
 }
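The async-repair path above follows the standard kernel deferred-work pattern: allocate a small struct embedding a work_struct, record what to recheck, and queue it so the repair commits from process context instead of recursing into the allocator. A generic, self-contained sketch of that pattern — all names here are illustrative:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_repair {
	struct work_struct	work;
	u64			pos;	/* what to recheck */
};

static void deferred_repair_fn(struct work_struct *work)
{
	struct deferred_repair *r =
		container_of(work, struct deferred_repair, work);

	/* ... re-run the check at r->pos and commit the fix ... */
	kfree(r);
}

static int schedule_repair(u64 pos)
{
	struct deferred_repair *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return -ENOMEM;

	INIT_WORK(&r->work, deferred_repair_fn);
	r->pos = pos;
	queue_work(system_wq, &r->work);	/* the code above uses its own workqueue */
	return 0;
}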
 /*
@@ -1581,7 +1616,7 @@ bkey_err:
 
 	ret = for_each_btree_key(trans, iter,
 			BTREE_ID_need_discard, POS_MIN,
 			BTREE_ITER_prefetch, k,
-		bch2_check_discard_freespace_key(trans, &iter));
+		bch2_check_discard_freespace_key_fsck(trans, &iter));
 	if (ret)
 		goto err;
 
@@ -1594,7 +1629,7 @@ bkey_err:
 			break;
 
 		ret = bkey_err(k) ?:
-			bch2_check_discard_freespace_key(trans, &iter);
+			bch2_check_discard_freespace_key_fsck(trans, &iter);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
 			ret = 0;
 			continue;
@@ -1757,7 +1792,8 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 				   struct bch_dev *ca,
 				   struct btree_iter *need_discard_iter,
 				   struct bpos *discard_pos_done,
-				   struct discard_buckets_state *s)
+				   struct discard_buckets_state *s,
+				   bool fastpath)
 {
 	struct bch_fs *c = trans->c;
 	struct bpos pos = need_discard_iter->pos;
@@ -1793,27 +1829,14 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 	if (ret)
 		goto out;
 
-	if (bch2_bucket_sectors_total(a->v)) {
-		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
-					       trans, "attempting to discard bucket with dirty data\n%s",
-					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
-			ret = -EIO;
-		goto out;
-	}
-
 	if (a->v.data_type != BCH_DATA_need_discard) {
-		if (data_type_is_empty(a->v.data_type) &&
-		    BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
-			a->v.gen++;
-			SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
-			goto write;
+		if (need_discard_or_freespace_err(trans, k, true, true, true)) {
+			ret = bch2_btree_bit_mod_iter(trans, need_discard_iter, false);
+			if (ret)
+				goto out;
+			goto commit;
 		}
 
-		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
-					       trans, "bucket incorrectly set in need_discard btree\n"
-					       "%s",
-					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
-			ret = -EIO;
 		goto out;
 	}
 
@@ -1827,10 +1850,12 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 		goto out;
 	}
 
-	if (discard_in_flight_add(ca, iter.pos.offset, true))
-		goto out;
+	if (!fastpath) {
+		if (discard_in_flight_add(ca, iter.pos.offset, true))
+			goto out;
 
-	discard_locked = true;
+		discard_locked = true;
+	}
 
 	if (!bkey_eq(*discard_pos_done, iter.pos) &&
 	    ca->mi.discard && !c->opts.nochanges) {
@@ -1844,6 +1869,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 				     ca->mi.bucket_size,
 				     GFP_KERNEL);
 		*discard_pos_done = iter.pos;
+		s->discarded++;
 
 		ret = bch2_trans_relock_notrace(trans);
 		if (ret)
@@ -1851,22 +1877,25 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 	}
 
 	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
-write:
 	alloc_data_type_set(&a->v, a->v.data_type);
 
-	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
-		bch2_trans_commit(trans, NULL, NULL,
-				  BCH_WATERMARK_btree|
-				  BCH_TRANS_COMMIT_no_enospc);
+	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
+	if (ret)
+		goto out;
+commit:
+	ret = bch2_trans_commit(trans, NULL, NULL,
+				BCH_WATERMARK_btree|
+				BCH_TRANS_COMMIT_no_enospc);
 	if (ret)
 		goto out;
 
 	count_event(c, bucket_discard);
-	s->discarded++;
 out:
+fsck_err:
 	if (discard_locked)
 		discard_in_flight_remove(ca, iter.pos.offset);
-	s->seen++;
+	if (!ret)
+		s->seen++;
 	bch2_trans_iter_exit(trans, &iter);
 	printbuf_exit(&buf);
 	return ret;
@@ -1886,11 +1915,11 @@ static void bch2_do_discards_work(struct work_struct *work)
 	 * successful commit:
 	 */
 	ret = bch2_trans_run(c,
-		for_each_btree_key_upto(trans, iter,
+		for_each_btree_key_max(trans, iter,
 				BTREE_ID_need_discard,
 				POS(ca->dev_idx, 0),
 				POS(ca->dev_idx, U64_MAX), 0, k,
-			bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s)));
+			bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s, false)));
 
 	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
 			      bch2_err_str(ret));
@@ -1923,27 +1952,29 @@ void bch2_do_discards(struct bch_fs *c)
 		bch2_dev_do_discards(ca);
 }
 
-static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
+static int bch2_do_discards_fast_one(struct btree_trans *trans,
+				     struct bch_dev *ca,
+				     u64 bucket,
+				     struct bpos *discard_pos_done,
+				     struct discard_buckets_state *s)
 {
-	struct btree_iter iter;
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_intent);
-	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
-	int ret = bkey_err(k);
-	if (ret)
-		goto err;
-
-	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k);
-	ret = PTR_ERR_OR_ZERO(a);
+	struct btree_iter need_discard_iter;
+	struct bkey_s_c discard_k = bch2_bkey_get_iter(trans, &need_discard_iter,
+					BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0);
+	int ret = bkey_err(discard_k);
 	if (ret)
-		goto err;
+		return ret;
 
-	BUG_ON(a->v.dirty_sectors);
-	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
-	alloc_data_type_set(&a->v, a->v.data_type);
+	if (log_fsck_err_on(discard_k.k->type != KEY_TYPE_set,
+			    trans, discarding_bucket_not_in_need_discard_btree,
+			    "attempting to discard bucket %u:%llu not in need_discard btree",
+			    ca->dev_idx, bucket))
+		goto out;
 
-	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
-err:
-	bch2_trans_iter_exit(trans, &iter);
+	ret = bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true);
+out:
+fsck_err:
+	bch2_trans_iter_exit(trans, &need_discard_iter);
 	return ret;
 }
 
@@ -1951,6 +1982,10 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
 {
 	struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work);
 	struct bch_fs *c = ca->fs;
+	struct discard_buckets_state s = {};
+	struct bpos discard_pos_done = POS_MAX;
+	struct btree_trans *trans = bch2_trans_get(c);
+	int ret = 0;
 
 	while (1) {
 		bool got_bucket = false;
@@ -1971,16 +2006,8 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
 		if (!got_bucket)
 			break;
 
-		if (ca->mi.discard && !c->opts.nochanges)
-			blkdev_issue_discard(ca->disk_sb.bdev,
-					     bucket_to_sector(ca, bucket),
-					     ca->mi.bucket_size,
-					     GFP_KERNEL);
-
-		int ret = bch2_trans_commit_do(c, NULL, NULL,
-					       BCH_WATERMARK_btree|
-					       BCH_TRANS_COMMIT_no_enospc,
-					       bch2_clear_bucket_needs_discard(trans, POS(ca->dev_idx, bucket)));
+		ret = lockrestart_do(trans,
+			bch2_do_discards_fast_one(trans, ca, bucket, &discard_pos_done, &s));
 		bch_err_fn(c, ret);
 
 		discard_in_flight_remove(ca, bucket);
@@ -1989,6 +2016,9 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
 			break;
 	}
 
+	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret));
+
+	bch2_trans_put(trans);
 	percpu_ref_put(&ca->io_ref);
 	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
 }
@@ -2030,8 +2060,11 @@ static int invalidate_one_bucket(struct btree_trans *trans,
 		return 1;
 
 	if (!bch2_dev_bucket_exists(c, bucket)) {
-		prt_str(&buf, "lru entry points to invalid bucket");
-		goto err;
+		if (fsck_err(trans, lru_entry_to_invalid_bucket,
+			     "lru key points to nonexistent device:bucket %llu:%llu",
+			     bucket.inode, bucket.offset))
+			return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false);
+		goto out;
 	}
 
 	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
@@ -2072,28 +2105,9 @@ static int invalidate_one_bucket(struct btree_trans *trans,
 	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
 	--*nr_to_invalidate;
 out:
+fsck_err:
 	printbuf_exit(&buf);
 	return ret;
-err:
-	prt_str(&buf, "\n  lru key: ");
-	bch2_bkey_val_to_text(&buf, c, lru_k);
-
-	prt_str(&buf, "\n  lru entry: ");
-	bch2_lru_pos_to_text(&buf, lru_iter->pos);
-
-	prt_str(&buf, "\n  alloc key: ");
-	if (!a)
-		bch2_bpos_to_text(&buf, bucket);
-	else
-		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
-
-	bch_err(c, "%s", buf.buf);
-	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
-		bch2_inconsistent_error(c);
-		ret = -EINVAL;
-	}
-
-	goto out;
 }
 
 static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
@@ -2101,7 +2115,7 @@ static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter
 {
 	struct bkey_s_c k;
 again:
-	k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
+	k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
 	if (!k.k && !*wrapped) {
 		bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
 		*wrapped = true;
diff --git a/libbcachefs/alloc_background.h b/libbcachefs/alloc_background.h
index f8e87c67..de25ba4e 100644
--- a/libbcachefs/alloc_background.h
+++ b/libbcachefs/alloc_background.h
@@ -8,8 +8,6 @@
 #include "debug.h"
 #include "super.h"
 
-enum bch_validate_flags;
-
 /* How out of date a pointer gen is allowed to be: */
 #define BUCKET_GC_GEN_MAX	96U
 
@@ -168,6 +166,9 @@ static inline bool data_type_movable(enum bch_data_type type)
 static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
 					      struct bch_dev *ca)
 {
+	if (a.data_type >= BCH_DATA_NR)
+		return 0;
+
 	if (!data_type_movable(a.data_type) ||
 	    !bch2_bucket_sectors_fragmented(ca, a))
 		return 0;
@@ -242,10 +243,14 @@ struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s
 
 int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
 
-int bch2_alloc_v1_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-int bch2_alloc_v2_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-int bch2_alloc_v3_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
-int bch2_alloc_v4_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+int bch2_alloc_v1_validate(struct bch_fs *, struct bkey_s_c,
+			   struct bkey_validate_context);
+int bch2_alloc_v2_validate(struct bch_fs *, struct bkey_s_c,
+			   struct bkey_validate_context);
+int bch2_alloc_v3_validate(struct bch_fs *, struct bkey_s_c,
+			   struct bkey_validate_context);
+int bch2_alloc_v4_validate(struct bch_fs *, struct bkey_s_c,
+			   struct bkey_validate_context);
 void bch2_alloc_v4_swab(struct bkey_s);
 void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
@@ -279,7 +284,7 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 })
 
 int bch2_bucket_gens_validate(struct bch_fs *, struct bkey_s_c,
-			      enum bch_validate_flags);
+			      struct bkey_validate_context);
 void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
 #define bch2_bkey_ops_bucket_gens ((struct bkey_ops) {	\
@@ -304,6 +309,8 @@ int bch2_alloc_key_to_dev_counters(struct btree_trans *, struct bch_dev *,
 int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned,
 		       struct bkey_s_c, struct bkey_s,
 		       enum btree_iter_update_trigger_flags);
+
+int bch2_check_discard_freespace_key(struct btree_trans *, struct btree_iter *, u8 *, bool);
 int bch2_check_alloc_info(struct bch_fs *);
 int bch2_check_alloc_to_lru_refs(struct bch_fs *);
 void bch2_dev_do_discards(struct bch_dev *);
diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index 32f2a241..095bfe7c 100644
--- a/libbcachefs/alloc_foreground.c
+++ b/libbcachefs/alloc_foreground.c
@@ -156,12 +156,24 @@ static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
 	return ob;
 }
 
+static inline bool is_superblock_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
+{
+	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_trans_mark_dev_sbs)
+		return false;
+
+	return bch2_is_superblock_bucket(ca, b);
+}
+
 static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
 {
 	BUG_ON(c->open_buckets_partial_nr >=
 	       ARRAY_SIZE(c->open_buckets_partial));
 
 	spin_lock(&c->freelist_lock);
+	rcu_read_lock();
+	bch2_dev_rcu(c, ob->dev)->nr_partial_buckets++;
+	rcu_read_unlock();
+
 	ob->on_partial_list = true;
 	c->open_buckets_partial[c->open_buckets_partial_nr++] =
 		ob - c->open_buckets;
@@ -171,20 +183,6 @@ static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
 	closure_wake_up(&c->freelist_wait);
 }
 
-/* _only_ for allocating the journal on a new device: */
-long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
-{
-	while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
-		u64 b = ca->new_fs_bucket_idx++;
-
-		if (!is_superblock_bucket(ca, b) &&
-		    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
-			return b;
-	}
-
-	return -1;
-}
-
 static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
 {
 	switch (watermark) {
@@ -203,14 +201,16 @@ static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
 }
 
 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
-					      u64 bucket,
+					      u64 bucket, u8 gen,
 					      enum bch_watermark watermark,
-					      const struct bch_alloc_v4 *a,
 					      struct bucket_alloc_state *s,
 					      struct closure *cl)
 {
 	struct open_bucket *ob;
 
+	if (unlikely(is_superblock_bucket(c, ca, bucket)))
+		return NULL;
+
 	if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
 		s->skipped_nouse++;
 		return NULL;
@@ -257,7 +257,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 	ob->valid	= true;
 	ob->sectors_free = ca->mi.bucket_size;
 	ob->dev		= ca->dev_idx;
-	ob->gen		= a->gen;
+	ob->gen		= gen;
 	ob->bucket	= bucket;
 	spin_unlock(&ob->lock);
 
@@ -272,111 +272,26 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 }
 
 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
-					    enum bch_watermark watermark, u64 free_entry,
+					    enum bch_watermark watermark,
 					    struct bucket_alloc_state *s,
-					    struct bkey_s_c freespace_k,
+					    struct btree_iter *freespace_iter,
 					    struct closure *cl)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_iter iter = { NULL };
-	struct bkey_s_c k;
-	struct open_bucket *ob;
-	struct bch_alloc_v4 a_convert;
-	const struct bch_alloc_v4 *a;
-	u64 b = free_entry & ~(~0ULL << 56);
-	unsigned genbits = free_entry >> 56;
-	struct printbuf buf = PRINTBUF;
-	int ret;
-
-	if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
-		prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
-			   "  freespace key ",
-			   ca->mi.first_bucket, ca->mi.nbuckets);
-		bch2_bkey_val_to_text(&buf, c, freespace_k);
-		bch2_trans_inconsistent(trans, "%s", buf.buf);
-		ob = ERR_PTR(-EIO);
-		goto err;
-	}
-
-	k = bch2_bkey_get_iter(trans, &iter,
-			       BTREE_ID_alloc, POS(ca->dev_idx, b),
-			       BTREE_ITER_cached);
-	ret = bkey_err(k);
-	if (ret) {
-		ob = ERR_PTR(ret);
-		goto err;
-	}
+	u64 b = freespace_iter->pos.offset & ~(~0ULL << 56);
+	u8 gen;
 
-	a = bch2_alloc_to_v4(k, &a_convert);
-
-	if (a->data_type != BCH_DATA_free) {
-		if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
-			ob = NULL;
-			goto err;
-		}
-
-		prt_printf(&buf, "non free bucket in freespace btree\n"
-			   "  freespace key ");
-		bch2_bkey_val_to_text(&buf, c, freespace_k);
-		prt_printf(&buf, "\n  ");
-		bch2_bkey_val_to_text(&buf, c, k);
-		bch2_trans_inconsistent(trans, "%s", buf.buf);
-		ob = ERR_PTR(-EIO);
-		goto err;
-	}
-
-	if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
-	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
-		prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
-			   "  freespace key ",
-			   genbits, alloc_freespace_genbits(*a) >> 56);
-		bch2_bkey_val_to_text(&buf, c, freespace_k);
-		prt_printf(&buf, "\n  ");
-		bch2_bkey_val_to_text(&buf, c, k);
-		bch2_trans_inconsistent(trans, "%s", buf.buf);
-		ob = ERR_PTR(-EIO);
-		goto err;
-	}
-
-	if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
-		struct bch_backpointer bp;
-		struct bpos bp_pos = POS_MIN;
-
-		ret = bch2_get_next_backpointer(trans, ca, POS(ca->dev_idx, b), -1,
-						&bp_pos, &bp,
-						BTREE_ITER_nopreserve);
-		if (ret) {
-			ob = ERR_PTR(ret);
-			goto err;
-		}
-
-		if (!bkey_eq(bp_pos, POS_MAX)) {
-			/*
-			 * Bucket may have data in it - we don't call
-			 * bc2h_trans_inconnsistent() because fsck hasn't
-			 * finished yet
-			 */
-			ob = NULL;
-			goto err;
-		}
-	}
+	int ret = bch2_check_discard_freespace_key(trans, freespace_iter, &gen, true);
+	if (ret < 0)
+		return ERR_PTR(ret);
+	if (ret)
+		return NULL;
 
-	ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
-	if (!ob)
-		bch2_set_btree_iter_dontneed(&iter);
-err:
-	if (iter.path)
-		bch2_set_btree_iter_dontneed(&iter);
-	bch2_trans_iter_exit(trans, &iter);
-	printbuf_exit(&buf);
-	return ob;
+	return __try_alloc_bucket(c, ca, b, gen, watermark, s, cl);
 }
 
 /*
  * This path is for before the freespace btree is initialized:
- *
- * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
- * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
  */
 static noinline struct open_bucket *
 bch2_bucket_alloc_early(struct btree_trans *trans,
@@ -388,7 +303,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 	struct btree_iter iter, citer;
 	struct bkey_s_c k, ck;
 	struct open_bucket *ob = NULL;
-	u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
+	u64 first_bucket = ca->mi.first_bucket;
 	u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
 	u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
 	u64 alloc_cursor = alloc_start;
@@ -411,10 +326,6 @@ again:
 		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
 			break;
 
-		if (ca->new_fs_bucket_idx &&
-		    is_superblock_bucket(ca, k.k->p.offset))
-			continue;
-
 		if (s->btree_bitmap != BTREE_BITMAP_ANY &&
 		    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
 					bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
@@ -448,7 +359,7 @@ again:
 
 		s->buckets_seen++;
 
-		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
+		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, a->gen, watermark, s, cl);
next:
 		bch2_set_btree_iter_dontneed(&citer);
 		bch2_trans_iter_exit(trans, &citer);
@@ -485,20 +396,21 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 	u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
 	u64 alloc_cursor = alloc_start;
 	int ret;
-
-	BUG_ON(ca->new_fs_bucket_idx);
again:
-	for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
-				     POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
-		if (k.k->p.inode != ca->dev_idx)
-			break;
+	for_each_btree_key_max_norestart(trans, iter, BTREE_ID_freespace,
+					 POS(ca->dev_idx, alloc_cursor),
+					 POS(ca->dev_idx, U64_MAX),
+					 0, k, ret) {
+		/*
+		 * peek normally dosen't trim extents - they can span iter.pos,
+		 * which is not what we want here:
+		 */
+		iter.k.size = iter.k.p.offset - iter.pos.offset;
 
-		for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
-		     alloc_cursor < k.k->p.offset;
-		     alloc_cursor++) {
+		while (iter.k.size) {
 			s->buckets_seen++;
 
-			u64 bucket = alloc_cursor & ~(~0ULL << 56);
+			u64 bucket = iter.pos.offset & ~(~0ULL << 56);
 			if (s->btree_bitmap != BTREE_BITMAP_ANY &&
 			    s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
 					bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
@@ -507,32 +419,36 @@ again:
 					goto fail;
 
 				bucket = sector_to_bucket(ca,
-						round_up(bucket_to_sector(ca, bucket) + 1,
+						round_up(bucket_to_sector(ca, bucket + 1),
							 1ULL << ca->mi.btree_bitmap_shift));
-				u64 genbits = alloc_cursor >> 56;
-				alloc_cursor = bucket | (genbits << 56);
+				alloc_cursor = bucket|(iter.pos.offset & (~0ULL << 56));
 
-				if (alloc_cursor > k.k->p.offset)
-					bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
+				bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
 				s->skipped_mi_btree_bitmap++;
-				continue;
+				goto next;
 			}
 
-			ob = try_alloc_bucket(trans, ca, watermark,
-					      alloc_cursor, s, k, cl);
+			ob = try_alloc_bucket(trans, ca, watermark, s, &iter, cl);
 			if (ob) {
+				if (!IS_ERR(ob))
+					*dev_alloc_cursor = iter.pos.offset;
 				bch2_set_btree_iter_dontneed(&iter);
 				break;
 			}
-		}
 
+			iter.k.size--;
+			iter.pos.offset++;
+		}
+next:
 		if (ob || ret)
 			break;
 	}
fail:
 	bch2_trans_iter_exit(trans, &iter);
 
-	if (!ob && ret)
+	BUG_ON(ob && ret);
+
+	if (ret)
 		ob = ERR_PTR(ret);
 
 	if (!ob && alloc_start > ca->mi.first_bucket) {
@@ -540,8 +456,6 @@ fail:
 		goto again;
 	}
 
-	*dev_alloc_cursor = alloc_cursor;
-
 	return ob;
 }
 
@@ -591,6 +505,7 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
  * @watermark:		how important is this allocation?
  * @data_type:		BCH_DATA_journal, btree, user...
  * @cl:			if not NULL, closure to be used to wait if buckets not available
+ * @nowait:		if true, do not wait for buckets to become available
  * @usage:		for secondarily also returning the current device usage
  *
  * Returns:	an open_bucket on success, or an ERR_PTR() on failure.
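The bitmap-skip arithmetic above, in isolation: when a bucket is reserved for btree data, the cursor jumps to the start of the next region aligned to the (power-of-two) bitmap granularity rather than scanning bucket by bucket. round_up() is the kernel helper for power-of-two alignment; the function name here is illustrative:

/* First sector strictly after `sector`, aligned to a 1 << shift boundary */
static inline u64 next_unmarked_region(u64 sector, unsigned shift)
{
	return round_up(sector + 1, 1ULL << shift);
}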
@@ -625,6 +540,10 @@ again:
 		bch2_dev_do_invalidates(ca);
 
 	if (!avail) {
+		if (watermark > BCH_WATERMARK_normal &&
+		    c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
+			goto alloc;
+
 		if (cl && !waiting) {
 			closure_wait(&c->freelist_wait, cl);
 			waiting = true;
@@ -972,7 +891,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 			u64 avail;
 
 			bch2_dev_usage_read_fast(ca, &usage);
-			avail = dev_buckets_free(ca, usage, watermark);
+			avail = dev_buckets_free(ca, usage, watermark) + ca->nr_partial_buckets;
 			if (!avail)
 				continue;
@@ -981,6 +900,10 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 					      i);
 			ob->on_partial_list = false;
 
+			rcu_read_lock();
+			bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
+			rcu_read_unlock();
+
 			ret = add_new_bucket(c, ptrs, devs_may_alloc,
 					     nr_replicas, nr_effective,
 					     have_cache, ob);
@@ -1191,7 +1114,13 @@ void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
 			--c->open_buckets_partial_nr;
 			swap(c->open_buckets_partial[i],
 			     c->open_buckets_partial[c->open_buckets_partial_nr]);
+
 			ob->on_partial_list = false;
+
+			rcu_read_lock();
+			bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
+			rcu_read_unlock();
+
 			spin_unlock(&c->freelist_lock);
 			bch2_open_bucket_put(c, ob);
 			spin_lock(&c->freelist_lock);

diff --git a/libbcachefs/alloc_foreground.h b/libbcachefs/alloc_foreground.h
index 1a16fd5b..4f87745d 100644
--- a/libbcachefs/alloc_foreground.h
+++ b/libbcachefs/alloc_foreground.h
@@ -28,8 +28,6 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *,
 					  struct bch_devs_mask *);
 void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);
 
-long bch2_bucket_alloc_new_fs(struct bch_dev *);
-
 static inline struct bch_dev *ob_dev(struct bch_fs *c, struct open_bucket *ob)
 {
 	return bch2_dev_have_ref(c, ob->dev);
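The nr_partial_buckets accounting added above always adjusts the counter under rcu_read_lock(), since the device-table lookup (bch2_dev_rcu()) is only valid inside an RCU read-side section. The shape of the pattern, as a sketch (the helper name is illustrative):

static void adjust_partial_buckets(struct bch_fs *c, unsigned dev, int delta)
{
	rcu_read_lock();
	bch2_dev_rcu(c, dev)->nr_partial_buckets += delta;
	rcu_read_unlock();
}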
diff --git a/libbcachefs/backpointers.c b/libbcachefs/backpointers.c
index c709893f..b53d98c7 100644
--- a/libbcachefs/backpointers.c
+++ b/libbcachefs/backpointers.c
@@ -14,11 +14,59 @@
 #include <linux/mm.h>
 
+int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k,
+			      struct bkey_validate_context from)
+{
+	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
+	int ret = 0;
+
+	bkey_fsck_err_on(bp.v->level > BTREE_MAX_DEPTH,
+			 c, backpointer_level_bad,
+			 "backpointer level bad: %u >= %u",
+			 bp.v->level, BTREE_MAX_DEPTH);
+
+	bkey_fsck_err_on(bp.k->p.inode == BCH_SB_MEMBER_INVALID,
+			 c, backpointer_dev_bad,
+			 "backpointer for BCH_SB_MEMBER_INVALID");
+fsck_err:
+	return ret;
+}
+
+void bch2_backpointer_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
+{
+	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
+
+	rcu_read_lock();
+	struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode);
+	if (ca) {
+		u32 bucket_offset;
+		struct bpos bucket = bp_pos_to_bucket_and_offset(ca, bp.k->p, &bucket_offset);
+		rcu_read_unlock();
+		prt_printf(out, "bucket=%llu:%llu:%u", bucket.inode, bucket.offset, bucket_offset);
+	} else {
+		rcu_read_unlock();
+		prt_printf(out, "sector=%llu:%llu", bp.k->p.inode, bp.k->p.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT);
+	}
+
+	bch2_btree_id_level_to_text(out, bp.v->btree_id, bp.v->level);
+	prt_printf(out, " suboffset=%u len=%u pos=",
+		   (u32) bp.k->p.offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
+		   bp.v->bucket_len);
+	bch2_bpos_to_text(out, bp.v->pos);
+}
+
+void bch2_backpointer_swab(struct bkey_s k)
+{
+	struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
+
+	bp.v->bucket_len = swab32(bp.v->bucket_len);
+	bch2_bpos_swab(&bp.v->pos);
+}
+
 static bool extent_matches_bp(struct bch_fs *c,
 			      enum btree_id btree_id, unsigned level,
 			      struct bkey_s_c k,
-			      struct bpos bucket,
-			      struct bch_backpointer bp)
+			      struct bkey_s_c_backpointer bp)
 {
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const union bch_extent_entry *entry;
@@ -27,7 +75,7 @@ static bool extent_matches_bp(struct bch_fs *c,
 	rcu_read_lock();
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
 		struct bpos bucket2;
-		struct bch_backpointer bp2;
+		struct bkey_i_backpointer bp2;
 
 		if (p.ptr.cached)
 			continue;
@@ -37,8 +85,8 @@ static bool extent_matches_bp(struct bch_fs *c,
 			continue;
 
 		bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, &bucket2, &bp2);
-		if (bpos_eq(bucket, bucket2) &&
-		    !memcmp(&bp, &bp2, sizeof(bp))) {
+		if (bpos_eq(bp.k->p, bp2.k.p) &&
+		    !memcmp(bp.v, &bp2.v, sizeof(bp2.v))) {
 			rcu_read_unlock();
 			return true;
 		}
@@ -48,72 +96,10 @@ static bool extent_matches_bp(struct bch_fs *c,
 	return false;
 }
 
-int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k,
-			      enum bch_validate_flags flags)
-{
-	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
-
-	rcu_read_lock();
-	struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode);
-	if (!ca) {
-		/* these will be caught by fsck */
-		rcu_read_unlock();
-		return 0;
-	}
-
-	struct bpos bucket = bp_pos_to_bucket(ca, bp.k->p);
-	struct bpos bp_pos = bucket_pos_to_bp_noerror(ca, bucket, bp.v->bucket_offset);
-	rcu_read_unlock();
-	int ret = 0;
-
-	bkey_fsck_err_on((bp.v->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT) >= ca->mi.bucket_size ||
-			 !bpos_eq(bp.k->p, bp_pos),
-			 c, backpointer_bucket_offset_wrong,
-			 "backpointer bucket_offset wrong");
-fsck_err:
-	return ret;
-}
-
-void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
-{
-	bch2_btree_id_level_to_text(out, bp->btree_id, bp->level);
-	prt_printf(out, " offset=%llu:%u len=%u pos=",
-		   (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
-		   (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
-		   bp->bucket_len);
-	bch2_bpos_to_text(out, bp->pos);
-}
-
-void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
-{
-	rcu_read_lock();
-	struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.k->p.inode);
-	if (ca) {
-		struct bpos bucket = bp_pos_to_bucket(ca, k.k->p);
-		rcu_read_unlock();
-		prt_str(out, "bucket=");
-		bch2_bpos_to_text(out, bucket);
-		prt_str(out, " ");
-	} else {
-		rcu_read_unlock();
-	}
-
-	bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
-}
-
-void bch2_backpointer_swab(struct bkey_s k)
-{
-	struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
-
-	bp.v->bucket_offset	= swab40(bp.v->bucket_offset);
-	bp.v->bucket_len	= swab32(bp.v->bucket_len);
-	bch2_bpos_swab(&bp.v->pos);
-}
-
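As bch2_backpointer_to_text() above shows, a backpointer key's position now encodes device and sector directly: p.inode is the device index, and the upper bits of p.offset are the device sector, leaving MAX_EXTENT_COMPRESS_RATIO_SHIFT low bits to order multiple backpointers within one sector. Decoding, as an illustrative sketch:

static inline u64 bp_pos_to_sector(struct bpos p)
{
	return p.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
}

static inline u32 bp_pos_to_suboffset(struct bpos p)
{
	return (u32) p.offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT);
}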
 static noinline int backpointer_mod_err(struct btree_trans *trans,
-					struct bch_backpointer bp,
-					struct bkey_s_c bp_k,
 					struct bkey_s_c orig_k,
+					struct bkey_i_backpointer *new_bp,
+					struct bkey_s_c found_bp,
 					bool insert)
 {
 	struct bch_fs *c = trans->c;
@@ -121,12 +107,12 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
 
 	if (insert) {
 		prt_printf(&buf, "existing backpointer found when inserting ");
-		bch2_backpointer_to_text(&buf, &bp);
+		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new_bp->k_i));
 		prt_newline(&buf);
 		printbuf_indent_add(&buf, 2);
 
 		prt_printf(&buf, "found ");
-		bch2_bkey_val_to_text(&buf, c, bp_k);
+		bch2_bkey_val_to_text(&buf, c, found_bp);
 		prt_newline(&buf);
 
 		prt_printf(&buf, "for ");
@@ -138,11 +124,11 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
 		printbuf_indent_add(&buf, 2);
 
 		prt_printf(&buf, "searching for ");
-		bch2_backpointer_to_text(&buf, &bp);
+		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new_bp->k_i));
 		prt_newline(&buf);
 
 		prt_printf(&buf, "got ");
-		bch2_bkey_val_to_text(&buf, c, bp_k);
+		bch2_bkey_val_to_text(&buf, c, found_bp);
 		prt_newline(&buf);
 
 		prt_printf(&buf, "for ");
@@ -161,108 +147,59 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
 }
 
 int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
-				struct bch_dev *ca,
-				struct bpos bucket,
-				struct bch_backpointer bp,
 				struct bkey_s_c orig_k,
+				struct bkey_i_backpointer *bp,
 				bool insert)
 {
 	struct btree_iter bp_iter;
-	struct bkey_s_c k;
-	struct bkey_i_backpointer *bp_k;
-	int ret;
-
-	bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(struct bkey_i_backpointer));
-	ret = PTR_ERR_OR_ZERO(bp_k);
-	if (ret)
-		return ret;
-
-	bkey_backpointer_init(&bp_k->k_i);
-	bp_k->k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset);
-	bp_k->v = bp;
-
-	if (!insert) {
-		bp_k->k.type = KEY_TYPE_deleted;
-		set_bkey_val_u64s(&bp_k->k, 0);
-	}
-
-	k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
-			       bp_k->k.p,
+	struct bkey_s_c k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
+			       bp->k.p,
 			       BTREE_ITER_intent|
 			       BTREE_ITER_slots|
 			       BTREE_ITER_with_updates);
-	ret = bkey_err(k);
+	int ret = bkey_err(k);
 	if (ret)
-		goto err;
+		return ret;
 
 	if (insert
 	    ? k.k->type
 	    : (k.k->type != KEY_TYPE_backpointer ||
-	       memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) {
-		ret = backpointer_mod_err(trans, bp, k, orig_k, insert);
+	       memcmp(bkey_s_c_to_backpointer(k).v, &bp->v, sizeof(bp->v)))) {
+		ret = backpointer_mod_err(trans, orig_k, bp, k, insert);
 		if (ret)
 			goto err;
 	}
 
-	ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
+	if (!insert) {
+		bp->k.type = KEY_TYPE_deleted;
+		set_bkey_val_u64s(&bp->k, 0);
+	}
+
+	ret = bch2_trans_update(trans, &bp_iter, &bp->k_i, 0);
err:
 	bch2_trans_iter_exit(trans, &bp_iter);
 	return ret;
 }
-/*
- * Find the next backpointer >= *bp_offset:
- */
-int bch2_get_next_backpointer(struct btree_trans *trans,
-			      struct bch_dev *ca,
-			      struct bpos bucket, int gen,
-			      struct bpos *bp_pos,
-			      struct bch_backpointer *bp,
-			      unsigned iter_flags)
+static int bch2_backpointer_del(struct btree_trans *trans, struct bpos pos)
 {
-	struct bpos bp_end_pos = bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket), 0);
-	struct btree_iter alloc_iter = { NULL }, bp_iter = { NULL };
-	struct bkey_s_c k;
-	int ret = 0;
-
-	if (bpos_ge(*bp_pos, bp_end_pos))
-		goto done;
-
-	if (gen >= 0) {
-		k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
-				       bucket, BTREE_ITER_cached|iter_flags);
-		ret = bkey_err(k);
-		if (ret)
-			goto out;
-
-		if (k.k->type != KEY_TYPE_alloc_v4 ||
-		    bkey_s_c_to_alloc_v4(k).v->gen != gen)
-			goto done;
-	}
-
-	*bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(ca, bucket, 0));
-
-	for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
-				     *bp_pos, iter_flags, k, ret) {
-		if (bpos_ge(k.k->p, bp_end_pos))
-			break;
+	return likely(!bch2_backpointers_no_use_write_buffer)
+		? bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, pos)
+		: bch2_btree_delete(trans, BTREE_ID_backpointers, pos, 0);
+}
 
-		*bp_pos = k.k->p;
-		*bp = *bkey_s_c_to_backpointer(k).v;
-		goto out;
-	}
-done:
-	*bp_pos = SPOS_MAX;
-out:
-	bch2_trans_iter_exit(trans, &bp_iter);
-	bch2_trans_iter_exit(trans, &alloc_iter);
-	return ret;
+static int bch2_backpointers_maybe_flush(struct btree_trans *trans,
+					 struct bkey_s_c visiting_k,
+					 struct bkey_buf *last_flushed)
+{
+	return likely(!bch2_backpointers_no_use_write_buffer)
+		? bch2_btree_write_buffer_maybe_flush(trans, visiting_k, last_flushed)
+		: 0;
 }
 
-static void backpointer_not_found(struct btree_trans *trans,
-				  struct bpos bp_pos,
-				  struct bch_backpointer bp,
-				  struct bkey_s_c k)
+static void backpointer_target_not_found(struct btree_trans *trans,
+					 struct bkey_s_c_backpointer bp,
+					 struct bkey_s_c target_k)
 {
 	struct bch_fs *c = trans->c;
 	struct printbuf buf = PRINTBUF;
@@ -275,23 +212,12 @@ static void backpointer_target_not_found(struct btree_trans *trans,
 	if (likely(!bch2_backpointers_no_use_write_buffer))
 		return;
 
-	struct bpos bucket;
-	if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket))
-		return;
-
 	prt_printf(&buf, "backpointer doesn't match %s it points to:\n  ",
-		   bp.level ? "btree node" : "extent");
-	prt_printf(&buf, "bucket: ");
-	bch2_bpos_to_text(&buf, bucket);
-	prt_printf(&buf, "\n  ");
-
-	prt_printf(&buf, "backpointer pos: ");
-	bch2_bpos_to_text(&buf, bp_pos);
+		   bp.v->level ? "btree node" : "extent");
+	bch2_bkey_val_to_text(&buf, c, bp.s_c);
 	prt_printf(&buf, "\n  ");
 
-	bch2_backpointer_to_text(&buf, &bp);
-	prt_printf(&buf, "\n  ");
-	bch2_bkey_val_to_text(&buf, c, k);
+	bch2_bkey_val_to_text(&buf, c, target_k);
 	if (c->curr_recovery_pass >= BCH_RECOVERY_PASS_check_extents_to_backpointers)
 		bch_err_ratelimited(c, "%s", buf.buf);
 	else
@@ -301,21 +227,19 @@ static void backpointer_target_not_found(struct btree_trans *trans,
 }
 
 struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
+					 struct bkey_s_c_backpointer bp,
 					 struct btree_iter *iter,
-					 struct bpos bp_pos,
-					 struct bch_backpointer bp,
 					 unsigned iter_flags)
 {
-	if (likely(!bp.level)) {
-		struct bch_fs *c = trans->c;
+	struct bch_fs *c = trans->c;
 
-		struct bpos bucket;
-		if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket))
-			return bkey_s_c_err(-EIO);
+	if (unlikely(bp.v->btree_id >= btree_id_nr_alive(c)))
+		return bkey_s_c_null;
 
+	if (likely(!bp.v->level)) {
 		bch2_trans_node_iter_init(trans, iter,
-					  bp.btree_id,
-					  bp.pos,
+					  bp.v->btree_id,
+					  bp.v->pos,
 					  0, 0,
 					  iter_flags);
 		struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
@@ -324,14 +248,15 @@ struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
 			return k;
 		}
 
-		if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
+		if (k.k &&
+		    extent_matches_bp(c, bp.v->btree_id, bp.v->level, k, bp))
 			return k;
 
 		bch2_trans_iter_exit(trans, iter);
-		backpointer_not_found(trans, bp_pos, bp, k);
+		backpointer_target_not_found(trans, bp, k);
 		return bkey_s_c_null;
 	} else {
-		struct btree *b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
+		struct btree *b = bch2_backpointer_get_node(trans, bp, iter);
 
 		if (IS_ERR_OR_NULL(b)) {
 			bch2_trans_iter_exit(trans, iter);
bucket; - if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket)) - return ERR_PTR(-EIO); + BUG_ON(!bp.v->level); bch2_trans_node_iter_init(trans, iter, - bp.btree_id, - bp.pos, + bp.v->btree_id, + bp.v->pos, 0, - bp.level - 1, + bp.v->level - 1, 0); struct btree *b = bch2_btree_iter_peek_node(iter); if (IS_ERR_OR_NULL(b)) goto err; - BUG_ON(b->c.level != bp.level - 1); + BUG_ON(b->c.level != bp.v->level - 1); - if (extent_matches_bp(c, bp.btree_id, bp.level, - bkey_i_to_s_c(&b->key), - bucket, bp)) + if (extent_matches_bp(c, bp.v->btree_id, bp.v->level, + bkey_i_to_s_c(&b->key), bp)) return b; if (btree_node_will_make_reachable(b)) { b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node); } else { - backpointer_not_found(trans, bp_pos, bp, bkey_i_to_s_c(&b->key)); + backpointer_target_not_found(trans, bp, bkey_i_to_s_c(&b->key)); b = NULL; } err: @@ -382,9 +301,12 @@ err: return b; } -static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter, - struct bkey_s_c k) +static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, struct bkey_s_c k, + struct bkey_buf *last_flushed) { + if (k.k->type != KEY_TYPE_backpointer) + return 0; + struct bch_fs *c = trans->c; struct btree_iter alloc_iter = { NULL }; struct bkey_s_c alloc_k; @@ -393,10 +315,14 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_ struct bpos bucket; if (!bp_pos_to_bucket_nodev_noerror(c, k.k->p, &bucket)) { + ret = bch2_backpointers_maybe_flush(trans, k, last_flushed); + if (ret) + goto out; + if (fsck_err(trans, backpointer_to_missing_device, "backpointer for missing device:\n%s", (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) - ret = bch2_btree_delete_at(trans, bp_iter, 0); + ret = bch2_backpointer_del(trans, k.k->p); goto out; } @@ -405,13 +331,16 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_ if (ret) goto out; - if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, - trans, backpointer_to_missing_alloc, - "backpointer for nonexistent alloc key: %llu:%llu:0\n%s", - alloc_iter.pos.inode, alloc_iter.pos.offset, - (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - ret = bch2_btree_delete_at(trans, bp_iter, 0); - goto out; + if (alloc_k.k->type != KEY_TYPE_alloc_v4) { + ret = bch2_backpointers_maybe_flush(trans, k, last_flushed); + if (ret) + goto out; + + if (fsck_err(trans, backpointer_to_missing_alloc, + "backpointer for nonexistent alloc key: %llu:%llu:0\n%s", + alloc_iter.pos.inode, alloc_iter.pos.offset, + (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) + ret = bch2_backpointer_del(trans, k.k->p); } out: fsck_err: @@ -423,18 +352,24 @@ fsck_err: /* verify that every backpointer has a corresponding alloc key */ int bch2_check_btree_backpointers(struct bch_fs *c) { + struct bkey_buf last_flushed; + bch2_bkey_buf_init(&last_flushed); + bkey_init(&last_flushed.k->k); + int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers, POS_MIN, 0, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_check_btree_backpointer(trans, &iter, k))); + bch2_check_backpointer_has_valid_bucket(trans, k, &last_flushed))); + + bch2_bkey_buf_exit(&last_flushed, c); bch_err_fn(c, ret); return ret; } struct extents_to_bp_state { - struct bpos bucket_start; - struct bpos bucket_end; + struct bpos bp_start; + struct bpos bp_end; struct bkey_buf last_flushed; }; @@ -522,41 +457,25 @@ err: static int check_bp_exists(struct btree_trans *trans, struct extents_to_bp_state *s, - struct bpos bucket, - struct 
bch_backpointer bp, + struct bkey_i_backpointer *bp, struct bkey_s_c orig_k) { struct bch_fs *c = trans->c; - struct btree_iter bp_iter = {}; struct btree_iter other_extent_iter = {}; struct printbuf buf = PRINTBUF; - struct bkey_s_c bp_k; - int ret = 0; - struct bch_dev *ca = bch2_dev_bucket_tryget(c, bucket); - if (!ca) { - prt_str(&buf, "extent for nonexistent device:bucket "); - bch2_bpos_to_text(&buf, bucket); - prt_str(&buf, "\n "); - bch2_bkey_val_to_text(&buf, c, orig_k); - bch_err(c, "%s", buf.buf); - ret = -BCH_ERR_fsck_repair_unimplemented; - goto err; - } - - if (bpos_lt(bucket, s->bucket_start) || - bpos_gt(bucket, s->bucket_end)) - goto out; + if (bpos_lt(bp->k.p, s->bp_start) || + bpos_gt(bp->k.p, s->bp_end)) + return 0; - bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, - bucket_pos_to_bp(ca, bucket, bp.bucket_offset), - 0); - ret = bkey_err(bp_k); + struct btree_iter bp_iter; + struct bkey_s_c bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, bp->k.p, 0); + int ret = bkey_err(bp_k); if (ret) goto err; if (bp_k.k->type != KEY_TYPE_backpointer || - memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) { + memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp->v, sizeof(bp->v))) { ret = bch2_btree_write_buffer_maybe_flush(trans, orig_k, &s->last_flushed); if (ret) goto err; @@ -568,7 +487,6 @@ err: fsck_err: bch2_trans_iter_exit(trans, &other_extent_iter); bch2_trans_iter_exit(trans, &bp_iter); - bch2_dev_put(ca); printbuf_exit(&buf); return ret; check_existing_bp: @@ -576,10 +494,10 @@ check_existing_bp: if (bp_k.k->type != KEY_TYPE_backpointer) goto missing; - struct bch_backpointer other_bp = *bkey_s_c_to_backpointer(bp_k).v; + struct bkey_s_c_backpointer other_bp = bkey_s_c_to_backpointer(bp_k); struct bkey_s_c other_extent = - bch2_backpointer_get_key(trans, &other_extent_iter, bp_k.k->p, other_bp, 0); + bch2_backpointer_get_key(trans, other_bp, &other_extent_iter, 0); ret = bkey_err(other_extent); if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node) ret = 0; @@ -598,19 +516,23 @@ check_existing_bp: bch_err(c, "%s", buf.buf); if (other_extent.k->size <= orig_k.k->size) { - ret = drop_dev_and_update(trans, other_bp.btree_id, other_extent, bucket.inode); + ret = drop_dev_and_update(trans, other_bp.v->btree_id, + other_extent, bp->k.p.inode); if (ret) goto err; goto out; } else { - ret = drop_dev_and_update(trans, bp.btree_id, orig_k, bucket.inode); + ret = drop_dev_and_update(trans, bp->v.btree_id, orig_k, bp->k.p.inode); if (ret) goto err; goto missing; } } - ret = check_extent_checksum(trans, other_bp.btree_id, other_extent, bp.btree_id, orig_k, bucket.inode); + ret = check_extent_checksum(trans, + other_bp.v->btree_id, other_extent, + bp->v.btree_id, orig_k, + bp->k.p.inode); if (ret < 0) goto err; if (ret) { @@ -618,7 +540,8 @@ check_existing_bp: goto missing; } - ret = check_extent_checksum(trans, bp.btree_id, orig_k, other_bp.btree_id, other_extent, bucket.inode); + ret = check_extent_checksum(trans, bp->v.btree_id, orig_k, + other_bp.v->btree_id, other_extent, bp->k.p.inode); if (ret < 0) goto err; if (ret) { @@ -627,7 +550,7 @@ check_existing_bp: } printbuf_reset(&buf); - prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n ", bucket.inode); + prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n ", bp->k.p.inode); bch2_bkey_val_to_text(&buf, c, orig_k); prt_str(&buf, "\n "); bch2_bkey_val_to_text(&buf, c, other_extent); @@ -636,22 +559,15 @@ check_existing_bp: goto err; missing: 
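/*
 * missing: an extent pointer had no matching backpointer key. The repair
 * path can now insert the bkey_i_backpointer that check_extent_to_backpointers()
 * already built via bch2_extent_ptr_to_bp(); bch2_bucket_backpointer_mod()
 * (see the backpointers.h hunk below) turns a delete into a whiteout and
 * queues the update through the btree write buffer:
 *
 *	if (!insert) {
 *		bp->k.type = KEY_TYPE_deleted;
 *		set_bkey_val_u64s(&bp->k, 0);
 *	}
 *	return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp->k_i);
 */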
printbuf_reset(&buf); - prt_str(&buf, "missing backpointer for btree="); - bch2_btree_id_to_text(&buf, bp.btree_id); - prt_printf(&buf, " l=%u ", bp.level); + prt_str(&buf, "missing backpointer "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&bp->k_i)); + prt_newline(&buf); bch2_bkey_val_to_text(&buf, c, orig_k); prt_printf(&buf, "\n got: "); bch2_bkey_val_to_text(&buf, c, bp_k); - struct bkey_i_backpointer n_bp_k; - bkey_backpointer_init(&n_bp_k.k_i); - n_bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset); - n_bp_k.v = bp; - prt_printf(&buf, "\n want: "); - bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&n_bp_k.k_i)); - if (fsck_err(trans, ptr_to_missing_backpointer, "%s", buf.buf)) - ret = bch2_bucket_backpointer_mod(trans, ca, bucket, bp, orig_k, true); + ret = bch2_bucket_backpointer_mod(trans, orig_k, bp, true); goto out; } @@ -669,8 +585,8 @@ static int check_extent_to_backpointers(struct btree_trans *trans, ptrs = bch2_bkey_ptrs_c(k); bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { - struct bpos bucket_pos = POS_MIN; - struct bch_backpointer bp; + struct bpos bucket_pos; + struct bkey_i_backpointer bp; if (p.ptr.cached) continue; @@ -684,7 +600,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans, if (!ca) continue; - ret = check_bp_exists(trans, s, bucket_pos, bp, k); + ret = check_bp_exists(trans, s, &bp, k); if (ret) return ret; } @@ -898,7 +814,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans, int bch2_check_extents_to_backpointers(struct bch_fs *c) { struct btree_trans *trans = bch2_trans_get(c); - struct extents_to_bp_state s = { .bucket_start = POS_MIN }; + struct extents_to_bp_state s = { .bp_start = POS_MIN }; int ret; bch2_bkey_buf_init(&s.last_flushed); @@ -909,35 +825,35 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c) ret = bch2_get_btree_in_memory_pos(trans, BIT_ULL(BTREE_ID_backpointers), BIT_ULL(BTREE_ID_backpointers), - BBPOS(BTREE_ID_backpointers, s.bucket_start), &end); + BBPOS(BTREE_ID_backpointers, s.bp_start), &end); if (ret) break; - s.bucket_end = end.pos; + s.bp_end = end.pos; - if ( bpos_eq(s.bucket_start, POS_MIN) && - !bpos_eq(s.bucket_end, SPOS_MAX)) + if ( bpos_eq(s.bp_start, POS_MIN) && + !bpos_eq(s.bp_end, SPOS_MAX)) bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass", __func__, btree_nodes_fit_in_ram(c)); - if (!bpos_eq(s.bucket_start, POS_MIN) || - !bpos_eq(s.bucket_end, SPOS_MAX)) { + if (!bpos_eq(s.bp_start, POS_MIN) || + !bpos_eq(s.bp_end, SPOS_MAX)) { struct printbuf buf = PRINTBUF; prt_str(&buf, "check_extents_to_backpointers(): "); - bch2_bpos_to_text(&buf, s.bucket_start); + bch2_bpos_to_text(&buf, s.bp_start); prt_str(&buf, "-"); - bch2_bpos_to_text(&buf, s.bucket_end); + bch2_bpos_to_text(&buf, s.bp_end); bch_verbose(c, "%s", buf.buf); printbuf_exit(&buf); } ret = bch2_check_extents_to_backpointers_pass(trans, &s); - if (ret || bpos_eq(s.bucket_end, SPOS_MAX)) + if (ret || bpos_eq(s.bp_end, SPOS_MAX)) break; - s.bucket_start = bpos_successor(s.bucket_end); + s.bp_start = bpos_successor(s.bp_end); } bch2_trans_put(trans); bch2_bkey_buf_exit(&s.last_flushed, c); @@ -951,29 +867,31 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c) static int check_one_backpointer(struct btree_trans *trans, struct bbpos start, struct bbpos end, - struct bkey_s_c_backpointer bp, + struct bkey_s_c bp_k, struct bkey_buf *last_flushed) { + if (bp_k.k->type != KEY_TYPE_backpointer) + return 0; + + struct bkey_s_c_backpointer bp = 
bkey_s_c_to_backpointer(bp_k); struct bch_fs *c = trans->c; - struct btree_iter iter; struct bbpos pos = bp_to_bbpos(*bp.v); - struct bkey_s_c k; struct printbuf buf = PRINTBUF; - int ret; if (bbpos_cmp(pos, start) < 0 || bbpos_cmp(pos, end) > 0) return 0; - k = bch2_backpointer_get_key(trans, &iter, bp.k->p, *bp.v, 0); - ret = bkey_err(k); + struct btree_iter iter; + struct bkey_s_c k = bch2_backpointer_get_key(trans, bp, &iter, 0); + int ret = bkey_err(k); if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node) return 0; if (ret) return ret; if (!k.k) { - ret = bch2_btree_write_buffer_maybe_flush(trans, bp.s_c, last_flushed); + ret = bch2_backpointers_maybe_flush(trans, bp.s_c, last_flushed); if (ret) goto out; @@ -981,7 +899,7 @@ static int check_one_backpointer(struct btree_trans *trans, "backpointer for missing %s\n %s", bp.v->level ? "btree node" : "extent", (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) { - ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p); + ret = bch2_backpointer_del(trans, bp.k->p); goto out; } } @@ -1008,9 +926,7 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ progress_update_iter(trans, &progress, &iter, "backpointers_to_extents"); - check_one_backpointer(trans, start, end, - bkey_s_c_to_backpointer(k), - &last_flushed); + check_one_backpointer(trans, start, end, k, &last_flushed); })); bch2_bkey_buf_exit(&last_flushed, c); diff --git a/libbcachefs/backpointers.h b/libbcachefs/backpointers.h index 3b29fdf5..caffc684 100644 --- a/libbcachefs/backpointers.h +++ b/libbcachefs/backpointers.h @@ -18,14 +18,14 @@ static inline u64 swab40(u64 x) ((x & 0xff00000000ULL) >> 32)); } -int bch2_backpointer_validate(struct bch_fs *, struct bkey_s_c k, enum bch_validate_flags); -void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *); -void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); +int bch2_backpointer_validate(struct bch_fs *, struct bkey_s_c k, + struct bkey_validate_context); +void bch2_backpointer_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); void bch2_backpointer_swab(struct bkey_s); #define bch2_bkey_ops_backpointer ((struct bkey_ops) { \ .key_validate = bch2_backpointer_validate, \ - .val_to_text = bch2_backpointer_k_to_text, \ + .val_to_text = bch2_backpointer_to_text, \ .swab = bch2_backpointer_swab, \ .min_val_size = 32, \ }) @@ -43,22 +43,24 @@ static inline struct bpos bp_pos_to_bucket(const struct bch_dev *ca, struct bpos return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector)); } +static inline struct bpos bp_pos_to_bucket_and_offset(const struct bch_dev *ca, struct bpos bp_pos, + u32 *bucket_offset) +{ + u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT; + + return POS(bp_pos.inode, sector_to_bucket_and_offset(ca, bucket_sector, bucket_offset)); +} + static inline bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket) { rcu_read_lock(); - struct bch_dev *ca = bch2_dev_rcu(c, bp_pos.inode); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp_pos.inode); if (ca) *bucket = bp_pos_to_bucket(ca, bp_pos); rcu_read_unlock(); return ca != NULL; } -static inline bool bp_pos_to_bucket_nodev(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket) -{ - return !bch2_fs_inconsistent_on(!bp_pos_to_bucket_nodev_noerror(c, bp_pos, bucket), - c, "backpointer for missing device %llu", 
bp_pos.inode); -} - static inline struct bpos bucket_pos_to_bp_noerror(const struct bch_dev *ca, struct bpos bucket, u64 bucket_offset) @@ -80,31 +82,35 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_dev *ca, return ret; } -int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bch_dev *, - struct bpos bucket, struct bch_backpointer, struct bkey_s_c, bool); +static inline struct bpos bucket_pos_to_bp_start(const struct bch_dev *ca, struct bpos bucket) +{ + return bucket_pos_to_bp(ca, bucket, 0); +} + +static inline struct bpos bucket_pos_to_bp_end(const struct bch_dev *ca, struct bpos bucket) +{ + return bpos_nosnap_predecessor(bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket), 0)); +} + +int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, + struct bkey_s_c, + struct bkey_i_backpointer *, + bool); static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans, - struct bch_dev *ca, - struct bpos bucket, - struct bch_backpointer bp, struct bkey_s_c orig_k, + struct bkey_i_backpointer *bp, bool insert) { if (unlikely(bch2_backpointers_no_use_write_buffer)) - return bch2_bucket_backpointer_mod_nowritebuffer(trans, ca, bucket, bp, orig_k, insert); - - struct bkey_i_backpointer bp_k; - - bkey_backpointer_init(&bp_k.k_i); - bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset); - bp_k.v = bp; + return bch2_bucket_backpointer_mod_nowritebuffer(trans, orig_k, bp, insert); if (!insert) { - bp_k.k.type = KEY_TYPE_deleted; - set_bkey_val_u64s(&bp_k.k, 0); + bp->k.type = KEY_TYPE_deleted; + set_bkey_val_u64s(&bp->k, 0); } - return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i); + return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp->k_i); } static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k, @@ -138,17 +144,21 @@ static inline void __bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca, enum btree_id btree_id, unsigned level, struct bkey_s_c k, struct extent_ptr_decoded p, const union bch_extent_entry *entry, - struct bpos *bucket_pos, struct bch_backpointer *bp, + struct bpos *bucket, struct bkey_i_backpointer *bp, u64 sectors) { u32 bucket_offset; - *bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset); - *bp = (struct bch_backpointer) { + *bucket = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset); + + u64 bp_bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + p.crc.offset; + + bkey_backpointer_init(&bp->k_i); + bp->k.p = bucket_pos_to_bp(ca, *bucket, bp_bucket_offset); + bp->v = (struct bch_backpointer) { .btree_id = btree_id, .level = level, .data_type = bch2_bkey_ptr_data_type(k, p, entry), - .bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + - p.crc.offset, + .bucket_gen = p.ptr.gen, .bucket_len = sectors, .pos = k.k->p, }; @@ -158,20 +168,17 @@ static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca, enum btree_id btree_id, unsigned level, struct bkey_s_c k, struct extent_ptr_decoded p, const union bch_extent_entry *entry, - struct bpos *bucket_pos, struct bch_backpointer *bp) + struct bpos *bucket_pos, struct bkey_i_backpointer *bp) { u64 sectors = ptr_disk_sectors(level ? 
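/*
 * bucket_pos_to_bp_start()/bucket_pos_to_bp_end() bound all backpointer
 * keys belonging to a single bucket, replacing the old
 * bch2_get_next_backpointer() cursor. A sketch of walking one bucket's
 * backpointers with them -- the iteration macro name here follows the
 * upto -> max rename elsewhere in this patch and is an assumption:
 *
 *	for_each_btree_key_max(trans, iter, BTREE_ID_backpointers,
 *			       bucket_pos_to_bp_start(ca, bucket),
 *			       bucket_pos_to_bp_end(ca, bucket),
 *			       0, k, ret) {
 *		if (k.k->type != KEY_TYPE_backpointer)
 *			continue;
 *		...	(the key's position encodes bucket + offset; the
 *			 value now carries bucket_gen instead of bucket_offset)
 *	}
 */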
btree_sectors(c) : k.k->size, p); __bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, bucket_pos, bp, sectors); } -int bch2_get_next_backpointer(struct btree_trans *, struct bch_dev *ca, struct bpos, int, - struct bpos *, struct bch_backpointer *, unsigned); -struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *, - struct bpos, struct bch_backpointer, - unsigned); -struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *, - struct bpos, struct bch_backpointer); +struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct bkey_s_c_backpointer, + struct btree_iter *, unsigned); +struct btree *bch2_backpointer_get_node(struct btree_trans *, struct bkey_s_c_backpointer, + struct btree_iter *); int bch2_check_btree_backpointers(struct bch_fs *); int bch2_check_extents_to_backpointers(struct bch_fs *); diff --git a/libbcachefs/bcachefs.h b/libbcachefs/bcachefs.h index ecf24e85..b12c9c78 100644 --- a/libbcachefs/bcachefs.h +++ b/libbcachefs/bcachefs.h @@ -205,6 +205,7 @@ #include <linux/zstd.h> #include "bcachefs_format.h" +#include "btree_journal_iter_types.h" #include "disk_accounting_types.h" #include "errcode.h" #include "fifo.h" @@ -559,10 +560,10 @@ struct bch_dev { struct bch_dev_usage __percpu *usage; /* Allocator: */ - u64 new_fs_bucket_idx; u64 alloc_cursor[3]; unsigned nr_open_buckets; + unsigned nr_partial_buckets; unsigned nr_btree_reserve; size_t inc_gen_needs_gc; @@ -613,6 +614,7 @@ struct bch_dev { x(going_ro) \ x(write_disable_complete) \ x(clean_shutdown) \ + x(recovery_running) \ x(fsck_running) \ x(initial_gc_unfixed) \ x(need_delete_dead_snapshots) \ @@ -657,28 +659,6 @@ struct journal_seq_blacklist_table { } entries[]; }; -struct journal_keys { - /* must match layout in darray_types.h */ - size_t nr, size; - struct journal_key { - u64 journal_seq; - u32 journal_offset; - enum btree_id btree_id:8; - unsigned level:8; - bool allocated; - bool overwritten; - struct bkey_i *k; - } *data; - /* - * Gap buffer: instead of all the empty space in the array being at the - * end of the buffer - from @nr to @size - the empty space is at @gap. - * This means that sequential insertions are O(n) instead of O(n^2). 
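As a concrete sketch of the gap buffer (names here are illustrative, not
the helper the tree actually uses): keys [0, gap) sit at the front of the
array and keys [gap, nr) at the back, so inserting at the gap is a plain
store, and repositioning the gap costs one memmove proportional to the
distance moved:

	static void journal_keys_move_gap(struct journal_key *d, size_t nr,
					  size_t size, size_t *gap, size_t idx)
	{
		size_t gap_size = size - nr;

		if (idx < *gap)		// shift [idx, gap) up past the gap
			memmove(d + idx + gap_size, d + idx,
				(*gap - idx) * sizeof(*d));
		else if (idx > *gap)	// shift [gap, idx) down into the gap
			memmove(d + *gap, d + *gap + gap_size,
				(idx - *gap) * sizeof(*d));
		*gap = idx;
	}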
- */ - size_t gap; - atomic_t ref; - bool initial_ref_held; -}; - struct btree_trans_buf { struct btree_trans *trans; }; @@ -700,6 +680,7 @@ struct btree_trans_buf { x(dio_write) \ x(discard) \ x(discard_fast) \ + x(check_discard_freespace_key) \ x(invalidate) \ x(delete_dead_snapshots) \ x(gc_gens) \ @@ -743,6 +724,12 @@ struct bch_fs { struct percpu_ref writes; #endif /* + * Certain operations are only allowed in single threaded mode, during + * recovery, and we want to assert that this is the case: + */ + struct task_struct *recovery_task; + + /* * Analagous to c->writes, for asynchronous ops that don't necessarily * need fs to be read-write */ @@ -842,9 +829,10 @@ struct bch_fs { struct work_struct btree_interior_update_work; struct workqueue_struct *btree_node_rewrite_worker; - - struct list_head pending_node_rewrites; - struct mutex pending_node_rewrites_lock; + struct list_head btree_node_rewrites; + struct list_head btree_node_rewrites_pending; + spinlock_t btree_node_rewrites_lock; + struct closure_waitlist btree_node_rewrites_wait; /* btree_io.c: */ spinlock_t btree_write_error_lock; @@ -975,8 +963,7 @@ struct bch_fs { struct rhashtable promote_table; mempool_t compression_bounce[2]; - mempool_t compress_workspace[BCH_COMPRESSION_TYPE_NR]; - mempool_t decompress_workspace; + mempool_t compress_workspace[BCH_COMPRESSION_OPT_NR]; size_t zstd_workspace_size; struct crypto_shash *sha256; @@ -1035,6 +1022,7 @@ struct bch_fs { struct list_head vfs_inodes_list; struct mutex vfs_inodes_lock; struct rhashtable vfs_inodes_table; + struct rhltable vfs_inodes_by_inum_table; /* VFS IO PATH - fs-io.c */ struct bio_set writepage_bioset; @@ -1095,8 +1083,6 @@ struct bch_fs { u64 counters_on_mount[BCH_COUNTER_NR]; u64 __percpu *counters; - unsigned copy_gc_enabled:1; - struct bch2_time_stats times[BCH_TIME_STAT_NR]; struct btree_transaction_stats btree_transaction_stats[BCH_TRANSACTIONS_NR]; diff --git a/libbcachefs/bcachefs_format.h b/libbcachefs/bcachefs_format.h index c5e3824d..cef22c15 100644 --- a/libbcachefs/bcachefs_format.h +++ b/libbcachefs/bcachefs_format.h @@ -463,7 +463,8 @@ struct bch_backpointer { __u8 btree_id; __u8 level; __u8 data_type; - __u64 bucket_offset:40; + __u8 bucket_gen; + __u32 pad; __u32 bucket_len; struct bpos pos; } __packed __aligned(8); @@ -677,7 +678,9 @@ struct bch_sb_field_ext { x(disk_accounting_v3, BCH_VERSION(1, 10)) \ x(disk_accounting_inum, BCH_VERSION(1, 11)) \ x(rebalance_work_acct_fix, BCH_VERSION(1, 12)) \ - x(inode_has_child_snapshots, BCH_VERSION(1, 13)) + x(inode_has_child_snapshots, BCH_VERSION(1, 13)) \ + x(backpointer_bucket_gen, BCH_VERSION(1, 14)) \ + x(disk_accounting_big_endian, BCH_VERSION(1, 15)) enum bcachefs_metadata_version { bcachefs_metadata_version_min = 9, @@ -1030,7 +1033,7 @@ static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type) x(crc64, 2) \ x(xxhash, 3) -enum bch_csum_opts { +enum bch_csum_opt { #define x(t, n) BCH_CSUM_OPT_##t = n, BCH_CSUM_OPTS() #undef x diff --git a/libbcachefs/bkey.c b/libbcachefs/bkey.c index 587d7318..995ba32e 100644 --- a/libbcachefs/bkey.c +++ b/libbcachefs/bkey.c @@ -643,7 +643,7 @@ int bch2_bkey_format_invalid(struct bch_fs *c, enum bch_validate_flags flags, struct printbuf *err) { - unsigned i, bits = KEY_PACKED_BITS_START; + unsigned bits = KEY_PACKED_BITS_START; if (f->nr_fields != BKEY_NR_FIELDS) { prt_printf(err, "incorrect number of fields: got %u, should be %u", @@ -655,9 +655,8 @@ int bch2_bkey_format_invalid(struct bch_fs *c, * Verify that the packed format can't represent 
fields larger than the * unpacked format: */ - for (i = 0; i < f->nr_fields; i++) { - if ((!c || c->sb.version_min >= bcachefs_metadata_version_snapshot) && - bch2_bkey_format_field_overflows(f, i)) { + for (unsigned i = 0; i < f->nr_fields; i++) { + if (bch2_bkey_format_field_overflows(f, i)) { unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i]; u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1)); unsigned packed_bits = min(64, f->bits_per_field[i]); diff --git a/libbcachefs/bkey.h b/libbcachefs/bkey.h index 41df24a5..054e2d5e 100644 --- a/libbcachefs/bkey.h +++ b/libbcachefs/bkey.h @@ -9,13 +9,6 @@ #include "util.h" #include "vstructs.h" -enum bch_validate_flags { - BCH_VALIDATE_write = BIT(0), - BCH_VALIDATE_commit = BIT(1), - BCH_VALIDATE_journal = BIT(2), - BCH_VALIDATE_silent = BIT(3), -}; - #if 0 /* diff --git a/libbcachefs/bkey_methods.c b/libbcachefs/bkey_methods.c index e7ac227b..15c93576 100644 --- a/libbcachefs/bkey_methods.c +++ b/libbcachefs/bkey_methods.c @@ -28,7 +28,7 @@ const char * const bch2_bkey_types[] = { }; static int deleted_key_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { return 0; } @@ -42,7 +42,7 @@ static int deleted_key_validate(struct bch_fs *c, struct bkey_s_c k, }) static int empty_val_key_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -59,7 +59,7 @@ fsck_err: }) static int key_type_cookie_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { return 0; } @@ -83,7 +83,7 @@ static void key_type_cookie_to_text(struct printbuf *out, struct bch_fs *c, }) static int key_type_inline_data_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { return 0; } @@ -124,7 +124,7 @@ const struct bkey_ops bch2_bkey_null_ops = { }; int bch2_bkey_val_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { if (test_bit(BCH_FS_no_invalid_checks, &c->flags)) return 0; @@ -140,7 +140,7 @@ int bch2_bkey_val_validate(struct bch_fs *c, struct bkey_s_c k, if (!ops->key_validate) return 0; - ret = ops->key_validate(c, k, flags); + ret = ops->key_validate(c, k, from); fsck_err: return ret; } @@ -161,9 +161,10 @@ const char *bch2_btree_node_type_str(enum btree_node_type type) } int __bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k, - enum btree_node_type type, - enum bch_validate_flags flags) + struct bkey_validate_context from) { + enum btree_node_type type = __btree_node_type(from.level, from.btree); + if (test_bit(BCH_FS_no_invalid_checks, &c->flags)) return 0; @@ -177,7 +178,7 @@ int __bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k, return 0; bkey_fsck_err_on(k.k->type < KEY_TYPE_MAX && - (type == BKEY_TYPE_btree || (flags & BCH_VALIDATE_commit)) && + (type == BKEY_TYPE_btree || (from.flags & BCH_VALIDATE_commit)) && !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)), c, bkey_invalid_type_for_btree, "invalid key type for btree %s (%s)", @@ -228,15 +229,15 @@ fsck_err: } int bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k, - enum btree_node_type type, - enum bch_validate_flags flags) + struct bkey_validate_context from) { - return __bch2_bkey_validate(c, k, type, flags) ?: - bch2_bkey_val_validate(c, k, flags); + return __bch2_bkey_validate(c, k, from) ?: + bch2_bkey_val_validate(c, k, from); } 
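/*
 * Key validation now threads a bkey_validate_context through instead of
 * bare bch_validate_flags, so errors can report where the key came from.
 * Caller-side sketch, matching the btree_io.c hunks below:
 *
 *	ret = bch2_bkey_validate(c, k, (struct bkey_validate_context) {
 *		.from	= BKEY_VALIDATE_btree_node,
 *		.level	= b->c.level,
 *		.btree	= b->c.btree_id,
 *		.flags	= BCH_VALIDATE_write,
 *	});
 */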
int bch2_bkey_in_btree_node(struct bch_fs *c, struct btree *b, - struct bkey_s_c k, enum bch_validate_flags flags) + struct bkey_s_c k, + struct bkey_validate_context from) { int ret = 0; diff --git a/libbcachefs/bkey_methods.h b/libbcachefs/bkey_methods.h index 018fb72e..bf34111c 100644 --- a/libbcachefs/bkey_methods.h +++ b/libbcachefs/bkey_methods.h @@ -22,7 +22,7 @@ extern const struct bkey_ops bch2_bkey_null_ops; */ struct bkey_ops { int (*key_validate)(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags); + struct bkey_validate_context from); void (*val_to_text)(struct printbuf *, struct bch_fs *, struct bkey_s_c); void (*swab)(struct bkey_s); @@ -48,13 +48,14 @@ static inline const struct bkey_ops *bch2_bkey_type_ops(enum bch_bkey_type type) : &bch2_bkey_null_ops; } -int bch2_bkey_val_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); -int __bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, enum btree_node_type, - enum bch_validate_flags); -int bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, enum btree_node_type, - enum bch_validate_flags); +int bch2_bkey_val_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); +int __bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); +int bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); int bch2_bkey_in_btree_node(struct bch_fs *, struct btree *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context from); void bch2_bpos_to_text(struct printbuf *, struct bpos); void bch2_bkey_to_text(struct printbuf *, const struct bkey *); diff --git a/libbcachefs/bkey_types.h b/libbcachefs/bkey_types.h index c9ae9e42..2af6279b 100644 --- a/libbcachefs/bkey_types.h +++ b/libbcachefs/bkey_types.h @@ -210,4 +210,30 @@ static inline struct bkey_i_##name *bkey_##name##_init(struct bkey_i *_k)\ BCH_BKEY_TYPES(); #undef x +enum bch_validate_flags { + BCH_VALIDATE_write = BIT(0), + BCH_VALIDATE_commit = BIT(1), + BCH_VALIDATE_journal = BIT(2), + BCH_VALIDATE_silent = BIT(3), +}; + +#define BKEY_VALIDATE_CONTEXTS() \ + x(unknown) \ + x(commit) \ + x(journal) \ + x(btree_root) \ + x(btree_node) + +struct bkey_validate_context { + enum { +#define x(n) BKEY_VALIDATE_##n, + BKEY_VALIDATE_CONTEXTS() +#undef x + } from:8; + u8 level; + enum btree_id btree; + bool root:1; + enum bch_validate_flags flags:8; +}; + #endif /* _BCACHEFS_BKEY_TYPES_H */ diff --git a/libbcachefs/bset.c b/libbcachefs/bset.c index d1f60926..9a4a83d6 100644 --- a/libbcachefs/bset.c +++ b/libbcachefs/bset.c @@ -13,7 +13,7 @@ #include "trace.h" #include "util.h" -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/console.h> #include <linux/random.h> #include <linux/prefetch.h> diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c index 3baa9f3c..1117be90 100644 --- a/libbcachefs/btree_cache.c +++ b/libbcachefs/btree_cache.c @@ -59,16 +59,38 @@ static inline size_t btree_cache_can_free(struct btree_cache_list *list) static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b) { + BUG_ON(!list_empty(&b->list)); + if (b->c.lock.readers) - list_move(&b->list, &bc->freed_pcpu); + list_add(&b->list, &bc->freed_pcpu); else - list_move(&b->list, &bc->freed_nonpcpu); + list_add(&b->list, &bc->freed_nonpcpu); +} + +static void __bch2_btree_node_to_freelist(struct btree_cache *bc, struct btree *b) +{ + BUG_ON(!list_empty(&b->list)); + BUG_ON(!b->data); + + bc->nr_freeable++; + list_add(&b->list, &bc->freeable); 
} -static void btree_node_data_free(struct bch_fs *c, struct btree *b) +void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b) { struct btree_cache *bc = &c->btree_cache; + mutex_lock(&bc->lock); + __bch2_btree_node_to_freelist(bc, b); + mutex_unlock(&bc->lock); + + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); +} + +static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) +{ + BUG_ON(!list_empty(&b->list)); BUG_ON(btree_node_hashed(b)); /* @@ -94,11 +116,17 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b) #endif b->aux_data = NULL; - bc->nr_freeable--; - btree_node_to_freedlist(bc, b); } +static void btree_node_data_free(struct btree_cache *bc, struct btree *b) +{ + BUG_ON(list_empty(&b->list)); + list_del_init(&b->list); + --bc->nr_freeable; + __btree_node_data_free(bc, b); +} + static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg, const void *obj) { @@ -174,21 +202,10 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c) bch2_btree_lock_init(&b->c, 0); - bc->nr_freeable++; - list_add(&b->list, &bc->freeable); + __bch2_btree_node_to_freelist(bc, b); return b; } -void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b) -{ - mutex_lock(&c->btree_cache.lock); - list_move(&b->list, &c->btree_cache.freeable); - mutex_unlock(&c->btree_cache.lock); - - six_unlock_write(&b->c.lock); - six_unlock_intent(&b->c.lock); -} - static inline bool __btree_node_pinned(struct btree_cache *bc, struct btree *b) { struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p); @@ -236,11 +253,11 @@ void bch2_btree_cache_unpin(struct bch_fs *c) /* Btree in memory cache - hash table */ -void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) +void __bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) { lockdep_assert_held(&bc->lock); - int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); + int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); BUG_ON(ret); /* Cause future lookups for this node to fail: */ @@ -248,17 +265,22 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) if (b->c.btree_id < BTREE_ID_NR) --bc->nr_by_btree[b->c.btree_id]; + --bc->live[btree_node_pinned(b)].nr; + list_del_init(&b->list); +} - bc->live[btree_node_pinned(b)].nr--; - bc->nr_freeable++; - list_move(&b->list, &bc->freeable); +void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) +{ + __bch2_btree_node_hash_remove(bc, b); + __bch2_btree_node_to_freelist(bc, b); } int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b) { + BUG_ON(!list_empty(&b->list)); BUG_ON(b->hash_val); - b->hash_val = btree_ptr_hash_val(&b->key); + b->hash_val = btree_ptr_hash_val(&b->key); int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash, bch_btree_cache_params); if (ret) @@ -270,10 +292,8 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b) bool p = __btree_node_pinned(bc, b); mod_bit(BTREE_NODE_pinned, &b->flags, p); - list_move_tail(&b->list, &bc->live[p].list); + list_add_tail(&b->list, &bc->live[p].list); bc->live[p].nr++; - - bc->nr_freeable--; return 0; } @@ -306,7 +326,7 @@ void bch2_btree_node_update_key_early(struct btree_trans *trans, if (!IS_ERR_OR_NULL(b)) { mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, b); + __bch2_btree_node_hash_remove(&c->btree_cache, b); bkey_copy(&b->key, new); ret = 
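/*
 * The btree node cache now keeps each node on at most one list, asserted
 * with BUG_ON(!list_empty(&b->list)), so every transfer is an explicit
 * del + add rather than list_move(). E.g. when stealing memory from a
 * reclaimable node, as in the got_node: path below:
 *
 *	list_del_init(&b2->list);
 *	--bc->nr_freeable;
 *	btree_node_to_freedlist(bc, b2);
 */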
__bch2_btree_node_hash_insert(&c->btree_cache, b); @@ -485,7 +505,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink, goto out; if (!btree_node_reclaim(c, b, true)) { - btree_node_data_free(c, b); + btree_node_data_free(bc, b); six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); freed++; @@ -501,10 +521,10 @@ restart: bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++; --touched;; } else if (!btree_node_reclaim(c, b, true)) { - bch2_btree_node_hash_remove(bc, b); + __bch2_btree_node_hash_remove(bc, b); + __btree_node_data_free(bc, b); freed++; - btree_node_data_free(c, b); bc->nr_freed++; six_unlock_write(&b->c.lock); @@ -587,7 +607,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c) BUG_ON(btree_node_read_in_flight(b) || btree_node_write_in_flight(b)); - btree_node_data_free(c, b); + btree_node_data_free(bc, b); } BUG_ON(!bch2_journal_error(&c->journal) && @@ -786,8 +806,8 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea BUG_ON(!six_trylock_intent(&b->c.lock)); BUG_ON(!six_trylock_write(&b->c.lock)); -got_node: +got_node: /* * btree_free() doesn't free memory; it sticks the node on the end of * the list. Check if there's any freed nodes there: @@ -796,7 +816,12 @@ got_node: if (!btree_node_reclaim(c, b2, false)) { swap(b->data, b2->data); swap(b->aux_data, b2->aux_data); + + list_del_init(&b2->list); + --bc->nr_freeable; btree_node_to_freedlist(bc, b2); + mutex_unlock(&bc->lock); + six_unlock_write(&b2->c.lock); six_unlock_intent(&b2->c.lock); goto got_mem; @@ -810,11 +835,8 @@ got_node: goto err; } - mutex_lock(&bc->lock); - bc->nr_freeable++; got_mem: - mutex_unlock(&bc->lock); - + BUG_ON(!list_empty(&b->list)); BUG_ON(btree_node_hashed(b)); BUG_ON(btree_node_dirty(b)); BUG_ON(btree_node_write_in_flight(b)); @@ -845,7 +867,7 @@ err: if (bc->alloc_lock == current) { b2 = btree_node_cannibalize(c); clear_btree_node_just_written(b2); - bch2_btree_node_hash_remove(bc, b2); + __bch2_btree_node_hash_remove(bc, b2); if (b) { swap(b->data, b2->data); @@ -855,9 +877,9 @@ err: six_unlock_intent(&b2->c.lock); } else { b = b2; - list_del_init(&b->list); } + BUG_ON(!list_empty(&b->list)); mutex_unlock(&bc->lock); trace_and_count(c, btree_cache_cannibalize, trans); @@ -936,7 +958,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans, b->hash_val = 0; mutex_lock(&bc->lock); - list_add(&b->list, &bc->freeable); + __bch2_btree_node_to_freelist(bc, b); mutex_unlock(&bc->lock); six_unlock_write(&b->c.lock); @@ -1109,7 +1131,7 @@ retry: if (unlikely(btree_node_read_error(b))) { six_unlock_type(&b->c.lock, lock_type); - return ERR_PTR(-BCH_ERR_btree_node_read_error); + return ERR_PTR(-BCH_ERR_btree_node_read_err_cached); } EBUG_ON(b->c.btree_id != path->btree_id); @@ -1199,7 +1221,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path * if (unlikely(btree_node_read_error(b))) { six_unlock_type(&b->c.lock, lock_type); - return ERR_PTR(-BCH_ERR_btree_node_read_error); + return ERR_PTR(-BCH_ERR_btree_node_read_err_cached); } EBUG_ON(b->c.btree_id != path->btree_id); @@ -1281,7 +1303,7 @@ lock_node: if (unlikely(btree_node_read_error(b))) { six_unlock_read(&b->c.lock); - b = ERR_PTR(-BCH_ERR_btree_node_read_error); + b = ERR_PTR(-BCH_ERR_btree_node_read_err_cached); goto out; } @@ -1310,9 +1332,12 @@ int bch2_btree_node_prefetch(struct btree_trans *trans, b = bch2_btree_node_fill(trans, path, k, btree_id, level, SIX_LOCK_read, false); - if (!IS_ERR_OR_NULL(b)) + int ret = 
PTR_ERR_OR_ZERO(b); + if (ret) + return ret; + if (b) six_unlock_read(&b->c.lock); - return bch2_trans_relock(trans) ?: PTR_ERR_OR_ZERO(b); + return 0; } void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k) @@ -1351,7 +1376,7 @@ wait_on_io: mutex_lock(&bc->lock); bch2_btree_node_hash_remove(bc, b); - btree_node_data_free(c, b); + btree_node_data_free(bc, b); mutex_unlock(&bc->lock); out: six_unlock_write(&b->c.lock); @@ -1381,9 +1406,14 @@ void bch2_btree_id_level_to_text(struct printbuf *out, enum btree_id btree, unsi void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) { bch2_btree_id_to_text(out, b->c.btree_id); - prt_printf(out, " level %u/%u\n ", - b->c.level, - bch2_btree_id_root(c, b->c.btree_id)->level); + prt_printf(out, " level %u/", b->c.level); + struct btree_root *r = bch2_btree_id_root(c, b->c.btree_id); + if (r) + prt_printf(out, "%u", r->level); + else + prt_printf(out, "(unknown)"); + prt_printf(out, "\n "); + bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key)); } diff --git a/libbcachefs/btree_cache.h b/libbcachefs/btree_cache.h index 6fc5990f..dcc34fe4 100644 --- a/libbcachefs/btree_cache.h +++ b/libbcachefs/btree_cache.h @@ -14,7 +14,9 @@ void bch2_recalc_btree_reserve(struct bch_fs *); void bch2_btree_node_to_freelist(struct bch_fs *, struct btree *); +void __bch2_btree_node_hash_remove(struct btree_cache *, struct btree *); void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *); + int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *); int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *, unsigned, enum btree_id); @@ -126,14 +128,19 @@ static inline struct btree_root *bch2_btree_id_root(struct bch_fs *c, unsigned i } else { unsigned idx = id - BTREE_ID_NR; - EBUG_ON(idx >= c->btree_roots_extra.nr); + /* This can happen when we're called from btree_node_scan */ + if (idx >= c->btree_roots_extra.nr) + return NULL; + return &c->btree_roots_extra.data[idx]; } } static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b) { - return bch2_btree_id_root(c, b->c.btree_id)->b; + struct btree_root *r = bch2_btree_id_root(c, b->c.btree_id); + + return r ? 
r->b : NULL; } const char *bch2_btree_id_str(enum btree_id); /* avoid */ diff --git a/libbcachefs/btree_gc.c b/libbcachefs/btree_gc.c index 7fbf3885..e59924cf 100644 --- a/libbcachefs/btree_gc.c +++ b/libbcachefs/btree_gc.c @@ -182,7 +182,7 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max) bch2_btree_node_drop_keys_outside_node(b); mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, b); + __bch2_btree_node_hash_remove(&c->btree_cache, b); bkey_copy(&b->key, &new->k_i); ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); @@ -348,7 +348,7 @@ again: bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k)); if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO), - trans, btree_node_unreadable, + trans, btree_node_read_error, "Topology repair: unreadable btree node at\n" " %s", buf.buf)) { @@ -521,6 +521,7 @@ int bch2_check_topology(struct bch_fs *c) struct btree_root *r = bch2_btree_id_root(c, i); bool reconstructed_root = false; + printbuf_reset(&buf); bch2_btree_id_to_text(&buf, i); if (r->error) { @@ -904,11 +905,11 @@ static int bch2_gc_alloc_done(struct bch_fs *c) for_each_member_device(c, ca) { ret = bch2_trans_run(c, - for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc, + for_each_btree_key_max_commit(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, ca->mi.first_bucket), POS(ca->dev_idx, ca->mi.nbuckets - 1), BTREE_ITER_slots|BTREE_ITER_prefetch, k, - NULL, NULL, BCH_TRANS_COMMIT_lazy_rw, + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, bch2_alloc_write_key(trans, &iter, ca, k))); if (ret) { bch2_dev_put(ca); @@ -937,98 +938,6 @@ static int bch2_gc_alloc_start(struct bch_fs *c) return ret; } -static int bch2_gc_write_reflink_key(struct btree_trans *trans, - struct btree_iter *iter, - struct bkey_s_c k, - size_t *idx) -{ - struct bch_fs *c = trans->c; - const __le64 *refcount = bkey_refcount_c(k); - struct printbuf buf = PRINTBUF; - struct reflink_gc *r; - int ret = 0; - - if (!refcount) - return 0; - - while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) && - r->offset < k.k->p.offset) - ++*idx; - - if (!r || - r->offset != k.k->p.offset || - r->size != k.k->size) { - bch_err(c, "unexpected inconsistency walking reflink table at gc finish"); - return -EINVAL; - } - - if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), - trans, reflink_v_refcount_wrong, - "reflink key has wrong refcount:\n" - " %s\n" - " should be %u", - (bch2_bkey_val_to_text(&buf, c, k), buf.buf), - r->refcount)) { - struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); - ret = PTR_ERR_OR_ZERO(new); - if (ret) - goto out; - - if (!r->refcount) - new->k.type = KEY_TYPE_deleted; - else - *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount); - ret = bch2_trans_update(trans, iter, new, 0); - } -out: -fsck_err: - printbuf_exit(&buf); - return ret; -} - -static int bch2_gc_reflink_done(struct bch_fs *c) -{ - size_t idx = 0; - - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, - BTREE_ID_reflink, POS_MIN, - BTREE_ITER_prefetch, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_gc_write_reflink_key(trans, &iter, k, &idx))); - c->reflink_gc_nr = 0; - return ret; -} - -static int bch2_gc_reflink_start(struct bch_fs *c) -{ - c->reflink_gc_nr = 0; - - int ret = bch2_trans_run(c, - for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN, - BTREE_ITER_prefetch, k, ({ - const __le64 *refcount = bkey_refcount_c(k); - - if (!refcount) - continue; - - struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table, - c->reflink_gc_nr++, 
GFP_KERNEL); - if (!r) { - ret = -BCH_ERR_ENOMEM_gc_reflink_start; - break; - } - - r->offset = k.k->p.offset; - r->size = k.k->size; - r->refcount = 0; - 0; - }))); - - bch_err_fn(c, ret); - return ret; -} - static int bch2_gc_write_stripes_key(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) diff --git a/libbcachefs/btree_io.c b/libbcachefs/btree_io.c index 0afd21f4..9df9fc1c 100644 --- a/libbcachefs/btree_io.c +++ b/libbcachefs/btree_io.c @@ -732,11 +732,8 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca, c, ca, b, i, NULL, bset_past_end_of_btree_node, "bset past end of btree node (offset %u len %u but written %zu)", - offset, sectors, ptr_written ?: btree_sectors(c))) { + offset, sectors, ptr_written ?: btree_sectors(c))) i->u64s = 0; - ret = 0; - goto out; - } btree_err_on(offset && !i->u64s, -BCH_ERR_btree_node_read_err_fixable, @@ -828,20 +825,38 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca, BSET_BIG_ENDIAN(i), write, &bn->format); } -out: fsck_err: printbuf_exit(&buf2); printbuf_exit(&buf1); return ret; } +static int btree_node_bkey_val_validate(struct bch_fs *c, struct btree *b, + struct bkey_s_c k, + enum bch_validate_flags flags) +{ + return bch2_bkey_val_validate(c, k, (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = b->c.level, + .btree = b->c.btree_id, + .flags = flags + }); +} + static int bset_key_validate(struct bch_fs *c, struct btree *b, struct bkey_s_c k, - bool updated_range, int rw) + bool updated_range, + enum bch_validate_flags flags) { - return __bch2_bkey_validate(c, k, btree_node_type(b), 0) ?: - (!updated_range ? bch2_bkey_in_btree_node(c, b, k, 0) : 0) ?: - (rw == WRITE ? bch2_bkey_val_validate(c, k, 0) : 0); + struct bkey_validate_context from = (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = b->c.level, + .btree = b->c.btree_id, + .flags = flags, + }; + return __bch2_bkey_validate(c, k, from) ?: + (!updated_range ? bch2_bkey_in_btree_node(c, b, k, from) : 0) ?: + (flags & BCH_VALIDATE_write ? 
btree_node_bkey_val_validate(c, b, k, flags) : 0); } static bool bkey_packed_valid(struct bch_fs *c, struct btree *b, @@ -858,7 +873,21 @@ static bool bkey_packed_valid(struct bch_fs *c, struct btree *b, struct bkey tmp; struct bkey_s u = __bkey_disassemble(b, k, &tmp); - return !__bch2_bkey_validate(c, u.s_c, btree_node_type(b), BCH_VALIDATE_silent); + return !__bch2_bkey_validate(c, u.s_c, + (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = b->c.level, + .btree = b->c.btree_id, + .flags = BCH_VALIDATE_silent + }); +} + +static inline int btree_node_read_bkey_cmp(const struct btree *b, + const struct bkey_packed *l, + const struct bkey_packed *r) +{ + return bch2_bkey_cmp_packed(b, l, r) + ?: (int) bkey_deleted(r) - (int) bkey_deleted(l); } static int validate_bset_keys(struct bch_fs *c, struct btree *b, @@ -921,7 +950,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b, BSET_BIG_ENDIAN(i), write, &b->format, k); - if (prev && bkey_iter_cmp(b, prev, k) > 0) { + if (prev && btree_node_read_bkey_cmp(b, prev, k) >= 0) { struct bkey up = bkey_unpack_key(b, prev); printbuf_reset(&buf); @@ -968,6 +997,7 @@ drop_this_key: got_good_key: le16_add_cpu(&i->u64s, -next_good_key); memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k); + set_btree_node_need_rewrite(b); } fsck_err: printbuf_exit(&buf); @@ -1041,39 +1071,51 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, while (b->written < (ptr_written ?: btree_sectors(c))) { unsigned sectors; - struct nonce nonce; bool first = !b->written; - bool csum_bad; - if (!b->written) { + if (first) { + bne = NULL; i = &b->data->keys; + } else { + bne = write_block(b); + i = &bne->keys; - btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)), - -BCH_ERR_btree_node_read_err_want_retry, - c, ca, b, i, NULL, - bset_unknown_csum, - "unknown checksum type %llu", BSET_CSUM_TYPE(i)); - - nonce = btree_nonce(i, b->written << 9); + if (i->seq != b->data->keys.seq) + break; + } - struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data); - csum_bad = bch2_crc_cmp(b->data->csum, csum); - if (csum_bad) - bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); + struct nonce nonce = btree_nonce(i, b->written << 9); + bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)); - btree_err_on(csum_bad, - -BCH_ERR_btree_node_read_err_want_retry, - c, ca, b, i, NULL, - bset_bad_csum, - "%s", - (printbuf_reset(&buf), - bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum), - buf.buf)); - - ret = bset_encrypt(c, i, b->written << 9); - if (bch2_fs_fatal_err_on(ret, c, - "decrypting btree node: %s", bch2_err_str(ret))) - goto fsck_err; + btree_err_on(!good_csum_type, + bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) + ? 
-BCH_ERR_btree_node_read_err_must_retry + : -BCH_ERR_btree_node_read_err_want_retry, + c, ca, b, i, NULL, + bset_unknown_csum, + "unknown checksum type %llu", BSET_CSUM_TYPE(i)); + + if (first) { + if (good_csum_type) { + struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data); + bool csum_bad = bch2_crc_cmp(b->data->csum, csum); + if (csum_bad) + bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); + + btree_err_on(csum_bad, + -BCH_ERR_btree_node_read_err_want_retry, + c, ca, b, i, NULL, + bset_bad_csum, + "%s", + (printbuf_reset(&buf), + bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum), + buf.buf)); + + ret = bset_encrypt(c, i, b->written << 9); + if (bch2_fs_fatal_err_on(ret, c, + "decrypting btree node: %s", bch2_err_str(ret))) + goto fsck_err; + } btree_err_on(btree_node_type_is_extents(btree_node_type(b)) && !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data), @@ -1084,37 +1126,26 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, sectors = vstruct_sectors(b->data, c->block_bits); } else { - bne = write_block(b); - i = &bne->keys; - - if (i->seq != b->data->keys.seq) - break; - - btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)), - -BCH_ERR_btree_node_read_err_want_retry, - c, ca, b, i, NULL, - bset_unknown_csum, - "unknown checksum type %llu", BSET_CSUM_TYPE(i)); - - nonce = btree_nonce(i, b->written << 9); - struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne); - csum_bad = bch2_crc_cmp(bne->csum, csum); - if (ca && csum_bad) - bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); - - btree_err_on(csum_bad, - -BCH_ERR_btree_node_read_err_want_retry, - c, ca, b, i, NULL, - bset_bad_csum, - "%s", - (printbuf_reset(&buf), - bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum), - buf.buf)); - - ret = bset_encrypt(c, i, b->written << 9); - if (bch2_fs_fatal_err_on(ret, c, - "decrypting btree node: %s", bch2_err_str(ret))) - goto fsck_err; + if (good_csum_type) { + struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne); + bool csum_bad = bch2_crc_cmp(bne->csum, csum); + if (ca && csum_bad) + bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); + + btree_err_on(csum_bad, + -BCH_ERR_btree_node_read_err_want_retry, + c, ca, b, i, NULL, + bset_bad_csum, + "%s", + (printbuf_reset(&buf), + bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum), + buf.buf)); + + ret = bset_encrypt(c, i, b->written << 9); + if (bch2_fs_fatal_err_on(ret, c, + "decrypting btree node: %s", bch2_err_str(ret))) + goto fsck_err; + } sectors = vstruct_sectors(bne, c->block_bits); } @@ -1219,7 +1250,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, struct bkey tmp; struct bkey_s u = __bkey_disassemble(b, k, &tmp); - ret = bch2_bkey_val_validate(c, u.s_c, READ); + ret = btree_node_bkey_val_validate(c, b, u.s_c, READ); if (ret == -BCH_ERR_fsck_delete_bkey || (bch2_inject_invalid_keys && !bversion_cmp(u.k->bversion, MAX_VERSION))) { @@ -1229,6 +1260,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k); set_btree_bset_end(b, b->set); + set_btree_node_need_rewrite(b); continue; } if (ret) @@ -1342,15 +1374,18 @@ start: rb->start_time); bio_put(&rb->bio); - if (saw_error && + if ((saw_error || + btree_node_need_rewrite(b)) && !btree_node_read_error(b) && c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) { - printbuf_reset(&buf); - bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); - prt_str(&buf, " "); - 
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); - bch_err_ratelimited(c, "%s: rewriting btree node at due to error\n %s", - __func__, buf.buf); + if (saw_error) { + printbuf_reset(&buf); + bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); + prt_str(&buf, " "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); + bch_err_ratelimited(c, "%s: rewriting btree node at due to error\n %s", + __func__, buf.buf); + } bch2_btree_node_rewrite_async(c, b); } @@ -1938,7 +1973,12 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b, bool saw_error; int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key), - BKEY_TYPE_btree, WRITE); + (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = b->c.level + 1, + .btree = b->c.btree_id, + .flags = BCH_VALIDATE_write, + }); if (ret) { bch2_fs_inconsistent(c, "invalid btree node key before write"); return ret; diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c index 15ac72b1..9c54891c 100644 --- a/libbcachefs/btree_iter.c +++ b/libbcachefs/btree_iter.c @@ -270,8 +270,10 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) && iter->pos.snapshot != iter->snapshot); - BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) || - bkey_gt(iter->pos, iter->k.p)); + BUG_ON(iter->flags & BTREE_ITER_all_snapshots ? !bpos_eq(iter->pos, iter->k.p) : + !(iter->flags & BTREE_ITER_is_extents) ? !bkey_eq(iter->pos, iter->k.p) : + (bkey_lt(iter->pos, bkey_start_pos(&iter->k)) || + bkey_gt(iter->pos, iter->k.p))); } static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) @@ -327,7 +329,7 @@ out: void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, struct bpos pos) { - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); struct btree_path *path; struct trans_for_each_path_inorder_iter iter; @@ -720,7 +722,7 @@ static inline int btree_path_lock_root(struct btree_trans *trans, unsigned long trace_ip) { struct bch_fs *c = trans->c; - struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b; + struct btree_root *r = bch2_btree_id_root(c, path->btree_id); enum six_lock_type lock_type; unsigned i; int ret; @@ -728,7 +730,12 @@ static inline int btree_path_lock_root(struct btree_trans *trans, EBUG_ON(path->nodes_locked); while (1) { - b = READ_ONCE(*rootp); + struct btree *b = READ_ONCE(r->b); + if (unlikely(!b)) { + BUG_ON(!r->error); + return r->error; + } + path->level = READ_ONCE(b->c.level); if (unlikely(path->level < depth_want)) { @@ -753,7 +760,7 @@ static inline int btree_path_lock_root(struct btree_trans *trans, BUG(); } - if (likely(b == READ_ONCE(*rootp) && + if (likely(b == READ_ONCE(r->b) && b->c.level == path->level && !race_fault())) { for (i = 0; i < path->level; i++) @@ -823,6 +830,8 @@ static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *p bch2_bkey_buf_init(&tmp); + jiter->fail_if_too_many_whiteouts = true; + while (nr-- && !ret) { if (!bch2_btree_node_relock(trans, path, path->level)) break; @@ -880,6 +889,18 @@ static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans, __bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos); k = bch2_btree_and_journal_iter_peek(&jiter); + if (!k.k) { + struct printbuf buf = PRINTBUF; + + prt_str(&buf, "node not found at pos "); + bch2_bpos_to_text(&buf, path->pos); + prt_str(&buf, " at btree "); + 
bch2_btree_pos_to_text(&buf, c, l->b); + + ret = bch2_fs_topology_error(c, "%s", buf.buf); + printbuf_exit(&buf); + goto err; + } bch2_bkey_buf_reassemble(out, c, k); @@ -887,6 +908,7 @@ static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans, c->opts.btree_node_prefetch) ret = btree_path_prefetch_j(trans, path, &jiter); +err: bch2_btree_and_journal_iter_exit(&jiter); return ret; } @@ -985,7 +1007,7 @@ retry_all: bch2_trans_unlock(trans); cond_resched(); - trans_set_locked(trans); + trans_set_locked(trans, false); if (unlikely(trans->memory_allocation_failure)) { struct closure cl; @@ -1252,7 +1274,7 @@ __bch2_btree_path_set_pos(struct btree_trans *trans, { int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); EBUG_ON(!trans->paths[path_idx].ref); trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos); @@ -1412,7 +1434,7 @@ void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_ (void *) trans->last_begin_ip); } -void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans) +static void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans) { #ifdef CONFIG_BCACHEFS_DEBUG struct printbuf buf = PRINTBUF; @@ -1427,10 +1449,16 @@ void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans) #endif } -void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans) +void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *trans) { - panic("trans should be locked, unlocked by %pS\n", - (void *) trans->last_unlock_ip); + if (trans->restarted) + bch2_trans_in_restart_error(trans); + + if (!trans->locked) + panic("trans should be locked, unlocked by %pS\n", + (void *) trans->last_unlock_ip); + + BUG(); } noinline __cold @@ -1711,8 +1739,7 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans, struct trans_for_each_path_inorder_iter iter; btree_path_idx_t path_pos = 0, path_idx; - bch2_trans_verify_not_unlocked(trans); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); bch2_trans_verify_locks(trans); btree_trans_sort_paths(trans); @@ -1837,7 +1864,6 @@ hole: return (struct bkey_s_c) { u, NULL }; } - void bch2_set_btree_iter_dontneed(struct btree_iter *iter) { struct btree_trans *trans = iter->trans; @@ -1864,7 +1890,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter) struct btree_trans *trans = iter->trans; int ret; - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); iter->path = bch2_btree_path_set_pos(trans, iter->path, btree_iter_search_key(iter), @@ -1939,7 +1965,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) int ret; EBUG_ON(trans->paths[iter->path].cached); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); bch2_btree_iter_verify(iter); ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); @@ -2095,7 +2121,7 @@ static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans, { struct btree_path *path = btree_iter_path(trans, iter); - return bch2_journal_keys_peek_upto(trans->c, iter->btree_id, + return bch2_journal_keys_peek_max(trans->c, iter->btree_id, path->level, path->pos, end_pos, @@ -2135,6 +2161,37 @@ struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans, return k; } +static struct bkey_i *bch2_btree_journal_peek_prev(struct btree_trans *trans, + struct btree_iter *iter, + struct 
bpos end_pos) +{ + struct btree_path *path = btree_iter_path(trans, iter); + + return bch2_journal_keys_peek_prev_min(trans->c, iter->btree_id, + path->level, + path->pos, + end_pos, + &iter->journal_idx); +} + +static noinline +struct bkey_s_c btree_trans_peek_prev_journal(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c k) +{ + struct btree_path *path = btree_iter_path(trans, iter); + struct bkey_i *next_journal = + bch2_btree_journal_peek_prev(trans, iter, + k.k ? k.k->p : path_l(path)->b->key.k.p); + + if (next_journal) { + iter->k = next_journal->k; + k = bkey_i_to_s_c(next_journal); + } + + return k; +} + /* * Checks btree key cache for key at iter->pos and returns it if present, or * bkey_s_c_null: @@ -2148,8 +2205,7 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos struct bkey_s_c k; int ret; - bch2_trans_verify_not_in_restart(trans); - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); if ((iter->flags & BTREE_ITER_key_cache_fill) && bpos_eq(iter->pos, pos)) @@ -2195,8 +2251,6 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp bch2_btree_iter_verify(iter); while (1) { - struct btree_path_level *l; - iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, iter->flags & BTREE_ITER_intent, btree_iter_ip_allocated(iter)); @@ -2210,7 +2264,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp } struct btree_path *path = btree_iter_path(trans, iter); - l = path_l(path); + struct btree_path_level *l = path_l(path); if (unlikely(!l->b)) { /* No btree nodes at requested level: */ @@ -2274,22 +2328,23 @@ out: } /** - * bch2_btree_iter_peek_upto() - returns first key greater than or equal to + * bch2_btree_iter_peek_max() - returns first key greater than or equal to * iterator's current position * @iter: iterator to peek from * @end: search limit: returns keys less than or equal to @end * * Returns: key if found, or an error extractable with bkey_err(). */ -struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end) +struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end) { struct btree_trans *trans = iter->trans; struct bpos search_key = btree_iter_search_key(iter); struct bkey_s_c k; - struct bpos iter_pos; + struct bpos iter_pos = iter->pos; int ret; - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); + bch2_btree_iter_verify_entry_exit(iter); EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX)); if (iter->update_path) { @@ -2298,8 +2353,6 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e iter->update_path = 0; } - bch2_btree_iter_verify_entry_exit(iter); - while (1) { k = __bch2_btree_iter_peek(iter, search_key); if (unlikely(!k.k)) @@ -2307,75 +2360,74 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e if (unlikely(bkey_err(k))) goto out_no_locked; - /* - * We need to check against @end before FILTER_SNAPSHOTS because - * if we get to a different inode that requested we might be - * seeing keys for a different snapshot tree that will all be - * filtered out. - * - * But we can't do the full check here, because bkey_start_pos() - * isn't monotonically increasing before FILTER_SNAPSHOTS, and - * that's what we check against in extents mode: - */ - if (unlikely(!(iter->flags & BTREE_ITER_is_extents) - ? 
bkey_gt(k.k->p, end) - : k.k->p.inode > end.inode)) - goto end; + if (iter->flags & BTREE_ITER_filter_snapshots) { + /* + * We need to check against @end before FILTER_SNAPSHOTS because + * if we get to a different inode than requested we might be + * seeing keys for a different snapshot tree that will all be + * filtered out. + * + * But we can't do the full check here, because bkey_start_pos() + * isn't monotonically increasing before FILTER_SNAPSHOTS, and + * that's what we check against in extents mode: + */ + if (unlikely(!(iter->flags & BTREE_ITER_is_extents) + ? bkey_gt(k.k->p, end) + : k.k->p.inode > end.inode)) + goto end; + + if (iter->update_path && + !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) { + bch2_path_put_nokeep(trans, iter->update_path, + iter->flags & BTREE_ITER_intent); + iter->update_path = 0; + } - if (iter->update_path && - !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) { - bch2_path_put_nokeep(trans, iter->update_path, - iter->flags & BTREE_ITER_intent); - iter->update_path = 0; - } + if ((iter->flags & BTREE_ITER_intent) && + !(iter->flags & BTREE_ITER_is_extents) && + !iter->update_path) { + struct bpos pos = k.k->p; - if ((iter->flags & BTREE_ITER_filter_snapshots) && - (iter->flags & BTREE_ITER_intent) && - !(iter->flags & BTREE_ITER_is_extents) && - !iter->update_path) { - struct bpos pos = k.k->p; + if (pos.snapshot < iter->snapshot) { + search_key = bpos_successor(k.k->p); + continue; + } - if (pos.snapshot < iter->snapshot) { - search_key = bpos_successor(k.k->p); - continue; - } + pos.snapshot = iter->snapshot; - pos.snapshot = iter->snapshot; + /* + * advance, same as on exit for iter->path, but only up + * to snapshot + */ + __btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent); + iter->update_path = iter->path; + + iter->update_path = bch2_btree_path_set_pos(trans, + iter->update_path, pos, + iter->flags & BTREE_ITER_intent, + _THIS_IP_); + ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags); + if (unlikely(ret)) { + k = bkey_s_c_err(ret); + goto out_no_locked; + } + } /* - * advance, same as on exit for iter->path, but only up - * to snapshot + * We can never have a key in a leaf node at POS_MAX, so + * we don't have to check these successor() calls: */ - __btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent); - iter->update_path = iter->path; - - iter->update_path = bch2_btree_path_set_pos(trans, - iter->update_path, pos, - iter->flags & BTREE_ITER_intent, - _THIS_IP_); - ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags); - if (unlikely(ret)) { - k = bkey_s_c_err(ret); - goto out_no_locked; + if (!bch2_snapshot_is_ancestor(trans->c, + iter->snapshot, + k.k->p.snapshot)) { + search_key = bpos_successor(k.k->p); + continue; } - } - - /* - * We can never have a key in a leaf node at POS_MAX, so - * we don't have to check these successor() calls: - */ - if ((iter->flags & BTREE_ITER_filter_snapshots) && - !bch2_snapshot_is_ancestor(trans->c, - iter->snapshot, - k.k->p.snapshot)) { - search_key = bpos_successor(k.k->p); - continue; - } - if (bkey_whiteout(k.k) && - !(iter->flags & BTREE_ITER_all_snapshots)) { - search_key = bkey_successor(iter, k.k->p); - continue; + if (bkey_whiteout(k.k)) { + search_key = bkey_successor(iter, k.k->p); + continue; + } } /* @@ -2445,127 +2497,204 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter) return bch2_btree_iter_peek(iter); } -/** - * bch2_btree_iter_peek_prev() - returns first key less 
than or equal to - * iterator's current position - * @iter: iterator to peek from - * - * Returns: key if found, or an error extractable with bkey_err(). - */ -struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) +static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key) { struct btree_trans *trans = iter->trans; - struct bpos search_key = iter->pos; - struct bkey_s_c k; - struct bkey saved_k; - const struct bch_val *saved_v; - btree_path_idx_t saved_path = 0; - int ret; - - bch2_trans_verify_not_unlocked(trans); - EBUG_ON(btree_iter_path(trans, iter)->cached || - btree_iter_path(trans, iter)->level); - - if (iter->flags & BTREE_ITER_with_journal) - return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported); + struct bkey_s_c k, k2; bch2_btree_iter_verify(iter); - bch2_btree_iter_verify_entry_exit(iter); - - if (iter->flags & BTREE_ITER_filter_snapshots) - search_key.snapshot = U32_MAX; while (1) { iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, - iter->flags & BTREE_ITER_intent, - btree_iter_ip_allocated(iter)); + iter->flags & BTREE_ITER_intent, + btree_iter_ip_allocated(iter)); - ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (unlikely(ret)) { /* ensure that iter->k is consistent with iter->pos: */ bch2_btree_iter_set_pos(iter, iter->pos); k = bkey_s_c_err(ret); - goto out_no_locked; + break; } struct btree_path *path = btree_iter_path(trans, iter); + struct btree_path_level *l = path_l(path); - k = btree_path_level_peek(trans, path, &path->l[0], &iter->k); - if (!k.k || - ((iter->flags & BTREE_ITER_is_extents) - ? bpos_ge(bkey_start_pos(k.k), search_key) - : bpos_gt(k.k->p, search_key))) - k = btree_path_level_prev(trans, path, &path->l[0], &iter->k); + if (unlikely(!l->b)) { + /* No btree nodes at requested level: */ + bch2_btree_iter_set_pos(iter, SPOS_MAX); + k = bkey_s_c_null; + break; + } + + btree_path_set_should_be_locked(trans, path); + + k = btree_path_level_peek_all(trans->c, l, &iter->k); + if (!k.k || bpos_gt(k.k->p, search_key)) { + k = btree_path_level_prev(trans, path, l, &iter->k); + + BUG_ON(k.k && bpos_gt(k.k->p, search_key)); + } + + if (unlikely(iter->flags & BTREE_ITER_with_key_cache) && + k.k && + (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) { + k = k2; + if (bkey_err(k2)) { + bch2_btree_iter_set_pos(iter, iter->pos); + break; + } + } + + if (unlikely(iter->flags & BTREE_ITER_with_journal)) + k = btree_trans_peek_prev_journal(trans, iter, k); if (unlikely((iter->flags & BTREE_ITER_with_updates) && trans->nr_updates)) bch2_btree_trans_peek_prev_updates(trans, iter, &k); - if (likely(k.k)) { - if (iter->flags & BTREE_ITER_filter_snapshots) { - if (k.k->p.snapshot == iter->snapshot) - goto got_key; + if (likely(k.k && !bkey_deleted(k.k))) { + break; + } else if (k.k) { + search_key = bpos_predecessor(k.k->p); + } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) { + /* Advance to previous leaf node: */ + search_key = bpos_predecessor(path->l[0].b->data->min_key); + } else { + /* Start of btree: */ + bch2_btree_iter_set_pos(iter, POS_MIN); + k = bkey_s_c_null; + break; + } + } + + bch2_btree_iter_verify(iter); + return k; +} +/** + * bch2_btree_iter_peek_prev_min() - returns first key less than or equal to + * iterator's current position + * @iter: iterator to peek from + * @end: search limit: returns keys greater than or equal to @end + * + * Returns: key if found, or an 
error extractable with bkey_err(). + */ +struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end) +{ + if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) && + !bkey_eq(iter->pos, POS_MAX)) { + /* + * bkey_start_pos(), for extents, is not monotonically + * increasing until after filtering for snapshots: + * + * Thus, for extents we need to search forward until we find a + * real visible extent - easiest to just use peek_slot() (which + * internally uses peek() for extents) + */ + struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); + if (bkey_err(k)) + return k; + + if (!bkey_deleted(k.k) && + (!(iter->flags & BTREE_ITER_is_extents) || + bkey_lt(bkey_start_pos(k.k), iter->pos))) + return k; + } + + struct btree_trans *trans = iter->trans; + struct bpos search_key = iter->pos; + struct bkey_s_c k; + btree_path_idx_t saved_path = 0; + + bch2_trans_verify_not_unlocked_or_in_restart(trans); + bch2_btree_iter_verify_entry_exit(iter); + EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bpos_eq(end, POS_MIN)); + + while (1) { + k = __bch2_btree_iter_peek_prev(iter, search_key); + if (unlikely(!k.k)) + goto end; + if (unlikely(bkey_err(k))) + goto out_no_locked; + + if (iter->flags & BTREE_ITER_filter_snapshots) { + struct btree_path *s = saved_path ? trans->paths + saved_path : NULL; + if (s && bpos_lt(k.k->p, SPOS(s->pos.inode, s->pos.offset, iter->snapshot))) { + /* + * If we have a saved candidate, and we're past + * the last possible snapshot overwrite, return + * it: + */ + bch2_path_put_nokeep(trans, iter->path, + iter->flags & BTREE_ITER_intent); + iter->path = saved_path; + saved_path = 0; + k = bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k); + break; + } + + /* + * We need to check against @end before FILTER_SNAPSHOTS because + * if we get to a different inode than requested we might be + * seeing keys for a different snapshot tree that will all be + * filtered out. + */ + if (unlikely(bkey_lt(k.k->p, end))) + goto end; + + if (!bch2_snapshot_is_ancestor(trans->c, iter->snapshot, k.k->p.snapshot)) { + search_key = bpos_predecessor(k.k->p); + continue; + } + + if (k.k->p.snapshot != iter->snapshot) { + /* + * Have a key visible in iter->snapshot, but + * might have overwrites: - save it and keep + * searching. 
Unless it's a whiteout - then drop + * our previous saved candidate: + */ + if (saved_path) { + bch2_path_put_nokeep(trans, saved_path, + iter->flags & BTREE_ITER_intent); saved_path = 0; - iter->k = saved_k; - k.v = saved_v; - goto got_key; } - if (bch2_snapshot_is_ancestor(trans->c, - iter->snapshot, - k.k->p.snapshot)) { - if (saved_path) - bch2_path_put_nokeep(trans, saved_path, - iter->flags & BTREE_ITER_intent); + if (!bkey_whiteout(k.k)) { saved_path = btree_path_clone(trans, iter->path, iter->flags & BTREE_ITER_intent, _THIS_IP_); - path = btree_iter_path(trans, iter); - trace_btree_path_save_pos(trans, path, trans->paths + saved_path); - saved_k = *k.k; - saved_v = k.v; + trace_btree_path_save_pos(trans, + trans->paths + iter->path, + trans->paths + saved_path); } search_key = bpos_predecessor(k.k->p); continue; } -got_key: - if (bkey_whiteout(k.k) && - !(iter->flags & BTREE_ITER_all_snapshots)) { + + if (bkey_whiteout(k.k)) { search_key = bkey_predecessor(iter, k.k->p); - if (iter->flags & BTREE_ITER_filter_snapshots) - search_key.snapshot = U32_MAX; + search_key.snapshot = U32_MAX; continue; } - - btree_path_set_should_be_locked(trans, path); - break; - } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) { - /* Advance to previous leaf node: */ - search_key = bpos_predecessor(path->l[0].b->data->min_key); - } else { - /* Start of btree: */ - bch2_btree_iter_set_pos(iter, POS_MIN); - k = bkey_s_c_null; - goto out_no_locked; } - } - EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos)); + EBUG_ON(iter->flags & BTREE_ITER_all_snapshots ? bpos_gt(k.k->p, iter->pos) : + iter->flags & BTREE_ITER_is_extents ? bkey_ge(bkey_start_pos(k.k), iter->pos) : + bkey_gt(k.k->p, iter->pos)); + + if (unlikely(iter->flags & BTREE_ITER_all_snapshots ? bpos_lt(k.k->p, end) : + iter->flags & BTREE_ITER_is_extents ? 
bkey_le(k.k->p, end) : + bkey_lt(k.k->p, end))) + goto end; + + break; + } /* Extents can straddle iter->pos: */ - if (bkey_lt(k.k->p, iter->pos)) - iter->pos = k.k->p; + iter->pos = bpos_min(iter->pos, k.k->p); if (iter->flags & BTREE_ITER_filter_snapshots) iter->pos.snapshot = iter->snapshot; @@ -2575,8 +2704,11 @@ out_no_locked: bch2_btree_iter_verify_entry_exit(iter); bch2_btree_iter_verify(iter); - return k; +end: + bch2_btree_iter_set_pos(iter, end); + k = bkey_s_c_null; + goto out_no_locked; } /** @@ -2601,7 +2733,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) struct bkey_s_c k; int ret; - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); bch2_btree_iter_verify(iter); bch2_btree_iter_verify_entry_exit(iter); EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache)); @@ -2665,7 +2797,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) struct btree_iter iter2; bch2_trans_copy_iter(&iter2, iter); - k = bch2_btree_iter_peek_upto(&iter2, end); + k = bch2_btree_iter_peek_max(&iter2, end); if (k.k && !bkey_err(k)) { swap(iter->key_cache_path, iter2.key_cache_path); @@ -2676,7 +2808,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) } else { struct bpos pos = iter->pos; - k = bch2_btree_iter_peek_upto(iter, end); + k = bch2_btree_iter_peek_max(iter, end); if (unlikely(bkey_err(k))) bch2_btree_iter_set_pos(iter, pos); else @@ -3116,14 +3248,14 @@ u32 bch2_trans_begin(struct btree_trans *trans) trans->last_begin_ip = _RET_IP_; - trans_set_locked(trans); + trans_set_locked(trans, false); if (trans->restarted) { bch2_btree_path_traverse_all(trans); trans->notrace_relock_fail = false; } - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); return trans->restart_count; } @@ -3222,7 +3354,7 @@ got_trans: trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier); trans->srcu_lock_time = jiffies; trans->srcu_held = true; - trans_set_locked(trans, false); closure_init_stack_release(&trans->ref); return trans; @@ -3490,7 +3622,7 @@ int bch2_fs_btree_iter_init(struct bch_fs *c) #ifdef CONFIG_LOCKDEP fs_reclaim_acquire(GFP_KERNEL); struct btree_trans *trans = bch2_trans_get(c); - trans_set_locked(trans); + trans_set_locked(trans, false); bch2_trans_put(trans); fs_reclaim_release(GFP_KERNEL); #endif
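A note on the reverse-iteration API introduced above: like bch2_btree_iter_peek_max() at its upper bound, bch2_btree_iter_peek_prev_min() stops at @end, and when nothing is found it returns bkey_s_c_null with the iterator position clamped to the bound. A minimal caller-side sketch, not part of this patch - the btree id is arbitrary and a running transaction is assumed:

static int peek_prev_min_example(struct btree_trans *trans,
				 struct bpos pos, struct bpos end)
{
	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, pos, 0);

	/* newest key at or before pos, never walking back past end */
	struct bkey_s_c k = bch2_btree_iter_peek_prev_min(&iter, end);
	int ret = bkey_err(k);

	if (!ret && k.k) {
		/* use k here; note that extents may straddle iter->pos */
	}

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

diff --git a/libbcachefs/btree_iter.h b/libbcachefs/btree_iter.h index 5f8b494a..3477fc8c 100644 --- a/libbcachefs/btree_iter.h +++ b/libbcachefs/btree_iter.h @@ -236,12 +236,12 @@ int __must_check bch2_btree_path_traverse_one(struct btree_trans *, btree_path_idx_t, unsigned, unsigned long); -static inline void bch2_trans_verify_not_unlocked(struct btree_trans *); +static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *); static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans, btree_path_idx_t path, unsigned flags) { - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK) return 0; @@ -326,20 +326,12 @@ static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans, bch2_trans_restart_error(trans, restart_count); } -void __noreturn bch2_trans_in_restart_error(struct btree_trans *); +void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *); -static inline void bch2_trans_verify_not_in_restart(struct btree_trans 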
*trans) +static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *trans) { - if (trans->restarted) - bch2_trans_in_restart_error(trans); -} - -void __noreturn bch2_trans_unlocked_error(struct btree_trans *); - -static inline void bch2_trans_verify_not_unlocked(struct btree_trans *trans) -{ - if (!trans->locked) - bch2_trans_unlocked_error(trans); + if (trans->restarted || !trans->locked) + bch2_trans_unlocked_or_in_restart_error(trans); } __always_inline @@ -352,7 +344,7 @@ static int btree_trans_restart_ip(struct btree_trans *trans, int err, unsigned l trans->last_restarted_ip = ip; #ifdef CONFIG_BCACHEFS_DEBUG darray_exit(&trans->last_restarted_trace); - bch2_save_backtrace(&trans->last_restarted_trace, current, 0, GFP_KERNEL); + bch2_save_backtrace(&trans->last_restarted_trace, current, 0, GFP_NOWAIT); #endif return -err; } @@ -389,15 +381,21 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *); struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *); struct btree *bch2_btree_iter_next_node(struct btree_iter *); -struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos); +struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *, struct bpos); struct bkey_s_c bch2_btree_iter_next(struct btree_iter *); static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) { - return bch2_btree_iter_peek_upto(iter, SPOS_MAX); + return bch2_btree_iter_peek_max(iter, SPOS_MAX); +} + +struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *, struct bpos); + +static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) +{ + return bch2_btree_iter_peek_prev_min(iter, POS_MIN); } -struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *); struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *); struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *); @@ -680,12 +678,12 @@ static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter, bch2_btree_iter_peek(iter); } -static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter, +static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_iter *iter, struct bpos end, unsigned flags) { if (!(flags & BTREE_ITER_slots)) - return bch2_btree_iter_peek_upto(iter, end); + return bch2_btree_iter_peek_max(iter, end); if (bkey_gt(iter->pos, end)) return bkey_s_c_null; @@ -749,7 +747,7 @@ transaction_restart: \ _ret2 ?: trans_was_restarted(_trans, _restart_count); \ }) -#define for_each_btree_key_upto_continue(_trans, _iter, \ +#define for_each_btree_key_max_continue(_trans, _iter, \ _end, _flags, _k, _do) \ ({ \ struct bkey_s_c _k; \ @@ -757,7 +755,7 @@ transaction_restart: \ \ do { \ _ret3 = lockrestart_do(_trans, ({ \ - (_k) = bch2_btree_iter_peek_upto_type(&(_iter), \ + (_k) = bch2_btree_iter_peek_max_type(&(_iter), \ _end, (_flags)); \ if (!(_k).k) \ break; \ @@ -771,9 +769,9 @@ transaction_restart: \ }) #define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do) \ - for_each_btree_key_upto_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do) + for_each_btree_key_max_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do) -#define for_each_btree_key_upto(_trans, _iter, _btree_id, \ +#define for_each_btree_key_max(_trans, _iter, _btree_id, \ _start, _end, _flags, _k, _do) \ ({ \ bch2_trans_begin(trans); \ @@ -782,12 +780,12 @@ transaction_restart: \ bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ (_start), (_flags)); \ \ - for_each_btree_key_upto_continue(_trans, _iter, 
_end, _flags, _k, _do);\ + for_each_btree_key_max_continue(_trans, _iter, _end, _flags, _k, _do);\ }) #define for_each_btree_key(_trans, _iter, _btree_id, \ _start, _flags, _k, _do) \ - for_each_btree_key_upto(_trans, _iter, _btree_id, _start, \ + for_each_btree_key_max(_trans, _iter, _btree_id, _start, \ SPOS_MAX, _flags, _k, _do) #define for_each_btree_key_reverse(_trans, _iter, _btree_id, \ @@ -831,33 +829,33 @@ transaction_restart: \ (_do) ?: bch2_trans_commit(_trans, (_disk_res),\ (_journal_seq), (_commit_flags))) -#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id, \ +#define for_each_btree_key_max_commit(_trans, _iter, _btree_id, \ _start, _end, _iter_flags, _k, \ _disk_res, _journal_seq, _commit_flags,\ _do) \ - for_each_btree_key_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\ + for_each_btree_key_max(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\ (_do) ?: bch2_trans_commit(_trans, (_disk_res),\ (_journal_seq), (_commit_flags))) struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *); -#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, \ +#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id, \ _start, _end, _flags, _k, _ret) \ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ (_start), (_flags)); \ - (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\ + (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags),\ !((_ret) = bkey_err(_k)) && (_k).k; \ bch2_btree_iter_advance(&(_iter))) -#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\ +#define for_each_btree_key_max_continue_norestart(_iter, _end, _flags, _k, _ret)\ for (; \ - (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags), \ + (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags), \ !((_ret) = bkey_err(_k)) && (_k).k; \ bch2_btree_iter_advance(&(_iter))) #define for_each_btree_key_norestart(_trans, _iter, _btree_id, \ _start, _flags, _k, _ret) \ - for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, _start,\ + for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start,\ SPOS_MAX, _flags, _k, _ret) #define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \ @@ -869,7 +867,7 @@ struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *); bch2_btree_iter_rewind(&(_iter))) #define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \ - for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret) + for_each_btree_key_max_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret) /* * This should not be used in a fastpath, without first trying _do in diff --git a/libbcachefs/btree_journal_iter.c b/libbcachefs/btree_journal_iter.c index 924b5e3a..de3db161 100644 --- a/libbcachefs/btree_journal_iter.c +++ b/libbcachefs/btree_journal_iter.c @@ -16,6 +16,17 @@ * operations for the regular btree iter code to use: */ +static inline size_t pos_to_idx(struct journal_keys *keys, size_t pos) +{ + size_t gap_size = keys->size - keys->nr; + + BUG_ON(pos >= keys->gap && pos < keys->gap + gap_size); + + if (pos >= keys->gap) + pos -= gap_size; + return pos; +} + static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx) { size_t gap_size = keys->size - keys->nr; @@ -61,7 +72,7 @@ static size_t bch2_journal_key_search(struct journal_keys *keys, } /* Returns first non-overwritten key >= search key: */ -struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id, +struct bkey_i 
*bch2_journal_keys_peek_max(struct bch_fs *c, enum btree_id btree_id, unsigned level, struct bpos pos, struct bpos end_pos, size_t *idx) { @@ -84,27 +95,92 search: } } + struct bkey_i *ret = NULL; + rcu_read_lock(); /* for overwritten_ranges */ + while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) { if (__journal_key_cmp(btree_id, level, end_pos, k) < 0) - return NULL; + break; if (k->overwritten) { - (*idx)++; + if (k->overwritten_range) + *idx = rcu_dereference(k->overwritten_range)->end; + else + *idx += 1; continue; } - if (__journal_key_cmp(btree_id, level, pos, k) <= 0) - return k->k; + if (__journal_key_cmp(btree_id, level, pos, k) <= 0) { + ret = k->k; + break; + } (*idx)++; iters++; if (iters == 10) { *idx = 0; + rcu_read_unlock(); goto search; } } - return NULL; + rcu_read_unlock(); + return ret; +} + +struct bkey_i *bch2_journal_keys_peek_prev_min(struct bch_fs *c, enum btree_id btree_id, + unsigned level, struct bpos pos, + struct bpos end_pos, size_t *idx) +{ + struct journal_keys *keys = &c->journal_keys; + unsigned iters = 0; + struct journal_key *k; + + BUG_ON(*idx > keys->nr); +search: + if (!*idx) + *idx = __bch2_journal_key_search(keys, btree_id, level, pos); + + while (*idx && + __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) { + (*idx)++; + iters++; + if (iters == 10) { + *idx = 0; + goto search; + } + } + + struct bkey_i *ret = NULL; + rcu_read_lock(); /* for overwritten_ranges */ + + while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) { + if (__journal_key_cmp(btree_id, level, end_pos, k) > 0) + break; + + if (k->overwritten) { + if (k->overwritten_range) + *idx = rcu_dereference(k->overwritten_range)->start - 1; + else + *idx -= 1; + continue; + } + + if (__journal_key_cmp(btree_id, level, pos, k) >= 0) { + ret = k->k; + break; + } + + --(*idx); + iters++; + if (iters == 10) { + *idx = 0; + goto search; + } + } + + rcu_read_unlock(); + return ret; } struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id, @@ -112,11 +188,12 @@ struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree { size_t idx = 0; - return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx); + return bch2_journal_keys_peek_max(c, btree_id, level, pos, pos, &idx); } static void journal_iter_verify(struct journal_iter *iter) { +#ifdef CONFIG_BCACHEFS_DEBUG struct journal_keys *keys = iter->keys; size_t gap_size = keys->size - keys->nr; @@ -126,10 +203,10 @@ static void journal_iter_verify(struct journal_iter *iter) if (iter->idx < keys->size) { struct journal_key *k = keys->data + iter->idx; - int cmp = cmp_int(k->btree_id, iter->btree_id) ?: - cmp_int(k->level, iter->level); - BUG_ON(cmp < 0); + int cmp = __journal_key_btree_cmp(iter->btree_id, iter->level, k); + BUG_ON(cmp > 0); } +#endif } static void journal_iters_fix(struct bch_fs *c) 
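The new pos_to_idx() at the top of this file is the inverse of idx_to_pos(): both translate between a logical key index and an array position on either side of the hole. A standalone model of the mapping - plain C for illustration, not bcachefs code, with model_ names to mark it as such:

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

/* model of struct journal_keys: nr live keys in a size-slot array,
 * with the unused slots forming a hole at position `gap` */
struct gap_buf { size_t nr, size, gap; };

static size_t model_idx_to_pos(struct gap_buf *b, size_t idx)
{
	size_t gap_size = b->size - b->nr;

	return idx + (idx >= b->gap ? gap_size : 0);
}

static size_t model_pos_to_idx(struct gap_buf *b, size_t pos)
{
	size_t gap_size = b->size - b->nr;

	/* positions inside the hole are invalid, as the BUG_ON asserts */
	assert(pos < b->gap || pos >= b->gap + gap_size);
	return pos - (pos >= b->gap ? gap_size : 0);
}

int main(void)
{
	/* 6 live keys in an 8-slot array, hole at positions 2-3 */
	struct gap_buf b = { .nr = 6, .size = 8, .gap = 2 };

	for (size_t i = 0; i < b.nr; i++)
		printf("idx %zu -> pos %zu\n", i, model_idx_to_pos(&b, i));

	assert(model_pos_to_idx(&b, model_idx_to_pos(&b, 4)) == 4);
	return 0;
}

Keeping the hole at the insertion point is what makes sequential journal-key insertion O(n) overall, per the struct journal_keys comment in btree_journal_iter_types.h below.

@@ -290,6 +367,68 @@ bool bch2_key_deleted_in_journal(struct btree_trans *trans, enum btree_id btree, bkey_deleted(&keys->data[idx].k->k)); } +static void __bch2_journal_key_overwritten(struct journal_keys *keys, size_t pos) +{ + struct journal_key *k = keys->data + pos; + size_t idx = pos_to_idx(keys, pos); + + k->overwritten = true; + + struct journal_key *prev = idx > 0 ? keys->data + idx_to_pos(keys, idx - 1) : NULL; + struct journal_key *next = idx + 1 < keys->nr ? 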
keys->data + idx_to_pos(keys, idx + 1) : NULL; + + bool prev_overwritten = prev && prev->overwritten; + bool next_overwritten = next && next->overwritten; + + struct journal_key_range_overwritten *prev_range = + prev_overwritten ? prev->overwritten_range : NULL; + struct journal_key_range_overwritten *next_range = + next_overwritten ? next->overwritten_range : NULL; + + BUG_ON(prev_range && prev_range->end != idx); + BUG_ON(next_range && next_range->start != idx + 1); + + if (prev_range && next_range) { + prev_range->end = next_range->end; + + keys->data[pos].overwritten_range = prev_range; + for (size_t i = next_range->start; i < next_range->end; i++) { + struct journal_key *ip = keys->data + idx_to_pos(keys, i); + BUG_ON(ip->overwritten_range != next_range); + ip->overwritten_range = prev_range; + } + + kfree_rcu_mightsleep(next_range); + } else if (prev_range) { + prev_range->end++; + k->overwritten_range = prev_range; + if (next_overwritten) { + prev_range->end++; + next->overwritten_range = prev_range; + } + } else if (next_range) { + next_range->start--; + k->overwritten_range = next_range; + if (prev_overwritten) { + next_range->start--; + prev->overwritten_range = next_range; + } + } else if (prev_overwritten || next_overwritten) { + struct journal_key_range_overwritten *r = kmalloc(sizeof(*r), GFP_KERNEL); + if (!r) + return; + + r->start = idx - (size_t) prev_overwritten; + r->end = idx + 1 + (size_t) next_overwritten; + + rcu_assign_pointer(k->overwritten_range, r); + if (prev_overwritten) + prev->overwritten_range = r; + if (next_overwritten) + next->overwritten_range = r; + } +} + void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree, unsigned level, struct bpos pos) { @@ -299,8 +438,12 @@ void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree, if (idx < keys->size && keys->data[idx].btree_id == btree && keys->data[idx].level == level && - bpos_eq(keys->data[idx].k->k.p, pos)) - keys->data[idx].overwritten = true; + bpos_eq(keys->data[idx].k->k.p, pos) && + !keys->data[idx].overwritten) { + mutex_lock(&keys->overwrite_lock); + __bch2_journal_key_overwritten(keys, idx); + mutex_unlock(&keys->overwrite_lock); + } } static void bch2_journal_iter_advance(struct journal_iter *iter) @@ -314,24 +457,32 @@ static void bch2_journal_iter_advance(struct journal_iter *iter) static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter) { + struct bkey_s_c ret = bkey_s_c_null; + journal_iter_verify(iter); + rcu_read_lock(); while (iter->idx < iter->keys->size) { struct journal_key *k = iter->keys->data + iter->idx; - int cmp = cmp_int(k->btree_id, iter->btree_id) ?: - cmp_int(k->level, iter->level); - if (cmp > 0) + int cmp = __journal_key_btree_cmp(iter->btree_id, iter->level, k); + if (cmp < 0) break; BUG_ON(cmp); - if (!k->overwritten) - return bkey_i_to_s_c(k->k); + if (!k->overwritten) { + ret = bkey_i_to_s_c(k->k); + break; + } - bch2_journal_iter_advance(iter); + if (k->overwritten_range) + iter->idx = idx_to_pos(iter->keys, rcu_dereference(k->overwritten_range)->end); + else + bch2_journal_iter_advance(iter); } + rcu_read_unlock(); - return bkey_s_c_null; + return ret; } static void bch2_journal_iter_exit(struct journal_iter *iter) 
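The point of the range merging in __bch2_journal_key_overwritten() above is that every key in a contiguous overwritten run ends up sharing one [start, end) descriptor, which is what lets bch2_journal_iter_peek() hop over the whole run in a single step rather than advancing key by key. A standalone model of that skip - plain C for illustration, not bcachefs code:

#include <stdio.h>
#include <stddef.h>

struct model_range { size_t start, end; };

struct model_key {
	int			overwritten;
	struct model_range	*overwritten_range; /* shared by the whole run */
	int			v;
};

/* find the next non-overwritten key at or after logical index i */
static size_t next_live(struct model_key *keys, size_t nr, size_t i)
{
	while (i < nr && keys[i].overwritten)
		/* one jump over the whole run, not one step per key */
		i = keys[i].overwritten_range
			? keys[i].overwritten_range->end
			: i + 1;
	return i;
}

int main(void)
{
	struct model_range r = { .start = 1, .end = 4 };
	struct model_key keys[] = {
		{ 0, NULL, 10 },
		{ 1, &r, 0 }, { 1, &r, 0 }, { 1, &r, 0 },
		{ 0, NULL, 50 },
	};

	/* skips indices 1-3 in a single step: prints 4 */
	printf("next live key at %zu (v=%d)\n",
	       next_live(keys, 5, 1), keys[next_live(keys, 5, 1)].v);
	return 0;
}

@@ -382,6 +533,7 @@ static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter : (level > 1 ? 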
1 : 16); iter.prefetch = false; + iter.fail_if_too_many_whiteouts = true; bch2_bkey_buf_init(&tmp); while (nr--) { @@ -400,6 +552,7 @@ static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter) { struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret; + size_t iters = 0; if (iter->prefetch && iter->journal.level) btree_and_journal_iter_prefetch(iter); @@ -407,6 +560,11 @@ again: if (iter->at_end) return bkey_s_c_null; + iters++; + + if (iters > 20 && iter->fail_if_too_many_whiteouts) + return bkey_s_c_null; + while ((btree_k = bch2_journal_iter_peek_btree(iter)).k && bpos_lt(btree_k.k->p, iter->pos)) bch2_journal_iter_advance_btree(iter); @@ -481,16 +639,6 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans, /* sort and dedup all keys in the journal: */ -void bch2_journal_entries_free(struct bch_fs *c) -{ - struct journal_replay **i; - struct genradix_iter iter; - - genradix_for_each(&c->journal_entries, iter, i) - kvfree(*i); - genradix_free(&c->journal_entries); -} - /* * When keys compare equal, oldest compares first: */ @@ -515,15 +663,26 @@ void bch2_journal_keys_put(struct bch_fs *c) move_gap(keys, keys->nr); - darray_for_each(*keys, i) + darray_for_each(*keys, i) { + if (i->overwritten_range && + (i == &darray_last(*keys) || + i->overwritten_range != i[1].overwritten_range)) + kfree(i->overwritten_range); + if (i->allocated) kfree(i->k); + } kvfree(keys->data); keys->data = NULL; keys->nr = keys->gap = keys->size = 0; - bch2_journal_entries_free(c); + struct journal_replay **i; + struct genradix_iter iter; + + genradix_for_each(&c->journal_entries, iter, i) + kvfree(*i); + genradix_free(&c->journal_entries); } static void __journal_keys_sort(struct journal_keys *keys) @@ -636,3 +795,12 @@ void bch2_journal_keys_dump(struct bch_fs *c) } printbuf_exit(&buf); } + +void bch2_fs_journal_keys_init(struct bch_fs *c) +{ + struct journal_keys *keys = &c->journal_keys; + + atomic_set(&keys->ref, 1); + keys->initial_ref_held = true; + mutex_init(&keys->overwrite_lock); +} diff --git a/libbcachefs/btree_journal_iter.h b/libbcachefs/btree_journal_iter.h index 1653de9d..2a308291 100644 --- a/libbcachefs/btree_journal_iter.h +++ b/libbcachefs/btree_journal_iter.h @@ -26,16 +26,24 @@ struct btree_and_journal_iter { struct bpos pos; bool at_end; bool prefetch; + bool fail_if_too_many_whiteouts; }; +static inline int __journal_key_btree_cmp(enum btree_id l_btree_id, + unsigned l_level, + const struct journal_key *r) +{ + return -cmp_int(l_level, r->level) ?: + cmp_int(l_btree_id, r->btree_id); +} + static inline int __journal_key_cmp(enum btree_id l_btree_id, unsigned l_level, struct bpos l_pos, const struct journal_key *r) { - return (cmp_int(l_btree_id, r->btree_id) ?: - cmp_int(l_level, r->level) ?: - bpos_cmp(l_pos, r->k->k.p)); + return __journal_key_btree_cmp(l_btree_id, l_level, r) ?: + bpos_cmp(l_pos, r->k->k.p); } static inline int journal_key_cmp(const struct journal_key *l, const struct journal_key *r) @@ -43,7 +51,9 @@ static inline int journal_key_cmp(const struct journal_key *l, const struct jour return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r); } -struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *, enum btree_id, +struct bkey_i *bch2_journal_keys_peek_max(struct bch_fs *, enum btree_id, + unsigned, struct bpos, struct bpos, size_t *); +struct bkey_i *bch2_journal_keys_peek_prev_min(struct bch_fs *, enum btree_id, unsigned, struct 
bpos, struct bpos, size_t *); struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *, enum btree_id, unsigned, struct bpos); @@ -79,8 +89,6 @@ static inline void bch2_journal_keys_put_initial(struct bch_fs *c) c->journal_keys.initial_ref_held = false; } -void bch2_journal_entries_free(struct bch_fs *); - int bch2_journal_keys_sort(struct bch_fs *); void bch2_shoot_down_journal_keys(struct bch_fs *, enum btree_id, @@ -89,4 +97,6 @@ void bch2_shoot_down_journal_keys(struct bch_fs *, enum btree_id, void bch2_journal_keys_dump(struct bch_fs *); +void bch2_fs_journal_keys_init(struct bch_fs *); + #endif /* _BCACHEFS_BTREE_JOURNAL_ITER_H */ diff --git a/libbcachefs/btree_journal_iter_types.h b/libbcachefs/btree_journal_iter_types.h new file mode 100644 index 00000000..8b773823 --- /dev/null +++ b/libbcachefs/btree_journal_iter_types.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_BTREE_JOURNAL_ITER_TYPES_H +#define _BCACHEFS_BTREE_JOURNAL_ITER_TYPES_H + +struct journal_key_range_overwritten { + size_t start, end; +}; + +struct journal_key { + u64 journal_seq; + u32 journal_offset; + enum btree_id btree_id:8; + unsigned level:8; + bool allocated; + bool overwritten; + struct journal_key_range_overwritten __rcu * + overwritten_range; + struct bkey_i *k; +}; + +struct journal_keys { + /* must match layout in darray_types.h */ + size_t nr, size; + struct journal_key *data; + /* + * Gap buffer: instead of all the empty space in the array being at the + * end of the buffer - from @nr to @size - the empty space is at @gap. + * This means that sequential insertions are O(n) instead of O(n^2). + */ + size_t gap; + atomic_t ref; + bool initial_ref_held; + struct mutex overwrite_lock; +}; + +#endif /* _BCACHEFS_BTREE_JOURNAL_ITER_TYPES_H */ diff --git a/libbcachefs/btree_key_cache.c b/libbcachefs/btree_key_cache.c index 244610b1..3bd40ea0 100644 --- a/libbcachefs/btree_key_cache.c +++ b/libbcachefs/btree_key_cache.c @@ -424,8 +424,15 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans, !test_bit(JOURNAL_space_low, &c->journal.flags)) commit_flags |= BCH_TRANS_COMMIT_no_journal_res; - ret = bch2_btree_iter_traverse(&b_iter) ?: - bch2_trans_update(trans, &b_iter, ck->k, + struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(&b_iter); + ret = bkey_err(btree_k); + if (ret) + goto err; + + /* Check that we're not violating cache coherency rules: */ + BUG_ON(bkey_deleted(btree_k.k)); + + ret = bch2_trans_update(trans, &b_iter, ck->k, BTREE_UPDATE_key_cache_reclaim| BTREE_UPDATE_internal_snapshot_node| BTREE_TRIGGER_norun) ?: @@ -433,7 +440,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans, BCH_TRANS_COMMIT_no_check_rw| BCH_TRANS_COMMIT_no_enospc| commit_flags); - +err: bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart) && !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) && diff --git a/libbcachefs/btree_locking.c b/libbcachefs/btree_locking.c index efe2a007..d343df9f 100644 --- a/libbcachefs/btree_locking.c +++ b/libbcachefs/btree_locking.c @@ -782,7 +782,7 @@ static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace) return bch2_trans_relock_fail(trans, path, &f, trace); } - trans_set_locked(trans); + trans_set_locked(trans, true); out: bch2_trans_verify_locks(trans); return 0; } diff --git a/libbcachefs/btree_locking.h b/libbcachefs/btree_locking.h index 7c07f9fa..7474ab6c 100644 --- a/libbcachefs/btree_locking.h +++ b/libbcachefs/btree_locking.h @@ -188,10 +188,10 @@ int 
bch2_six_check_for_deadlock(struct six_lock *lock, void *p); /* lock: */ -static inline void trans_set_locked(struct btree_trans *trans) +static inline void trans_set_locked(struct btree_trans *trans, bool try) { if (!trans->locked) { - lock_acquire_exclusive(&trans->dep_map, 0, 0, NULL, _THIS_IP_); + lock_acquire_exclusive(&trans->dep_map, 0, try, NULL, _THIS_IP_); trans->locked = true; trans->last_unlock_ip = 0; @@ -282,7 +282,7 @@ static inline int btree_node_lock(struct btree_trans *trans, int ret = 0; EBUG_ON(level >= BTREE_MAX_DEPTH); - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); if (likely(six_trylock_type(&b->lock, type)) || btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) || diff --git a/libbcachefs/btree_node_scan.c b/libbcachefs/btree_node_scan.c index 5fbf3576..eeafb5e7 100644 --- a/libbcachefs/btree_node_scan.c +++ b/libbcachefs/btree_node_scan.c @@ -159,6 +159,9 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca, return; if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(&bn->keys))) { + if (!c->chacha20) + return; + struct nonce nonce = btree_nonce(&bn->keys, 0); unsigned bytes = (void *) &bn->keys - (void *) &bn->flags; @@ -186,7 +189,7 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca, .ptrs[0].type = 1 << BCH_EXTENT_ENTRY_ptr, .ptrs[0].offset = offset, .ptrs[0].dev = ca->dev_idx, - .ptrs[0].gen = *bucket_gen(ca, sector_to_bucket(ca, offset)), + .ptrs[0].gen = bucket_gen_get(ca, sector_to_bucket(ca, offset)), }; rcu_read_unlock(); @@ -535,7 +538,12 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree, bch_verbose(c, "%s(): recovering %s", __func__, buf.buf); printbuf_exit(&buf); - BUG_ON(bch2_bkey_validate(c, bkey_i_to_s_c(&tmp.k), BKEY_TYPE_btree, 0)); + BUG_ON(bch2_bkey_validate(c, bkey_i_to_s_c(&tmp.k), + (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = level + 1, + .btree = btree, + })); ret = bch2_journal_key_insert(c, btree, level + 1, &tmp.k); if (ret) diff --git a/libbcachefs/btree_trans_commit.c b/libbcachefs/btree_trans_commit.c index b47f1188..78d72c26 100644 --- a/libbcachefs/btree_trans_commit.c +++ b/libbcachefs/btree_trans_commit.c @@ -619,8 +619,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, unsigned u64s = 0; int ret = 0; - bch2_trans_verify_not_unlocked(trans); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); if (race_fault()) { trace_and_count(c, trans_restart_fault_inject, trans, trace_ip); @@ -727,7 +726,12 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit; ret = bch2_bkey_validate(c, bkey_i_to_s_c(i->k), - i->bkey_type, invalid_flags); + (struct bkey_validate_context) { + .from = BKEY_VALIDATE_commit, + .level = i->level, + .btree = i->btree_id, + .flags = invalid_flags, + }); if (unlikely(ret)){ bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n", trans->fn, (void *) i->ip_allocated); @@ -971,24 +975,6 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags, return ret; } -static noinline int -bch2_trans_commit_get_rw_cold(struct btree_trans *trans, unsigned flags) -{ - struct bch_fs *c = trans->c; - int ret; - - if (likely(!(flags & BCH_TRANS_COMMIT_lazy_rw)) || - test_bit(BCH_FS_started, &c->flags)) - return -BCH_ERR_erofs_trans_commit; - - ret = drop_locks_do(trans, 
bch2_fs_read_write_early(c)); - if (ret) - return ret; - - bch2_write_ref_get(c, BCH_WRITE_REF_trans); - return 0; -} - /* * This is for updates done in the early part of fsck - btree_gc - before we've * gone RW. we only add the new key to the list of keys for journal replay to @@ -999,6 +985,8 @@ do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans) { struct bch_fs *c = trans->c; + BUG_ON(current != c->recovery_task); + trans_for_each_update(trans, i) { int ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->k); if (ret) @@ -1024,8 +1012,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) struct bch_fs *c = trans->c; int ret = 0; - bch2_trans_verify_not_unlocked(trans); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); if (!trans->nr_updates && !trans->journal_entries_u64s) @@ -1035,16 +1022,13 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) if (ret) goto out_reset; - if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) { - ret = do_bch2_trans_commit_to_journal_replay(trans); - goto out_reset; - } - if (!(flags & BCH_TRANS_COMMIT_no_check_rw) && unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))) { - ret = bch2_trans_commit_get_rw_cold(trans, flags); - if (ret) - goto out_reset; + if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) + ret = do_bch2_trans_commit_to_journal_replay(trans); + else + ret = -BCH_ERR_erofs_trans_commit; + goto out_reset; } EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags)); @@ -1089,8 +1073,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) } retry: errored_at = NULL; - bch2_trans_verify_not_unlocked(trans); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) memset(&trans->journal_res, 0, sizeof(trans->journal_res)); memset(&trans->fs_usage_delta, 0, sizeof(trans->fs_usage_delta)); diff --git a/libbcachefs/btree_update.c b/libbcachefs/btree_update.c index 79a274dc..06fd5aa6 100644 --- a/libbcachefs/btree_update.c +++ b/libbcachefs/btree_update.c @@ -296,7 +296,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans, BTREE_ITER_intent| BTREE_ITER_with_updates| BTREE_ITER_not_extents); - k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX)); + k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX)); if ((ret = bkey_err(k))) goto err; if (!k.k) @@ -323,7 +323,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans, goto out; next: bch2_btree_iter_advance(&iter); - k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX)); + k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX)); if ((ret = bkey_err(k))) goto err; if (!k.k) @@ -588,12 +588,9 @@ struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsi int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter, enum btree_id btree, struct bpos end) { - struct bkey_s_c k; - int ret = 0; - bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_intent); - k = bch2_btree_iter_prev(iter); - ret = bkey_err(k); + struct bkey_s_c k = bch2_btree_iter_peek_prev(iter); + int ret = bkey_err(k); if (ret) goto err; @@ -672,27 +669,19 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k, bch2_btree_insert_trans(trans, id, k, iter_flags)); } -int bch2_btree_delete_extent_at(struct btree_trans *trans, struct btree_iter *iter, - unsigned len, 
unsigned update_flags) +int bch2_btree_delete_at(struct btree_trans *trans, + struct btree_iter *iter, unsigned update_flags) { - struct bkey_i *k; - - k = bch2_trans_kmalloc(trans, sizeof(*k)); - if (IS_ERR(k)) - return PTR_ERR(k); + struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k)); + int ret = PTR_ERR_OR_ZERO(k); + if (ret) + return ret; bkey_init(&k->k); k->k.p = iter->pos; - bch2_key_resize(&k->k, len); return bch2_trans_update(trans, iter, k, update_flags); } -int bch2_btree_delete_at(struct btree_trans *trans, - struct btree_iter *iter, unsigned update_flags) -{ - return bch2_btree_delete_extent_at(trans, iter, 0, update_flags); -} - int bch2_btree_delete(struct btree_trans *trans, enum btree_id btree, struct bpos pos, unsigned update_flags) @@ -721,7 +710,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id, int ret = 0; bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent); - while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) { + while ((k = bch2_btree_iter_peek_max(&iter, end)).k) { struct disk_reservation disk_res = bch2_disk_reservation_init(trans->c, 0); struct bkey_i delete; @@ -794,8 +783,7 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id, return ret; } -int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree, - struct bpos pos, bool set) +int bch2_btree_bit_mod_iter(struct btree_trans *trans, struct btree_iter *iter, bool set) { struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k)); int ret = PTR_ERR_OR_ZERO(k); @@ -804,13 +792,21 @@ int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree, bkey_init(&k->k); k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted; - k->k.p = pos; + k->k.p = iter->pos; + if (iter->flags & BTREE_ITER_is_extents) + bch2_key_resize(&k->k, 1); + return bch2_trans_update(trans, iter, k, 0); +} + +int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree, + struct bpos pos, bool set) +{ struct btree_iter iter; bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent); - ret = bch2_btree_iter_traverse(&iter) ?: - bch2_trans_update(trans, &iter, k, 0); + int ret = bch2_btree_iter_traverse(&iter) ?: + bch2_btree_bit_mod_iter(trans, &iter, set); bch2_trans_iter_exit(trans, &iter); return ret; } @@ -865,8 +861,7 @@ __bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt, memcpy(l->d, buf.buf, buf.pos); c->journal.early_journal_entries.nr += jset_u64s(u64s); } else { - ret = bch2_trans_commit_do(c, NULL, NULL, - BCH_TRANS_COMMIT_lazy_rw|commit_flags, + ret = bch2_trans_commit_do(c, NULL, NULL, commit_flags, __bch2_trans_log_msg(trans, &buf, u64s)); } err: 
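The new bch2_btree_bit_mod_iter() above factors the key construction out of bch2_btree_bit_mod(), so callers that already hold a positioned, traversed iterator can reuse it; note that on extents-style btrees the key is resized to a single sector. A usage sketch, equivalent to what bch2_btree_bit_mod() now does internally - illustrative only, with an arbitrary btree id and a running transaction assumed:

static int set_bitset_key(struct btree_trans *trans, struct bpos pos)
{
	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, BTREE_ID_deleted_inodes, pos,
			     BTREE_ITER_intent);

	/* queue up a KEY_TYPE_set key at pos (true = set, false = clear) */
	int ret = bch2_btree_iter_traverse(&iter) ?:
		  bch2_btree_bit_mod_iter(trans, &iter, true);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

diff --git a/libbcachefs/btree_update.h b/libbcachefs/btree_update.h index 70b3c989..58df2019 100644 --- a/libbcachefs/btree_update.h +++ b/libbcachefs/btree_update.h @@ -24,7 +24,6 @@ void bch2_btree_insert_key_leaf(struct btree_trans *, struct btree_path *, #define BCH_TRANS_COMMIT_FLAGS() \ x(no_enospc, "don't check for enospc") \ x(no_check_rw, "don't attempt to take a ref on c->writes") \ - x(lazy_rw, "go read-write if we haven't yet - only for use in recovery") \ x(no_journal_res, "don't take a journal reservation, instead " \ "pin journal entry referred to by trans->journal_res.seq") \ x(journal_reclaim, "operation required for journal reclaim; may return error" \ @@ -47,8 +46,6 @@ enum bch_trans_commit_flags { void bch2_trans_commit_flags_to_text(struct printbuf *, enum bch_trans_commit_flags); -int bch2_btree_delete_extent_at(struct btree_trans *, struct btree_iter 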
*, - unsigned, unsigned); int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned); int bch2_btree_delete(struct btree_trans *, enum btree_id, struct bpos, unsigned); @@ -66,6 +63,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id, int bch2_btree_delete_range(struct bch_fs *, enum btree_id, struct bpos, struct bpos, unsigned, u64 *); +int bch2_btree_bit_mod_iter(struct btree_trans *, struct btree_iter *, bool); int bch2_btree_bit_mod(struct btree_trans *, enum btree_id, struct bpos, bool); int bch2_btree_bit_mod_buffered(struct btree_trans *, enum btree_id, struct bpos, bool); @@ -244,7 +242,8 @@ static inline struct bkey_i *bch2_bkey_make_mut_noupdate(struct btree_trans *tra KEY_TYPE_##_type, sizeof(struct bkey_i_##_type))) static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter, - struct bkey_s_c *k, unsigned flags, + struct bkey_s_c *k, + enum btree_iter_update_trigger_flags flags, unsigned type, unsigned min_bytes) { struct bkey_i *mut = __bch2_bkey_make_mut_noupdate(trans, *k, type, min_bytes); @@ -261,8 +260,9 @@ static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, str return mut; } -static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter, - struct bkey_s_c *k, unsigned flags) +static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, + struct btree_iter *iter, struct bkey_s_c *k, + enum btree_iter_update_trigger_flags flags) { return __bch2_bkey_make_mut(trans, iter, k, flags, 0, 0); } @@ -274,7 +274,8 @@ static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struc static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, - unsigned flags, unsigned type, unsigned min_bytes) + enum btree_iter_update_trigger_flags flags, + unsigned type, unsigned min_bytes) { struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags|BTREE_ITER_intent, type); @@ -289,7 +290,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *tr static inline struct bkey_i *bch2_bkey_get_mut_noupdate(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, - unsigned flags) + enum btree_iter_update_trigger_flags flags) { return __bch2_bkey_get_mut_noupdate(trans, iter, btree_id, pos, flags, 0, 0); } @@ -297,7 +298,8 @@ static inline struct bkey_i *bch2_bkey_get_mut_noupdate(struct btree_trans *tran static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, - unsigned flags, unsigned type, unsigned min_bytes) + enum btree_iter_update_trigger_flags flags, + unsigned type, unsigned min_bytes) { struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(trans, iter, btree_id, pos, flags|BTREE_ITER_intent, type, min_bytes); @@ -318,7 +320,8 @@ static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans, static inline struct bkey_i *bch2_bkey_get_mut_minsize(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, - unsigned flags, unsigned min_bytes) + enum btree_iter_update_trigger_flags flags, + unsigned min_bytes) { return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, min_bytes); } @@ -326,7 +329,7 @@ static inline struct bkey_i *bch2_bkey_get_mut_minsize(struct btree_trans *trans static inline struct bkey_i *bch2_bkey_get_mut(struct 
btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, - unsigned flags) + enum btree_iter_update_trigger_flags flags) { return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, 0); } @@ -337,7 +340,8 @@ static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans, KEY_TYPE_##_type, sizeof(struct bkey_i_##_type))) static inline struct bkey_i *__bch2_bkey_alloc(struct btree_trans *trans, struct btree_iter *iter, - unsigned flags, unsigned type, unsigned val_size) + enum btree_iter_update_trigger_flags flags, + unsigned type, unsigned val_size) { struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k) + val_size); int ret; diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c index 54a2435b..f2a1d5d3 100644 --- a/libbcachefs/btree_update_interior.c +++ b/libbcachefs/btree_update_interior.c @@ -58,11 +58,15 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key, b->data->min_key)); + bch2_bkey_buf_init(&prev); + bkey_init(&prev.k->k); + bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b); + if (b == btree_node_root(c, b)) { if (!bpos_eq(b->data->min_key, POS_MIN)) { printbuf_reset(&buf); bch2_bpos_to_text(&buf, b->data->min_key); - need_fsck_err(trans, btree_root_bad_min_key, + log_fsck_err(trans, btree_root_bad_min_key, "btree root with incorrect min_key: %s", buf.buf); goto topology_repair; } @@ -70,18 +74,14 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) if (!bpos_eq(b->data->max_key, SPOS_MAX)) { printbuf_reset(&buf); bch2_bpos_to_text(&buf, b->data->max_key); - need_fsck_err(trans, btree_root_bad_max_key, + log_fsck_err(trans, btree_root_bad_max_key, "btree root with incorrect max_key: %s", buf.buf); goto topology_repair; } } if (!b->c.level) - return 0; - - bch2_bkey_buf_init(&prev); - bkey_init(&prev.k->k); - bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b); + goto out; while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { if (k.k->type != KEY_TYPE_btree_ptr_v2) @@ -106,7 +106,7 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) prt_str(&buf, "\n next "); bch2_bkey_val_to_text(&buf, c, k); - need_fsck_err(trans, btree_node_topology_bad_min_key, "%s", buf.buf); + log_fsck_err(trans, btree_node_topology_bad_min_key, "%s", buf.buf); goto topology_repair; } @@ -123,7 +123,7 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) prt_str(&buf, " node "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); - need_fsck_err(trans, btree_node_topology_empty_interior_node, "%s", buf.buf); + log_fsck_err(trans, btree_node_topology_empty_interior_node, "%s", buf.buf); goto topology_repair; } else if (!bpos_eq(prev.k->k.p, b->key.k.p)) { bch2_topology_error(c); @@ -136,7 +136,7 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) prt_str(&buf, "\n last key "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k)); - need_fsck_err(trans, btree_node_topology_bad_max_key, "%s", buf.buf); + log_fsck_err(trans, btree_node_topology_bad_max_key, "%s", buf.buf); goto topology_repair; } out: @@ -146,13 +146,7 @@ fsck_err: printbuf_exit(&buf); return ret; topology_repair: - if ((c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) && - c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) { - bch2_inconsistent_error(c); - ret = -BCH_ERR_btree_need_topology_repair; - } else { - ret = 
bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology); - } + ret = bch2_topology_error(c); goto out; } @@ -237,10 +231,6 @@ static void __btree_node_free(struct btree_trans *trans, struct btree *b) BUG_ON(b->will_make_reachable); clear_btree_node_noevict(b); - - mutex_lock(&c->btree_cache.lock); - list_move(&b->list, &c->btree_cache.freeable); - mutex_unlock(&c->btree_cache.lock); } static void bch2_btree_node_free_inmem(struct btree_trans *trans, @@ -252,12 +242,12 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans, bch2_btree_node_lock_write_nofail(trans, path, &b->c); + __btree_node_free(trans, b); + mutex_lock(&c->btree_cache.lock); bch2_btree_node_hash_remove(&c->btree_cache, b); mutex_unlock(&c->btree_cache.lock); - __btree_node_free(trans, b); - six_unlock_write(&b->c.lock); mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); @@ -289,7 +279,7 @@ static void bch2_btree_node_free_never_used(struct btree_update *as, clear_btree_node_need_write(b); mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, b); + __bch2_btree_node_hash_remove(&c->btree_cache, b); mutex_unlock(&c->btree_cache.lock); BUG_ON(p->nr >= ARRAY_SIZE(p->b)); @@ -521,8 +511,7 @@ static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans * btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); __btree_node_free(trans, b); - six_unlock_write(&b->c.lock); - six_unlock_intent(&b->c.lock); + bch2_btree_node_to_freelist(c, b); } } } @@ -1371,9 +1360,14 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, if (unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags))) bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p); - if (bch2_bkey_validate(c, bkey_i_to_s_c(insert), - btree_node_type(b), BCH_VALIDATE_write) ?: - bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), BCH_VALIDATE_write)) { + struct bkey_validate_context from = (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = b->c.level, + .btree = b->c.btree_id, + .flags = BCH_VALIDATE_commit, + }; + if (bch2_bkey_validate(c, bkey_i_to_s_c(insert), from) ?: + bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), from)) { bch2_fs_inconsistent(c, "%s: inserting invalid bkey", __func__); dump_stack(); } @@ -1423,15 +1417,35 @@ bch2_btree_insert_keys_interior(struct btree_update *as, (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0)) ; - while (!bch2_keylist_empty(keys)) { - insert = bch2_keylist_front(keys); + for (; + insert != keys->top && bpos_le(insert->k.p, b->key.k.p); + insert = bkey_next(insert)) + bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert); - if (bpos_gt(insert->k.p, b->key.k.p)) - break; + if (bch2_btree_node_check_topology(trans, b)) { + struct printbuf buf = PRINTBUF; - bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert); - bch2_keylist_pop_front(keys); + for (struct bkey_i *k = keys->keys; + k != insert; + k = bkey_next(k)) { + bch2_bkey_val_to_text(&buf, trans->c, bkey_i_to_s_c(k)); + prt_newline(&buf); + } + + panic("%s(): check_topology error: inserted keys\n%s", __func__, buf.buf); } + + memmove_u64s_down(keys->keys, insert, keys->top_p - insert->_data); + keys->top_p -= insert->_data - keys->keys_p; +} + +static bool key_deleted_in_insert(struct keylist *insert_keys, struct bpos pos) +{ + if (insert_keys) + for_each_keylist_key(insert_keys, k) + if (bkey_deleted(&k->k) && bpos_eq(k->k.p, pos)) 
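/*
 * bch2_btree_insert_keys_interior() now consumes a prefix of the
 * keylist and compacts the remainder with one memmove instead of
 * popping keys off the front one at a time. The pointer arithmetic,
 * spelled out (fields from struct keylist):
 *
 *	// 'insert' points at the first key NOT inserted into b;
 *	// slide [insert, top) down to the start of the buffer:
 *	memmove_u64s_down(keys->keys, insert,
 *			  keys->top_p - insert->_data);
 *	// then shrink 'top' by the number of u64s consumed:
 *	keys->top_p -= insert->_data - keys->keys_p;
 *
 * The check_topology call also moves in here, so a bad interior node
 * is caught right where the keys went in, with the inserted keys
 * printed before panicking.
 */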
+ return true; + return false; } /* @@ -1441,7 +1455,8 @@ bch2_btree_insert_keys_interior(struct btree_update *as, static void __btree_split_node(struct btree_update *as, struct btree_trans *trans, struct btree *b, - struct btree *n[2]) + struct btree *n[2], + struct keylist *insert_keys) { struct bkey_packed *k; struct bpos n1_pos = POS_MIN; @@ -1476,7 +1491,8 @@ static void __btree_split_node(struct btree_update *as, if (b->c.level && u64s < n1_u64s && u64s + k->u64s >= n1_u64s && - bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, uk.p)) + (bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, uk.p) || + key_deleted_in_insert(insert_keys, uk.p))) n1_u64s += k->u64s; i = u64s >= n1_u64s; @@ -1569,8 +1585,6 @@ static void btree_split_insert_keys(struct btree_update *as, bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p); bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); - - BUG_ON(bch2_btree_node_check_topology(trans, b)); } } @@ -1603,7 +1617,7 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level); n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level); - __btree_split_node(as, trans, b, n); + __btree_split_node(as, trans, b, n, keys); if (keys) { btree_split_insert_keys(as, trans, path, n1, keys); @@ -1821,8 +1835,6 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t btree_update_updated_node(as, b); bch2_btree_node_unlock_write(trans, path, b); - - BUG_ON(bch2_btree_node_check_topology(trans, b)); return 0; split: /* @@ -1947,8 +1959,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, u64 start_time = local_clock(); int ret = 0; - bch2_trans_verify_not_in_restart(trans); - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); BUG_ON(!trans->paths[path].should_be_locked); BUG_ON(!btree_node_locked(&trans->paths[path], level)); @@ -2195,42 +2206,50 @@ struct async_btree_rewrite { struct list_head list; enum btree_id btree_id; unsigned level; - struct bpos pos; - __le64 seq; + struct bkey_buf key; }; static int async_btree_node_rewrite_trans(struct btree_trans *trans, struct async_btree_rewrite *a) { - struct bch_fs *c = trans->c; struct btree_iter iter; - struct btree *b; - int ret; - - bch2_trans_node_iter_init(trans, &iter, a->btree_id, a->pos, + bch2_trans_node_iter_init(trans, &iter, + a->btree_id, a->key.k->k.p, BTREE_MAX_DEPTH, a->level, 0); - b = bch2_btree_iter_peek_node(&iter); - ret = PTR_ERR_OR_ZERO(b); + struct btree *b = bch2_btree_iter_peek_node(&iter); + int ret = PTR_ERR_OR_ZERO(b); if (ret) goto out; - if (!b || b->data->keys.seq != a->seq) { + bool found = b && btree_ptr_hash_val(&b->key) == btree_ptr_hash_val(a->key.k); + ret = found + ? bch2_btree_node_rewrite(trans, &iter, b, 0) + : -ENOENT; + +#if 0 + /* Tracepoint... 
*/ + if (!ret || ret == -ENOENT) { + struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; - if (b) - bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); - else - prt_str(&buf, "(null"); - bch_info(c, "%s: node to rewrite not found:, searching for seq %llu, got\n%s", - __func__, a->seq, buf.buf); + if (!ret) { + prt_printf(&buf, "rewrite node:\n "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(a->key.k)); + } else { + prt_printf(&buf, "node to rewrite not found:\n want: "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(a->key.k)); + prt_printf(&buf, "\n got: "); + if (b) + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); + else + prt_str(&buf, "(null)"); + } + bch_info(c, "%s", buf.buf); printbuf_exit(&buf); - goto out; } - - ret = bch2_btree_node_rewrite(trans, &iter, b, 0); +#endif out: bch2_trans_iter_exit(trans, &iter); - return ret; } @@ -2241,81 +2260,96 @@ static void async_btree_node_rewrite_work(struct work_struct *work) struct bch_fs *c = a->c; int ret = bch2_trans_do(c, async_btree_node_rewrite_trans(trans, a)); - bch_err_fn_ratelimited(c, ret); + if (ret != -ENOENT) + bch_err_fn_ratelimited(c, ret); + + spin_lock(&c->btree_node_rewrites_lock); + list_del(&a->list); + spin_unlock(&c->btree_node_rewrites_lock); + + closure_wake_up(&c->btree_node_rewrites_wait); + + bch2_bkey_buf_exit(&a->key, c); bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite); kfree(a); } void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b) { - struct async_btree_rewrite *a; - int ret; - - a = kmalloc(sizeof(*a), GFP_NOFS); - if (!a) { - bch_err(c, "%s: error allocating memory", __func__); + struct async_btree_rewrite *a = kmalloc(sizeof(*a), GFP_NOFS); + if (!a) return; - } a->c = c; a->btree_id = b->c.btree_id; a->level = b->c.level; - a->pos = b->key.k.p; - a->seq = b->data->keys.seq; INIT_WORK(&a->work, async_btree_node_rewrite_work); - if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) { - mutex_lock(&c->pending_node_rewrites_lock); - list_add(&a->list, &c->pending_node_rewrites); - mutex_unlock(&c->pending_node_rewrites_lock); - return; - } + bch2_bkey_buf_init(&a->key); + bch2_bkey_buf_copy(&a->key, c, &b->key); - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) { - if (test_bit(BCH_FS_started, &c->flags)) { - bch_err(c, "%s: error getting c->writes ref", __func__); - kfree(a); - return; - } + bool now = false, pending = false; - ret = bch2_fs_read_write_early(c); - bch_err_msg(c, ret, "going read-write"); - if (ret) { - kfree(a); - return; - } + spin_lock(&c->btree_node_rewrites_lock); + if (bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) { + list_add(&a->list, &c->btree_node_rewrites); + now = true; + } else if (!test_bit(BCH_FS_may_go_rw, &c->flags)) { + list_add(&a->list, &c->btree_node_rewrites_pending); + pending = true; + } + spin_unlock(&c->btree_node_rewrites_lock); - bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite); + if (now) { + queue_work(c->btree_node_rewrite_worker, &a->work); + } else if (pending) { + /* bch2_do_pending_node_rewrites will execute */ + } else { + bch2_bkey_buf_exit(&a->key, c); + kfree(a); } +} - queue_work(c->btree_node_rewrite_worker, &a->work); +void bch2_async_btree_node_rewrites_flush(struct bch_fs *c) +{ + closure_wait_event(&c->btree_node_rewrites_wait, + list_empty(&c->btree_node_rewrites)); } void bch2_do_pending_node_rewrites(struct bch_fs *c) { - struct async_btree_rewrite *a, *n; - - mutex_lock(&c->pending_node_rewrites_lock); - list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) { - 
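/*
 * The pending-rewrite machinery switches from a mutex-protected list
 * to two lists under a spinlock, plus a closure waitlist. The worker
 * removes itself and kicks the waitlist, which is all that
 * bch2_async_btree_node_rewrites_flush() needs in order to wait for
 * in-flight rewrites to drain. The pairing, condensed from this hunk:
 *
 *	// worker side, after the rewrite completes:
 *	spin_lock(&c->btree_node_rewrites_lock);
 *	list_del(&a->list);
 *	spin_unlock(&c->btree_node_rewrites_lock);
 *	closure_wake_up(&c->btree_node_rewrites_wait);
 *
 *	// flush side:
 *	closure_wait_event(&c->btree_node_rewrites_wait,
 *			   list_empty(&c->btree_node_rewrites));
 */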
list_del(&a->list); + while (1) { + spin_lock(&c->btree_node_rewrites_lock); + struct async_btree_rewrite *a = + list_pop_entry(&c->btree_node_rewrites_pending, + struct async_btree_rewrite, list); + if (a) + list_add(&a->list, &c->btree_node_rewrites); + spin_unlock(&c->btree_node_rewrites_lock); + + if (!a) + break; bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite); queue_work(c->btree_node_rewrite_worker, &a->work); } - mutex_unlock(&c->pending_node_rewrites_lock); } void bch2_free_pending_node_rewrites(struct bch_fs *c) { - struct async_btree_rewrite *a, *n; + while (1) { + spin_lock(&c->btree_node_rewrites_lock); + struct async_btree_rewrite *a = + list_pop_entry(&c->btree_node_rewrites_pending, + struct async_btree_rewrite, list); + spin_unlock(&c->btree_node_rewrites_lock); - mutex_lock(&c->pending_node_rewrites_lock); - list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) { - list_del(&a->list); + if (!a) + break; + bch2_bkey_buf_exit(&a->key, c); kfree(a); } - mutex_unlock(&c->pending_node_rewrites_lock); } static int __bch2_btree_node_update_key(struct btree_trans *trans, @@ -2392,7 +2426,8 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, if (new_hash) { mutex_lock(&c->btree_cache.lock); bch2_btree_node_hash_remove(&c->btree_cache, new_hash); - bch2_btree_node_hash_remove(&c->btree_cache, b); + + __bch2_btree_node_hash_remove(&c->btree_cache, b); bkey_copy(&b->key, new_key); ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); @@ -2671,6 +2706,9 @@ void bch2_btree_reserve_cache_to_text(struct printbuf *out, struct bch_fs *c) void bch2_fs_btree_interior_update_exit(struct bch_fs *c) { + WARN_ON(!list_empty(&c->btree_node_rewrites)); + WARN_ON(!list_empty(&c->btree_node_rewrites_pending)); + if (c->btree_node_rewrite_worker) destroy_workqueue(c->btree_node_rewrite_worker); if (c->btree_interior_update_worker) @@ -2686,8 +2724,9 @@ void bch2_fs_btree_interior_update_init_early(struct bch_fs *c) mutex_init(&c->btree_interior_update_lock); INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work); - INIT_LIST_HEAD(&c->pending_node_rewrites); - mutex_init(&c->pending_node_rewrites_lock); + INIT_LIST_HEAD(&c->btree_node_rewrites); + INIT_LIST_HEAD(&c->btree_node_rewrites_pending); + spin_lock_init(&c->btree_node_rewrites_lock); } int bch2_fs_btree_interior_update_init(struct bch_fs *c) diff --git a/libbcachefs/btree_update_interior.h b/libbcachefs/btree_update_interior.h index 10f40095..7930ffea 100644 --- a/libbcachefs/btree_update_interior.h +++ b/libbcachefs/btree_update_interior.h @@ -159,7 +159,7 @@ static inline int bch2_foreground_maybe_merge(struct btree_trans *trans, unsigned level, unsigned flags) { - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); return bch2_foreground_maybe_merge_sibling(trans, path, level, flags, btree_prev_sib) ?: @@ -334,6 +334,7 @@ void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *); struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *, struct jset_entry *, unsigned long); +void bch2_async_btree_node_rewrites_flush(struct bch_fs *); void bch2_do_pending_node_rewrites(struct bch_fs *); void bch2_free_pending_node_rewrites(struct bch_fs *); diff --git a/libbcachefs/btree_write_buffer.c b/libbcachefs/btree_write_buffer.c index 3f56b584..49ce2d1e 100644 --- a/libbcachefs/btree_write_buffer.c +++ b/libbcachefs/btree_write_buffer.c @@ -19,8 +19,6 @@ static int bch2_btree_write_buffer_journal_flush(struct journal *, struct 
journal_entry_pin *, u64); -static int bch2_journal_keys_to_write_buffer(struct bch_fs *, struct journal_buf *); - static inline bool __wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r) { return (cmp_int(l->hi, r->hi) ?: @@ -277,6 +275,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags); int ret = 0; + ret = bch2_journal_error(&c->journal); + if (ret) + return ret; + bch2_trans_unlock(trans); bch2_trans_begin(trans); @@ -477,21 +479,56 @@ err: return ret; } -static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 seq) +static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf) +{ + struct journal_keys_to_wb dst; + int ret = 0; + + bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq)); + + for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) { + jset_entry_for_each_key(entry, k) { + ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k); + if (ret) + goto out; + } + + entry->type = BCH_JSET_ENTRY_btree_keys; + } +out: + ret = bch2_journal_keys_to_write_buffer_end(c, &dst) ?: ret; + return ret; +} + +static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 max_seq) { struct journal *j = &c->journal; struct journal_buf *buf; + bool blocked; int ret = 0; - while (!ret && (buf = bch2_next_write_buffer_flush_journal_buf(j, seq))) { + while (!ret && (buf = bch2_next_write_buffer_flush_journal_buf(j, max_seq, &blocked))) { ret = bch2_journal_keys_to_write_buffer(c, buf); + + if (!blocked && !ret) { + spin_lock(&j->lock); + buf->need_flush_to_write_buffer = false; + spin_unlock(&j->lock); + } + mutex_unlock(&j->buf_lock); + + if (blocked) { + bch2_journal_unblock(j); + break; + } } return ret; } -static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq) +static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 max_seq, + bool *did_work) { struct bch_fs *c = trans->c; struct btree_write_buffer *wb = &c->btree_write_buffer; @@ -500,7 +537,9 @@ static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq) do { bch2_trans_unlock(trans); - fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq); + fetch_from_journal_err = fetch_wb_keys_from_journal(c, max_seq); + + *did_work |= wb->inc.keys.nr || wb->flushing.keys.nr; /* * On memory allocation failure, bch2_btree_write_buffer_flush_locked() @@ -511,8 +550,8 @@ static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq) mutex_unlock(&wb->flushing.lock); } while (!ret && (fetch_from_journal_err || - (wb->inc.pin.seq && wb->inc.pin.seq <= seq) || - (wb->flushing.pin.seq && wb->flushing.pin.seq <= seq))); + (wb->inc.pin.seq && wb->inc.pin.seq <= max_seq) || + (wb->flushing.pin.seq && wb->flushing.pin.seq <= max_seq))); return ret; } @@ -521,17 +560,34 @@ static int bch2_btree_write_buffer_journal_flush(struct journal *j, struct journal_entry_pin *_pin, u64 seq) { struct bch_fs *c = container_of(j, struct bch_fs, journal); + bool did_work = false; - return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq)); + return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq, &did_work)); } int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans) { struct bch_fs *c = trans->c; + bool did_work = false; trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_); - return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal)); + return 
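/*
 * btree_write_buffer_flush_seq() grows a *did_work out-parameter, set
 * whenever a pass finds keys in wb->inc or wb->flushing. The
 * journal-pin and sync callers here discard it; it exists for
 * bch2_btree_write_buffer_flush_going_ro() below, so the read-only
 * transition can tell whether a flush pass made progress. A sketch of
 * that caller's loop (hypothetical; the loop lives in the go-RO path,
 * not in this file):
 *
 *	// keep flushing until a pass moves nothing
 *	while (bch2_btree_write_buffer_flush_going_ro(c))
 *		;
 */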
btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal), &did_work); +} + +/* + * The write buffer requires flushing when going RO: keys in the journal for the + * write buffer don't have a journal pin yet + */ +bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *c) +{ + if (bch2_journal_error(&c->journal)) + return false; + + bool did_work = false; + bch2_trans_run(c, btree_write_buffer_flush_seq(trans, + journal_cur_seq(&c->journal), &did_work)); + return did_work; } int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans) @@ -747,31 +803,6 @@ int bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_ return ret; } -static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf) -{ - struct journal_keys_to_wb dst; - int ret = 0; - - bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq)); - - for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) { - jset_entry_for_each_key(entry, k) { - ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k); - if (ret) - goto out; - } - - entry->type = BCH_JSET_ENTRY_btree_keys; - } - - spin_lock(&c->journal.lock); - buf->need_flush_to_write_buffer = false; - spin_unlock(&c->journal.lock); -out: - ret = bch2_journal_keys_to_write_buffer_end(c, &dst) ?: ret; - return ret; -} - static int wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size) { if (wb->keys.size >= new_size) diff --git a/libbcachefs/btree_write_buffer.h b/libbcachefs/btree_write_buffer.h index 725e7965..d535cea2 100644 --- a/libbcachefs/btree_write_buffer.h +++ b/libbcachefs/btree_write_buffer.h @@ -21,6 +21,7 @@ static inline bool bch2_btree_write_buffer_must_wait(struct bch_fs *c) struct btree_trans; int bch2_btree_write_buffer_flush_sync(struct btree_trans *); +bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *); int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *); int bch2_btree_write_buffer_tryflush(struct btree_trans *); diff --git a/libbcachefs/buckets.c b/libbcachefs/buckets.c index ec7d9a59..afd35c93 100644 --- a/libbcachefs/buckets.c +++ b/libbcachefs/buckets.c @@ -18,7 +18,9 @@ #include "error.h" #include "inode.h" #include "movinggc.h" +#include "rebalance.h" #include "recovery.h" +#include "recovery_passes.h" #include "reflink.h" #include "replicas.h" #include "subvolume.h" @@ -401,8 +403,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, BUG_ON(!sectors); if (gen_after(ptr->gen, b_gen)) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - ptr_gen_newer_than_bucket_gen, + bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, ptr_gen_newer_than_bucket_gen, "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n" "while marking %s", ptr->dev, bucket_nr, b_gen, @@ -415,8 +417,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, } if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - ptr_too_stale, + bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, ptr_too_stale, "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" "while marking %s", ptr->dev, bucket_nr, b_gen, @@ -435,8 +437,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, } if (b_gen != ptr->gen) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - stale_dirty_ptr, + bch2_run_explicit_recovery_pass(c, 
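/*
 * All five bucket-ref sanity checks in this function now follow the
 * same recipe: explicitly queue the check_allocations recovery pass,
 * report with log_fsck_err() rather than an interactive fsck prompt,
 * and fall through to the shared err/fsck_err label, which dumps the
 * transaction updates and marks the fs inconsistent. Condensed:
 *
 *	if (gen_after(ptr->gen, b_gen)) {
 *		bch2_run_explicit_recovery_pass(c,
 *			BCH_RECOVERY_PASS_check_allocations);
 *		log_fsck_err(trans, ptr_gen_newer_than_bucket_gen, ...);
 *	}
 *	...
 *  err:
 *  fsck_err:
 *	bch2_dump_trans_updates(trans);
 *	bch2_inconsistent_error(c);
 *	ret = -BCH_ERR_bucket_ref_update;
 */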
BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, stale_dirty_ptr, "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n" "while marking %s", ptr->dev, bucket_nr, b_gen, @@ -451,8 +453,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, } if (bucket_data_type_mismatch(bucket_data_type, ptr_data_type)) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - ptr_bucket_data_type_mismatch, + bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, ptr_bucket_data_type_mismatch, "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n" "while marking %s", ptr->dev, bucket_nr, b_gen, @@ -466,8 +468,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, } if ((u64) *bucket_sectors + sectors > U32_MAX) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - bucket_sector_count_overflow, + bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, bucket_sector_count_overflow, "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n" "while marking %s", ptr->dev, bucket_nr, b_gen, @@ -485,7 +487,9 @@ out: printbuf_exit(&buf); return ret; err: +fsck_err: bch2_dump_trans_updates(trans); + bch2_inconsistent_error(c); ret = -BCH_ERR_bucket_ref_update; goto out; } @@ -581,18 +585,18 @@ static int bch2_trigger_pointer(struct btree_trans *trans, } struct bpos bucket; - struct bch_backpointer bp; + struct bkey_i_backpointer bp; __bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp, abs_sectors); if (flags & BTREE_TRIGGER_transactional) { struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0); ret = PTR_ERR_OR_ZERO(a) ?: - __mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &a->v); + __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v); if (ret) goto err; if (!p.ptr.cached) { - ret = bch2_bucket_backpointer_mod(trans, ca, bucket, bp, k, insert); + ret = bch2_bucket_backpointer_mod(trans, k, &bp, insert); if (ret) goto err; } @@ -610,7 +614,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans, bucket_lock(g); struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old; - ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &new); + ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &new); alloc_to_bucket(g, new); bucket_unlock(g); err_unlock: @@ -951,6 +955,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, enum bch_data_type type, unsigned sectors) { + struct bch_fs *c = trans->c; struct btree_iter iter; int ret = 0; @@ -960,8 +965,8 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, return PTR_ERR(a); if (a->v.data_type && type && a->v.data_type != type) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - bucket_metadata_type_mismatch, + bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, bucket_metadata_type_mismatch, "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n" "while marking %s", iter.pos.inode, iter.pos.offset, a->v.gen, @@ -979,6 +984,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, ret = bch2_trans_update(trans, &iter, &a->k_i, 0); } err: +fsck_err: bch2_trans_iter_exit(trans, &iter); return ret; } @@ -1155,6 +1161,31 @@ int bch2_trans_mark_dev_sbs(struct bch_fs *c) return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional); } +bool 
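/*
 * Out-of-line replacement for the is_superblock_bucket() inline that
 * this commit deletes from buckets.h: the interval-overlap test over
 * the superblock layout is unchanged, but the function now also
 * reserves the journal's buckets, which needs ca->journal and so
 * couldn't live in the header. The overlap test reads as: intervals
 * [b_offset, b_end) and [offset, end) intersect iff
 *
 *	!(offset >= b_end || end <= b_offset)
 */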
bch2_is_superblock_bucket(struct bch_dev *ca, u64 b) +{ + struct bch_sb_layout *layout = &ca->disk_sb.sb->layout; + u64 b_offset = bucket_to_sector(ca, b); + u64 b_end = bucket_to_sector(ca, b + 1); + unsigned i; + + if (!b) + return true; + + for (i = 0; i < layout->nr_superblocks; i++) { + u64 offset = le64_to_cpu(layout->sb_offset[i]); + u64 end = offset + (1 << layout->sb_max_size_bits); + + if (!(offset >= b_end || end <= b_offset)) + return true; + } + + for (i = 0; i < ca->journal.nr; i++) + if (b == ca->journal.buckets[i]) + return true; + + return false; +} + /* Disk reservations: */ #define SECTORS_CACHE 1024 @@ -1266,8 +1297,9 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) BUG_ON(resize && ca->buckets_nouse); - if (!(bucket_gens = kvmalloc(sizeof(struct bucket_gens) + nbuckets, - GFP_KERNEL|__GFP_ZERO))) { + bucket_gens = kvmalloc(struct_size(bucket_gens, b, nbuckets), + GFP_KERNEL|__GFP_ZERO); + if (!bucket_gens) { ret = -BCH_ERR_ENOMEM_bucket_gens; goto err; } @@ -1285,11 +1317,13 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1); if (resize) { - size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets); - + bucket_gens->nbuckets = min(bucket_gens->nbuckets, + old_bucket_gens->nbuckets); + bucket_gens->nbuckets_minus_first = + bucket_gens->nbuckets - bucket_gens->first_bucket; memcpy(bucket_gens->b, old_bucket_gens->b, - n); + bucket_gens->nbuckets); } rcu_assign_pointer(ca->bucket_gens, bucket_gens); diff --git a/libbcachefs/buckets.h b/libbcachefs/buckets.h index fd5e6cca..3bebc4c3 100644 --- a/libbcachefs/buckets.h +++ b/libbcachefs/buckets.h @@ -103,12 +103,18 @@ static inline u8 *bucket_gen(struct bch_dev *ca, size_t b) return gens->b + b; } -static inline u8 bucket_gen_get(struct bch_dev *ca, size_t b) +static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b) +{ + u8 *gen = bucket_gen(ca, b); + return gen ? *gen : -1; +} + +static inline int bucket_gen_get(struct bch_dev *ca, size_t b) { rcu_read_lock(); - u8 gen = *bucket_gen(ca, b); + int ret = bucket_gen_get_rcu(ca, b); rcu_read_unlock(); - return gen; + return ret; } static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca, @@ -169,10 +175,8 @@ static inline int gen_after(u8 a, u8 b) static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr) { - u8 *gen = bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)); - if (!gen) - return -1; - return gen_after(*gen, ptr->gen); + int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr)); + return gen < 0 ? 
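/*
 * bucket_gen_get() and the new bucket_gen_get_rcu() return int rather
 * than u8: a negative value means the bucket index is out of range for
 * the current gens table (e.g. mid-resize), where the old code would
 * have dereferenced bucket_gen()'s NULL return. Callers must propagate
 * negatives instead of treating them as a generation:
 *
 *	int gen = bucket_gen_get(ca, b);
 *	if (gen < 0)
 *		return gen;		// bucket no longer exists
 *	stale = gen_after(gen, ptr->gen);
 */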
gen : gen_after(gen, ptr->gen); } /** @@ -184,7 +188,6 @@ static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr rcu_read_lock(); int ret = dev_ptr_stale_rcu(ca, ptr); rcu_read_unlock(); - return ret; } @@ -305,26 +308,7 @@ int bch2_trans_mark_dev_sbs_flags(struct bch_fs *, enum btree_iter_update_trigger_flags); int bch2_trans_mark_dev_sbs(struct bch_fs *); -static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b) -{ - struct bch_sb_layout *layout = &ca->disk_sb.sb->layout; - u64 b_offset = bucket_to_sector(ca, b); - u64 b_end = bucket_to_sector(ca, b + 1); - unsigned i; - - if (!b) - return true; - - for (i = 0; i < layout->nr_superblocks; i++) { - u64 offset = le64_to_cpu(layout->sb_offset[i]); - u64 end = offset + (1 << layout->sb_max_size_bits); - - if (!(offset >= b_end || end <= b_offset)) - return true; - } - - return false; -} +bool bch2_is_superblock_bucket(struct bch_dev *, u64); static inline const char *bch2_data_type_str(enum bch_data_type type) { diff --git a/libbcachefs/buckets_types.h b/libbcachefs/buckets_types.h index 28bd09a2..7174047b 100644 --- a/libbcachefs/buckets_types.h +++ b/libbcachefs/buckets_types.h @@ -24,7 +24,7 @@ struct bucket_gens { u16 first_bucket; size_t nbuckets; size_t nbuckets_minus_first; - u8 b[]; + u8 b[] __counted_by(nbuckets); }; struct bch_dev_usage { diff --git a/libbcachefs/chardev.c b/libbcachefs/chardev.c index 2182b555..46e9e321 100644 --- a/libbcachefs/chardev.c +++ b/libbcachefs/chardev.c @@ -6,11 +6,11 @@ #include "buckets.h" #include "chardev.h" #include "disk_accounting.h" +#include "fsck.h" #include "journal.h" #include "move.h" #include "recovery_passes.h" #include "replicas.h" -#include "super.h" #include "super-io.h" #include "thread_with_file.h" @@ -127,130 +127,6 @@ static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg } #endif -struct fsck_thread { - struct thread_with_stdio thr; - struct bch_fs *c; - struct bch_opts opts; -}; - -static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr) -{ - struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr); - kfree(thr); -} - -static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio) -{ - struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); - struct bch_fs *c = thr->c; - - int ret = PTR_ERR_OR_ZERO(c); - if (ret) - return ret; - - ret = bch2_fs_start(thr->c); - if (ret) - goto err; - - if (test_bit(BCH_FS_errors_fixed, &c->flags)) { - bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name); - ret |= 1; - } - if (test_bit(BCH_FS_error, &c->flags)) { - bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name); - ret |= 4; - } -err: - bch2_fs_stop(c); - return ret; -} - -static const struct thread_with_stdio_ops bch2_offline_fsck_ops = { - .exit = bch2_fsck_thread_exit, - .fn = bch2_fsck_offline_thread_fn, -}; - -static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg) -{ - struct bch_ioctl_fsck_offline arg; - struct fsck_thread *thr = NULL; - darray_str(devs) = {}; - long ret = 0; - - if (copy_from_user(&arg, user_arg, sizeof(arg))) - return -EFAULT; - - if (arg.flags) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - for (size_t i = 0; i < arg.nr_devs; i++) { - u64 dev_u64; - ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64)); - if (ret) - goto err; - - char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX); - ret = 
PTR_ERR_OR_ZERO(dev_str); - if (ret) - goto err; - - ret = darray_push(&devs, dev_str); - if (ret) { - kfree(dev_str); - goto err; - } - } - - thr = kzalloc(sizeof(*thr), GFP_KERNEL); - if (!thr) { - ret = -ENOMEM; - goto err; - } - - thr->opts = bch2_opts_empty(); - - if (arg.opts) { - char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); - ret = PTR_ERR_OR_ZERO(optstr) ?: - bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr); - if (!IS_ERR(optstr)) - kfree(optstr); - - if (ret) - goto err; - } - - opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio); - opt_set(thr->opts, read_only, 1); - opt_set(thr->opts, ratelimit_errors, 0); - - /* We need request_key() to be called before we punt to kthread: */ - opt_set(thr->opts, nostart, true); - - bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops); - - thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts); - - if (!IS_ERR(thr->c) && - thr->c->opts.errors == BCH_ON_ERROR_panic) - thr->c->opts.errors = BCH_ON_ERROR_ro; - - ret = __bch2_run_thread_with_stdio(&thr->thr); -out: - darray_for_each(devs, i) - kfree(*i); - darray_exit(&devs); - return ret; -err: - if (thr) - bch2_fsck_thread_exit(&thr->thr); - pr_err("ret %s", bch2_err_str(ret)); - goto out; -} - static long bch2_global_ioctl(unsigned cmd, void __user *arg) { long ret; @@ -775,99 +651,6 @@ static long bch2_ioctl_disk_resize_journal(struct bch_fs *c, return ret; } -static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio) -{ - struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); - struct bch_fs *c = thr->c; - - c->stdio_filter = current; - c->stdio = &thr->thr.stdio; - - /* - * XXX: can we figure out a way to do this without mucking with c->opts? - */ - unsigned old_fix_errors = c->opts.fix_errors; - if (opt_defined(thr->opts, fix_errors)) - c->opts.fix_errors = thr->opts.fix_errors; - else - c->opts.fix_errors = FSCK_FIX_ask; - - c->opts.fsck = true; - set_bit(BCH_FS_fsck_running, &c->flags); - - c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info; - int ret = bch2_run_online_recovery_passes(c); - - clear_bit(BCH_FS_fsck_running, &c->flags); - bch_err_fn(c, ret); - - c->stdio = NULL; - c->stdio_filter = NULL; - c->opts.fix_errors = old_fix_errors; - - up(&c->online_fsck_mutex); - bch2_ro_ref_put(c); - return ret; -} - -static const struct thread_with_stdio_ops bch2_online_fsck_ops = { - .exit = bch2_fsck_thread_exit, - .fn = bch2_fsck_online_thread_fn, -}; - -static long bch2_ioctl_fsck_online(struct bch_fs *c, - struct bch_ioctl_fsck_online arg) -{ - struct fsck_thread *thr = NULL; - long ret = 0; - - if (arg.flags) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (!bch2_ro_ref_tryget(c)) - return -EROFS; - - if (down_trylock(&c->online_fsck_mutex)) { - bch2_ro_ref_put(c); - return -EAGAIN; - } - - thr = kzalloc(sizeof(*thr), GFP_KERNEL); - if (!thr) { - ret = -ENOMEM; - goto err; - } - - thr->c = c; - thr->opts = bch2_opts_empty(); - - if (arg.opts) { - char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); - - ret = PTR_ERR_OR_ZERO(optstr) ?: - bch2_parse_mount_opts(c, &thr->opts, NULL, optstr); - if (!IS_ERR(optstr)) - kfree(optstr); - - if (ret) - goto err; - } - - ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops); -err: - if (ret < 0) { - bch_err_fn(c, ret); - if (thr) - bch2_fsck_thread_exit(&thr->thr); - up(&c->online_fsck_mutex); - bch2_ro_ref_put(c); - } - return ret; -} - #define BCH_IOCTL(_name, _argtype) \ do { \ 
_argtype i; \ diff --git a/libbcachefs/checksum.c b/libbcachefs/checksum.c index ce8fc677..23a38357 100644 --- a/libbcachefs/checksum.c +++ b/libbcachefs/checksum.c @@ -2,6 +2,7 @@ #include "bcachefs.h" #include "checksum.h" #include "errcode.h" +#include "error.h" #include "super.h" #include "super-io.h" @@ -252,6 +253,10 @@ int bch2_encrypt(struct bch_fs *c, unsigned type, if (!bch2_csum_type_is_encryption(type)) return 0; + if (bch2_fs_inconsistent_on(!c->chacha20, + c, "attempting to encrypt without encryption key")) + return -BCH_ERR_no_encryption_key; + return do_encrypt(c->chacha20, nonce, data, len); } @@ -337,8 +342,9 @@ int __bch2_encrypt_bio(struct bch_fs *c, unsigned type, size_t sgl_len = 0; int ret = 0; - if (!bch2_csum_type_is_encryption(type)) - return 0; + if (bch2_fs_inconsistent_on(!c->chacha20, + c, "attempting to encrypt without encryption key")) + return -BCH_ERR_no_encryption_key; darray_init(&sgl); diff --git a/libbcachefs/checksum.h b/libbcachefs/checksum.h index e40499fd..43b9d71f 100644 --- a/libbcachefs/checksum.h +++ b/libbcachefs/checksum.h @@ -109,7 +109,7 @@ int bch2_enable_encryption(struct bch_fs *, bool); void bch2_fs_encryption_exit(struct bch_fs *); int bch2_fs_encryption_init(struct bch_fs *); -static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type, +static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opt type, bool data) { switch (type) { diff --git a/libbcachefs/compress.c b/libbcachefs/compress.c index 1410365a..f99ff181 100644 --- a/libbcachefs/compress.c +++ b/libbcachefs/compress.c @@ -2,13 +2,33 @@ #include "bcachefs.h" #include "checksum.h" #include "compress.h" +#include "error.h" #include "extents.h" +#include "opts.h" #include "super-io.h" #include <linux/lz4.h> #include <linux/zlib.h> #include <linux/zstd.h> +static inline enum bch_compression_opts bch2_compression_type_to_opt(enum bch_compression_type type) +{ + switch (type) { + case BCH_COMPRESSION_TYPE_none: + case BCH_COMPRESSION_TYPE_incompressible: + return BCH_COMPRESSION_OPT_none; + case BCH_COMPRESSION_TYPE_lz4_old: + case BCH_COMPRESSION_TYPE_lz4: + return BCH_COMPRESSION_OPT_lz4; + case BCH_COMPRESSION_TYPE_gzip: + return BCH_COMPRESSION_OPT_gzip; + case BCH_COMPRESSION_TYPE_zstd: + return BCH_COMPRESSION_OPT_zstd; + default: + BUG(); + } +} + /* Bounce buffer: */ struct bbuf { void *b; @@ -158,6 +178,19 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src, void *workspace; int ret; + enum bch_compression_opts opt = bch2_compression_type_to_opt(crc.compression_type); + mempool_t *workspace_pool = &c->compress_workspace[opt]; + if (unlikely(!mempool_initialized(workspace_pool))) { + if (fsck_err(c, compression_type_not_marked_in_sb, + "compression type %s set but not marked in superblock", + __bch2_compression_types[crc.compression_type])) + ret = bch2_check_set_has_compressed_data(c, opt); + else + ret = -BCH_ERR_compression_workspace_not_initialized; + if (ret) + goto out; + } + src_data = bio_map_or_bounce(c, src, READ); switch (crc.compression_type) { @@ -176,13 +209,13 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src, .avail_out = dst_len, }; - workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS); + workspace = mempool_alloc(workspace_pool, GFP_NOFS); zlib_set_workspace(&strm, workspace); zlib_inflateInit2(&strm, -MAX_WBITS); ret = zlib_inflate(&strm, Z_FINISH); - mempool_free(workspace, &c->decompress_workspace); + mempool_free(workspace, workspace_pool); if (ret != Z_STREAM_END) goto err; @@ 
-195,14 +228,14 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src, if (real_src_len > src_len - 4) goto err; - workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS); + workspace = mempool_alloc(workspace_pool, GFP_NOFS); ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound()); ret = zstd_decompress_dctx(ctx, dst_data, dst_len, src_data.b + 4, real_src_len); - mempool_free(workspace, &c->decompress_workspace); + mempool_free(workspace, workspace_pool); if (ret != dst_len) goto err; @@ -212,6 +245,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src, BUG(); } ret = 0; +fsck_err: out: bio_unmap_or_unbounce(c, src_data); return ret; @@ -394,8 +428,21 @@ static unsigned __bio_compress(struct bch_fs *c, unsigned pad; int ret = 0; - BUG_ON(compression_type >= BCH_COMPRESSION_TYPE_NR); - BUG_ON(!mempool_initialized(&c->compress_workspace[compression_type])); + /* bch2_compression_decode catches unknown compression types: */ + BUG_ON(compression.type >= BCH_COMPRESSION_OPT_NR); + + mempool_t *workspace_pool = &c->compress_workspace[compression.type]; + if (unlikely(!mempool_initialized(workspace_pool))) { + if (fsck_err(c, compression_opt_not_marked_in_sb, + "compression opt %s set but not marked in superblock", + bch2_compression_opts[compression.type])) { + ret = bch2_check_set_has_compressed_data(c, compression.type); + if (ret) /* memory allocation failure, don't compress */ + return 0; + } else { + return 0; + } + } /* If it's only one block, don't bother trying to compress: */ if (src->bi_iter.bi_size <= c->opts.block_size) @@ -404,7 +451,7 @@ static unsigned __bio_compress(struct bch_fs *c, dst_data = bio_map_or_bounce(c, dst, WRITE); src_data = bio_map_or_bounce(c, src, READ); - workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOFS); + workspace = mempool_alloc(workspace_pool, GFP_NOFS); *src_len = src->bi_iter.bi_size; *dst_len = dst->bi_iter.bi_size; @@ -447,7 +494,7 @@ static unsigned __bio_compress(struct bch_fs *c, *src_len = round_down(*src_len, block_bytes(c)); } - mempool_free(workspace, &c->compress_workspace[compression_type]); + mempool_free(workspace, workspace_pool); if (ret) goto err; @@ -477,6 +524,9 @@ out: err: ret = BCH_COMPRESSION_TYPE_incompressible; goto out; +fsck_err: + ret = 0; + goto out; } unsigned bch2_bio_compress(struct bch_fs *c, @@ -559,7 +609,6 @@ void bch2_fs_compress_exit(struct bch_fs *c) { unsigned i; - mempool_exit(&c->decompress_workspace); for (i = 0; i < ARRAY_SIZE(c->compress_workspace); i++) mempool_exit(&c->compress_workspace[i]); mempool_exit(&c->compression_bounce[WRITE]); @@ -568,7 +617,6 @@ void bch2_fs_compress_exit(struct bch_fs *c) static int __bch2_fs_compress_init(struct bch_fs *c, u64 features) { - size_t decompress_workspace_size = 0; ZSTD_parameters params = zstd_get_params(zstd_max_clevel(), c->opts.encoded_extent_max); @@ -576,19 +624,17 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features) struct { unsigned feature; - enum bch_compression_type type; + enum bch_compression_opts type; size_t compress_workspace; - size_t decompress_workspace; } compression_types[] = { - { BCH_FEATURE_lz4, BCH_COMPRESSION_TYPE_lz4, - max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS), - 0 }, - { BCH_FEATURE_gzip, BCH_COMPRESSION_TYPE_gzip, - zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL), - zlib_inflate_workspacesize(), }, - { BCH_FEATURE_zstd, BCH_COMPRESSION_TYPE_zstd, - c->zstd_workspace_size, - zstd_dctx_workspace_bound() }, + { BCH_FEATURE_lz4, 
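/*
 * The standalone decompress_workspace mempool is gone: each
 * compression option now has one workspace pool sized for whichever of
 * its compress/decompress scratch needs is larger, and both
 * __bio_compress() and __bio_uncompress() draw from it. For gzip that
 * single size works out to:
 *
 *	max(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
 *	    zlib_inflate_workspacesize())
 *
 * If a pool was never set up because the superblock lacks the feature
 * bit, the new fsck_err paths mark the feature (or decline to
 * compress) instead of hitting the old BUG_ON on an uninitialized
 * mempool.
 */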
BCH_COMPRESSION_OPT_lz4, + max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS) }, + { BCH_FEATURE_gzip, BCH_COMPRESSION_OPT_gzip, + max(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL), + zlib_inflate_workspacesize()) }, + { BCH_FEATURE_zstd, BCH_COMPRESSION_OPT_zstd, + max(c->zstd_workspace_size, + zstd_dctx_workspace_bound()) }, }, *i; bool have_compressed = false; @@ -613,9 +659,6 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features) for (i = compression_types; i < compression_types + ARRAY_SIZE(compression_types); i++) { - decompress_workspace_size = - max(decompress_workspace_size, i->decompress_workspace); - if (!(features & (1 << i->feature))) continue; @@ -628,11 +671,6 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features) return -BCH_ERR_ENOMEM_compression_workspace_init; } - if (!mempool_initialized(&c->decompress_workspace) && - mempool_init_kvmalloc_pool(&c->decompress_workspace, - 1, decompress_workspace_size)) - return -BCH_ERR_ENOMEM_decompression_workspace_init; - return 0; } diff --git a/libbcachefs/data_update.c b/libbcachefs/data_update.c index a6ee0bee..31b2aeb0 100644 --- a/libbcachefs/data_update.c +++ b/libbcachefs/data_update.c @@ -110,11 +110,8 @@ static void trace_move_extent_fail2(struct data_update *m, { struct bch_fs *c = m->op.c; struct bkey_s_c old = bkey_i_to_s_c(m->k.k); - const union bch_extent_entry *entry; - struct bch_extent_ptr *ptr; - struct extent_ptr_decoded p; struct printbuf buf = PRINTBUF; - unsigned i, rewrites_found = 0; + unsigned rewrites_found = 0; if (!trace_move_extent_fail_enabled()) return; @@ -122,27 +119,25 @@ static void trace_move_extent_fail2(struct data_update *m, prt_str(&buf, msg); if (insert) { - i = 0; + const union bch_extent_entry *entry; + struct bch_extent_ptr *ptr; + struct extent_ptr_decoded p; + + unsigned ptr_bit = 1; bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) { - if (((1U << i) & m->data_opts.rewrite_ptrs) && + if ((ptr_bit & m->data_opts.rewrite_ptrs) && (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) && !ptr->cached) - rewrites_found |= 1U << i; - i++; + rewrites_found |= ptr_bit; + ptr_bit <<= 1; } } - prt_printf(&buf, "\nrewrite ptrs: %u%u%u%u", - (m->data_opts.rewrite_ptrs & (1 << 0)) != 0, - (m->data_opts.rewrite_ptrs & (1 << 1)) != 0, - (m->data_opts.rewrite_ptrs & (1 << 2)) != 0, - (m->data_opts.rewrite_ptrs & (1 << 3)) != 0); + prt_str(&buf, "rewrites found:\t"); + bch2_prt_u64_base2(&buf, rewrites_found); + prt_newline(&buf); - prt_printf(&buf, "\nrewrites found: %u%u%u%u", - (rewrites_found & (1 << 0)) != 0, - (rewrites_found & (1 << 1)) != 0, - (rewrites_found & (1 << 2)) != 0, - (rewrites_found & (1 << 3)) != 0); + bch2_data_update_opts_to_text(&buf, c, &m->op.opts, &m->data_opts); prt_str(&buf, "\nold: "); bch2_bkey_val_to_text(&buf, c, old); @@ -194,7 +189,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, struct bpos next_pos; bool should_check_enospc; s64 i_sectors_delta = 0, disk_sectors_delta = 0; - unsigned rewrites_found = 0, durability, i; + unsigned rewrites_found = 0, durability, ptr_bit; bch2_trans_begin(trans); @@ -231,15 +226,16 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, * * Fist, drop rewrite_ptrs from @new: */ - i = 0; + ptr_bit = 1; bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) { - if (((1U << i) & m->data_opts.rewrite_ptrs) && + if ((ptr_bit & m->data_opts.rewrite_ptrs) && (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) && !ptr->cached) { - 
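/*
 * These data_update hunks replace the "unsigned i" counter plus
 * "1U << i" shifts with a ptr_bit mask that walks left one bit per
 * pointer, so the mask arithmetic can't drift from the loop index.
 * The idiom, in brief:
 *
 *	unsigned ptr_bit = 1;
 *	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
 *		if (ptr_bit & m->data_opts.rewrite_ptrs)
 *			;	// this pointer is marked for rewrite
 *		ptr_bit <<= 1;
 *	}
 */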
bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr); - rewrites_found |= 1U << i; + bch2_extent_ptr_set_cached(c, &m->op.opts, + bkey_i_to_s(insert), ptr); + rewrites_found |= ptr_bit; } - i++; + ptr_bit <<= 1; } if (m->data_opts.rewrite_ptrs && @@ -284,7 +280,8 @@ restart_drop_extra_replicas: durability - ptr_durability >= m->op.opts.data_replicas) { durability -= ptr_durability; - bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr); + bch2_extent_ptr_set_cached(c, &m->op.opts, + bkey_i_to_s(insert), &entry->ptr); goto restart_drop_extra_replicas; } } @@ -295,7 +292,7 @@ restart_drop_extra_replicas: bch2_extent_ptr_decoded_append(insert, &p); bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 }); - bch2_extent_normalize(c, bkey_i_to_s(insert)); + bch2_extent_normalize_by_opts(c, &m->op.opts, bkey_i_to_s(insert)); ret = bch2_sum_sector_overwrites(trans, &iter, insert, &should_check_enospc, @@ -321,8 +318,11 @@ restart_drop_extra_replicas: * it's been hard to reproduce, so this should give us some more * information when it does occur: */ - int invalid = bch2_bkey_validate(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id), - BCH_VALIDATE_commit); + int invalid = bch2_bkey_validate(c, bkey_i_to_s_c(insert), + (struct bkey_validate_context) { + .btree = m->btree_id, + .flags = BCH_VALIDATE_commit, + }); if (invalid) { struct printbuf buf = PRINTBUF; @@ -360,7 +360,7 @@ restart_drop_extra_replicas: k.k->p, bkey_start_pos(&insert->k)) ?: bch2_insert_snapshot_whiteouts(trans, m->btree_id, k.k->p, insert->k.p) ?: - bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?: + bch2_bkey_set_needs_rebalance(c, &op->opts, insert) ?: bch2_trans_update(trans, &iter, insert, BTREE_UPDATE_internal_snapshot_node) ?: bch2_trans_commit(trans, &op->res, @@ -538,7 +538,7 @@ void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c, prt_newline(out); prt_str(out, "compression:\t"); - bch2_compression_opt_to_text(out, background_compression(*io_opts)); + bch2_compression_opt_to_text(out, io_opts->background_compression); prt_newline(out); prt_str(out, "opts.replicas:\t"); @@ -558,7 +558,8 @@ void bch2_data_update_to_text(struct printbuf *out, struct data_update *m) int bch2_extent_drop_ptrs(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, - struct data_update_opts data_opts) + struct bch_io_opts *io_opts, + struct data_update_opts *data_opts) { struct bch_fs *c = trans->c; struct bkey_i *n; @@ -569,11 +570,11 @@ int bch2_extent_drop_ptrs(struct btree_trans *trans, if (ret) return ret; - while (data_opts.kill_ptrs) { - unsigned i = 0, drop = __fls(data_opts.kill_ptrs); + while (data_opts->kill_ptrs) { + unsigned i = 0, drop = __fls(data_opts->kill_ptrs); bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, i++ == drop); - data_opts.kill_ptrs ^= 1U << drop; + data_opts->kill_ptrs ^= 1U << drop; } /* @@ -581,7 +582,7 @@ int bch2_extent_drop_ptrs(struct btree_trans *trans, * will do the appropriate thing with it (turning it into a * KEY_TYPE_error key, or just a discard if it was a cached extent) */ - bch2_extent_normalize(c, bkey_i_to_s(n)); + bch2_extent_normalize_by_opts(c, io_opts, bkey_i_to_s(n)); /* * Since we're not inserting through an extent iterator @@ -611,7 +612,7 @@ int bch2_data_update_init(struct btree_trans *trans, struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); const union bch_extent_entry *entry; struct extent_ptr_decoded p; - unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas; + unsigned reserve_sectors = k.k->size * 
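/*
 * bch2_bkey_validate() now takes a struct bkey_validate_context in
 * place of a (btree node type, validate flags) pair, so a failure can
 * report where the key was seen, not just that it was bad. Call sites
 * build it inline; the btree-node writer earlier in this commit fills
 * in everything:
 *
 *	(struct bkey_validate_context) {
 *		.from	= BKEY_VALIDATE_btree_node,
 *		.level	= b->c.level,
 *		.btree	= b->c.btree_id,
 *		.flags	= BCH_VALIDATE_commit,
 *	}
 *
 * while key-type validators receive it as "from" and test e.g.
 * from.flags & BCH_VALIDATE_commit (see the dirent and accounting
 * validators below).
 */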
data_opts.extra_replicas; int ret = 0; /* @@ -649,22 +650,22 @@ int bch2_data_update_init(struct btree_trans *trans, BCH_WRITE_DATA_ENCODED| BCH_WRITE_MOVE| m->data_opts.write_flags; - m->op.compression_opt = background_compression(io_opts); + m->op.compression_opt = io_opts.background_compression; m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK; unsigned durability_have = 0, durability_removing = 0; - i = 0; + unsigned ptr_bit = 1; bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { if (!p.ptr.cached) { rcu_read_lock(); - if (BIT(i) & m->data_opts.rewrite_ptrs) { + if (ptr_bit & m->data_opts.rewrite_ptrs) { if (crc_is_compressed(p.crc)) reserve_sectors += k.k->size; m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p); durability_removing += bch2_extent_ptr_desired_durability(c, &p); - } else if (!(BIT(i) & m->data_opts.kill_ptrs)) { + } else if (!(ptr_bit & m->data_opts.kill_ptrs)) { bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev); durability_have += bch2_extent_ptr_durability(c, &p); } @@ -684,7 +685,7 @@ int bch2_data_update_init(struct btree_trans *trans, if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible) m->op.incompressible = true; - i++; + ptr_bit <<= 1; } unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have)); @@ -720,7 +721,7 @@ int bch2_data_update_init(struct btree_trans *trans, m->data_opts.rewrite_ptrs = 0; /* if iter == NULL, it's just a promote */ if (iter) - ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts); + ret = bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &m->data_opts); goto out; } @@ -747,14 +748,14 @@ out: void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts) { struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - unsigned i = 0; + unsigned ptr_bit = 1; bkey_for_each_ptr(ptrs, ptr) { - if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) { - opts->kill_ptrs |= 1U << i; - opts->rewrite_ptrs ^= 1U << i; + if ((opts->rewrite_ptrs & ptr_bit) && ptr->cached) { + opts->kill_ptrs |= ptr_bit; + opts->rewrite_ptrs ^= ptr_bit; } - i++; + ptr_bit <<= 1; } } diff --git a/libbcachefs/data_update.h b/libbcachefs/data_update.h index 8d36365b..e4b50723 100644 --- a/libbcachefs/data_update.h +++ b/libbcachefs/data_update.h @@ -40,7 +40,8 @@ void bch2_data_update_read_done(struct data_update *, int bch2_extent_drop_ptrs(struct btree_trans *, struct btree_iter *, struct bkey_s_c, - struct data_update_opts); + struct bch_io_opts *, + struct data_update_opts *); void bch2_data_update_exit(struct data_update *); int bch2_data_update_init(struct btree_trans *, struct btree_iter *, diff --git a/libbcachefs/dirent.c b/libbcachefs/dirent.c index faffc98d..41813f9c 100644 --- a/libbcachefs/dirent.c +++ b/libbcachefs/dirent.c @@ -101,7 +101,7 @@ const struct bch_hash_desc bch2_dirent_hash_desc = { }; int bch2_dirent_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); struct qstr d_name = bch2_dirent_get_name(d); @@ -120,7 +120,7 @@ int bch2_dirent_validate(struct bch_fs *c, struct bkey_s_c k, * Check new keys don't exceed the max length * (older keys may be larger.) 
*/ - bkey_fsck_err_on((flags & BCH_VALIDATE_commit) && d_name.len > BCH_NAME_MAX, + bkey_fsck_err_on((from.flags & BCH_VALIDATE_commit) && d_name.len > BCH_NAME_MAX, c, dirent_name_too_long, "dirent name too big (%u > %u)", d_name.len, BCH_NAME_MAX); @@ -500,7 +500,7 @@ int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32 struct bkey_s_c k; int ret; - for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_dirents, + for_each_btree_key_max_norestart(trans, iter, BTREE_ID_dirents, SPOS(dir, 0, snapshot), POS(dir, U64_MAX), 0, k, ret) if (k.k->type == KEY_TYPE_dirent) { @@ -549,7 +549,7 @@ int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx) bch2_bkey_buf_init(&sk); int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_dirents, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_dirents, POS(inum.inum, ctx->pos), POS(inum.inum, U64_MAX), inum.subvol, 0, k, ({ diff --git a/libbcachefs/dirent.h b/libbcachefs/dirent.h index 53ad9966..362b3b2f 100644 --- a/libbcachefs/dirent.h +++ b/libbcachefs/dirent.h @@ -4,10 +4,10 @@ #include "str_hash.h" -enum bch_validate_flags; extern const struct bch_hash_desc bch2_dirent_hash_desc; -int bch2_dirent_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_dirent_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_dirent ((struct bkey_ops) { \ diff --git a/libbcachefs/disk_accounting.c b/libbcachefs/disk_accounting.c index 55a00018..77534838 100644 --- a/libbcachefs/disk_accounting.c +++ b/libbcachefs/disk_accounting.c @@ -127,14 +127,15 @@ static inline bool is_zero(char *start, char *end) #define field_end(p, member) (((void *) (&p.member)) + sizeof(p.member)) int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct disk_accounting_pos acc_k; bpos_to_disk_accounting_pos(&acc_k, k.k->p); void *end = &acc_k + 1; int ret = 0; - bkey_fsck_err_on(bversion_zero(k.k->bversion), + bkey_fsck_err_on((from.flags & BCH_VALIDATE_commit) && + bversion_zero(k.k->bversion), c, accounting_key_version_0, "accounting key with version=0"); @@ -700,11 +701,38 @@ int bch2_accounting_read(struct bch_fs *c) struct btree_trans *trans = bch2_trans_get(c); struct printbuf buf = PRINTBUF; - int ret = for_each_btree_key(trans, iter, - BTREE_ID_accounting, POS_MIN, + /* + * We might run more than once if we rewind to start topology repair or + * btree node scan - and those might cause us to get different results, + * so we can't just skip if we've already run. 
+ * + * Instead, zero out any accounting we have: + */ + percpu_down_write(&c->mark_lock); + darray_for_each(acc->k, e) + percpu_memset(e->v[0], 0, sizeof(u64) * e->nr_counters); + for_each_member_device(c, ca) + percpu_memset(ca->usage, 0, sizeof(*ca->usage)); + percpu_memset(c->usage, 0, sizeof(*c->usage)); + percpu_up_write(&c->mark_lock); + + struct btree_iter iter; + bch2_trans_iter_init(trans, &iter, BTREE_ID_accounting, POS_MIN, + BTREE_ITER_prefetch|BTREE_ITER_all_snapshots); + iter.flags &= ~BTREE_ITER_with_journal; + int ret = for_each_btree_key_continue(trans, iter, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({ - struct bkey u; - struct bkey_s_c k = bch2_btree_path_peek_slot_exact(btree_iter_path(trans, &iter), &u); + if (k.k->type != KEY_TYPE_accounting) + continue; + + struct disk_accounting_pos acc_k; + bpos_to_disk_accounting_pos(&acc_k, k.k->p); + if (!bch2_accounting_is_mem(acc_k)) { + struct disk_accounting_pos next = { .type = acc_k.type + 1 }; + bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next)); + continue; + } + accounting_read_key(trans, k); })); if (ret) @@ -716,6 +744,11 @@ int bch2_accounting_read(struct bch_fs *c) darray_for_each(*keys, i) { if (i->k->k.type == KEY_TYPE_accounting) { + struct disk_accounting_pos acc_k; + bpos_to_disk_accounting_pos(&acc_k, i->k->k.p); + if (!bch2_accounting_is_mem(acc_k)) + continue; + struct bkey_s_c k = bkey_i_to_s_c(i->k); unsigned idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), @@ -805,7 +838,7 @@ int bch2_accounting_read(struct bch_fs *c) break; case BCH_DISK_ACCOUNTING_dev_data_type: rcu_read_lock(); - struct bch_dev *ca = bch2_dev_rcu(c, k.dev_data_type.dev); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.dev_data_type.dev); if (ca) { struct bch_dev_usage_type __percpu *d = &ca->usage->d[k.dev_data_type.data_type]; percpu_u64_set(&d->buckets, v[0]); @@ -911,7 +944,7 @@ void bch2_verify_accounting_clean(struct bch_fs *c) break; case BCH_DISK_ACCOUNTING_dev_data_type: { rcu_read_lock(); - struct bch_dev *ca = bch2_dev_rcu(c, acc_k.dev_data_type.dev); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, acc_k.dev_data_type.dev); if (!ca) { rcu_read_unlock(); continue; diff --git a/libbcachefs/disk_accounting.h b/libbcachefs/disk_accounting.h index 6639535d..cb20f723 100644 --- a/libbcachefs/disk_accounting.h +++ b/libbcachefs/disk_accounting.h @@ -63,27 +63,32 @@ static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage static inline void bpos_to_disk_accounting_pos(struct disk_accounting_pos *acc, struct bpos p) { - acc->_pad = p; + BUILD_BUG_ON(sizeof(*acc) != sizeof(p)); + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - bch2_bpos_swab(&acc->_pad); + acc->_pad = p; +#else + memcpy_swab(acc, &p, sizeof(p)); #endif } -static inline struct bpos disk_accounting_pos_to_bpos(struct disk_accounting_pos *k) +static inline struct bpos disk_accounting_pos_to_bpos(struct disk_accounting_pos *acc) { - struct bpos ret = k->_pad; - + struct bpos p; #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - bch2_bpos_swab(&ret); + p = acc->_pad; +#else + memcpy_swab(&p, acc, sizeof(p)); #endif - return ret; + return p; } int bch2_disk_accounting_mod(struct btree_trans *, struct disk_accounting_pos *, s64 *, unsigned, bool); int bch2_mod_dev_cached_sectors(struct btree_trans *, unsigned, s64, bool); -int bch2_accounting_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_accounting_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void 
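/*
 * This conversion pair is the metadata version the commit is named
 * for (disk_accounting_big_endian): disk_accounting_pos is laid out so
 * that on big-endian hosts it is bit-identical to the bpos it's packed
 * into, while little-endian hosts reverse the bytes with
 * memcpy_swab(), giving the same on-disk byte order, and hence the
 * same btree key ordering, everywhere:
 *
 *	#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 *		acc->_pad = p;			// layouts already agree
 *	#else
 *		memcpy_swab(acc, &p, sizeof(p));	// reverse all bytes
 *	#endif
 *
 * with BUILD_BUG_ON(sizeof(*acc) != sizeof(p)) guarding the
 * equal-size assumption the copy relies on.
 */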
bch2_accounting_key_to_text(struct printbuf *, struct disk_accounting_pos *); void bch2_accounting_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); void bch2_accounting_swab(struct bkey_s); @@ -113,6 +118,11 @@ enum bch_accounting_mode { int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, enum bch_accounting_mode); void bch2_accounting_mem_gc(struct bch_fs *); +static inline bool bch2_accounting_is_mem(struct disk_accounting_pos acc) +{ + return acc.type != BCH_DISK_ACCOUNTING_inum; +} + /* * Update in memory counters so they match the btree update we're doing; called * from transaction commit path @@ -129,7 +139,7 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, EBUG_ON(gc && !acc->gc_running); - if (acc_k.type == BCH_DISK_ACCOUNTING_inum) + if (!bch2_accounting_is_mem(acc_k)) return 0; if (mode == BCH_ACCOUNTING_normal) { @@ -142,7 +152,7 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, break; case BCH_DISK_ACCOUNTING_dev_data_type: rcu_read_lock(); - struct bch_dev *ca = bch2_dev_rcu(c, acc_k.dev_data_type.dev); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, acc_k.dev_data_type.dev); if (ca) { this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].buckets, a.v->d[0]); this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].sectors, a.v->d[1]); diff --git a/libbcachefs/ec.c b/libbcachefs/ec.c index 43b4e43c..250e7389 100644 --- a/libbcachefs/ec.c +++ b/libbcachefs/ec.c @@ -26,6 +26,7 @@ #include "util.h" #include <linux/sort.h> +#include <linux/string_choices.h> #ifdef __KERNEL__ @@ -109,7 +110,7 @@ struct ec_bio { /* Stripes btree keys: */ int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { const struct bch_stripe *s = bkey_s_c_to_stripe(k).v; int ret = 0; @@ -129,7 +130,7 @@ int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k, "invalid csum granularity (%u >= 64)", s->csum_granularity_bits); - ret = bch2_bkey_ptrs_validate(c, k, flags); + ret = bch2_bkey_ptrs_validate(c, k, from); fsck_err: return ret; } @@ -732,7 +733,7 @@ static void ec_block_endio(struct bio *bio) ? BCH_MEMBER_ERROR_write : BCH_MEMBER_ERROR_read, "erasure coding %s error: %s", - bio_data_dir(bio) ? 
"write" : "read", + str_write_read(bio_data_dir(bio)), bch2_blk_status_to_str(bio->bi_status))) clear_bit(ec_bio->idx, ec_bio->buf->valid); @@ -1275,11 +1276,10 @@ static int ec_stripe_update_extent(struct btree_trans *trans, struct bch_dev *ca, struct bpos bucket, u8 gen, struct ec_stripe_buf *s, - struct bpos *bp_pos) + struct bkey_s_c_backpointer bp) { struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; struct bch_fs *c = trans->c; - struct bch_backpointer bp; struct btree_iter iter; struct bkey_s_c k; const struct bch_extent_ptr *ptr_c; @@ -1288,33 +1288,26 @@ static int ec_stripe_update_extent(struct btree_trans *trans, struct bkey_i *n; int ret, dev, block; - ret = bch2_get_next_backpointer(trans, ca, bucket, gen, - bp_pos, &bp, BTREE_ITER_cached); - if (ret) - return ret; - if (bpos_eq(*bp_pos, SPOS_MAX)) - return 0; - - if (bp.level) { + if (bp.v->level) { struct printbuf buf = PRINTBUF; struct btree_iter node_iter; struct btree *b; - b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp); + b = bch2_backpointer_get_node(trans, bp, &node_iter); bch2_trans_iter_exit(trans, &node_iter); if (!b) return 0; prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b); - bch2_backpointer_to_text(&buf, &bp); + bch2_bkey_val_to_text(&buf, c, bp.s_c); bch2_fs_inconsistent(c, "%s", buf.buf); printbuf_exit(&buf); return -EIO; } - k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_intent); + k = bch2_backpointer_get_key(trans, bp, &iter, BTREE_ITER_intent); ret = bkey_err(k); if (ret) return ret; @@ -1373,7 +1366,6 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b struct bch_fs *c = trans->c; struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; struct bch_extent_ptr ptr = v->ptrs[block]; - struct bpos bp_pos = POS_MIN; int ret = 0; struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev); @@ -1382,18 +1374,21 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr); - while (1) { - ret = commit_do(trans, NULL, NULL, - BCH_TRANS_COMMIT_no_check_rw| - BCH_TRANS_COMMIT_no_enospc, - ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s, &bp_pos)); - if (ret) - break; - if (bkey_eq(bp_pos, POS_MAX)) + ret = for_each_btree_key_max_commit(trans, bp_iter, BTREE_ID_backpointers, + bucket_pos_to_bp_start(ca, bucket_pos), + bucket_pos_to_bp_end(ca, bucket_pos), 0, bp_k, + NULL, NULL, + BCH_TRANS_COMMIT_no_check_rw| + BCH_TRANS_COMMIT_no_enospc, ({ + if (bkey_ge(bp_k.k->p, bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket_pos), 0))) break; - bp_pos = bpos_nosnap_successor(bp_pos); - } + if (bp_k.k->type != KEY_TYPE_backpointer) + continue; + + ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s, + bkey_s_c_to_backpointer(bp_k)); + })); bch2_dev_put(ca); return ret; @@ -1867,6 +1862,10 @@ __bch2_ec_stripe_head_get(struct btree_trans *trans, } h = ec_new_stripe_head_alloc(c, disk_label, algo, redundancy, watermark); + if (!h) { + h = ERR_PTR(-BCH_ERR_ENOMEM_stripe_head_alloc); + goto err; + } found: if (h->rw_devs_change_count != c->rw_devs_change_count) ec_stripe_head_devs_update(c, h); @@ -2303,7 +2302,7 @@ err: int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx) { return bch2_trans_run(c, - for_each_btree_key_upto_commit(trans, iter, + for_each_btree_key_max_commit(trans, iter, BTREE_ID_alloc, POS(dev_idx, 0), POS(dev_idx, U64_MAX), BTREE_ITER_intent, k, NULL, NULL, 0, ({ @@ -2466,11 +2465,9 @@ void bch2_fs_ec_exit(struct bch_fs *c) while (1) { 
mutex_lock(&c->ec_stripe_head_lock); - h = list_first_entry_or_null(&c->ec_stripe_head_list, - struct ec_stripe_head, list); - if (h) - list_del(&h->list); + h = list_pop_entry(&c->ec_stripe_head_list, struct ec_stripe_head, list); mutex_unlock(&c->ec_stripe_head_lock); + if (!h) break; diff --git a/libbcachefs/ec.h b/libbcachefs/ec.h index 43326370..583ca6a2 100644 --- a/libbcachefs/ec.h +++ b/libbcachefs/ec.h @@ -6,9 +6,8 @@ #include "buckets_types.h" #include "extents_types.h" -enum bch_validate_flags; - -int bch2_stripe_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_stripe_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_stripe_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned, diff --git a/libbcachefs/errcode.h b/libbcachefs/errcode.h index 26990ad5..47387f7d 100644 --- a/libbcachefs/errcode.h +++ b/libbcachefs/errcode.h @@ -54,7 +54,7 @@ x(ENOMEM, ENOMEM_compression_bounce_read_init) \ x(ENOMEM, ENOMEM_compression_bounce_write_init) \ x(ENOMEM, ENOMEM_compression_workspace_init) \ - x(ENOMEM, ENOMEM_decompression_workspace_init) \ + x(EIO, compression_workspace_not_initialized) \ x(ENOMEM, ENOMEM_bucket_gens) \ x(ENOMEM, ENOMEM_buckets_nouse) \ x(ENOMEM, ENOMEM_usage_init) \ @@ -83,6 +83,8 @@ x(ENOMEM, ENOMEM_fs_other_alloc) \ x(ENOMEM, ENOMEM_dev_alloc) \ x(ENOMEM, ENOMEM_disk_accounting) \ + x(ENOMEM, ENOMEM_stripe_head_alloc) \ + x(ENOMEM, ENOMEM_journal_read_bucket) \ x(ENOSPC, ENOSPC_disk_reservation) \ x(ENOSPC, ENOSPC_bucket_alloc) \ x(ENOSPC, ENOSPC_disk_label_add) \ @@ -114,6 +116,7 @@ x(ENOENT, ENOENT_dirent_doesnt_match_inode) \ x(ENOENT, ENOENT_dev_not_found) \ x(ENOENT, ENOENT_dev_idx_not_found) \ + x(ENOENT, ENOENT_inode_no_backpointer) \ x(ENOTEMPTY, ENOTEMPTY_dir_not_empty) \ x(ENOTEMPTY, ENOTEMPTY_subvol_not_empty) \ x(EEXIST, EEXIST_str_hash_set) \ @@ -146,6 +149,7 @@ x(BCH_ERR_transaction_restart, transaction_restart_split_race) \ x(BCH_ERR_transaction_restart, transaction_restart_write_buffer_flush) \ x(BCH_ERR_transaction_restart, transaction_restart_nested) \ + x(BCH_ERR_transaction_restart, transaction_restart_commit) \ x(0, no_btree_node) \ x(BCH_ERR_no_btree_node, no_btree_node_relock) \ x(BCH_ERR_no_btree_node, no_btree_node_upgrade) \ @@ -191,7 +195,6 @@ x(EINVAL, opt_parse_error) \ x(EINVAL, remove_with_metadata_missing_unimplemented)\ x(EINVAL, remove_would_lose_data) \ - x(EINVAL, btree_iter_with_journal_not_supported) \ x(EROFS, erofs_trans_commit) \ x(EROFS, erofs_no_writes) \ x(EROFS, erofs_journal_err) \ @@ -223,6 +226,7 @@ x(BCH_ERR_invalid_sb_layout, invalid_sb_layout_type) \ x(BCH_ERR_invalid_sb_layout, invalid_sb_layout_nr_superblocks) \ x(BCH_ERR_invalid_sb_layout, invalid_sb_layout_superblocks_overlap) \ + x(BCH_ERR_invalid_sb_layout, invalid_sb_layout_sb_max_size_bits) \ x(BCH_ERR_invalid_sb, invalid_sb_members_missing) \ x(BCH_ERR_invalid_sb, invalid_sb_members) \ x(BCH_ERR_invalid_sb, invalid_sb_disk_groups) \ @@ -239,7 +243,10 @@ x(BCH_ERR_invalid_sb, invalid_sb_downgrade) \ x(BCH_ERR_invalid, invalid_bkey) \ x(BCH_ERR_operation_blocked, nocow_lock_blocked) \ + x(EIO, journal_shutdown) \ + x(EIO, journal_flush_err) \ x(EIO, btree_node_read_err) \ + x(BCH_ERR_btree_node_read_err, btree_node_read_err_cached) \ x(EIO, sb_not_downgraded) \ x(EIO, btree_node_write_all_failed) \ x(EIO, btree_node_read_error) \ @@ -255,6 +262,7 @@ x(EIO, no_device_to_read_from) \ x(EIO, missing_indirect_extent) \ 
x(EIO, invalidate_stripe_to_dev) \ + x(EIO, no_encryption_key) \ x(BCH_ERR_btree_node_read_err, btree_node_read_err_fixable) \ x(BCH_ERR_btree_node_read_err, btree_node_read_err_want_retry) \ x(BCH_ERR_btree_node_read_err, btree_node_read_err_must_retry) \ diff --git a/libbcachefs/error.c b/libbcachefs/error.c index b679def8..7af5c594 100644 --- a/libbcachefs/error.c +++ b/libbcachefs/error.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" +#include "btree_cache.h" #include "btree_iter.h" #include "error.h" +#include "fs-common.h" #include "journal.h" #include "recovery_passes.h" #include "super.h" @@ -33,7 +35,7 @@ bool bch2_inconsistent_error(struct bch_fs *c) int bch2_topology_error(struct bch_fs *c) { set_bit(BCH_FS_topology_error, &c->flags); - if (!test_bit(BCH_FS_fsck_running, &c->flags)) { + if (!test_bit(BCH_FS_recovery_running, &c->flags)) { bch2_inconsistent_error(c); return -BCH_ERR_btree_need_topology_repair; } else { @@ -218,6 +220,30 @@ static const u8 fsck_flags_extra[] = { #undef x }; +static int do_fsck_ask_yn(struct bch_fs *c, + struct btree_trans *trans, + struct printbuf *question, + const char *action) +{ + prt_str(question, ", "); + prt_str(question, action); + + if (bch2_fs_stdio_redirect(c)) + bch2_print(c, "%s", question->buf); + else + bch2_print_string_as_lines(KERN_ERR, question->buf); + + int ask = bch2_fsck_ask_yn(c, trans); + + if (trans) { + int ret = bch2_trans_relock(trans); + if (ret) + return ret; + } + + return ask; +} + int __bch2_fsck_err(struct bch_fs *c, struct btree_trans *trans, enum bch_fsck_flags flags, @@ -226,7 +252,7 @@ int __bch2_fsck_err(struct bch_fs *c, { struct fsck_err_state *s = NULL; va_list args; - bool print = true, suppressing = false, inconsistent = false; + bool print = true, suppressing = false, inconsistent = false, exiting = false; struct printbuf buf = PRINTBUF, *out = &buf; int ret = -BCH_ERR_fsck_ignore; const char *action_orig = "fix?", *action = action_orig; @@ -256,9 +282,10 @@ int __bch2_fsck_err(struct bch_fs *c, !trans && bch2_current_has_btree_trans(c)); - if ((flags & FSCK_CAN_FIX) && - test_bit(err, c->sb.errors_silent)) - return -BCH_ERR_fsck_fix; + if (test_bit(err, c->sb.errors_silent)) + return flags & FSCK_CAN_FIX + ? 
-BCH_ERR_fsck_fix + : -BCH_ERR_fsck_ignore; bch2_sb_error_count(c, err); @@ -289,16 +316,14 @@ int __bch2_fsck_err(struct bch_fs *c, */ if (s->last_msg && !strcmp(buf.buf, s->last_msg)) { ret = s->ret; - mutex_unlock(&c->fsck_error_msgs_lock); - goto err; + goto err_unlock; } kfree(s->last_msg); s->last_msg = kstrdup(buf.buf, GFP_KERNEL); if (!s->last_msg) { - mutex_unlock(&c->fsck_error_msgs_lock); ret = -ENOMEM; - goto err; + goto err_unlock; } if (c->opts.ratelimit_errors && @@ -318,13 +343,19 @@ int __bch2_fsck_err(struct bch_fs *c, prt_printf(out, bch2_log_msg(c, "")); #endif - if ((flags & FSCK_CAN_FIX) && - (flags & FSCK_AUTOFIX) && + if ((flags & FSCK_AUTOFIX) && (c->opts.errors == BCH_ON_ERROR_continue || c->opts.errors == BCH_ON_ERROR_fix_safe)) { prt_str(out, ", "); - prt_actioning(out, action); - ret = -BCH_ERR_fsck_fix; + if (flags & FSCK_CAN_FIX) { + prt_actioning(out, action); + ret = -BCH_ERR_fsck_fix; + } else { + prt_str(out, ", continuing"); + ret = -BCH_ERR_fsck_ignore; + } + + goto print; } else if (!test_bit(BCH_FS_fsck_running, &c->flags)) { if (c->opts.errors != BCH_ON_ERROR_continue || !(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) { @@ -348,31 +379,18 @@ int __bch2_fsck_err(struct bch_fs *c, : c->opts.fix_errors; if (fix == FSCK_FIX_ask) { - prt_str(out, ", "); - prt_str(out, action); - - if (bch2_fs_stdio_redirect(c)) - bch2_print(c, "%s", out->buf); - else - bch2_print_string_as_lines(KERN_ERR, out->buf); print = false; - int ask = bch2_fsck_ask_yn(c, trans); - - if (trans) { - ret = bch2_trans_relock(trans); - if (ret) { - mutex_unlock(&c->fsck_error_msgs_lock); - goto err; - } - } + ret = do_fsck_ask_yn(c, trans, out, action); + if (ret < 0) + goto err_unlock; - if (ask >= YN_ALLNO && s) - s->fix = ask == YN_ALLNO + if (ret >= YN_ALLNO && s) + s->fix = ret == YN_ALLNO ? FSCK_FIX_no : FSCK_FIX_yes; - ret = ask & 1 + ret = ret & 1 ? 
-BCH_ERR_fsck_fix : -BCH_ERR_fsck_ignore; } else if (fix == FSCK_FIX_yes || @@ -385,9 +403,7 @@ int __bch2_fsck_err(struct bch_fs *c, prt_str(out, ", not "); prt_actioning(out, action); } - } else if (flags & FSCK_NEED_FSCK) { - prt_str(out, " (run fsck to correct)"); - } else { + } else if (!(flags & FSCK_CAN_IGNORE)) { prt_str(out, " (repair unimplemented)"); } @@ -396,14 +412,13 @@ int __bch2_fsck_err(struct bch_fs *c, !(flags & FSCK_CAN_IGNORE))) ret = -BCH_ERR_fsck_errors_not_fixed; - bool exiting = - test_bit(BCH_FS_fsck_running, &c->flags) && - (ret != -BCH_ERR_fsck_fix && - ret != -BCH_ERR_fsck_ignore); - - if (exiting) + if (test_bit(BCH_FS_fsck_running, &c->flags) && + (ret != -BCH_ERR_fsck_fix && + ret != -BCH_ERR_fsck_ignore)) { + exiting = true; print = true; - + } +print: if (print) { if (bch2_fs_stdio_redirect(c)) bch2_print(c, "%s\n", out->buf); @@ -419,17 +434,24 @@ int __bch2_fsck_err(struct bch_fs *c, if (s) s->ret = ret; - mutex_unlock(&c->fsck_error_msgs_lock); - if (inconsistent) bch2_inconsistent_error(c); - if (ret == -BCH_ERR_fsck_fix) { - set_bit(BCH_FS_errors_fixed, &c->flags); - } else { - set_bit(BCH_FS_errors_not_fixed, &c->flags); - set_bit(BCH_FS_error, &c->flags); + /* + * We don't yet track whether the filesystem currently has errors, for + * log_fsck_err()s: that would require us to track for every error type + * which recovery pass corrects it, to get the fsck exit status correct: + */ + if (flags & FSCK_CAN_FIX) { + if (ret == -BCH_ERR_fsck_fix) { + set_bit(BCH_FS_errors_fixed, &c->flags); + } else { + set_bit(BCH_FS_errors_not_fixed, &c->flags); + set_bit(BCH_FS_error, &c->flags); + } } +err_unlock: + mutex_unlock(&c->fsck_error_msgs_lock); err: if (action != action_orig) kfree(action); @@ -437,28 +459,47 @@ err: return ret; } +static const char * const bch2_bkey_validate_contexts[] = { +#define x(n) #n, + BKEY_VALIDATE_CONTEXTS() +#undef x + NULL +}; + int __bch2_bkey_fsck_err(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags validate_flags, + struct bkey_validate_context from, enum bch_sb_error_id err, const char *fmt, ...) 
{ - if (validate_flags & BCH_VALIDATE_silent) + if (from.flags & BCH_VALIDATE_silent) return -BCH_ERR_fsck_delete_bkey; unsigned fsck_flags = 0; - if (!(validate_flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit))) + if (!(from.flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit))) { + //if (test_bit(err, c->sb.errors_silent)) + // return -BCH_ERR_fsck_delete_bkey; + fsck_flags |= FSCK_AUTOFIX|FSCK_CAN_FIX; + } + if (!WARN_ON(err >= ARRAY_SIZE(fsck_flags_extra))) + fsck_flags |= fsck_flags_extra[err]; struct printbuf buf = PRINTBUF; - va_list args; - prt_str(&buf, "invalid bkey "); + prt_printf(&buf, "invalid bkey in %s btree=", + bch2_bkey_validate_contexts[from.from]); + bch2_btree_id_to_text(&buf, from.btree); + prt_printf(&buf, " level=%u: ", from.level); + bch2_bkey_val_to_text(&buf, c, k); prt_str(&buf, "\n "); + + va_list args; va_start(args, fmt); prt_vprintf(&buf, fmt, args); va_end(args); + prt_str(&buf, ": delete?"); int ret = __bch2_fsck_err(c, NULL, fsck_flags, err, "%s", buf.buf); @@ -483,3 +524,36 @@ void bch2_flush_fsck_errs(struct bch_fs *c) mutex_unlock(&c->fsck_error_msgs_lock); } + +int bch2_inum_err_msg_trans(struct btree_trans *trans, struct printbuf *out, subvol_inum inum) +{ + u32 restart_count = trans->restart_count; + int ret = 0; + + /* XXX: we don't yet attempt to print paths when we don't know the subvol */ + if (inum.subvol) + ret = lockrestart_do(trans, bch2_inum_to_path(trans, inum, out)); + if (!inum.subvol || ret) + prt_printf(out, "inum %llu:%llu", inum.subvol, inum.inum); + + return trans_was_restarted(trans, restart_count); +} + +int bch2_inum_offset_err_msg_trans(struct btree_trans *trans, struct printbuf *out, + subvol_inum inum, u64 offset) +{ + int ret = bch2_inum_err_msg_trans(trans, out, inum); + prt_printf(out, " offset %llu: ", offset); + return ret; +} + +void bch2_inum_err_msg(struct bch_fs *c, struct printbuf *out, subvol_inum inum) +{ + bch2_trans_run(c, bch2_inum_err_msg_trans(trans, out, inum)); +} + +void bch2_inum_offset_err_msg(struct bch_fs *c, struct printbuf *out, + subvol_inum inum, u64 offset) +{ + bch2_trans_run(c, bch2_inum_offset_err_msg_trans(trans, out, inum, offset)); +} diff --git a/libbcachefs/error.h b/libbcachefs/error.h index 6551ada9..7acf2a27 100644 --- a/libbcachefs/error.h +++ b/libbcachefs/error.h @@ -45,32 +45,11 @@ int bch2_topology_error(struct bch_fs *); bch2_inconsistent_error(c); \ }) -#define bch2_fs_inconsistent_on(cond, c, ...) \ +#define bch2_fs_inconsistent_on(cond, ...) \ ({ \ bool _ret = unlikely(!!(cond)); \ - \ - if (_ret) \ - bch2_fs_inconsistent(c, __VA_ARGS__); \ - _ret; \ -}) - -/* - * Later we might want to mark only the particular device inconsistent, not the - * entire filesystem: - */ - -#define bch2_dev_inconsistent(ca, ...) \ -do { \ - bch_err(ca, __VA_ARGS__); \ - bch2_inconsistent_error((ca)->fs); \ -} while (0) - -#define bch2_dev_inconsistent_on(cond, ca, ...) \ -({ \ - bool _ret = unlikely(!!(cond)); \ - \ if (_ret) \ - bch2_dev_inconsistent(ca, __VA_ARGS__); \ + bch2_fs_inconsistent(__VA_ARGS__); \ _ret; \ }) @@ -123,9 +102,9 @@ int __bch2_fsck_err(struct bch_fs *, struct btree_trans *, void bch2_flush_fsck_errs(struct bch_fs *); -#define __fsck_err(c, _flags, _err_type, ...) 
\ +#define fsck_err_wrap(_do) \ ({ \ - int _ret = bch2_fsck_err(c, _flags, _err_type, __VA_ARGS__); \ + int _ret = _do; \ if (_ret != -BCH_ERR_fsck_fix && \ _ret != -BCH_ERR_fsck_ignore) { \ ret = _ret; \ @@ -135,6 +114,8 @@ void bch2_flush_fsck_errs(struct bch_fs *); _ret == -BCH_ERR_fsck_fix; \ }) +#define __fsck_err(...) fsck_err_wrap(bch2_fsck_err(__VA_ARGS__)) + /* These macros return true if error should be fixed: */ /* XXX: mark in superblock that filesystem contains errors, if we ignore: */ @@ -149,12 +130,6 @@ void bch2_flush_fsck_errs(struct bch_fs *); (unlikely(cond) ? __fsck_err(c, _flags, _err_type, __VA_ARGS__) : false);\ }) -#define need_fsck_err_on(cond, c, _err_type, ...) \ - __fsck_err_on(cond, c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__) - -#define need_fsck_err(c, _err_type, ...) \ - __fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__) - #define mustfix_fsck_err(c, _err_type, ...) \ __fsck_err(c, FSCK_CAN_FIX, _err_type, __VA_ARGS__) @@ -167,11 +142,22 @@ void bch2_flush_fsck_errs(struct bch_fs *); #define fsck_err_on(cond, c, _err_type, ...) \ __fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__) +#define log_fsck_err(c, _err_type, ...) \ + __fsck_err(c, FSCK_CAN_IGNORE, _err_type, __VA_ARGS__) + +#define log_fsck_err_on(cond, ...) \ +({ \ + bool _ret = unlikely(!!(cond)); \ + if (_ret) \ + log_fsck_err(__VA_ARGS__); \ + _ret; \ +}) + enum bch_validate_flags; __printf(5, 6) int __bch2_bkey_fsck_err(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags, + struct bkey_validate_context from, enum bch_sb_error_id, const char *, ...); @@ -181,7 +167,7 @@ int __bch2_bkey_fsck_err(struct bch_fs *, */ #define bkey_fsck_err(c, _err_type, _err_msg, ...) \ do { \ - int _ret = __bch2_bkey_fsck_err(c, k, flags, \ + int _ret = __bch2_bkey_fsck_err(c, k, from, \ BCH_FSCK_ERR_##_err_type, \ _err_msg, ##__VA_ARGS__); \ if (_ret != -BCH_ERR_fsck_fix && \ @@ -252,4 +238,10 @@ void bch2_io_error(struct bch_dev *, enum bch_member_error_type); _ret; \ }) +int bch2_inum_err_msg_trans(struct btree_trans *, struct printbuf *, subvol_inum); +int bch2_inum_offset_err_msg_trans(struct btree_trans *, struct printbuf *, subvol_inum, u64); + +void bch2_inum_err_msg(struct bch_fs *, struct printbuf *, subvol_inum); +void bch2_inum_offset_err_msg(struct bch_fs *, struct printbuf *, subvol_inum, u64); + #endif /* _BCACHEFS_ERROR_H */ diff --git a/libbcachefs/extent_update.c b/libbcachefs/extent_update.c index 5f4fecb3..6aac579a 100644 --- a/libbcachefs/extent_update.c +++ b/libbcachefs/extent_update.c @@ -64,7 +64,7 @@ static int count_iters_for_insert(struct btree_trans *trans, break; case KEY_TYPE_reflink_p: { struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); - u64 idx = le64_to_cpu(p.v->idx); + u64 idx = REFLINK_P_IDX(p.v); unsigned sectors = bpos_min(*end, p.k->p).offset - bkey_start_offset(p.k); struct btree_iter iter; @@ -128,7 +128,7 @@ int bch2_extent_atomic_end(struct btree_trans *trans, bch2_trans_copy_iter(©, iter); - for_each_btree_key_upto_continue_norestart(copy, insert->k.p, 0, k, ret) { + for_each_btree_key_max_continue_norestart(copy, insert->k.p, 0, k, ret) { unsigned offset = 0; if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) diff --git a/libbcachefs/extents.c b/libbcachefs/extents.c index b7b5ea9c..2fc9ace5 100644 --- a/libbcachefs/extents.c +++ b/libbcachefs/extents.c @@ -21,6 +21,7 @@ #include "extents.h" #include "inode.h" #include "journal.h" +#include "rebalance.h" #include "replicas.h" #include 
"super.h" #include "super-io.h" @@ -88,6 +89,14 @@ static inline bool ptr_better(struct bch_fs *c, u64 l1 = dev_latency(c, p1.ptr.dev); u64 l2 = dev_latency(c, p2.ptr.dev); + /* + * Square the latencies, to bias more in favor of the faster + * device - we never want to stop issuing reads to the slower + * device altogether, so that we can update our latency numbers: + */ + l1 *= l1; + l2 *= l2; + /* Pick at random, biased in favor of the faster device: */ return bch2_rand_range(l1 + l2) > l1; @@ -169,7 +178,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k, /* KEY_TYPE_btree_ptr: */ int bch2_btree_ptr_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -177,7 +186,7 @@ int bch2_btree_ptr_validate(struct bch_fs *c, struct bkey_s_c k, c, btree_ptr_val_too_big, "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX); - ret = bch2_bkey_ptrs_validate(c, k, flags); + ret = bch2_bkey_ptrs_validate(c, k, from); fsck_err: return ret; } @@ -189,7 +198,7 @@ void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c, } int bch2_btree_ptr_v2_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k); int ret = 0; @@ -203,13 +212,13 @@ int bch2_btree_ptr_v2_validate(struct bch_fs *c, struct bkey_s_c k, c, btree_ptr_v2_min_key_bad, "min_key > key"); - if ((flags & BCH_VALIDATE_write) && + if ((from.flags & BCH_VALIDATE_write) && c->sb.version_min >= bcachefs_metadata_version_btree_ptr_sectors_written) bkey_fsck_err_on(!bp.v->sectors_written, c, btree_ptr_v2_written_0, "sectors_written == 0"); - ret = bch2_bkey_ptrs_validate(c, k, flags); + ret = bch2_bkey_ptrs_validate(c, k, from); fsck_err: return ret; } @@ -396,7 +405,7 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r) /* KEY_TYPE_reservation: */ int bch2_reservation_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k); int ret = 0; @@ -979,31 +988,54 @@ bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bke return NULL; } -void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr) +static bool want_cached_ptr(struct bch_fs *c, struct bch_io_opts *opts, + struct bch_extent_ptr *ptr) +{ + if (!opts->promote_target || + !bch2_dev_in_target(c, ptr->dev, opts->promote_target)) + return false; + + struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev); + + return ca && bch2_dev_is_readable(ca) && !dev_ptr_stale_rcu(ca, ptr); +} + +void bch2_extent_ptr_set_cached(struct bch_fs *c, + struct bch_io_opts *opts, + struct bkey_s k, + struct bch_extent_ptr *ptr) { struct bkey_ptrs ptrs = bch2_bkey_ptrs(k); union bch_extent_entry *entry; - union bch_extent_entry *ec = NULL; + struct extent_ptr_decoded p; - bkey_extent_entry_for_each(ptrs, entry) { + rcu_read_lock(); + if (!want_cached_ptr(c, opts, ptr)) { + bch2_bkey_drop_ptr_noerror(k, ptr); + goto out; + } + + /* + * Stripes can't contain cached data, for - reasons. + * + * Possibly something we can fix in the future? 
+ */ + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) if (&entry->ptr == ptr) { - ptr->cached = true; - if (ec) - extent_entry_drop(k, ec); - return; + if (p.has_ec) + bch2_bkey_drop_ptr_noerror(k, ptr); + else + ptr->cached = true; + goto out; } - if (extent_entry_is_stripe_ptr(entry)) - ec = entry; - else if (extent_entry_is_ptr(entry)) - ec = NULL; - } - BUG(); +out: + rcu_read_unlock(); } /* - * bch_extent_normalize - clean up an extent, dropping stale pointers etc. + * bch2_extent_normalize - clean up an extent, dropping stale pointers etc. * * Returns true if @k should be dropped entirely * @@ -1017,8 +1049,39 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k) rcu_read_lock(); bch2_bkey_drop_ptrs(k, ptr, ptr->cached && - (ca = bch2_dev_rcu(c, ptr->dev)) && - dev_ptr_stale_rcu(ca, ptr) > 0); + (!(ca = bch2_dev_rcu(c, ptr->dev)) || + dev_ptr_stale_rcu(ca, ptr) > 0)); + rcu_read_unlock(); + + return bkey_deleted(k.k); +} + +/* + * bch2_extent_normalize_by_opts - clean up an extent, dropping stale pointers etc. + * + * Like bch2_extent_normalize(), but also only keeps a single cached pointer on + * the promote target. + */ +bool bch2_extent_normalize_by_opts(struct bch_fs *c, + struct bch_io_opts *opts, + struct bkey_s k) +{ + struct bkey_ptrs ptrs; + bool have_cached_ptr; + + rcu_read_lock(); +restart_drop_ptrs: + ptrs = bch2_bkey_ptrs(k); + have_cached_ptr = false; + + bkey_for_each_ptr(ptrs, ptr) + if (ptr->cached) { + if (have_cached_ptr || !want_cached_ptr(c, opts, ptr)) { + bch2_bkey_drop_ptr(k, ptr); + goto restart_drop_ptrs; + } + have_cached_ptr = true; + } rcu_read_unlock(); return bkey_deleted(k.k); @@ -1067,6 +1130,57 @@ void bch2_extent_crc_unpacked_to_text(struct printbuf *out, struct bch_extent_cr bch2_prt_compression_type(out, crc->compression_type); } +static void bch2_extent_rebalance_to_text(struct printbuf *out, struct bch_fs *c, + const struct bch_extent_rebalance *r) +{ + prt_str(out, "rebalance:"); + + prt_printf(out, " replicas=%u", r->data_replicas); + if (r->data_replicas_from_inode) + prt_str(out, " (inode)"); + + prt_str(out, " checksum="); + bch2_prt_csum_opt(out, r->data_checksum); + if (r->data_checksum_from_inode) + prt_str(out, " (inode)"); + + if (r->background_compression || r->background_compression_from_inode) { + prt_str(out, " background_compression="); + bch2_compression_opt_to_text(out, r->background_compression); + + if (r->background_compression_from_inode) + prt_str(out, " (inode)"); + } + + if (r->background_target || r->background_target_from_inode) { + prt_str(out, " background_target="); + if (c) + bch2_target_to_text(out, c, r->background_target); + else + prt_printf(out, "%u", r->background_target); + + if (r->background_target_from_inode) + prt_str(out, " (inode)"); + } + + if (r->promote_target || r->promote_target_from_inode) { + prt_str(out, " promote_target="); + if (c) + bch2_target_to_text(out, c, r->promote_target); + else + prt_printf(out, "%u", r->promote_target); + + if (r->promote_target_from_inode) + prt_str(out, " (inode)"); + } + + if (r->erasure_code || r->erasure_code_from_inode) { + prt_printf(out, " ec=%u", r->erasure_code); + if (r->erasure_code_from_inode) + prt_str(out, " (inode)"); + } +} + void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) { @@ -1102,18 +1216,10 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c, (u64) ec->idx, ec->block); break; } - case BCH_EXTENT_ENTRY_rebalance: { - const struct bch_extent_rebalance *r = &entry->rebalance; - - 
prt_str(out, "rebalance: target "); - if (c) - bch2_target_to_text(out, c, r->target); - else - prt_printf(out, "%u", r->target); - prt_str(out, " compression "); - bch2_compression_opt_to_text(out, r->compression); + case BCH_EXTENT_ENTRY_rebalance: + bch2_extent_rebalance_to_text(out, c, &entry->rebalance); break; - } + default: prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry)); return; @@ -1125,7 +1231,7 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c, static int extent_ptr_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags, + struct bkey_validate_context from, const struct bch_extent_ptr *ptr, unsigned size_ondisk, bool metadata) @@ -1168,7 +1274,7 @@ fsck_err: } int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); const union bch_extent_entry *entry; @@ -1195,7 +1301,7 @@ int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k, switch (extent_entry_type(entry)) { case BCH_EXTENT_ENTRY_ptr: - ret = extent_ptr_validate(c, k, flags, &entry->ptr, size_ondisk, false); + ret = extent_ptr_validate(c, k, from, &entry->ptr, size_ondisk, false); if (ret) return ret; @@ -1217,9 +1323,6 @@ int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k, case BCH_EXTENT_ENTRY_crc128: crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry)); - bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, - c, ptr_crc_uncompressed_size_too_small, - "checksum offset + key size > uncompressed size"); bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, ptr_crc_csum_type_unknown, "invalid checksum type"); @@ -1227,6 +1330,19 @@ int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k, c, ptr_crc_compression_type_unknown, "invalid compression type"); + bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, + c, ptr_crc_uncompressed_size_too_small, + "checksum offset + key size > uncompressed size"); + bkey_fsck_err_on(crc_is_encoded(crc) && + (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) && + (from.flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)), + c, ptr_crc_uncompressed_size_too_big, + "too large encoded extent"); + bkey_fsck_err_on(!crc_is_compressed(crc) && + crc.compressed_size != crc.uncompressed_size, + c, ptr_crc_uncompressed_size_mismatch, + "not compressed but compressed != uncompressed size"); + if (bch2_csum_type_is_encryption(crc.csum_type)) { if (nonce == UINT_MAX) nonce = crc.offset + crc.nonce; @@ -1240,12 +1356,6 @@ int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k, "redundant crc entry"); crc_since_last_ptr = true; - bkey_fsck_err_on(crc_is_encoded(crc) && - (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) && - (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)), - c, ptr_crc_uncompressed_size_too_big, - "too large encoded extent"); - size_ondisk = crc.compressed_size; break; case BCH_EXTENT_ENTRY_stripe_ptr: @@ -1311,7 +1421,7 @@ void bch2_ptr_swab(struct bkey_s k) for (entry = ptrs.start; entry < ptrs.end; entry = extent_entry_next(entry)) { - switch (extent_entry_type(entry)) { + switch (__extent_entry_type(entry)) { case BCH_EXTENT_ENTRY_ptr: break; case BCH_EXTENT_ENTRY_crc32: @@ -1331,170 +1441,13 @@ void bch2_ptr_swab(struct bkey_s k) break; case BCH_EXTENT_ENTRY_rebalance: break; + default: + /* Bad entry type: will be caught by validate() */ + return; } } } -const struct bch_extent_rebalance 
*bch2_bkey_rebalance_opts(struct bkey_s_c k) -{ - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - const union bch_extent_entry *entry; - - bkey_extent_entry_for_each(ptrs, entry) - if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance) - return &entry->rebalance; - - return NULL; -} - -unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k, - unsigned target, unsigned compression) -{ - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - unsigned rewrite_ptrs = 0; - - if (compression) { - unsigned compression_type = bch2_compression_opt_to_type(compression); - const union bch_extent_entry *entry; - struct extent_ptr_decoded p; - unsigned i = 0; - - bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { - if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible || - p.ptr.unwritten) { - rewrite_ptrs = 0; - goto incompressible; - } - - if (!p.ptr.cached && p.crc.compression_type != compression_type) - rewrite_ptrs |= 1U << i; - i++; - } - } -incompressible: - if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) { - unsigned i = 0; - - bkey_for_each_ptr(ptrs, ptr) { - if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target)) - rewrite_ptrs |= 1U << i; - i++; - } - } - - return rewrite_ptrs; -} - -bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k) -{ - const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k); - - /* - * If it's an indirect extent, we don't delete the rebalance entry when - * done so that we know what options were applied - check if it still - * needs work done: - */ - if (r && - k.k->type == KEY_TYPE_reflink_v && - !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression)) - r = NULL; - - return r != NULL; -} - -static u64 __bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k, - unsigned target, unsigned compression) -{ - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - const union bch_extent_entry *entry; - struct extent_ptr_decoded p; - u64 sectors = 0; - - if (compression) { - unsigned compression_type = bch2_compression_opt_to_type(compression); - - bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { - if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible || - p.ptr.unwritten) { - sectors = 0; - goto incompressible; - } - - if (!p.ptr.cached && p.crc.compression_type != compression_type) - sectors += p.crc.compressed_size; - } - } -incompressible: - if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) { - bkey_for_each_ptr_decode(k.k, ptrs, p, entry) - if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, target)) - sectors += p.crc.compressed_size; - } - - return sectors; -} - -u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k) -{ - const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k); - - return r ? 
__bch2_bkey_sectors_need_rebalance(c, k, r->target, r->compression) : 0; -} - -int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k, - struct bch_io_opts *opts) -{ - struct bkey_s k = bkey_i_to_s(_k); - struct bch_extent_rebalance *r; - unsigned target = opts->background_target; - unsigned compression = background_compression(*opts); - bool needs_rebalance; - - if (!bkey_extent_is_direct_data(k.k)) - return 0; - - /* get existing rebalance entry: */ - r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c); - if (r) { - if (k.k->type == KEY_TYPE_reflink_v) { - /* - * indirect extents: existing options take precedence, - * so that we don't move extents back and forth if - * they're referenced by different inodes with different - * options: - */ - if (r->target) - target = r->target; - if (r->compression) - compression = r->compression; - } - - r->target = target; - r->compression = compression; - } - - needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression); - - if (needs_rebalance && !r) { - union bch_extent_entry *new = bkey_val_end(k); - - new->rebalance.type = 1U << BCH_EXTENT_ENTRY_rebalance; - new->rebalance.compression = compression; - new->rebalance.target = target; - new->rebalance.unused = 0; - k.k->u64s += extent_entry_u64s(new); - } else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) { - /* - * For indirect extents, don't delete the rebalance entry when - * we're finished so that we know we specifically moved it or - * compressed it to its current location/compression type - */ - extent_entry_drop(k, (union bch_extent_entry *) r); - } - - return 0; -} - /* Generic extent code: */ int bch2_cut_front_s(struct bpos where, struct bkey_s k) @@ -1554,7 +1507,7 @@ int bch2_cut_front_s(struct bpos where, struct bkey_s k) case KEY_TYPE_reflink_p: { struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k); - le64_add_cpu(&p.v->idx, sub); + SET_REFLINK_P_IDX(p.v, REFLINK_P_IDX(p.v) + sub); break; } case KEY_TYPE_inline_data: diff --git a/libbcachefs/extents.h b/libbcachefs/extents.h index 923a5f18..620b284a 100644 --- a/libbcachefs/extents.h +++ b/libbcachefs/extents.h @@ -8,7 +8,6 @@ struct bch_fs; struct btree_trans; -enum bch_validate_flags; /* extent entries: */ @@ -410,12 +409,12 @@ int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c, /* KEY_TYPE_btree_ptr: */ int bch2_btree_ptr_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_btree_ptr_v2_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned, int, struct bkey_s); @@ -452,7 +451,7 @@ bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c); /* KEY_TYPE_reservation: */ int bch2_reservation_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c); @@ -686,14 +685,17 @@ bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c); struct bch_extent_ptr * bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s); -void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *); +void 
bch2_extent_ptr_set_cached(struct bch_fs *, struct bch_io_opts *, + struct bkey_s, struct bch_extent_ptr *); +bool bch2_extent_normalize_by_opts(struct bch_fs *, struct bch_io_opts *, struct bkey_s); bool bch2_extent_normalize(struct bch_fs *, struct bkey_s); + void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *, const struct bch_extent_ptr *); void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_bkey_ptrs_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1, struct bch_extent_ptr ptr2) @@ -707,15 +709,6 @@ static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1, void bch2_ptr_swab(struct bkey_s); -const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c); -unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c, - unsigned, unsigned); -bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c); -u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *, struct bkey_s_c); - -int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *, - struct bch_io_opts *); - /* Generic extent code: */ enum bch_extent_overlap { diff --git a/libbcachefs/extents_format.h b/libbcachefs/extents_format.h index 3bd2fdbb..c198dfc3 100644 --- a/libbcachefs/extents_format.h +++ b/libbcachefs/extents_format.h @@ -201,19 +201,8 @@ struct bch_extent_stripe_ptr { #endif }; -struct bch_extent_rebalance { -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u64 type:6, - unused:34, - compression:8, /* enum bch_compression_opt */ - target:16; -#elif defined (__BIG_ENDIAN_BITFIELD) - __u64 target:16, - compression:8, - unused:34, - type:6; -#endif -}; +/* bch_extent_rebalance: */ +#include "rebalance_format.h" union bch_extent_entry { #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64 diff --git a/libbcachefs/fs-common.c b/libbcachefs/fs-common.c index 7e10a9dd..dcaa47f6 100644 --- a/libbcachefs/fs-common.c +++ b/libbcachefs/fs-common.c @@ -548,3 +548,84 @@ err: bch2_trans_iter_exit(trans, &src_dir_iter); return ret; } + +static inline void prt_bytes_reversed(struct printbuf *out, const void *b, unsigned n) +{ + bch2_printbuf_make_room(out, n); + + unsigned can_print = min(n, printbuf_remaining(out)); + + b += n; + + for (unsigned i = 0; i < can_print; i++) + out->buf[out->pos++] = *((char *) --b); + + printbuf_nul_terminate(out); +} + +static inline void reverse_bytes(void *b, size_t n) +{ + char *e = b + n, *s = b; + + while (s < e) { + --e; + swap(*s, *e); + s++; + } +} + +/* XXX: we don't yet attempt to print paths when we don't know the subvol */ +int bch2_inum_to_path(struct btree_trans *trans, subvol_inum inum, struct printbuf *path) +{ + unsigned orig_pos = path->pos; + int ret = 0; + + while (!(inum.subvol == BCACHEFS_ROOT_SUBVOL && + inum.inum == BCACHEFS_ROOT_INO)) { + struct bch_inode_unpacked inode; + ret = bch2_inode_find_by_inum_trans(trans, inum, &inode); + if (ret) + goto err; + + if (!inode.bi_dir && !inode.bi_dir_offset) { + ret = -BCH_ERR_ENOENT_inode_no_backpointer; + goto err; + } + + u32 snapshot; + ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); + if (ret) + goto err; + + struct btree_iter d_iter; + struct bkey_s_c_dirent d = bch2_bkey_get_iter_typed(trans, &d_iter, + BTREE_ID_dirents, SPOS(inode.bi_dir, inode.bi_dir_offset, snapshot), + 0, dirent); + ret = bkey_err(d.s_c); + if (ret) + goto err; + + struct qstr dirent_name = bch2_dirent_get_name(d); + 
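		/*
		 * Editor's note: components are discovered child -> root, so
		 * each name is appended byte-reversed with a '/' after it and
		 * the whole span is reversed once at the end.  E.g. for /a/bc
		 * the buffer grows "cb/", then "cb/a/", and reverse_bytes()
		 * turns that into "/a/bc" -- one final pass rather than
		 * shifting the buffer for every component.
		 */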
prt_bytes_reversed(path, dirent_name.name, dirent_name.len); + + prt_char(path, '/'); + + if (d.v->d_type == DT_SUBVOL) + inum.subvol = le32_to_cpu(d.v->d_parent_subvol); + inum.inum = d.k->p.inode; + + bch2_trans_iter_exit(trans, &d_iter); + } + + if (orig_pos == path->pos) + prt_char(path, '/'); + + ret = path->allocation_failure ? -ENOMEM : 0; + if (ret) + goto err; + + reverse_bytes(path->buf + orig_pos, path->pos - orig_pos); + return 0; +err: + return ret; +} diff --git a/libbcachefs/fs-common.h b/libbcachefs/fs-common.h index c934e807..2b59210b 100644 --- a/libbcachefs/fs-common.h +++ b/libbcachefs/fs-common.h @@ -42,4 +42,6 @@ int bch2_rename_trans(struct btree_trans *, bool bch2_reinherit_attrs(struct bch_inode_unpacked *, struct bch_inode_unpacked *); +int bch2_inum_to_path(struct btree_trans *, subvol_inum, struct printbuf *); + #endif /* _BCACHEFS_FS_COMMON_H */ diff --git a/libbcachefs/fs-io-buffered.c b/libbcachefs/fs-io-buffered.c index 0923f38a..ff8b8df5 100644 --- a/libbcachefs/fs-io-buffered.c +++ b/libbcachefs/fs-io-buffered.c @@ -164,7 +164,8 @@ static void bchfs_read(struct btree_trans *trans, BTREE_ITER_slots); while (1) { struct bkey_s_c k; - unsigned bytes, sectors, offset_into_extent; + unsigned bytes, sectors; + s64 offset_into_extent; enum btree_id data_btree = BTREE_ID_extents; bch2_trans_begin(trans); @@ -197,7 +198,7 @@ static void bchfs_read(struct btree_trans *trans, k = bkey_i_to_s_c(sk.k); - sectors = min(sectors, k.k->size - offset_into_extent); + sectors = min_t(unsigned, sectors, k.k->size - offset_into_extent); if (readpages_iter) { ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors, @@ -230,10 +231,12 @@ err: bch2_trans_iter_exit(trans, &iter); if (ret) { - bch_err_inum_offset_ratelimited(c, - iter.pos.inode, - iter.pos.offset << 9, - "read error %i from btree lookup", ret); + struct printbuf buf = PRINTBUF; + bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter.pos.offset << 9); + prt_printf(&buf, "read error %i from btree lookup", ret); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + rbio->bio.bi_status = BLK_STS_IOERR; bio_endio(&rbio->bio); } @@ -686,7 +689,7 @@ int bch2_write_begin(struct file *file, struct address_space *mapping, folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, FGP_WRITEBEGIN | fgf_set_order(len), mapping_gfp_mask(mapping)); - if (IS_ERR_OR_NULL(folio)) + if (IS_ERR(folio)) goto err_unlock; offset = pos - folio_pos(folio); diff --git a/libbcachefs/fs-io-direct.c b/libbcachefs/fs-io-direct.c index 2089c36b..b0367b9d 100644 --- a/libbcachefs/fs-io-direct.c +++ b/libbcachefs/fs-io-direct.c @@ -226,6 +226,7 @@ struct dio_write { struct mm_struct *mm; const struct iovec *iov; unsigned loop:1, + have_mm_ref:1, extending:1, sync:1, flush:1; @@ -390,6 +391,9 @@ static __always_inline long bch2_dio_write_done(struct dio_write *dio) kfree(dio->iov); + if (dio->have_mm_ref) + mmdrop(dio->mm); + ret = dio->op.error ?: ((long) dio->written << 9); bio_put(&dio->op.wbio.bio); @@ -529,9 +533,24 @@ static __always_inline long bch2_dio_write_loop(struct dio_write *dio) if (unlikely(dio->iter.count) && !dio->sync && - !dio->loop && - bch2_dio_write_copy_iov(dio)) - dio->sync = sync = true; + !dio->loop) { + /* + * Rest of write will be submitted asynchronously - + * unless copying the iov fails: + */ + if (likely(!bch2_dio_write_copy_iov(dio))) { + /* + * aio guarantees that mm_struct outlives the + * request, but io_uring does not + */ + if (dio->mm) { + mmgrab(dio->mm); + dio->have_mm_ref = true; + } 
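			/*
			 * Editor's note: mmgrab() above pins only the
			 * mm_struct allocation; before a kthread can use it
			 * for page tables, bch2_dio_write_continue() must
			 * still win mmget_not_zero() (the owner may have
			 * exited by then), and bch2_dio_write_done() balances
			 * the grab with mmdrop().
			 */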
+ } else { + dio->sync = sync = true; + } + } dio->loop = true; closure_call(&dio->op.cl, bch2_write, NULL, NULL); @@ -559,15 +578,25 @@ err: static noinline __cold void bch2_dio_write_continue(struct dio_write *dio) { - struct mm_struct *mm = dio->mm; + struct mm_struct *mm = dio->have_mm_ref ? dio->mm: NULL; bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE); - if (mm) + if (mm) { + if (unlikely(!mmget_not_zero(mm))) { + /* process exited */ + dio->op.error = -ESRCH; + bch2_dio_write_done(dio); + return; + } + kthread_use_mm(mm); + } bch2_dio_write_loop(dio); - if (mm) + if (mm) { kthread_unuse_mm(mm); + mmput(mm); + } } static void bch2_dio_write_loop_async(struct bch_write_op *op) @@ -641,6 +670,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter) dio->mm = current->mm; dio->iov = NULL; dio->loop = false; + dio->have_mm_ref = false; dio->extending = extending; dio->sync = is_sync_kiocb(req) || extending; dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled; diff --git a/libbcachefs/fs-io-pagecache.c b/libbcachefs/fs-io-pagecache.c index 1d4910ea..e072900e 100644 --- a/libbcachefs/fs-io-pagecache.c +++ b/libbcachefs/fs-io-pagecache.c @@ -29,7 +29,7 @@ int bch2_filemap_get_contig_folios_d(struct address_space *mapping, break; f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp); - if (IS_ERR_OR_NULL(f)) + if (IS_ERR(f)) break; BUG_ON(fs->nr && folio_pos(f) != pos); @@ -199,7 +199,7 @@ int bch2_folio_set(struct bch_fs *c, subvol_inum inum, unsigned folio_idx = 0; return bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, POS(inum.inum, offset), POS(inum.inum, U64_MAX), inum.subvol, BTREE_ITER_slots, k, ({ diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c index 15d3f073..33d0e708 100644 --- a/libbcachefs/fs-io.c +++ b/libbcachefs/fs-io.c @@ -167,6 +167,34 @@ void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, /* fsync: */ +static int bch2_get_inode_journal_seq_trans(struct btree_trans *trans, subvol_inum inum, + u64 *seq) +{ + struct printbuf buf = PRINTBUF; + struct bch_inode_unpacked u; + struct btree_iter iter; + int ret = bch2_inode_peek(trans, &iter, &u, inum, 0); + if (ret) + return ret; + + u64 cur_seq = journal_cur_seq(&trans->c->journal); + *seq = min(cur_seq, u.bi_journal_seq); + + if (fsck_err_on(u.bi_journal_seq > cur_seq, + trans, inode_journal_seq_in_future, + "inode journal seq in future (currently at %llu)\n%s", + cur_seq, + (bch2_inode_unpacked_to_text(&buf, &u), + buf.buf))) { + u.bi_journal_seq = cur_seq; + ret = bch2_inode_write(trans, &iter, &u); + } +fsck_err: + bch2_trans_iter_exit(trans, &iter); + printbuf_exit(&buf); + return ret; +} + /* * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an * insert trigger: look up the btree inode instead @@ -180,9 +208,10 @@ static int bch2_flush_inode(struct bch_fs *c, if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) return -EROFS; - struct bch_inode_unpacked u; - int ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u) ?: - bch2_journal_flush_seq(&c->journal, u.bi_journal_seq, TASK_INTERRUPTIBLE) ?: + u64 seq; + int ret = bch2_trans_commit_do(c, NULL, NULL, 0, + bch2_get_inode_journal_seq_trans(trans, inode_inum(inode), &seq)) ?: + bch2_journal_flush_seq(&c->journal, seq, TASK_INTERRUPTIBLE) ?: bch2_inode_flush_nocow_writes(c, inode); bch2_write_ref_put(c, BCH_WRITE_REF_fsync); return ret; @@ -222,7 +251,7 @@ static inline 
int range_has_data(struct bch_fs *c, u32 subvol, struct bpos end) { return bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents, start, end, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, start, end, subvol, 0, k, ({ bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k); }))); @@ -256,7 +285,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode, folio = __filemap_get_folio(mapping, index, FGP_LOCK|FGP_CREAT, GFP_KERNEL); - if (IS_ERR_OR_NULL(folio)) { + if (IS_ERR(folio)) { ret = -ENOMEM; goto out; } @@ -587,7 +616,7 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode, POS(inode->v.i_ino, start_sector), BTREE_ITER_slots|BTREE_ITER_intent); - while (!ret && bkey_lt(iter.pos, end_pos)) { + while (!ret) { s64 i_sectors_delta = 0; struct quota_res quota_res = { 0 }; struct bkey_s_c k; @@ -598,6 +627,9 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode, bch2_trans_begin(trans); + if (bkey_ge(iter.pos, end_pos)) + break; + ret = bch2_subvolume_get_snapshot(trans, inode->ei_inum.subvol, &snapshot); if (ret) @@ -634,12 +666,15 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode, if (bch2_clamp_data_hole(&inode->v, &hole_start, &hole_end, - opts.data_replicas, true)) + opts.data_replicas, true)) { ret = drop_locks_do(trans, (bch2_clamp_data_hole(&inode->v, &hole_start, &hole_end, opts.data_replicas, false), 0)); + if (ret) + goto bkey_err; + } bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start)); if (ret) @@ -667,10 +702,13 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode, bch2_i_sectors_acct(c, inode, "a_res, i_sectors_delta); if (bch2_mark_pagecache_reserved(inode, &hole_start, - iter.pos.offset, true)) - drop_locks_do(trans, + iter.pos.offset, true)) { + ret = drop_locks_do(trans, bch2_mark_pagecache_reserved(inode, &hole_start, iter.pos.offset, false)); + if (ret) + goto bkey_err; + } bkey_err: bch2_quota_reservation_put(c, inode, "a_res); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) @@ -797,7 +835,7 @@ static int quota_reserve_range(struct bch_inode_info *inode, u64 sectors = end - start; int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, POS(inode->v.i_ino, start), POS(inode->v.i_ino, end - 1), @@ -913,7 +951,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset) return -ENXIO; int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, POS(inode->v.i_ino, offset >> 9), POS(inode->v.i_ino, U64_MAX), inum.subvol, 0, k, ({ @@ -949,7 +987,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) return -ENXIO; int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, POS(inode->v.i_ino, offset >> 9), POS(inode->v.i_ino, U64_MAX), inum.subvol, BTREE_ITER_slots, k, ({ diff --git a/libbcachefs/fs.c b/libbcachefs/fs.c index 79bc54e7..c6e7df7c 100644 --- a/libbcachefs/fs.c +++ b/libbcachefs/fs.c @@ -23,6 +23,7 @@ #include "journal.h" #include "keylist.h" #include "quota.h" +#include "rebalance.h" #include "snapshot.h" #include "super.h" #include "xattr.h" @@ -38,6 +39,7 @@ #include <linux/posix_acl.h> #include <linux/random.h> #include <linux/seq_file.h> +#include 
<linux/siphash.h> #include <linux/statfs.h> #include <linux/string.h> #include <linux/xattr.h> @@ -89,10 +91,25 @@ int __must_check bch2_write_inode(struct bch_fs *c, retry: bch2_trans_begin(trans); - ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode), - BTREE_ITER_intent) ?: - (set ? set(trans, inode, &inode_u, p) : 0) ?: - bch2_inode_write(trans, &iter, &inode_u) ?: + ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode), BTREE_ITER_intent); + if (ret) + goto err; + + struct bch_extent_rebalance old_r = bch2_inode_rebalance_opts_get(c, &inode_u); + + ret = (set ? set(trans, inode, &inode_u, p) : 0); + if (ret) + goto err; + + struct bch_extent_rebalance new_r = bch2_inode_rebalance_opts_get(c, &inode_u); + + if (memcmp(&old_r, &new_r, sizeof(new_r))) { + ret = bch2_set_rebalance_needs_scan_trans(trans, inode_u.bi_inum); + if (ret) + goto err; + } + + ret = bch2_inode_write(trans, &iter, &inode_u) ?: bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); /* @@ -101,7 +118,7 @@ retry: */ if (!ret) bch2_inode_update_after_write(trans, inode, &inode_u, fields); - +err: bch2_trans_iter_exit(trans, &iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) @@ -160,8 +177,9 @@ static bool subvol_inum_eq(subvol_inum a, subvol_inum b) static u32 bch2_vfs_inode_hash_fn(const void *data, u32 len, u32 seed) { const subvol_inum *inum = data; + siphash_key_t k = { .key[0] = seed }; - return jhash(&inum->inum, sizeof(inum->inum), seed); + return siphash_2u64(inum->subvol, inum->inum, &k); } static u32 bch2_vfs_inode_obj_hash_fn(const void *data, u32 len, u32 seed) @@ -190,11 +208,18 @@ static const struct rhashtable_params bch2_vfs_inodes_params = { .automatic_shrinking = true, }; +static const struct rhashtable_params bch2_vfs_inodes_by_inum_params = { + .head_offset = offsetof(struct bch_inode_info, by_inum_hash), + .key_offset = offsetof(struct bch_inode_info, ei_inum.inum), + .key_len = sizeof(u64), + .automatic_shrinking = true, +}; + int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p) { struct bch_fs *c = trans->c; - struct rhashtable *ht = &c->vfs_inodes_table; - subvol_inum inum = (subvol_inum) { .inum = p.offset }; + struct rhltable *ht = &c->vfs_inodes_by_inum_table; + u64 inum = p.offset; DARRAY(u32) subvols; int ret = 0; @@ -219,15 +244,15 @@ restart_from_top: struct rhash_lock_head __rcu *const *bkt; struct rhash_head *he; unsigned int hash; - struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + struct bucket_table *tbl = rht_dereference_rcu(ht->ht.tbl, &ht->ht); restart: - hash = rht_key_hashfn(ht, tbl, &inum, bch2_vfs_inodes_params); + hash = rht_key_hashfn(&ht->ht, tbl, &inum, bch2_vfs_inodes_by_inum_params); bkt = rht_bucket(tbl, hash); do { struct bch_inode_info *inode; rht_for_each_entry_rcu_from(inode, he, rht_ptr_rcu(bkt), tbl, hash, hash) { - if (inode->ei_inum.inum == inum.inum) { + if (inode->ei_inum.inum == inum) { ret = darray_push_gfp(&subvols, inode->ei_inum.subvol, GFP_NOWAIT|__GFP_NOWARN); if (ret) { @@ -248,7 +273,7 @@ restart: /* Ensure we see any new tables. 
*/ smp_rmb(); - tbl = rht_dereference_rcu(tbl->future_tbl, ht); + tbl = rht_dereference_rcu(tbl->future_tbl, &ht->ht); if (unlikely(tbl)) goto restart; rcu_read_unlock(); @@ -327,7 +352,11 @@ static void bch2_inode_hash_remove(struct bch_fs *c, struct bch_inode_info *inod spin_unlock(&inode->v.i_lock); if (remove) { - int ret = rhashtable_remove_fast(&c->vfs_inodes_table, + int ret = rhltable_remove(&c->vfs_inodes_by_inum_table, + &inode->by_inum_hash, bch2_vfs_inodes_by_inum_params); + BUG_ON(ret); + + ret = rhashtable_remove_fast(&c->vfs_inodes_table, &inode->hash, bch2_vfs_inodes_params); BUG_ON(ret); inode->v.i_hash.pprev = NULL; @@ -372,6 +401,11 @@ retry: discard_new_inode(&inode->v); return old; } else { + int ret = rhltable_insert(&c->vfs_inodes_by_inum_table, + &inode->by_inum_hash, + bch2_vfs_inodes_by_inum_params); + BUG_ON(ret); + inode_fake_hash(&inode->v); inode_sb_list_add(&inode->v); @@ -396,10 +430,10 @@ static struct inode *bch2_alloc_inode(struct super_block *sb) BUG(); } -static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c) +static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c, gfp_t gfp) { struct bch_inode_info *inode = alloc_inode_sb(c->vfs_sb, - bch2_inode_cache, GFP_NOFS); + bch2_inode_cache, gfp); if (!inode) return NULL; @@ -411,7 +445,7 @@ static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c) mutex_init(&inode->ei_quota_lock); memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush)); - if (unlikely(inode_init_always(c->vfs_sb, &inode->v))) { + if (unlikely(inode_init_always_gfp(c->vfs_sb, &inode->v, gfp))) { kmem_cache_free(bch2_inode_cache, inode); return NULL; } @@ -424,12 +458,10 @@ static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c) */ static struct bch_inode_info *bch2_new_inode(struct btree_trans *trans) { - struct bch_inode_info *inode = - memalloc_flags_do(PF_MEMALLOC_NORECLAIM|PF_MEMALLOC_NOWARN, - __bch2_new_inode(trans->c)); + struct bch_inode_info *inode = __bch2_new_inode(trans->c, GFP_NOWAIT); if (unlikely(!inode)) { - int ret = drop_locks_do(trans, (inode = __bch2_new_inode(trans->c)) ? 0 : -ENOMEM); + int ret = drop_locks_do(trans, (inode = __bch2_new_inode(trans->c, GFP_NOFS)) ? 
0 : -ENOMEM); if (ret && inode) { __destroy_inode(&inode->v); kmem_cache_free(bch2_inode_cache, inode); @@ -503,7 +535,7 @@ __bch2_create(struct mnt_idmap *idmap, if (ret) return ERR_PTR(ret); #endif - inode = __bch2_new_inode(c); + inode = __bch2_new_inode(c, GFP_NOFS); if (unlikely(!inode)) { inode = ERR_PTR(-ENOMEM); goto err; @@ -1247,7 +1279,6 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, struct btree_iter iter; struct bkey_s_c k; struct bkey_buf cur, prev; - unsigned offset_into_extent, sectors; bool have_extent = false; int ret = 0; @@ -1280,7 +1311,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, bch2_btree_iter_set_snapshot(&iter, snapshot); - k = bch2_btree_iter_peek_upto(&iter, end); + k = bch2_btree_iter_peek_max(&iter, end); ret = bkey_err(k); if (ret) continue; @@ -1294,9 +1325,8 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, continue; } - offset_into_extent = iter.pos.offset - - bkey_start_offset(k.k); - sectors = k.k->size - offset_into_extent; + s64 offset_into_extent = iter.pos.offset - bkey_start_offset(k.k); + unsigned sectors = k.k->size - offset_into_extent; bch2_bkey_buf_reassemble(&cur, c, k); @@ -1308,7 +1338,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, k = bkey_i_to_s_c(cur.k); bch2_bkey_buf_realloc(&prev, c, k.k->u64s); - sectors = min(sectors, k.k->size - offset_into_extent); + sectors = min_t(unsigned, sectors, k.k->size - offset_into_extent); bch2_cut_front(POS(k.k->p.inode, bkey_start_offset(k.k) + @@ -1738,7 +1768,6 @@ static void bch2_vfs_inode_init(struct btree_trans *trans, bch2_inode_update_after_write(trans, inode, bi, ~0); inode->v.i_blocks = bi->bi_sectors; - inode->v.i_ino = bi->bi_inum; inode->v.i_rdev = bi->bi_dev; inode->v.i_generation = bi->bi_generation; inode->v.i_size = bi->bi_size; @@ -2202,7 +2231,8 @@ got_sb: sb->s_time_gran = c->sb.nsec_per_time_unit; sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1; sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec); - sb->s_uuid = c->sb.user_uuid; + super_set_uuid(sb, c->sb.user_uuid.b, sizeof(c->sb.user_uuid)); + super_set_sysfs_name_uuid(sb); sb->s_shrink->seeks = 0; c->vfs_sb = sb; strscpy(sb->s_id, c->name, sizeof(sb->s_id)); @@ -2347,13 +2377,16 @@ static int bch2_init_fs_context(struct fs_context *fc) void bch2_fs_vfs_exit(struct bch_fs *c) { + if (c->vfs_inodes_by_inum_table.ht.tbl) + rhltable_destroy(&c->vfs_inodes_by_inum_table); if (c->vfs_inodes_table.tbl) rhashtable_destroy(&c->vfs_inodes_table); } int bch2_fs_vfs_init(struct bch_fs *c) { - return rhashtable_init(&c->vfs_inodes_table, &bch2_vfs_inodes_params); + return rhashtable_init(&c->vfs_inodes_table, &bch2_vfs_inodes_params) ?: + rhltable_init(&c->vfs_inodes_by_inum_table, &bch2_vfs_inodes_by_inum_params); } static struct file_system_type bcache_fs_type = { diff --git a/libbcachefs/fs.h b/libbcachefs/fs.h index 59f9f7ae..dd219854 100644 --- a/libbcachefs/fs.h +++ b/libbcachefs/fs.h @@ -14,6 +14,7 @@ struct bch_inode_info { struct inode v; struct rhash_head hash; + struct rhlist_head by_inum_hash; subvol_inum ei_inum; struct list_head ei_vfs_inode_list; diff --git a/libbcachefs/fsck.c b/libbcachefs/fsck.c index c96025b8..1a5a0711 100644 --- a/libbcachefs/fsck.c +++ b/libbcachefs/fsck.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" +#include "bcachefs_ioctl.h" #include "bkey_buf.h" #include "btree_cache.h" #include "btree_update.h" @@ -16,6 +17,7 @@ 
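
[Note] __bch2_new_inode() growing a gfp_t parameter replaces the old PF_MEMALLOC_NORECLAIM trick: while btree locks are held, the allocation is attempted with GFP_NOWAIT, and only if that fails are the locks dropped (via drop_locks_do(), after which the transaction may need to restart) for a blocking GFP_NOFS retry. The shape of the pattern, with every name hypothetical; only the GFP discipline is the point:

	#include <linux/slab.h>

	static struct obj *alloc_obj(struct my_trans *trans)
	{
		/* fast path: with btree locks held, fail rather than recurse
		 * into reclaim */
		struct obj *o = kzalloc(sizeof(*o), GFP_NOWAIT|__GFP_NOWARN);
		if (likely(o))
			return o;

		/* slow path: drop the locks so reclaim can't deadlock against
		 * us, allocate with GFP_NOFS, and let the caller handle the
		 * transaction restart this implies */
		my_trans_unlock(trans);
		return kzalloc(sizeof(*o), GFP_NOFS);
	}
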
#include "recovery_passes.h" #include "snapshot.h" #include "super.h" +#include "thread_with_file.h" #include "xattr.h" #include <linux/bsearch.h> @@ -73,7 +75,7 @@ static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum, { u64 sectors = 0; - int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents, + int ret = for_each_btree_key_max(trans, iter, BTREE_ID_extents, SPOS(inum, 0, snapshot), POS(inum, U64_MAX), 0, k, ({ @@ -90,7 +92,7 @@ static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum, { u64 subdirs = 0; - int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_dirents, + int ret = for_each_btree_key_max(trans, iter, BTREE_ID_dirents, SPOS(inum, 0, snapshot), POS(inum, U64_MAX), 0, k, ({ @@ -210,6 +212,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, { struct bch_fs *c = trans->c; struct qstr lostfound_str = QSTR("lost+found"); + struct btree_iter lostfound_iter = { NULL }; u64 inum = 0; unsigned d_type = 0; int ret; @@ -288,11 +291,16 @@ create_lostfound: * XXX: we could have a nicer log message here if we had a nice way to * walk backpointers to print a path */ - bch_notice(c, "creating lost+found in subvol %llu snapshot %u", - root_inum.subvol, le32_to_cpu(st.root_snapshot)); + struct printbuf path = PRINTBUF; + ret = bch2_inum_to_path(trans, root_inum, &path); + if (ret) + goto err; + + bch_notice(c, "creating %s/lost+found in subvol %llu snapshot %u", + path.buf, root_inum.subvol, snapshot); + printbuf_exit(&path); u64 now = bch2_current_time(c); - struct btree_iter lostfound_iter = { NULL }; u64 cpu = raw_smp_processor_id(); bch2_inode_init_early(c, lostfound); @@ -618,7 +626,7 @@ static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32 struct btree_iter iter = {}; bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0); - struct bkey_s_c k = bch2_btree_iter_peek_prev(&iter); + struct bkey_s_c k = bch2_btree_iter_peek_prev_min(&iter, POS(inum, 0)); bch2_trans_iter_exit(trans, &iter); int ret = bkey_err(k); if (ret) @@ -1390,7 +1398,7 @@ static int check_inode(struct btree_trans *trans, if (fsck_err_on(!ret, trans, inode_unlinked_and_not_open, - "inode %llu%u unlinked and not open", + "inode %llu:%u unlinked and not open", u.bi_inum, u.bi_snapshot)) { ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot); bch_err_msg(c, ret, "in fsck deleting inode"); @@ -1439,6 +1447,17 @@ static int check_inode(struct btree_trans *trans, do_update = true; } } + + if (fsck_err_on(u.bi_journal_seq > journal_cur_seq(&c->journal), + trans, inode_journal_seq_in_future, + "inode journal seq in future (currently at %llu)\n%s", + journal_cur_seq(&c->journal), + (printbuf_reset(&buf), + bch2_inode_unpacked_to_text(&buf, &u), + buf.buf))) { + u.bi_journal_seq = journal_cur_seq(&c->journal); + do_update = true; + } do_update: if (do_update) { ret = __bch2_fsck_write_inode(trans, &u); @@ -1647,7 +1666,7 @@ static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_wal if (i->count != count2) { bch_err_ratelimited(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu", w->last_pos.inode, i->snapshot, i->count, count2); - return -BCH_ERR_internal_fsck_err; + i->count = count2; } if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty), @@ -1751,7 +1770,7 @@ static int overlapping_extents_found(struct btree_trans *trans, bch2_trans_iter_init(trans, &iter1, btree, pos1, BTREE_ITER_all_snapshots| BTREE_ITER_not_extents); - k1 = 
bch2_btree_iter_peek_upto(&iter1, POS(pos1.inode, U64_MAX)); + k1 = bch2_btree_iter_peek_max(&iter1, POS(pos1.inode, U64_MAX)); ret = bkey_err(k1); if (ret) goto err; @@ -1776,7 +1795,7 @@ static int overlapping_extents_found(struct btree_trans *trans, while (1) { bch2_btree_iter_advance(&iter2); - k2 = bch2_btree_iter_peek_upto(&iter2, POS(pos1.inode, U64_MAX)); + k2 = bch2_btree_iter_peek_max(&iter2, POS(pos1.inode, U64_MAX)); ret = bkey_err(k2); if (ret) goto err; @@ -3192,3 +3211,223 @@ int bch2_fix_reflink_p(struct bch_fs *c) bch_err_fn(c, ret); return ret; } + +#ifndef NO_BCACHEFS_CHARDEV + +struct fsck_thread { + struct thread_with_stdio thr; + struct bch_fs *c; + struct bch_opts opts; +}; + +static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr) +{ + struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr); + kfree(thr); +} + +static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio) +{ + struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); + struct bch_fs *c = thr->c; + + int ret = PTR_ERR_OR_ZERO(c); + if (ret) + return ret; + + ret = bch2_fs_start(thr->c); + if (ret) + goto err; + + if (test_bit(BCH_FS_errors_fixed, &c->flags)) { + bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name); + ret |= 1; + } + if (test_bit(BCH_FS_error, &c->flags)) { + bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name); + ret |= 4; + } +err: + bch2_fs_stop(c); + return ret; +} + +static const struct thread_with_stdio_ops bch2_offline_fsck_ops = { + .exit = bch2_fsck_thread_exit, + .fn = bch2_fsck_offline_thread_fn, +}; + +long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg) +{ + struct bch_ioctl_fsck_offline arg; + struct fsck_thread *thr = NULL; + darray_str(devs) = {}; + long ret = 0; + + if (copy_from_user(&arg, user_arg, sizeof(arg))) + return -EFAULT; + + if (arg.flags) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + for (size_t i = 0; i < arg.nr_devs; i++) { + u64 dev_u64; + ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64)); + if (ret) + goto err; + + char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX); + ret = PTR_ERR_OR_ZERO(dev_str); + if (ret) + goto err; + + ret = darray_push(&devs, dev_str); + if (ret) { + kfree(dev_str); + goto err; + } + } + + thr = kzalloc(sizeof(*thr), GFP_KERNEL); + if (!thr) { + ret = -ENOMEM; + goto err; + } + + thr->opts = bch2_opts_empty(); + + if (arg.opts) { + char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); + ret = PTR_ERR_OR_ZERO(optstr) ?: + bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr); + if (!IS_ERR(optstr)) + kfree(optstr); + + if (ret) + goto err; + } + + opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio); + opt_set(thr->opts, read_only, 1); + opt_set(thr->opts, ratelimit_errors, 0); + + /* We need request_key() to be called before we punt to kthread: */ + opt_set(thr->opts, nostart, true); + + bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops); + + thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts); + + if (!IS_ERR(thr->c) && + thr->c->opts.errors == BCH_ON_ERROR_panic) + thr->c->opts.errors = BCH_ON_ERROR_ro; + + ret = __bch2_run_thread_with_stdio(&thr->thr); +out: + darray_for_each(devs, i) + kfree(*i); + darray_exit(&devs); + return ret; +err: + if (thr) + bch2_fsck_thread_exit(&thr->thr); + pr_err("ret %s", bch2_err_str(ret)); + goto out; +} + +static int 
bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio) +{ + struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); + struct bch_fs *c = thr->c; + + c->stdio_filter = current; + c->stdio = &thr->thr.stdio; + + /* + * XXX: can we figure out a way to do this without mucking with c->opts? + */ + unsigned old_fix_errors = c->opts.fix_errors; + if (opt_defined(thr->opts, fix_errors)) + c->opts.fix_errors = thr->opts.fix_errors; + else + c->opts.fix_errors = FSCK_FIX_ask; + + c->opts.fsck = true; + set_bit(BCH_FS_fsck_running, &c->flags); + + c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info; + int ret = bch2_run_online_recovery_passes(c); + + clear_bit(BCH_FS_fsck_running, &c->flags); + bch_err_fn(c, ret); + + c->stdio = NULL; + c->stdio_filter = NULL; + c->opts.fix_errors = old_fix_errors; + + up(&c->online_fsck_mutex); + bch2_ro_ref_put(c); + return ret; +} + +static const struct thread_with_stdio_ops bch2_online_fsck_ops = { + .exit = bch2_fsck_thread_exit, + .fn = bch2_fsck_online_thread_fn, +}; + +long bch2_ioctl_fsck_online(struct bch_fs *c, struct bch_ioctl_fsck_online arg) +{ + struct fsck_thread *thr = NULL; + long ret = 0; + + if (arg.flags) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (!bch2_ro_ref_tryget(c)) + return -EROFS; + + if (down_trylock(&c->online_fsck_mutex)) { + bch2_ro_ref_put(c); + return -EAGAIN; + } + + thr = kzalloc(sizeof(*thr), GFP_KERNEL); + if (!thr) { + ret = -ENOMEM; + goto err; + } + + thr->c = c; + thr->opts = bch2_opts_empty(); + + if (arg.opts) { + char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); + + ret = PTR_ERR_OR_ZERO(optstr) ?: + bch2_parse_mount_opts(c, &thr->opts, NULL, optstr); + if (!IS_ERR(optstr)) + kfree(optstr); + + if (ret) + goto err; + } + + ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops); +err: + if (ret < 0) { + bch_err_fn(c, ret); + if (thr) + bch2_fsck_thread_exit(&thr->thr); + up(&c->online_fsck_mutex); + bch2_ro_ref_put(c); + } + return ret; +} + +#endif /* NO_BCACHEFS_CHARDEV */ diff --git a/libbcachefs/fsck.h b/libbcachefs/fsck.h index 1cca3101..4481b40a 100644 --- a/libbcachefs/fsck.h +++ b/libbcachefs/fsck.h @@ -14,4 +14,7 @@ int bch2_check_directory_structure(struct bch_fs *); int bch2_check_nlinks(struct bch_fs *); int bch2_fix_reflink_p(struct bch_fs *); +long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *); +long bch2_ioctl_fsck_online(struct bch_fs *, struct bch_ioctl_fsck_online); + #endif /* _BCACHEFS_FSCK_H */ diff --git a/libbcachefs/inode.c b/libbcachefs/inode.c index 46310fcd..8818e418 100644 --- a/libbcachefs/inode.c +++ b/libbcachefs/inode.c @@ -14,6 +14,7 @@ #include "extent_update.h" #include "fs.h" #include "inode.h" +#include "opts.h" #include "str_hash.h" #include "snapshot.h" #include "subvolume.h" @@ -21,7 +22,7 @@ #include <linux/random.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #define x(name, ...) 
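
[Note] bch2_ioctl_fsck_online() above layers two guards before spawning the worker: a read-only reference so the filesystem object cannot go away under the thread, and a try-acquired semaphore so only one online fsck runs at a time. Condensed from the surrounding hunks, the acquire/release pairing looks like this:

	if (!bch2_ro_ref_tryget(c))	/* pin the fs across the thread's lifetime */
		return -EROFS;

	if (down_trylock(&c->online_fsck_mutex)) {	/* one online fsck at a time */
		bch2_ro_ref_put(c);
		return -EAGAIN;
	}

	/* ... spawn the thread; bch2_fsck_online_thread_fn() does the matching
	 * up(&c->online_fsck_mutex) and bch2_ro_ref_put(c) when it finishes,
	 * and the error path above releases both if the spawn fails ... */
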
#name, const char * const bch2_inode_opts[] = { @@ -428,7 +429,7 @@ struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k) } static int __bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bch_inode_unpacked unpacked; int ret = 0; @@ -468,7 +469,7 @@ fsck_err: } int bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_inode inode = bkey_s_c_to_inode(k); int ret = 0; @@ -478,13 +479,13 @@ int bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k, "invalid str hash type (%llu >= %u)", INODEv1_STR_HASH(inode.v), BCH_STR_HASH_NR); - ret = __bch2_inode_validate(c, k, flags); + ret = __bch2_inode_validate(c, k, from); fsck_err: return ret; } int bch2_inode_v2_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k); int ret = 0; @@ -494,13 +495,13 @@ int bch2_inode_v2_validate(struct bch_fs *c, struct bkey_s_c k, "invalid str hash type (%llu >= %u)", INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR); - ret = __bch2_inode_validate(c, k, flags); + ret = __bch2_inode_validate(c, k, from); fsck_err: return ret; } int bch2_inode_v3_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k); int ret = 0; @@ -518,7 +519,7 @@ int bch2_inode_v3_validate(struct bch_fs *c, struct bkey_s_c k, "invalid str hash type (%llu >= %u)", INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR); - ret = __bch2_inode_validate(c, k, flags); + ret = __bch2_inode_validate(c, k, from); fsck_err: return ret; } @@ -617,7 +618,7 @@ bch2_bkey_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter struct bkey_s_c k; int ret = 0; - for_each_btree_key_upto_norestart(trans, *iter, btree, + for_each_btree_key_max_norestart(trans, *iter, btree, bpos_successor(pos), SPOS(pos.inode, pos.offset, U32_MAX), flags|BTREE_ITER_all_snapshots, k, ret) @@ -652,7 +653,7 @@ int __bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos) struct bkey_s_c k; int ret = 0; - for_each_btree_key_upto_norestart(trans, iter, + for_each_btree_key_max_norestart(trans, iter, BTREE_ID_inodes, POS(0, pos.offset), bpos_predecessor(pos), BTREE_ITER_all_snapshots| BTREE_ITER_with_updates, k, ret) @@ -779,7 +780,7 @@ int bch2_trigger_inode(struct btree_trans *trans, } int bch2_inode_generation_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -966,7 +967,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans, bch2_btree_iter_set_snapshot(&iter, snapshot); - k = bch2_btree_iter_peek_upto(&iter, end); + k = bch2_btree_iter_peek_max(&iter, end); ret = bkey_err(k); if (ret) goto err; @@ -1141,12 +1142,17 @@ struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *inode) void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c, struct bch_inode_unpacked *inode) { -#define x(_name, _bits) opts->_name = inode_opt_get(c, inode, _name); +#define x(_name, _bits) \ + if ((inode)->bi_##_name) { \ + opts->_name = inode->bi_##_name - 1; \ + opts->_name##_from_inode = true; \ + } else { \ + opts->_name = c->opts._name; \ + } BCH_INODE_OPTS() #undef x - if (opts->nocow) - opts->compression = opts->background_compression = 
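
[Note] The rewritten bch2_inode_opts_get() x-macro below encodes "is this option set on the inode?" in the value itself: per-inode fields store the option value plus one, with zero meaning unset, so the expansion falls back to the filesystem-wide default and records provenance in the new *_from_inode flags. Expanded by hand for a single option (compression), for illustration:

	/* what the x() macro expands to for one option */
	if (inode->bi_compression) {
		opts->compression            = inode->bi_compression - 1;	/* undo the +1 bias */
		opts->compression_from_inode = true;
	} else {
		opts->compression            = c->opts.compression;
	}
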
opts->data_checksum = opts->erasure_code = 0; + bch2_io_opts_fixups(opts); } int bch2_inum_opts_get(struct btree_trans *trans, subvol_inum inum, struct bch_io_opts *opts) diff --git a/libbcachefs/inode.h b/libbcachefs/inode.h index bdeb6be7..927c8759 100644 --- a/libbcachefs/inode.h +++ b/libbcachefs/inode.h @@ -7,15 +7,14 @@ #include "opts.h" #include "snapshot.h" -enum bch_validate_flags; extern const char * const bch2_inode_opts[]; int bch2_inode_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); int bch2_inode_v2_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); int bch2_inode_v3_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int __bch2_inode_has_child_snapshots(struct btree_trans *, struct bpos); @@ -60,7 +59,7 @@ static inline bool bkey_is_inode(const struct bkey *k) } int bch2_inode_generation_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_inode_generation ((struct bkey_ops) { \ @@ -262,6 +261,14 @@ void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *, struct bch_inode_unpacked *); int bch2_inum_opts_get(struct btree_trans*, subvol_inum, struct bch_io_opts *); +static inline struct bch_extent_rebalance +bch2_inode_rebalance_opts_get(struct bch_fs *c, struct bch_inode_unpacked *inode) +{ + struct bch_io_opts io_opts; + bch2_inode_opts_get(&io_opts, c, inode); + return io_opts_to_rebalance_opts(&io_opts); +} + int bch2_inode_rm_snapshot(struct btree_trans *, u64, u32); int bch2_delete_dead_inodes(struct bch_fs *); diff --git a/libbcachefs/io_misc.c b/libbcachefs/io_misc.c index f2830517..53539791 100644 --- a/libbcachefs/io_misc.c +++ b/libbcachefs/io_misc.c @@ -113,11 +113,13 @@ int bch2_extent_fallocate(struct btree_trans *trans, err: if (!ret && sectors_allocated) bch2_increment_clock(c, sectors_allocated, WRITE); - if (should_print_err(ret)) - bch_err_inum_offset_ratelimited(c, - inum.inum, - iter->pos.offset << 9, - "%s(): error: %s", __func__, bch2_err_str(ret)); + if (should_print_err(ret)) { + struct printbuf buf = PRINTBUF; + bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter->pos.offset << 9); + prt_printf(&buf, "fallocate error: %s", bch2_err_str(ret)); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + } err_noprint: bch2_open_buckets_put(c, &open_buckets); bch2_disk_reservation_put(c, &disk_res); @@ -164,9 +166,9 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter, bch2_btree_iter_set_snapshot(iter, snapshot); /* - * peek_upto() doesn't have ideal semantics for extents: + * peek_max() doesn't have ideal semantics for extents: */ - k = bch2_btree_iter_peek_upto(iter, end_pos); + k = bch2_btree_iter_peek_max(iter, end_pos); if (!k.k) break; @@ -426,8 +428,8 @@ case LOGGED_OP_FINSERT_shift_extents: bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot)); k = insert - ? bch2_btree_iter_peek_prev(&iter) - : bch2_btree_iter_peek_upto(&iter, POS(inum.inum, U64_MAX)); + ? bch2_btree_iter_peek_prev_min(&iter, POS(inum.inum, 0)) + : bch2_btree_iter_peek_max(&iter, POS(inum.inum, U64_MAX)); if ((ret = bkey_err(k))) goto btree_err; @@ -461,7 +463,7 @@ case LOGGED_OP_FINSERT_shift_extents: op->v.pos = cpu_to_le64(insert ? 
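
[Note] The io_misc.c hunk below is the first of many call sites this commit converts to a single error-reporting shape: build the whole message in a printbuf, so the inum/offset prefix and the specific detail land in one ratelimited line, then free the buffer. The template, using the fallocate site's strings:

	struct printbuf buf = PRINTBUF;

	bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter->pos.offset << 9);
	prt_printf(&buf, "fallocate error: %s", bch2_err_str(ret));
	bch_err_ratelimited(c, "%s", buf.buf);	/* one message, one ratelimit decision */
	printbuf_exit(&buf);			/* printbufs own heap storage */
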
bkey_start_offset(&delete.k) : delete.k.p.offset); - ret = bch2_bkey_set_needs_rebalance(c, copy, &opts) ?: + ret = bch2_bkey_set_needs_rebalance(c, &opts, copy) ?: bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?: bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?: bch2_logged_op_update(trans, &op->k_i) ?: diff --git a/libbcachefs/io_read.c b/libbcachefs/io_read.c index fc246f34..34a3569d 100644 --- a/libbcachefs/io_read.c +++ b/libbcachefs/io_read.c @@ -21,6 +21,7 @@ #include "io_read.h" #include "io_misc.h" #include "io_write.h" +#include "reflink.h" #include "subvolume.h" #include "trace.h" @@ -231,11 +232,11 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans, update_opts.target = opts.foreground_target; struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - unsigned i = 0; + unsigned ptr_bit = 1; bkey_for_each_ptr(ptrs, ptr) { if (bch2_dev_io_failures(failed, ptr->dev)) - update_opts.rewrite_ptrs |= BIT(i); - i++; + update_opts.rewrite_ptrs |= ptr_bit; + ptr_bit <<= 1; } } @@ -262,7 +263,8 @@ err: bio_free_pages(&(*rbio)->bio); kfree(*rbio); *rbio = NULL; - kfree(op); + /* We may have added to the rhashtable and thus need rcu freeing: */ + kfree_rcu(op, rcu); bch2_write_ref_put(c, BCH_WRITE_REF_promote); return ERR_PTR(ret); } @@ -320,6 +322,20 @@ nopromote: /* Read */ +static int bch2_read_err_msg_trans(struct btree_trans *trans, struct printbuf *out, + struct bch_read_bio *rbio, struct bpos read_pos) +{ + return bch2_inum_offset_err_msg_trans(trans, out, + (subvol_inum) { rbio->subvol, read_pos.inode }, + read_pos.offset << 9); +} + +static void bch2_read_err_msg(struct bch_fs *c, struct printbuf *out, + struct bch_read_bio *rbio, struct bpos read_pos) +{ + bch2_trans_run(c, bch2_read_err_msg_trans(trans, out, rbio, read_pos)); +} + #define READ_RETRY_AVOID 1 #define READ_RETRY 2 #define READ_ERR 3 @@ -498,6 +514,29 @@ static void bch2_rbio_error(struct bch_read_bio *rbio, int retry, } } +static void bch2_read_io_err(struct work_struct *work) +{ + struct bch_read_bio *rbio = + container_of(work, struct bch_read_bio, work); + struct bio *bio = &rbio->bio; + struct bch_fs *c = rbio->c; + struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; + struct printbuf buf = PRINTBUF; + + bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); + prt_printf(&buf, "data read error: %s", bch2_blk_status_to_str(bio->bi_status)); + + if (ca) { + bch2_io_error(ca, BCH_MEMBER_ERROR_read); + bch_err_ratelimited(ca, "%s", buf.buf); + } else { + bch_err_ratelimited(c, "%s", buf.buf); + } + + printbuf_exit(&buf); + bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status); +} + static int __bch2_rbio_narrow_crcs(struct btree_trans *trans, struct bch_read_bio *rbio) { @@ -561,6 +600,73 @@ static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio) __bch2_rbio_narrow_crcs(trans, rbio)); } +static void bch2_read_csum_err(struct work_struct *work) +{ + struct bch_read_bio *rbio = + container_of(work, struct bch_read_bio, work); + struct bch_fs *c = rbio->c; + struct bio *src = &rbio->bio; + struct bch_extent_crc_unpacked crc = rbio->pick.crc; + struct nonce nonce = extent_nonce(rbio->version, crc); + struct bch_csum csum = bch2_checksum_bio(c, crc.csum_type, nonce, src); + struct printbuf buf = PRINTBUF; + + bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); + prt_str(&buf, "data "); + bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum); + + struct bch_dev *ca = rbio->have_ioref ? 
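
[Note] The kfree(op) to kfree_rcu(op, rcu) switch in the promote path above reflects a general rule: once an object has been published in an rhashtable, lockless readers may still be dereferencing it under rcu_read_lock(), so its memory must outlive a grace period even on the error-unwind path. A sketch of the rule, assuming the object embeds a struct rcu_head; the names are illustrative:

	struct promote_like {
		struct rhash_head	hash;	/* once published, lockless readers can find us */
		struct rcu_head		rcu;
	};

	static void promote_like_drop(struct rhashtable *ht, struct promote_like *op,
				      const struct rhashtable_params params)
	{
		rhashtable_remove_fast(ht, &op->hash, params);
		kfree_rcu(op, rcu);	/* not kfree(): readers may still hold the pointer */
	}
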
bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; + if (ca) { + bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); + bch_err_ratelimited(ca, "%s", buf.buf); + } else { + bch_err_ratelimited(c, "%s", buf.buf); + } + + bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR); + printbuf_exit(&buf); +} + +static void bch2_read_decompress_err(struct work_struct *work) +{ + struct bch_read_bio *rbio = + container_of(work, struct bch_read_bio, work); + struct bch_fs *c = rbio->c; + struct printbuf buf = PRINTBUF; + + bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); + prt_str(&buf, "decompression error"); + + struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; + if (ca) + bch_err_ratelimited(ca, "%s", buf.buf); + else + bch_err_ratelimited(c, "%s", buf.buf); + + bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR); + printbuf_exit(&buf); +} + +static void bch2_read_decrypt_err(struct work_struct *work) +{ + struct bch_read_bio *rbio = + container_of(work, struct bch_read_bio, work); + struct bch_fs *c = rbio->c; + struct printbuf buf = PRINTBUF; + + bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); + prt_str(&buf, "decrypt error"); + + struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; + if (ca) + bch_err_ratelimited(ca, "%s", buf.buf); + else + bch_err_ratelimited(c, "%s", buf.buf); + + bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR); + printbuf_exit(&buf); +} + /* Inner part that may run in process context */ static void __bch2_read_endio(struct work_struct *work) { @@ -667,33 +773,13 @@ csum_err: goto out; } - struct printbuf buf = PRINTBUF; - buf.atomic++; - prt_str(&buf, "data "); - bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum); - - struct bch_dev *ca = rbio->have_ioref ? 
bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; - if (ca) { - bch_err_inum_offset_ratelimited(ca, - rbio->read_pos.inode, - rbio->read_pos.offset << 9, - "data %s", buf.buf); - bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); - } - printbuf_exit(&buf); - bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR); + bch2_rbio_punt(rbio, bch2_read_csum_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); goto out; decompression_err: - bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode, - rbio->read_pos.offset << 9, - "decompression error"); - bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR); + bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); goto out; decrypt_err: - bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode, - rbio->read_pos.offset << 9, - "decrypt error"); - bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR); + bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); goto out; } @@ -714,16 +800,8 @@ static void bch2_read_endio(struct bio *bio) if (!rbio->split) rbio->bio.bi_end_io = rbio->end_io; - if (bio->bi_status) { - if (ca) { - bch_err_inum_offset_ratelimited(ca, - rbio->read_pos.inode, - rbio->read_pos.offset, - "data read error: %s", - bch2_blk_status_to_str(bio->bi_status)); - bch2_io_error(ca, BCH_MEMBER_ERROR_read); - } - bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status); + if (unlikely(bio->bi_status)) { + bch2_rbio_punt(rbio, bch2_read_io_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); return; } @@ -749,45 +827,6 @@ static void bch2_read_endio(struct bio *bio) bch2_rbio_punt(rbio, __bch2_read_endio, context, wq); } -int __bch2_read_indirect_extent(struct btree_trans *trans, - unsigned *offset_into_extent, - struct bkey_buf *orig_k) -{ - struct btree_iter iter; - struct bkey_s_c k; - u64 reflink_offset; - int ret; - - reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) + - *offset_into_extent; - - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_reflink, - POS(0, reflink_offset), 0); - ret = bkey_err(k); - if (ret) - goto err; - - if (k.k->type != KEY_TYPE_reflink_v && - k.k->type != KEY_TYPE_indirect_inline_data) { - bch_err_inum_offset_ratelimited(trans->c, - orig_k->k->k.p.inode, - orig_k->k->k.p.offset << 9, - "%llu len %u points to nonexistent indirect extent %llu", - orig_k->k->k.p.offset, - orig_k->k->k.size, - reflink_offset); - bch2_inconsistent_error(trans->c); - ret = -BCH_ERR_missing_indirect_extent; - goto err; - } - - *offset_into_extent = iter.pos.offset - bkey_start_offset(k.k); - bch2_bkey_buf_reassemble(orig_k, trans->c, k); -err: - bch2_trans_iter_exit(trans, &iter); - return ret; -} - static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, struct bch_dev *ca, struct bkey_s_c k, @@ -802,16 +841,15 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, PTR_BUCKET_POS(ca, &ptr), BTREE_ITER_cached); - u8 *gen = bucket_gen(ca, iter.pos.offset); - if (gen) { - + int gen = bucket_gen_get(ca, iter.pos.offset); + if (gen >= 0) { prt_printf(&buf, "Attempting to read from stale dirty pointer:\n"); printbuf_indent_add(&buf, 2); bch2_bkey_val_to_text(&buf, c, k); prt_newline(&buf); - prt_printf(&buf, "memory gen: %u", *gen); + prt_printf(&buf, "memory gen: %u", gen); ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter))); if (!ret) { @@ -868,15 +906,24 @@ retry_pick: if (!pick_ret) goto hole; - if (pick_ret < 0) { + if (unlikely(pick_ret < 0)) { + struct printbuf buf = PRINTBUF; + bch2_read_err_msg_trans(trans, &buf, 
orig, read_pos); + prt_printf(&buf, "no device to read from: %s\n ", bch2_err_str(pick_ret)); + bch2_bkey_val_to_text(&buf, c, k); + + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + goto err; + } + + if (unlikely(bch2_csum_type_is_encryption(pick.crc.csum_type)) && !c->chacha20) { struct printbuf buf = PRINTBUF; + bch2_read_err_msg_trans(trans, &buf, orig, read_pos); + prt_printf(&buf, "attempting to read encrypted data without encryption key\n "); bch2_bkey_val_to_text(&buf, c, k); - bch_err_inum_offset_ratelimited(c, - read_pos.inode, read_pos.offset << 9, - "no device to read from: %s\n %s", - bch2_err_str(pick_ret), - buf.buf); + bch_err_ratelimited(c, "%s", buf.buf); printbuf_exit(&buf); goto err; } @@ -1062,11 +1109,15 @@ get_bio: } if (!rbio->pick.idx) { - if (!rbio->have_ioref) { - bch_err_inum_offset_ratelimited(c, - read_pos.inode, - read_pos.offset << 9, - "no device to read from"); + if (unlikely(!rbio->have_ioref)) { + struct printbuf buf = PRINTBUF; + bch2_read_err_msg_trans(trans, &buf, rbio, read_pos); + prt_printf(&buf, "no device to read from:\n "); + bch2_bkey_val_to_text(&buf, c, k); + + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR); goto out; } @@ -1164,7 +1215,6 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, BTREE_ITER_slots); while (1) { - unsigned bytes, sectors, offset_into_extent; enum btree_id data_btree = BTREE_ID_extents; bch2_trans_begin(trans); @@ -1184,9 +1234,9 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, if (ret) goto err; - offset_into_extent = iter.pos.offset - + s64 offset_into_extent = iter.pos.offset - bkey_start_offset(k.k); - sectors = k.k->size - offset_into_extent; + unsigned sectors = k.k->size - offset_into_extent; bch2_bkey_buf_reassemble(&sk, c, k); @@ -1201,9 +1251,9 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, * With indirect extents, the amount of data to read is the min * of the original extent and the indirect extent: */ - sectors = min(sectors, k.k->size - offset_into_extent); + sectors = min_t(unsigned, sectors, k.k->size - offset_into_extent); - bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9; + unsigned bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9; swap(bvec_iter.bi_size, bytes); if (bvec_iter.bi_size == bytes) @@ -1229,16 +1279,20 @@ err: } bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); - bch2_bkey_buf_exit(&sk, c); if (ret) { - bch_err_inum_offset_ratelimited(c, inum.inum, - bvec_iter.bi_sector << 9, - "read error %i from btree lookup", ret); + struct printbuf buf = PRINTBUF; + bch2_inum_offset_err_msg_trans(trans, &buf, inum, bvec_iter.bi_sector << 9); + prt_printf(&buf, "read error %i from btree lookup", ret); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + rbio->bio.bi_status = BLK_STS_IOERR; bch2_rbio_done(rbio); } + + bch2_trans_put(trans); + bch2_bkey_buf_exit(&sk, c); } void bch2_fs_io_read_exit(struct bch_fs *c) diff --git a/libbcachefs/io_read.h b/libbcachefs/io_read.h index d9c18bb7..a82e8a94 100644 --- a/libbcachefs/io_read.h +++ b/libbcachefs/io_read.h @@ -3,6 +3,7 @@ #define _BCACHEFS_IO_READ_H #include "bkey_buf.h" +#include "reflink.h" struct bch_read_bio { struct bch_fs *c; @@ -79,19 +80,32 @@ struct bch_devs_mask; struct cache_promote_op; struct extent_ptr_decoded; -int __bch2_read_indirect_extent(struct btree_trans *, unsigned *, - struct bkey_buf *); - static inline int bch2_read_indirect_extent(struct btree_trans 
*trans, enum btree_id *data_btree, - unsigned *offset_into_extent, - struct bkey_buf *k) + s64 *offset_into_extent, + struct bkey_buf *extent) { - if (k->k->k.type != KEY_TYPE_reflink_p) + if (extent->k->k.type != KEY_TYPE_reflink_p) return 0; *data_btree = BTREE_ID_reflink; - return __bch2_read_indirect_extent(trans, offset_into_extent, k); + struct btree_iter iter; + struct bkey_s_c k = bch2_lookup_indirect_extent(trans, &iter, + offset_into_extent, + bkey_i_to_s_c_reflink_p(extent->k), + true, 0); + int ret = bkey_err(k); + if (ret) + return ret; + + if (bkey_deleted(k.k)) { + bch2_trans_iter_exit(trans, &iter); + return -BCH_ERR_missing_indirect_extent; + } + + bch2_bkey_buf_reassemble(extent, trans->c, k); + bch2_trans_iter_exit(trans, &iter); + return 0; } enum bch_read_flags { diff --git a/libbcachefs/io_write.c b/libbcachefs/io_write.c index 8609e25e..20da357e 100644 --- a/libbcachefs/io_write.c +++ b/libbcachefs/io_write.c @@ -164,7 +164,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans, bch2_trans_copy_iter(&iter, extent_iter); - for_each_btree_key_upto_continue_norestart(iter, + for_each_btree_key_max_continue_norestart(iter, new->k.p, BTREE_ITER_slots, old, ret) { s64 sectors = min(new->k.p.offset, old.k->p.offset) - max(bkey_start_offset(&new->k), @@ -216,6 +216,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, SPOS(0, extent_iter->pos.inode, extent_iter->snapshot), + BTREE_ITER_intent| BTREE_ITER_cached); int ret = bkey_err(k); if (unlikely(ret)) @@ -369,7 +370,7 @@ static int bch2_write_index_default(struct bch_write_op *op) bkey_start_pos(&sk.k->k), BTREE_ITER_slots|BTREE_ITER_intent); - ret = bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?: + ret = bch2_bkey_set_needs_rebalance(c, &op->opts, sk.k) ?: bch2_extent_update(trans, inum, &iter, sk.k, &op->res, op->new_i_size, &op->i_sectors_delta, @@ -395,6 +396,21 @@ static int bch2_write_index_default(struct bch_write_op *op) /* Writes */ +static void __bch2_write_op_error(struct printbuf *out, struct bch_write_op *op, + u64 offset) +{ + bch2_inum_offset_err_msg(op->c, out, + (subvol_inum) { op->subvol, op->pos.inode, }, + offset << 9); + prt_printf(out, "write error%s: ", + op->flags & BCH_WRITE_MOVE ? "(internal move)" : ""); +} + +static void bch2_write_op_error(struct printbuf *out, struct bch_write_op *op) +{ + __bch2_write_op_error(out, op, op->pos.offset); +} + void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, enum bch_data_type type, const struct bkey_i *k, @@ -531,14 +547,14 @@ static void __bch2_write_index(struct bch_write_op *op) op->written += sectors_start - keylist_sectors(keys); - if (ret && !bch2_err_matches(ret, EROFS)) { + if (unlikely(ret && !bch2_err_matches(ret, EROFS))) { struct bkey_i *insert = bch2_keylist_front(&op->insert_keys); - bch_err_inum_offset_ratelimited(c, - insert->k.p.inode, insert->k.p.offset << 9, - "%s write error while doing btree update: %s", - op->flags & BCH_WRITE_MOVE ? 
"move" : "user", - bch2_err_str(ret)); + struct printbuf buf = PRINTBUF; + __bch2_write_op_error(&buf, op, bkey_start_offset(&insert->k)); + prt_printf(&buf, "btree update error: %s", bch2_err_str(ret)); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); } if (ret) @@ -621,9 +637,7 @@ void bch2_write_point_do_index_updates(struct work_struct *work) while (1) { spin_lock_irq(&wp->writes_lock); - op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list); - if (op) - list_del(&op->wp_list); + op = list_pop_entry(&wp->writes, struct bch_write_op, wp_list); wp_update_state(wp, op != NULL); spin_unlock_irq(&wp->writes_lock); @@ -1080,11 +1094,14 @@ do_write: *_dst = dst; return more; csum_err: - bch_err_inum_offset_ratelimited(c, - op->pos.inode, - op->pos.offset << 9, - "%s write error: error verifying existing checksum while rewriting existing data (memory corruption?)", - op->flags & BCH_WRITE_MOVE ? "move" : "user"); + { + struct printbuf buf = PRINTBUF; + bch2_write_op_error(&buf, op); + prt_printf(&buf, "error verifying existing checksum while rewriting existing data (memory corruption?)"); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + } + ret = -EIO; err: if (to_wbio(dst)->bounce) @@ -1165,7 +1182,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op) struct btree_trans *trans = bch2_trans_get(c); for_each_keylist_key(&op->insert_keys, orig) { - int ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents, + int ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_extents, bkey_start_pos(&orig->k), orig->k.p, BTREE_ITER_intent, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ @@ -1175,11 +1192,11 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op) if (ret && !bch2_err_matches(ret, EROFS)) { struct bkey_i *insert = bch2_keylist_front(&op->insert_keys); - bch_err_inum_offset_ratelimited(c, - insert->k.p.inode, insert->k.p.offset << 9, - "%s write error while doing btree update: %s", - op->flags & BCH_WRITE_MOVE ? "move" : "user", - bch2_err_str(ret)); + struct printbuf buf = PRINTBUF; + __bch2_write_op_error(&buf, op, bkey_start_offset(&insert->k)); + prt_printf(&buf, "btree update error: %s", bch2_err_str(ret)); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); } if (ret) { @@ -1300,11 +1317,8 @@ retry: bucket_to_u64(i->b), BUCKET_NOCOW_LOCK_UPDATE); - rcu_read_lock(); - u8 *gen = bucket_gen(ca, i->b.offset); - stale = !gen ? -1 : gen_after(*gen, i->gen); - rcu_read_unlock(); - + int gen = bucket_gen_get(ca, i->b.offset); + stale = gen < 0 ? gen : gen_after(gen, i->gen); if (unlikely(stale)) { stale_at = i; goto err_bucket_stale; @@ -1343,9 +1357,11 @@ err: goto retry; if (ret) { - bch_err_inum_offset_ratelimited(c, - op->pos.inode, op->pos.offset << 9, - "%s: btree lookup error %s", __func__, bch2_err_str(ret)); + struct printbuf buf = PRINTBUF; + bch2_write_op_error(&buf, op); + prt_printf(&buf, "%s(): btree lookup error: %s", __func__, bch2_err_str(ret)); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); op->error = ret; op->flags |= BCH_WRITE_SUBMITTED; } @@ -1465,14 +1481,14 @@ err: if (ret <= 0) { op->flags |= BCH_WRITE_SUBMITTED; - if (ret < 0) { - if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT)) - bch_err_inum_offset_ratelimited(c, - op->pos.inode, - op->pos.offset << 9, - "%s(): %s error: %s", __func__, - op->flags & BCH_WRITE_MOVE ? 
"move" : "user", - bch2_err_str(ret)); + if (unlikely(ret < 0)) { + if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT)) { + struct printbuf buf = PRINTBUF; + bch2_write_op_error(&buf, op); + prt_printf(&buf, "%s(): %s", __func__, bch2_err_str(ret)); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + } op->error = ret; break; } @@ -1598,12 +1614,11 @@ CLOSURE_CALLBACK(bch2_write) bch2_keylist_init(&op->insert_keys, op->inline_keys); wbio_init(bio)->put_bio = false; - if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) { - bch_err_inum_offset_ratelimited(c, - op->pos.inode, - op->pos.offset << 9, - "%s write error: misaligned write", - op->flags & BCH_WRITE_MOVE ? "move" : "user"); + if (unlikely(bio->bi_iter.bi_size & (c->opts.block_size - 1))) { + struct printbuf buf = PRINTBUF; + bch2_write_op_error(&buf, op); + prt_printf(&buf, "misaligned write"); + printbuf_exit(&buf); op->error = -EIO; goto err; } diff --git a/libbcachefs/journal.c b/libbcachefs/journal.c index 2cf8f24d..dc665219 100644 --- a/libbcachefs/journal.c +++ b/libbcachefs/journal.c @@ -217,6 +217,12 @@ void bch2_journal_buf_put_final(struct journal *j, u64 seq) if (__bch2_journal_pin_put(j, seq)) bch2_journal_reclaim_fast(j); bch2_journal_do_writes(j); + + /* + * for __bch2_next_write_buffer_flush_journal_buf(), when quiescing an + * open journal entry + */ + wake_up(&j->wait); } /* @@ -251,6 +257,9 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val, bool t if (!__journal_entry_is_open(old)) return; + if (old.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) + old.cur_entry_offset = j->cur_entry_offset_if_blocked; + /* Close out old buffer: */ buf->data->u64s = cpu_to_le32(old.cur_entry_offset); @@ -373,6 +382,10 @@ static int journal_entry_open(struct journal *j) if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf)) return JOURNAL_ERR_max_in_flight; + if (bch2_fs_fatal_err_on(journal_cur_seq(j) >= JOURNAL_SEQ_MAX, + c, "cannot start: journal seq overflow")) + return JOURNAL_ERR_insufficient_devices; /* -EROFS */ + BUG_ON(!j->cur_entry_sectors); buf->expires = @@ -664,7 +677,7 @@ out: * @seq: seq to flush * @parent: closure object to wait with * Returns: 1 if @seq has already been flushed, 0 if @seq is being flushed, - * -EIO if @seq will never be flushed + * -BCH_ERR_journal_flush_err if @seq will never be flushed * * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if * necessary @@ -687,7 +700,7 @@ int bch2_journal_flush_seq_async(struct journal *j, u64 seq, /* Recheck under lock: */ if (j->err_seq && seq >= j->err_seq) { - ret = -EIO; + ret = -BCH_ERR_journal_flush_err; goto out; } @@ -868,22 +881,53 @@ int bch2_journal_meta(struct journal *j) void bch2_journal_unblock(struct journal *j) { spin_lock(&j->lock); - j->blocked--; + if (!--j->blocked && + j->cur_entry_offset_if_blocked < JOURNAL_ENTRY_CLOSED_VAL && + j->reservations.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) { + union journal_res_state old, new; + + old.v = atomic64_read(&j->reservations.counter); + do { + new.v = old.v; + new.cur_entry_offset = j->cur_entry_offset_if_blocked; + } while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v)); + } spin_unlock(&j->lock); journal_wake(j); } +static void __bch2_journal_block(struct journal *j) +{ + if (!j->blocked++) { + union journal_res_state old, new; + + old.v = atomic64_read(&j->reservations.counter); + do { + j->cur_entry_offset_if_blocked = old.cur_entry_offset; + + if (j->cur_entry_offset_if_blocked >= JOURNAL_ENTRY_CLOSED_VAL) + 
break; + + new.v = old.v; + new.cur_entry_offset = JOURNAL_ENTRY_BLOCKED_VAL; + } while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v)); + + journal_cur_buf(j)->data->u64s = cpu_to_le32(old.cur_entry_offset); + } +} + void bch2_journal_block(struct journal *j) { spin_lock(&j->lock); - j->blocked++; + __bch2_journal_block(j); spin_unlock(&j->lock); journal_quiesce(j); } -static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq) +static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, + u64 max_seq, bool *blocked) { struct journal_buf *ret = NULL; @@ -900,13 +944,17 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou struct journal_buf *buf = j->buf + idx; if (buf->need_flush_to_write_buffer) { - if (seq == journal_cur_seq(j)) - __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true); - union journal_res_state s; s.v = atomic64_read_acquire(&j->reservations.counter); - ret = journal_state_count(s, idx) + unsigned open = seq == journal_cur_seq(j) && __journal_entry_is_open(s); + + if (open && !*blocked) { + __bch2_journal_block(j); + *blocked = true; + } + + ret = journal_state_count(s, idx) > open ? ERR_PTR(-EAGAIN) : buf; break; @@ -919,11 +967,17 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou return ret; } -struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq) +struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, + u64 max_seq, bool *blocked) { struct journal_buf *ret; + *blocked = false; + + wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, + max_seq, blocked)) != ERR_PTR(-EAGAIN)); + if (IS_ERR_OR_NULL(ret) && *blocked) + bch2_journal_unblock(j); - wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN)); return ret; } @@ -952,19 +1006,17 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, } for (nr_got = 0; nr_got < nr_want; nr_got++) { - if (new_fs) { - bu[nr_got] = bch2_bucket_alloc_new_fs(ca); - if (bu[nr_got] < 0) { - ret = -BCH_ERR_ENOSPC_bucket_alloc; - break; - } - } else { - ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, - BCH_DATA_journal, cl); - ret = PTR_ERR_OR_ZERO(ob[nr_got]); - if (ret) - break; + enum bch_watermark watermark = new_fs + ? 
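
[Note] The journal blocking rework above replaces "bump j->blocked and hope reservations notice": __bch2_journal_block() parks the reservation state's cur_entry_offset at the new JOURNAL_ENTRY_BLOCKED_VAL sentinel, stashing the live offset in j->cur_entry_offset_if_blocked, so reservation fast paths fail immediately without taking j->lock; bch2_journal_unblock() restores it. Because reservations update the counter concurrently without the lock, the park has to be a try_cmpxchg loop, condensed here from the hunk above:

	union journal_res_state old, new;

	old.v = atomic64_read(&j->reservations.counter);
	do {
		j->cur_entry_offset_if_blocked = old.cur_entry_offset;
		if (old.cur_entry_offset >= JOURNAL_ENTRY_CLOSED_VAL)
			break;			/* already closed or errored: nothing to park */
		new.v = old.v;
		new.cur_entry_offset = JOURNAL_ENTRY_BLOCKED_VAL;
	} while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v));
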
BCH_WATERMARK_btree + : BCH_WATERMARK_normal; + + ob[nr_got] = bch2_bucket_alloc(c, ca, watermark, + BCH_DATA_journal, cl); + ret = PTR_ERR_OR_ZERO(ob[nr_got]); + if (ret) + break; + if (!new_fs) { ret = bch2_trans_run(c, bch2_trans_mark_metadata_bucket(trans, ca, ob[nr_got]->bucket, BCH_DATA_journal, @@ -974,9 +1026,9 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, bch_err_msg(c, ret, "marking new journal buckets"); break; } - - bu[nr_got] = ob[nr_got]->bucket; } + + bu[nr_got] = ob[nr_got]->bucket; } if (!nr_got) @@ -1016,8 +1068,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, if (ret) goto err_unblock; - if (!new_fs) - bch2_write_super(c); + bch2_write_super(c); /* Commit: */ if (c) @@ -1051,9 +1102,8 @@ err_unblock: bu[i], BCH_DATA_free, 0, BTREE_TRIGGER_transactional)); err_free: - if (!new_fs) - for (i = 0; i < nr_got; i++) - bch2_open_bucket_put(c, ob[i]); + for (i = 0; i < nr_got; i++) + bch2_open_bucket_put(c, ob[i]); kfree(new_bucket_seq); kfree(new_buckets); @@ -1224,6 +1274,11 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq) bool had_entries = false; u64 last_seq = cur_seq, nr, seq; + if (cur_seq >= JOURNAL_SEQ_MAX) { + bch_err(c, "cannot start: journal seq overflow"); + return -EINVAL; + } + genradix_for_each_reverse(&c->journal_entries, iter, _i) { i = *_i; @@ -1481,6 +1536,9 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) case JOURNAL_ENTRY_CLOSED_VAL: prt_printf(out, "closed\n"); break; + case JOURNAL_ENTRY_BLOCKED_VAL: + prt_printf(out, "blocked\n"); + break; default: prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s); break; diff --git a/libbcachefs/journal.h b/libbcachefs/journal.h index 2762be6f..a6a2e888 100644 --- a/libbcachefs/journal.h +++ b/libbcachefs/journal.h @@ -285,7 +285,8 @@ static inline void bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq spin_lock(&j->lock); bch2_journal_buf_put_final(j, seq); spin_unlock(&j->lock); - } + } else if (unlikely(s.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL)) + wake_up(&j->wait); } /* @@ -411,7 +412,7 @@ void bch2_journal_halt(struct journal *); static inline int bch2_journal_error(struct journal *j) { return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL - ? -EIO : 0; + ? 
-BCH_ERR_journal_shutdown : 0; } struct bch_dev; @@ -424,7 +425,7 @@ static inline void bch2_journal_set_replay_done(struct journal *j) void bch2_journal_unblock(struct journal *); void bch2_journal_block(struct journal *); -struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq); +struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *, u64, bool *); void __bch2_journal_debug_to_text(struct printbuf *, struct journal *); void bch2_journal_debug_to_text(struct printbuf *, struct journal *); diff --git a/libbcachefs/journal_io.c b/libbcachefs/journal_io.c index 998ffa51..1627f3e1 100644 --- a/libbcachefs/journal_io.c +++ b/libbcachefs/journal_io.c @@ -17,6 +17,8 @@ #include "sb-clean.h" #include "trace.h" +#include <linux/string_choices.h> + void bch2_journal_pos_from_member_info_set(struct bch_fs *c) { lockdep_assert_held(&c->sb_lock); @@ -325,11 +327,11 @@ static void journal_entry_err_msg(struct printbuf *out, static int journal_validate_key(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, - unsigned level, enum btree_id btree_id, struct bkey_i *k, - unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from, + unsigned version, int big_endian) { + enum bch_validate_flags flags = from.flags; int write = flags & BCH_VALIDATE_write; void *next = vstruct_next(entry); int ret = 0; @@ -364,11 +366,10 @@ static int journal_validate_key(struct bch_fs *c, } if (!write) - bch2_bkey_compat(level, btree_id, version, big_endian, + bch2_bkey_compat(from.level, from.btree, version, big_endian, write, NULL, bkey_to_packed(k)); - ret = bch2_bkey_validate(c, bkey_i_to_s_c(k), - __btree_node_type(level, btree_id), write); + ret = bch2_bkey_validate(c, bkey_i_to_s_c(k), from); if (ret == -BCH_ERR_fsck_delete_bkey) { le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); memmove(k, bkey_next(k), next - (void *) bkey_next(k)); @@ -379,7 +380,7 @@ static int journal_validate_key(struct bch_fs *c, goto fsck_err; if (write) - bch2_bkey_compat(level, btree_id, version, big_endian, + bch2_bkey_compat(from.level, from.btree, version, big_endian, write, NULL, bkey_to_packed(k)); fsck_err: return ret; @@ -392,13 +393,15 @@ static int journal_entry_btree_keys_validate(struct bch_fs *c, enum bch_validate_flags flags) { struct bkey_i *k = entry->start; + struct bkey_validate_context from = { + .from = BKEY_VALIDATE_journal, + .level = entry->level, + .btree = entry->btree_id, + .flags = flags|BCH_VALIDATE_journal, + }; while (k != vstruct_last(entry)) { - int ret = journal_validate_key(c, jset, entry, - entry->level, - entry->btree_id, - k, version, big_endian, - flags|BCH_VALIDATE_journal); + int ret = journal_validate_key(c, jset, entry, k, from, version, big_endian); if (ret == FSCK_DELETED_KEY) continue; else if (ret) @@ -453,8 +456,14 @@ static int journal_entry_btree_root_validate(struct bch_fs *c, return 0; } - ret = journal_validate_key(c, jset, entry, 1, entry->btree_id, k, - version, big_endian, flags); + struct bkey_validate_context from = { + .from = BKEY_VALIDATE_journal, + .level = entry->level + 1, + .btree = entry->btree_id, + .root = true, + .flags = flags, + }; + ret = journal_validate_key(c, jset, entry, k, from, version, big_endian); if (ret == FSCK_DELETED_KEY) ret = 0; fsck_err: @@ -666,7 +675,7 @@ static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c, struct jset_entry_clock *clock = container_of(entry, struct jset_entry_clock, entry); - prt_printf(out, "%s=%llu", 
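
[Note] This commit reserves three sentinel values at the top of the 20-bit cur_entry_offset space (blocked, closed, error, in ascending order; see the journal_types.h hunk further down) and caps sequence numbers at 2^56 - 1, because the btree write buffer packs eight bits of its own state alongside a journal seq in a u64. A small decode helper in the spirit of the __bch2_journal_debug_to_text() switch above, purely for illustration:

	static const char *journal_entry_state_str(unsigned offset)
	{
		switch (offset) {
		case JOURNAL_ENTRY_BLOCKED_VAL:	return "blocked";	/* OFFSET_MAX - 2 */
		case JOURNAL_ENTRY_CLOSED_VAL:	return "closed";	/* OFFSET_MAX - 1 */
		case JOURNAL_ENTRY_ERROR_VAL:	return "error";		/* OFFSET_MAX     */
		default:			return "open";
		}
	}
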
clock->rw ? "write" : "read", le64_to_cpu(clock->time)); + prt_printf(out, "%s=%llu", str_write_read(clock->rw), le64_to_cpu(clock->time)); } static int journal_entry_dev_usage_validate(struct bch_fs *c, @@ -709,6 +718,9 @@ static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs container_of(entry, struct jset_entry_dev_usage, entry); unsigned i, nr_types = jset_entry_dev_usage_nr_types(u); + if (vstruct_bytes(entry) < sizeof(*u)) + return; + prt_printf(out, "dev=%u", le32_to_cpu(u->dev)); printbuf_indent_add(out, 2); @@ -1012,6 +1024,8 @@ reread: nr_bvecs = buf_pages(buf->data, sectors_read << 9); bio = bio_kmalloc(nr_bvecs, GFP_KERNEL); + if (!bio) + return -BCH_ERR_ENOMEM_journal_read_bucket; bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ); bio->bi_iter.bi_sector = offset; diff --git a/libbcachefs/journal_reclaim.c b/libbcachefs/journal_reclaim.c index 3d8fc264..1aabbbe3 100644 --- a/libbcachefs/journal_reclaim.c +++ b/libbcachefs/journal_reclaim.c @@ -38,6 +38,9 @@ unsigned bch2_journal_dev_buckets_available(struct journal *j, struct journal_device *ja, enum journal_space_from from) { + if (!ja->nr) + return 0; + unsigned available = (journal_space_from(ja, from) - ja->cur_idx - 1 + ja->nr) % ja->nr; diff --git a/libbcachefs/journal_types.h b/libbcachefs/journal_types.h index 19183fcf..e9bd716f 100644 --- a/libbcachefs/journal_types.h +++ b/libbcachefs/journal_types.h @@ -9,6 +9,9 @@ #include "super_types.h" #include "fifo.h" +/* btree write buffer steals 8 bits for its own purposes: */ +#define JOURNAL_SEQ_MAX ((1ULL << 56) - 1) + #define JOURNAL_BUF_BITS 2 #define JOURNAL_BUF_NR (1U << JOURNAL_BUF_BITS) #define JOURNAL_BUF_MASK (JOURNAL_BUF_NR - 1) @@ -112,6 +115,7 @@ union journal_res_state { */ #define JOURNAL_ENTRY_OFFSET_MAX ((1U << 20) - 1) +#define JOURNAL_ENTRY_BLOCKED_VAL (JOURNAL_ENTRY_OFFSET_MAX - 2) #define JOURNAL_ENTRY_CLOSED_VAL (JOURNAL_ENTRY_OFFSET_MAX - 1) #define JOURNAL_ENTRY_ERROR_VAL (JOURNAL_ENTRY_OFFSET_MAX) @@ -193,6 +197,7 @@ struct journal { * insufficient devices: */ enum journal_errors cur_entry_error; + unsigned cur_entry_offset_if_blocked; unsigned buf_size_want; /* diff --git a/libbcachefs/lru.c b/libbcachefs/lru.c index 10857ecc..ce794d55 100644 --- a/libbcachefs/lru.c +++ b/libbcachefs/lru.c @@ -12,7 +12,7 @@ /* KEY_TYPE_lru is obsolete: */ int bch2_lru_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -192,7 +192,7 @@ int bch2_check_lrus(struct bch_fs *c) int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_lru, POS_MIN, BTREE_ITER_prefetch, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc|BCH_TRANS_COMMIT_lazy_rw, + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, bch2_check_lru_key(trans, &iter, k, &last_flushed))); bch2_bkey_buf_exit(&last_flushed, c); diff --git a/libbcachefs/lru.h b/libbcachefs/lru.h index e6a7d824..f31a6cf1 100644 --- a/libbcachefs/lru.h +++ b/libbcachefs/lru.h @@ -33,7 +33,7 @@ static inline enum bch_lru_type lru_type(struct bkey_s_c l) return BCH_LRU_read; } -int bch2_lru_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_lru_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); void bch2_lru_pos_to_text(struct printbuf *, struct bpos); diff --git a/libbcachefs/move.c b/libbcachefs/move.c index 8c456d8b..46017546 100644 --- a/libbcachefs/move.c +++ 
b/libbcachefs/move.c @@ -21,6 +21,7 @@ #include "journal_reclaim.h" #include "keylist.h" #include "move.h" +#include "rebalance.h" #include "replicas.h" #include "snapshot.h" #include "super-io.h" @@ -196,6 +197,13 @@ void bch2_moving_ctxt_exit(struct moving_context *ctxt) list_del(&ctxt->list); mutex_unlock(&c->moving_context_lock); + /* + * Generally, releasing a transaction within a transaction restart means + * an unhandled transaction restart: but this can happen legitimately + * within the move code, e.g. when bch2_move_ratelimit() tells us to + * exit before we've retried + */ + bch2_trans_begin(ctxt->trans); bch2_trans_put(ctxt->trans); memset(ctxt, 0, sizeof(*ctxt)); } @@ -266,7 +274,7 @@ int bch2_move_extent(struct moving_context *ctxt, if (!data_opts.rewrite_ptrs && !data_opts.extra_replicas) { if (data_opts.kill_ptrs) - return bch2_extent_drop_ptrs(trans, iter, k, data_opts); + return bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &data_opts); return 0; } @@ -379,14 +387,19 @@ err: return ret; } -struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans, +static struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans, struct per_snapshot_io_opts *io_opts, + struct btree_iter *extent_iter, struct bkey_s_c extent_k) { struct bch_fs *c = trans->c; u32 restart_count = trans->restart_count; + struct bch_io_opts *opts_ret = &io_opts->fs_io_opts; int ret = 0; + if (extent_k.k->type == KEY_TYPE_reflink_v) + goto out; + if (io_opts->cur_inum != extent_k.k->p.inode) { io_opts->d.nr = 0; @@ -415,43 +428,46 @@ struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans, if (extent_k.k->p.snapshot) darray_for_each(io_opts->d, i) - if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot)) - return &i->io_opts; - - return &io_opts->fs_io_opts; + if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot)) { + opts_ret = &i->io_opts; + break; + } +out: + ret = bch2_get_update_rebalance_opts(trans, opts_ret, extent_iter, extent_k); + if (ret) + return ERR_PTR(ret); + return opts_ret; } int bch2_move_get_io_opts_one(struct btree_trans *trans, struct bch_io_opts *io_opts, + struct btree_iter *extent_iter, struct bkey_s_c extent_k) { - struct btree_iter iter; - struct bkey_s_c k; - int ret; + struct bch_fs *c = trans->c; + + *io_opts = bch2_opts_to_inode_opts(c->opts); /* reflink btree? 
*/ - if (!extent_k.k->p.inode) { - *io_opts = bch2_opts_to_inode_opts(trans->c->opts); - return 0; - } + if (!extent_k.k->p.inode) + goto out; - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, + struct btree_iter inode_iter; + struct bkey_s_c inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot), BTREE_ITER_cached); - ret = bkey_err(k); + int ret = bkey_err(inode_k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) return ret; - if (!ret && bkey_is_inode(k.k)) { + if (!ret && bkey_is_inode(inode_k.k)) { struct bch_inode_unpacked inode; - bch2_inode_unpack(k, &inode); - bch2_inode_opts_get(io_opts, trans->c, &inode); - } else { - *io_opts = bch2_opts_to_inode_opts(trans->c->opts); + bch2_inode_unpack(inode_k, &inode); + bch2_inode_opts_get(io_opts, c, &inode); } - - bch2_trans_iter_exit(trans, &iter); - return 0; + bch2_trans_iter_exit(trans, &inode_iter); +out: + return bch2_get_update_rebalance_opts(trans, io_opts, extent_iter, extent_k); } int bch2_move_ratelimit(struct moving_context *ctxt) @@ -552,7 +568,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt, if (!bkey_extent_is_direct_data(k.k)) goto next_nondata; - io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts, k); + io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts, &iter, k); ret = PTR_ERR_OR_ZERO(io_opts); if (ret) continue; @@ -654,16 +670,11 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, struct bch_fs *c = trans->c; bool is_kthread = current->flags & PF_KTHREAD; struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); - struct btree_iter iter; + struct btree_iter iter = {}, bp_iter = {}; struct bkey_buf sk; - struct bch_backpointer bp; - struct bch_alloc_v4 a_convert; - const struct bch_alloc_v4 *a; struct bkey_s_c k; struct data_update_opts data_opts; - unsigned dirty_sectors, bucket_size; - u64 fragmentation; - struct bpos bp_pos = POS_MIN; + unsigned sectors_moved = 0; int ret = 0; struct bch_dev *ca = bch2_dev_tryget(c, bucket.inode); @@ -679,21 +690,13 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, */ bch2_trans_begin(trans); - bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, - bucket, BTREE_ITER_cached); - ret = lockrestart_do(trans, - bkey_err(k = bch2_btree_iter_peek_slot(&iter))); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, + bucket_pos_to_bp_start(ca, bucket), 0); bch_err_msg(c, ret, "looking up alloc key"); if (ret) goto err; - a = bch2_alloc_to_v4(k, &a_convert); - dirty_sectors = bch2_bucket_sectors_dirty(*a); - bucket_size = ca->mi.bucket_size; - fragmentation = alloc_lru_idx_fragmentation(*a, ca); - ret = bch2_btree_write_buffer_tryflush(trans); bch_err_msg(c, ret, "flushing btree write buffer"); if (ret) @@ -705,18 +708,23 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, bch2_trans_begin(trans); - ret = bch2_get_next_backpointer(trans, ca, bucket, gen, - &bp_pos, &bp, - BTREE_ITER_cached); + k = bch2_btree_iter_peek(&bp_iter); + ret = bkey_err(k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; if (ret) goto err; - if (bkey_eq(bp_pos, POS_MAX)) + + if (!k.k || bkey_gt(k.k->p, bucket_pos_to_bp_end(ca, bucket))) break; - if (!bp.level) { - k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0); + if (k.k->type != KEY_TYPE_backpointer) + goto next; + + struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k); + + if (!bp.v->level) { + k = bch2_backpointer_get_key(trans, bp, &iter, 0); ret = 
bkey_err(k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; @@ -728,7 +736,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, bch2_bkey_buf_reassemble(&sk, c, k); k = bkey_i_to_s_c(sk.k); - ret = bch2_move_get_io_opts_one(trans, &io_opts, k); + ret = bch2_move_get_io_opts_one(trans, &io_opts, &iter, k); if (ret) { bch2_trans_iter_exit(trans, &iter); continue; @@ -738,14 +746,18 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, data_opts.target = io_opts.background_target; data_opts.rewrite_ptrs = 0; + unsigned sectors = bp.v->bucket_len; /* move_extent will drop locks */ unsigned i = 0; - bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) { - if (ptr->dev == bucket.inode) { - data_opts.rewrite_ptrs |= 1U << i; - if (ptr->cached) { + const union bch_extent_entry *entry; + struct extent_ptr_decoded p; + bkey_for_each_ptr_decode(k.k, bch2_bkey_ptrs_c(k), p, entry) { + if (p.ptr.dev == bucket.inode) { + if (p.ptr.cached) { bch2_trans_iter_exit(trans, &iter); goto next; } + data_opts.rewrite_ptrs |= 1U << i; + break; } i++; } @@ -765,11 +777,12 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, goto err; if (ctxt->stats) - atomic64_add(k.k->size, &ctxt->stats->sectors_seen); + atomic64_add(sectors, &ctxt->stats->sectors_seen); + sectors_moved += sectors; } else { struct btree *b; - b = bch2_backpointer_get_node(trans, &iter, bp_pos, bp); + b = bch2_backpointer_get_node(trans, bp, &iter); ret = PTR_ERR_OR_ZERO(b); if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node) continue; @@ -796,13 +809,15 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, atomic64_add(sectors, &ctxt->stats->sectors_seen); atomic64_add(sectors, &ctxt->stats->sectors_moved); } + sectors_moved += btree_sectors(c); } next: - bp_pos = bpos_nosnap_successor(bp_pos); + bch2_btree_iter_advance(&bp_iter); } - trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, fragmentation, ret); + trace_evacuate_bucket(c, &bucket, sectors_moved, ca->mi.bucket_size, ret); err: + bch2_trans_iter_exit(trans, &bp_iter); bch2_dev_put(ca); bch2_bkey_buf_exit(&sk, c); return ret; diff --git a/libbcachefs/move.h b/libbcachefs/move.h index 9baf3093..51e0505a 100644 --- a/libbcachefs/move.h +++ b/libbcachefs/move.h @@ -110,9 +110,8 @@ static inline void per_snapshot_io_opts_exit(struct per_snapshot_io_opts *io_opt darray_exit(&io_opts->d); } -struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *, - struct per_snapshot_io_opts *, struct bkey_s_c); -int bch2_move_get_io_opts_one(struct btree_trans *, struct bch_io_opts *, struct bkey_s_c); +int bch2_move_get_io_opts_one(struct btree_trans *, struct bch_io_opts *, + struct btree_iter *, struct bkey_s_c); int bch2_scan_old_btree_nodes(struct bch_fs *, struct bch_move_stats *); diff --git a/libbcachefs/movinggc.c b/libbcachefs/movinggc.c index d658be90..85c361e7 100644 --- a/libbcachefs/movinggc.c +++ b/libbcachefs/movinggc.c @@ -167,7 +167,7 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt, bch2_trans_begin(trans); - ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru, + ret = for_each_btree_key_max(trans, iter, BTREE_ID_lru, lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0), lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX), 0, k, ({ @@ -350,9 +350,9 @@ static int bch2_copygc_thread(void *arg) bch2_trans_unlock_long(ctxt.trans); cond_resched(); - if (!c->copy_gc_enabled) { + if (!c->opts.copygc_enabled) { move_buckets_wait(&ctxt, buckets, true); - kthread_wait_freezable(c->copy_gc_enabled || + 
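/* [editor's note: data_opts.rewrite_ptrs in the evacuate loop above is a
 * bitmap with one bit per pointer, in bkey order -- bit i set means
 * "rewrite pointer i". A hypothetical extent with pointers on devices
 * {0, 2, 3}, evacuated from device 2, ends up with rewrite_ptrs == 0b010;
 * if the matching pointer is cached, the whole extent is skipped (goto
 * next) instead of rewritten.]
 */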
kthread_wait_freezable(c->opts.copygc_enabled || kthread_should_stop()); } diff --git a/libbcachefs/opts.c b/libbcachefs/opts.c index 6673cbd8..6772faf3 100644 --- a/libbcachefs/opts.c +++ b/libbcachefs/opts.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> +#include <linux/fs_parser.h> #include "bcachefs.h" #include "compress.h" @@ -48,12 +49,12 @@ static const char * const __bch2_csum_types[] = { NULL }; -const char * const bch2_csum_opts[] = { +const char * const __bch2_csum_opts[] = { BCH_CSUM_OPTS() NULL }; -static const char * const __bch2_compression_types[] = { +const char * const __bch2_compression_types[] = { BCH_COMPRESSION_TYPES() NULL }; @@ -113,6 +114,7 @@ void bch2_prt_##name(struct printbuf *out, type t) \ PRT_STR_OPT_BOUNDSCHECKED(jset_entry_type, enum bch_jset_entry_type); PRT_STR_OPT_BOUNDSCHECKED(fs_usage_type, enum bch_fs_usage_type); PRT_STR_OPT_BOUNDSCHECKED(data_type, enum bch_data_type); +PRT_STR_OPT_BOUNDSCHECKED(csum_opt, enum bch_csum_opt); PRT_STR_OPT_BOUNDSCHECKED(csum_type, enum bch_csum_type); PRT_STR_OPT_BOUNDSCHECKED(compression_type, enum bch_compression_type); PRT_STR_OPT_BOUNDSCHECKED(str_hash_type, enum bch_str_hash_type); @@ -226,7 +228,7 @@ const struct bch_option bch2_opt_table[] = { #define OPT_UINT(_min, _max) .type = BCH_OPT_UINT, \ .min = _min, .max = _max #define OPT_STR(_choices) .type = BCH_OPT_STR, \ - .min = 0, .max = ARRAY_SIZE(_choices), \ + .min = 0, .max = ARRAY_SIZE(_choices) - 1, \ .choices = _choices #define OPT_STR_NOLIMIT(_choices) .type = BCH_OPT_STR, \ .min = 0, .max = U64_MAX, \ @@ -333,17 +335,18 @@ int bch2_opt_parse(struct bch_fs *c, switch (opt->type) { case BCH_OPT_BOOL: if (val) { - ret = kstrtou64(val, 10, res); + ret = lookup_constant(bool_names, val, -BCH_ERR_option_not_bool); + if (ret != -BCH_ERR_option_not_bool) { + *res = ret; + } else { + if (err) + prt_printf(err, "%s: must be bool", opt->attr.name); + return ret; + } } else { - ret = 0; *res = 1; } - if (ret < 0 || (*res != 0 && *res != 1)) { - if (err) - prt_printf(err, "%s: must be bool", opt->attr.name); - return ret < 0 ? 
ret : -BCH_ERR_option_not_bool; - } break; case BCH_OPT_UINT: if (!val) { @@ -428,7 +431,7 @@ void bch2_opt_to_text(struct printbuf *out, prt_printf(out, "%lli", v); break; case BCH_OPT_STR: - if (v < opt->min || v >= opt->max - 1) + if (v < opt->min || v >= opt->max) prt_printf(out, "(invalid option %lli)", v); else if (flags & OPT_SHOW_FULL_LIST) prt_string_option(out, opt->choices, v); @@ -710,11 +713,14 @@ void bch2_opt_set_sb(struct bch_fs *c, struct bch_dev *ca, struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts src) { - return (struct bch_io_opts) { + struct bch_io_opts opts = { #define x(_name, _bits) ._name = src._name, BCH_INODE_OPTS() #undef x }; + + bch2_io_opts_fixups(&opts); + return opts; } bool bch2_opt_is_inode_opt(enum bch_opt_id id) diff --git a/libbcachefs/opts.h b/libbcachefs/opts.h index 23dda014..ea69099e 100644 --- a/libbcachefs/opts.h +++ b/libbcachefs/opts.h @@ -16,7 +16,8 @@ extern const char * const bch2_version_upgrade_opts[]; extern const char * const bch2_sb_features[]; extern const char * const bch2_sb_compat[]; extern const char * const __bch2_btree_ids[]; -extern const char * const bch2_csum_opts[]; +extern const char * const __bch2_csum_opts[]; +extern const char * const __bch2_compression_types[]; extern const char * const bch2_compression_opts[]; extern const char * const __bch2_str_hash_types[]; extern const char * const bch2_str_hash_opts[]; @@ -27,6 +28,7 @@ extern const char * const bch2_d_types[]; void bch2_prt_jset_entry_type(struct printbuf *, enum bch_jset_entry_type); void bch2_prt_fs_usage_type(struct printbuf *, enum bch_fs_usage_type); void bch2_prt_data_type(struct printbuf *, enum bch_data_type); +void bch2_prt_csum_opt(struct printbuf *, enum bch_csum_opt); void bch2_prt_csum_type(struct printbuf *, enum bch_csum_type); void bch2_prt_compression_type(struct printbuf *, enum bch_compression_type); void bch2_prt_str_hash_type(struct printbuf *, enum bch_str_hash_type); @@ -171,12 +173,12 @@ enum fsck_err_opts { "size", "Maximum size of checksummed/compressed extents")\ x(metadata_checksum, u8, \ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ - OPT_STR(bch2_csum_opts), \ + OPT_STR(__bch2_csum_opts), \ BCH_SB_META_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \ NULL, NULL) \ x(data_checksum, u8, \ OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ - OPT_STR(bch2_csum_opts), \ + OPT_STR(__bch2_csum_opts), \ BCH_SB_DATA_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \ NULL, NULL) \ x(compression, u8, \ @@ -473,6 +475,18 @@ enum fsck_err_opts { BCH2_NO_SB_OPT, true, \ NULL, "Enable nocow mode: enables runtime locking in\n"\ "data move path needed if nocow will ever be in use\n")\ + x(copygc_enabled, u8, \ + OPT_FS|OPT_MOUNT, \ + OPT_BOOL(), \ + BCH2_NO_SB_OPT, true, \ + NULL, "Enable copygc: disable for debugging, or to\n"\ + "quiet the system when doing performance testing\n")\ + x(rebalance_enabled, u8, \ + OPT_FS|OPT_MOUNT, \ + OPT_BOOL(), \ + BCH2_NO_SB_OPT, true, \ + NULL, "Enable rebalance: disable for debugging, or to\n"\ + "quiet the system when doing performance testing\n")\ x(no_data_io, u8, \ OPT_MOUNT, \ OPT_BOOL(), \ @@ -488,7 +502,7 @@ enum fsck_err_opts { OPT_DEVICE, \ OPT_UINT(0, S64_MAX), \ BCH2_NO_SB_OPT, 0, \ - "size", "Size of filesystem on device") \ + "size", "Specifies the bucket size; must be greater than the btree node size")\ x(durability, u8, \ OPT_DEVICE|OPT_SB_FIELD_ONE_BIAS, \ OPT_UINT(0, BCH_REPLICAS_MAX), \ @@ -624,14 +638,39 @@ struct bch_io_opts { #define x(_name, _bits) u##_bits _name; BCH_INODE_OPTS() #undef x +#define x(_name, 
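/* [editor's note on the OPT_STR change above: the choices arrays are
 * NULL-terminated, e.g. (illustrative)
 *
 *	const char * const choices[] = { "none", "crc32c", "crc64", "xxhash", NULL };
 *
 * so ARRAY_SIZE() also counts the sentinel. With .max now
 * ARRAY_SIZE(_choices) - 1, valid indices are 0..max-1, and the
 * bch2_opt_to_text() bounds check simplifies to v >= opt->max.]
 */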
_bits) u64 _name##_from_inode:1; + BCH_INODE_OPTS() +#undef x }; -static inline unsigned background_compression(struct bch_io_opts opts) +static inline void bch2_io_opts_fixups(struct bch_io_opts *opts) { - return opts.background_compression ?: opts.compression; + if (!opts->background_target) + opts->background_target = opts->foreground_target; + if (!opts->background_compression) + opts->background_compression = opts->compression; + if (opts->nocow) { + opts->compression = opts->background_compression = 0; + opts->data_checksum = 0; + opts->erasure_code = 0; + } } struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts); bool bch2_opt_is_inode_opt(enum bch_opt_id); +/* rebalance opts: */ + +static inline struct bch_extent_rebalance io_opts_to_rebalance_opts(struct bch_io_opts *opts) +{ + return (struct bch_extent_rebalance) { + .type = BIT(BCH_EXTENT_ENTRY_rebalance), +#define x(_name) \ + ._name = opts->_name, \ + ._name##_from_inode = opts->_name##_from_inode, + BCH_REBALANCE_OPTS() +#undef x + }; +}; + #endif /* _BCACHEFS_OPTS_H */ diff --git a/libbcachefs/quota.c b/libbcachefs/quota.c index 74f45a81..8b857fc3 100644 --- a/libbcachefs/quota.c +++ b/libbcachefs/quota.c @@ -60,7 +60,7 @@ const struct bch_sb_field_ops bch_sb_field_ops_quota = { }; int bch2_quota_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; diff --git a/libbcachefs/quota.h b/libbcachefs/quota.h index a62abcc5..1551800f 100644 --- a/libbcachefs/quota.h +++ b/libbcachefs/quota.h @@ -5,10 +5,10 @@ #include "inode.h" #include "quota_types.h" -enum bch_validate_flags; extern const struct bch_sb_field_ops bch_sb_field_ops_quota; -int bch2_quota_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_quota_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_quota ((struct bkey_ops) { \ diff --git a/libbcachefs/rebalance.c b/libbcachefs/rebalance.c index cd664737..4adc74cd 100644 --- a/libbcachefs/rebalance.c +++ b/libbcachefs/rebalance.c @@ -24,6 +24,192 @@ #include <linux/kthread.h> #include <linux/sched/cputime.h> +/* bch_extent_rebalance: */ + +static const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k) +{ + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + const union bch_extent_entry *entry; + + bkey_extent_entry_for_each(ptrs, entry) + if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance) + return &entry->rebalance; + + return NULL; +} + +static inline unsigned bch2_bkey_ptrs_need_compress(struct bch_fs *c, + struct bch_io_opts *opts, + struct bkey_s_c k, + struct bkey_ptrs_c ptrs) +{ + if (!opts->background_compression) + return 0; + + unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression); + const union bch_extent_entry *entry; + struct extent_ptr_decoded p; + unsigned ptr_bit = 1; + unsigned rewrite_ptrs = 0; + + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { + if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible || + p.ptr.unwritten) + return 0; + + if (!p.ptr.cached && p.crc.compression_type != compression_type) + rewrite_ptrs |= ptr_bit; + ptr_bit <<= 1; + } + + return rewrite_ptrs; +} + +static inline unsigned bch2_bkey_ptrs_need_move(struct bch_fs *c, + struct bch_io_opts *opts, + struct bkey_ptrs_c ptrs) +{ + if (!opts->background_target || + !bch2_target_accepts_data(c, BCH_DATA_user, 
opts->background_target)) + return 0; + + unsigned ptr_bit = 1; + unsigned rewrite_ptrs = 0; + + bkey_for_each_ptr(ptrs, ptr) { + if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, opts->background_target)) + rewrite_ptrs |= ptr_bit; + ptr_bit <<= 1; + } + + return rewrite_ptrs; +} + +static unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, + struct bch_io_opts *opts, + struct bkey_s_c k) +{ + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + + return bch2_bkey_ptrs_need_compress(c, opts, k, ptrs) | + bch2_bkey_ptrs_need_move(c, opts, ptrs); +} + +u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k) +{ + const struct bch_extent_rebalance *opts = bch2_bkey_rebalance_opts(k); + if (!opts) + return 0; + + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + const union bch_extent_entry *entry; + struct extent_ptr_decoded p; + u64 sectors = 0; + + if (opts->background_compression) { + unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression); + + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { + if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible || + p.ptr.unwritten) { + sectors = 0; + goto incompressible; + } + + if (!p.ptr.cached && p.crc.compression_type != compression_type) + sectors += p.crc.compressed_size; + } + } +incompressible: + if (opts->background_target && + bch2_target_accepts_data(c, BCH_DATA_user, opts->background_target)) { + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) + if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, opts->background_target)) + sectors += p.crc.compressed_size; + } + + return sectors; +} + +static bool bch2_bkey_rebalance_needs_update(struct bch_fs *c, struct bch_io_opts *opts, + struct bkey_s_c k) +{ + if (!bkey_extent_is_direct_data(k.k)) + return 0; + + const struct bch_extent_rebalance *old = bch2_bkey_rebalance_opts(k); + + if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k)) { + struct bch_extent_rebalance new = io_opts_to_rebalance_opts(opts); + return old == NULL || memcmp(old, &new, sizeof(new)); + } else { + return old != NULL; + } +} + +int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bch_io_opts *opts, + struct bkey_i *_k) +{ + if (!bkey_extent_is_direct_data(&_k->k)) + return 0; + + struct bkey_s k = bkey_i_to_s(_k); + struct bch_extent_rebalance *old = + (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c); + + if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k.s_c)) { + if (!old) { + old = bkey_val_end(k); + k.k->u64s += sizeof(*old) / sizeof(u64); + } + + *old = io_opts_to_rebalance_opts(opts); + } else { + if (old) + extent_entry_drop(k, (union bch_extent_entry *) old); + } + + return 0; +} + +int bch2_get_update_rebalance_opts(struct btree_trans *trans, + struct bch_io_opts *io_opts, + struct btree_iter *iter, + struct bkey_s_c k) +{ + BUG_ON(iter->flags & BTREE_ITER_is_extents); + BUG_ON(iter->flags & BTREE_ITER_filter_snapshots); + + const struct bch_extent_rebalance *r = k.k->type == KEY_TYPE_reflink_v + ? 
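/* [editor's note: bch_extent_rebalance (rebalance_format.h, further down)
 * is a single u64 of bitfields, so bch2_bkey_set_needs_rebalance() above
 * appends it by growing the value by sizeof(*old) / sizeof(u64) == 1 u64,
 * and bch2_get_update_rebalance_opts() just below sizes its allocation as
 * bkey_bytes(k.k) + 8 -- one spare u64 -- before reassembling the key.]
 */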
bch2_bkey_rebalance_opts(k) : NULL; + if (r) { +#define x(_name) \ + if (r->_name##_from_inode) { \ + io_opts->_name = r->_name; \ + io_opts->_name##_from_inode = true; \ + } + BCH_REBALANCE_OPTS() +#undef x + } + + if (!bch2_bkey_rebalance_needs_update(trans->c, io_opts, k)) + return 0; + + struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + 8); + int ret = PTR_ERR_OR_ZERO(n); + if (ret) + return ret; + + bkey_reassemble(n, k); + + /* On successful transaction commit, @k was invalidated: */ + + return bch2_bkey_set_needs_rebalance(trans->c, io_opts, n) ?: + bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?: + bch2_trans_commit(trans, NULL, NULL, 0) ?: + -BCH_ERR_transaction_restart_nested; +} + #define REBALANCE_WORK_SCAN_OFFSET (U64_MAX - 1) static const char * const bch2_rebalance_state_strs[] = { @@ -33,7 +219,7 @@ static const char * const bch2_rebalance_state_strs[] = { #undef x }; -static int __bch2_set_rebalance_needs_scan(struct btree_trans *trans, u64 inum) +int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum) { struct btree_iter iter; struct bkey_s_c k; @@ -71,9 +257,8 @@ err: int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum) { int ret = bch2_trans_commit_do(c, NULL, NULL, - BCH_TRANS_COMMIT_no_enospc| - BCH_TRANS_COMMIT_lazy_rw, - __bch2_set_rebalance_needs_scan(trans, inum)); + BCH_TRANS_COMMIT_no_enospc, + bch2_set_rebalance_needs_scan_trans(trans, inum)); rebalance_wakeup(c); return ret; } @@ -121,6 +306,9 @@ static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) { + if (!bch2_bkey_rebalance_opts(k)) + return 0; + struct bkey_i *n = bch2_bkey_make_mut(trans, iter, &k, 0); int ret = PTR_ERR_OR_ZERO(n); if (ret) @@ -134,31 +322,27 @@ static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans, static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans, struct bpos work_pos, struct btree_iter *extent_iter, + struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { struct bch_fs *c = trans->c; - struct bkey_s_c k; bch2_trans_iter_exit(trans, extent_iter); bch2_trans_iter_init(trans, extent_iter, work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink, work_pos, BTREE_ITER_all_snapshots); - k = bch2_btree_iter_peek_slot(extent_iter); + struct bkey_s_c k = bch2_btree_iter_peek_slot(extent_iter); if (bkey_err(k)) return k; - const struct bch_extent_rebalance *r = k.k ? 
bch2_bkey_rebalance_opts(k) : NULL; - if (!r) { - /* raced due to btree write buffer, nothing to do */ - return bkey_s_c_null; - } + int ret = bch2_move_get_io_opts_one(trans, io_opts, extent_iter, k); + if (ret) + return bkey_s_c_err(ret); memset(data_opts, 0, sizeof(*data_opts)); - - data_opts->rewrite_ptrs = - bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression); - data_opts->target = r->target; + data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, io_opts, k); + data_opts->target = io_opts->background_target; data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS; if (!data_opts->rewrite_ptrs) { @@ -178,12 +362,28 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans, if (trace_rebalance_extent_enabled()) { struct printbuf buf = PRINTBUF; - prt_str(&buf, "target="); - bch2_target_to_text(&buf, c, r->target); - prt_str(&buf, " compression="); - bch2_compression_opt_to_text(&buf, r->compression); - prt_str(&buf, " "); bch2_bkey_val_to_text(&buf, c, k); + prt_newline(&buf); + + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + + unsigned p = bch2_bkey_ptrs_need_compress(c, io_opts, k, ptrs); + if (p) { + prt_str(&buf, "compression="); + bch2_compression_opt_to_text(&buf, io_opts->background_compression); + prt_str(&buf, " "); + bch2_prt_u64_base2(&buf, p); + prt_newline(&buf); + } + + p = bch2_bkey_ptrs_need_move(c, io_opts, ptrs); + if (p) { + prt_str(&buf, "move="); + bch2_target_to_text(&buf, c, io_opts->background_target); + prt_str(&buf, " "); + bch2_prt_u64_base2(&buf, p); + prt_newline(&buf); + } trace_rebalance_extent(c, buf.buf); printbuf_exit(&buf); @@ -212,14 +412,10 @@ static int do_rebalance_extent(struct moving_context *ctxt, bch2_bkey_buf_init(&sk); ret = bkey_err(k = next_rebalance_extent(trans, work_pos, - extent_iter, &data_opts)); + extent_iter, &io_opts, &data_opts)); if (ret || !k.k) goto out; - ret = bch2_move_get_io_opts_one(trans, &io_opts, k); - if (ret) - goto out; - atomic64_add(k.k->size, &ctxt->stats->sectors_seen); /* @@ -253,20 +449,8 @@ static bool rebalance_pred(struct bch_fs *c, void *arg, struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { - unsigned target, compression; - - if (k.k->p.inode) { - target = io_opts->background_target; - compression = background_compression(*io_opts); - } else { - const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k); - - target = r ? r->target : io_opts->background_target; - compression = r ? 
r->compression : background_compression(*io_opts); - } - - data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, k, target, compression); - data_opts->target = target; + data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, io_opts, k); + data_opts->target = io_opts->background_target; data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS; return data_opts->rewrite_ptrs != 0; } @@ -338,9 +522,9 @@ static int do_rebalance(struct moving_context *ctxt) BTREE_ITER_all_snapshots); while (!bch2_move_ratelimit(ctxt)) { - if (!r->enabled) { + if (!c->opts.rebalance_enabled) { bch2_moving_ctxt_flush_all(ctxt); - kthread_wait_freezable(r->enabled || + kthread_wait_freezable(c->opts.rebalance_enabled || kthread_should_stop()); } diff --git a/libbcachefs/rebalance.h b/libbcachefs/rebalance.h index 28a52638..0a0821ab 100644 --- a/libbcachefs/rebalance.h +++ b/libbcachefs/rebalance.h @@ -2,8 +2,18 @@ #ifndef _BCACHEFS_REBALANCE_H #define _BCACHEFS_REBALANCE_H +#include "compress.h" +#include "disk_groups.h" #include "rebalance_types.h" +u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *, struct bkey_s_c); +int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bch_io_opts *, struct bkey_i *); +int bch2_get_update_rebalance_opts(struct btree_trans *, + struct bch_io_opts *, + struct btree_iter *, + struct bkey_s_c); + +int bch2_set_rebalance_needs_scan_trans(struct btree_trans *, u64); int bch2_set_rebalance_needs_scan(struct bch_fs *, u64 inum); int bch2_set_fs_needs_rebalance(struct bch_fs *); diff --git a/libbcachefs/rebalance_format.h b/libbcachefs/rebalance_format.h new file mode 100644 index 00000000..ff9a1342 --- /dev/null +++ b/libbcachefs/rebalance_format.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_REBALANCE_FORMAT_H +#define _BCACHEFS_REBALANCE_FORMAT_H + +struct bch_extent_rebalance { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u64 type:6, + unused:3, + + promote_target_from_inode:1, + erasure_code_from_inode:1, + data_checksum_from_inode:1, + background_compression_from_inode:1, + data_replicas_from_inode:1, + background_target_from_inode:1, + + promote_target:16, + erasure_code:1, + data_checksum:4, + data_replicas:4, + background_compression:8, /* enum bch_compression_opt */ + background_target:16; +#elif defined (__BIG_ENDIAN_BITFIELD) + __u64 background_target:16, + background_compression:8, + data_replicas:4, + data_checksum:4, + erasure_code:1, + promote_target:16, + + background_target_from_inode:1, + data_replicas_from_inode:1, + background_compression_from_inode:1, + data_checksum_from_inode:1, + erasure_code_from_inode:1, + promote_target_from_inode:1, + + unused:3, + type:6; +#endif +}; + +/* subset of BCH_INODE_OPTS */ +#define BCH_REBALANCE_OPTS() \ + x(data_checksum) \ + x(background_compression) \ + x(data_replicas) \ + x(promote_target) \ + x(background_target) \ + x(erasure_code) + +#endif /* _BCACHEFS_REBALANCE_FORMAT_H */ + diff --git a/libbcachefs/rebalance_types.h b/libbcachefs/rebalance_types.h index 0fffb536..fe5098c1 100644 --- a/libbcachefs/rebalance_types.h +++ b/libbcachefs/rebalance_types.h @@ -30,8 +30,6 @@ struct bch_fs_rebalance { struct bbpos scan_start; struct bbpos scan_end; struct bch_move_stats scan_stats; - - unsigned enabled:1; }; #endif /* _BCACHEFS_REBALANCE_TYPES_H */ diff --git a/libbcachefs/recovery.c b/libbcachefs/recovery.c index 69ac748f..a342744f 100644 --- a/libbcachefs/recovery.c +++ b/libbcachefs/recovery.c @@ -40,19 +40,42 @@ int bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree) int 
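/* [editor's note on rebalance_format.h above -- the bitfield widths sum to
 * exactly one u64:
 *
 *	6 (type) + 3 (unused) + 6 (the six _from_inode bits)
 *	+ 16 (promote_target) + 1 (erasure_code) + 4 (data_checksum)
 *	+ 4 (data_replicas) + 8 (background_compression)
 *	+ 16 (background_target) == 64
 *
 * and the __BIG_ENDIAN_BITFIELD variant declares the same fields in
 * reverse so the on-disk little-endian layout is identical either way.]
 */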
ret = 0; mutex_lock(&c->sb_lock); + struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); if (!(c->sb.btrees_lost_data & b)) { struct printbuf buf = PRINTBUF; bch2_btree_id_to_text(&buf, btree); bch_err(c, "flagging btree %s lost data", buf.buf); printbuf_exit(&buf); - bch2_sb_field_get(c->disk_sb.sb, ext)->btrees_lost_data |= cpu_to_le64(b); + ext->btrees_lost_data |= cpu_to_le64(b); } + /* Once we have runtime self healing for topology errors we won't need this: */ + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_topology) ?: ret; + + /* Btree node accounting will be off: */ + __set_bit_le64(BCH_FSCK_ERR_accounting_mismatch, ext->errors_silent); + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_allocations) ?: ret; + +#ifdef CONFIG_BCACHEFS_DEBUG + /* + * These are much more minor, and don't need to be corrected right away, + * but in debug mode we want the next fsck run to be clean: + */ + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_lrus) ?: ret; + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_backpointers_to_extents) ?: ret; +#endif + switch (btree) { case BTREE_ID_alloc: - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_allocations) ?: ret; ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret; + + __set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent); goto out; case BTREE_ID_backpointers: ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_btree_backpointers) ?: ret; @@ -75,7 +98,6 @@ int bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree) goto out; default: ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_scan_for_btree_nodes) ?: ret; - ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_topology) ?: ret; goto out; } out: @@ -113,6 +135,8 @@ static void bch2_reconstruct_alloc(struct bch_fs *c) __set_bit_le64(BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, ext->errors_silent); __set_bit_le64(BCH_FSCK_ERR_fs_usage_replicas_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_to_missing_lru_entry, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent); __set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent); __set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent); @@ -128,11 +152,10 @@ static void bch2_reconstruct_alloc(struct bch_fs *c) __set_bit_le64(BCH_FSCK_ERR_accounting_mismatch, ext->errors_silent); c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); - bch2_write_super(c); - mutex_unlock(&c->sb_lock); - c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); + bch2_write_super(c); + mutex_unlock(&c->sb_lock); bch2_shoot_down_journal_keys(c, BTREE_ID_alloc, 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX); @@ -441,7 +464,9 @@ static int journal_replay_entry_early(struct bch_fs *c, switch 
(entry->type) { case BCH_JSET_ENTRY_btree_root: { - struct btree_root *r; + + if (unlikely(!entry->u64s)) + return 0; if (fsck_err_on(entry->btree_id >= BTREE_ID_NR_MAX, c, invalid_btree_id, @@ -455,15 +480,11 @@ static int journal_replay_entry_early(struct bch_fs *c, return ret; } - r = bch2_btree_id_root(c, entry->btree_id); + struct btree_root *r = bch2_btree_id_root(c, entry->btree_id); - if (entry->u64s) { - r->level = entry->level; - bkey_copy(&r->key, (struct bkey_i *) entry->start); - r->error = 0; - } else { - r->error = -BCH_ERR_btree_node_read_error; - } + r->level = entry->level; + bkey_copy(&r->key, (struct bkey_i *) entry->start); + r->error = 0; r->alive = true; break; } @@ -570,6 +591,7 @@ static int read_btree_roots(struct bch_fs *c) r->error = 0; ret = bch2_btree_lost_data(c, i); + BUG_ON(ret); } } @@ -691,8 +713,13 @@ int bch2_fs_recovery(struct bch_fs *c) goto err; } - if (c->opts.norecovery) - c->opts.recovery_pass_last = BCH_RECOVERY_PASS_snapshots_read; + if (c->opts.norecovery) { + c->opts.recovery_pass_last = c->opts.recovery_pass_last + ? min(c->opts.recovery_pass_last, BCH_RECOVERY_PASS_snapshots_read) + : BCH_RECOVERY_PASS_snapshots_read; + c->opts.nochanges = true; + c->opts.read_only = true; + } mutex_lock(&c->sb_lock); struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); @@ -743,13 +770,11 @@ int bch2_fs_recovery(struct bch_fs *c) bch2_write_super(c); mutex_unlock(&c->sb_lock); - if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) - c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_topology); - if (c->opts.fsck) set_bit(BCH_FS_fsck_running, &c->flags); if (c->sb.clean) set_bit(BCH_FS_clean_recovery, &c->flags); + set_bit(BCH_FS_recovery_running, &c->flags); ret = bch2_blacklist_table_initialize(c); if (ret) { @@ -893,11 +918,21 @@ use_clean: if (ret) goto err; + /* + * Normally set by the appropriate recovery pass: when cleared, this + * indicates we're in early recovery and btree updates should be done by + * being applied to the journal replay keys. _Must_ be cleared before + * multithreaded use: + */ + set_bit(BCH_FS_may_go_rw, &c->flags); clear_bit(BCH_FS_fsck_running, &c->flags); + clear_bit(BCH_FS_recovery_running, &c->flags); /* in case we don't run journal replay, i.e. 
norecovery mode */ set_bit(BCH_FS_accounting_replay_done, &c->flags); + bch2_async_btree_node_rewrites_flush(c); + /* fsync if we fixed errors */ if (test_bit(BCH_FS_errors_fixed, &c->flags)) { bch2_journal_flush_all_pins(&c->journal); @@ -1030,6 +1065,7 @@ int bch2_fs_initialize(struct bch_fs *c) struct bch_inode_unpacked root_inode, lostfound_inode; struct bkey_inode_buf packed_inode; struct qstr lostfound = QSTR("lost+found"); + struct bch_member *m; int ret; bch_notice(c, "initializing new filesystem"); @@ -1046,9 +1082,16 @@ int bch2_fs_initialize(struct bch_fs *c) SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current); bch2_write_super(c); } + + for_each_member_device(c, ca) { + m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); + SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, false); + ca->mi = bch2_mi_to_cpu(m); + } + + bch2_write_super(c); mutex_unlock(&c->sb_lock); - c->curr_recovery_pass = BCH_RECOVERY_PASS_NR; set_bit(BCH_FS_btree_running, &c->flags); set_bit(BCH_FS_may_go_rw, &c->flags); @@ -1089,9 +1132,6 @@ int bch2_fs_initialize(struct bch_fs *c) if (ret) goto err; - for_each_online_member(c, ca) - ca->new_fs_bucket_idx = 0; - ret = bch2_fs_freespace_init(c); if (ret) goto err; @@ -1150,6 +1190,7 @@ int bch2_fs_initialize(struct bch_fs *c) bch2_write_super(c); mutex_unlock(&c->sb_lock); + c->curr_recovery_pass = BCH_RECOVERY_PASS_NR; return 0; err: bch_err_fn(c, ret); diff --git a/libbcachefs/recovery_passes.c b/libbcachefs/recovery_passes.c index e6676a1b..f6d3a99c 100644 --- a/libbcachefs/recovery_passes.c +++ b/libbcachefs/recovery_passes.c @@ -27,6 +27,12 @@ const char * const bch2_recovery_passes[] = { NULL }; +/* Fake recovery pass, so that scan_for_btree_nodes isn't 0: */ +static int bch2_recovery_pass_empty(struct bch_fs *c) +{ + return 0; +} + static int bch2_set_may_go_rw(struct bch_fs *c) { struct journal_keys *keys = &c->journal_keys; @@ -40,7 +46,7 @@ static int bch2_set_may_go_rw(struct bch_fs *c) set_bit(BCH_FS_may_go_rw, &c->flags); - if (keys->nr || c->opts.fsck || !c->sb.clean || c->opts.recovery_passes) + if (keys->nr || !c->opts.read_only || c->opts.fsck || !c->sb.clean || c->opts.recovery_passes) return bch2_fs_read_write_early(c); return 0; } @@ -235,14 +241,7 @@ int bch2_run_online_recovery_passes(struct bch_fs *c) continue; ret = bch2_run_recovery_pass(c, i); - - if (c->curr_recovery_pass < i) { - /* - * bch2_run_explicit_recovery_pass() was called: we - * can't always catch -BCH_ERR_restart_recovery because - * it may have been called from another thread (btree - * node read completion) - */ + if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) { i = c->curr_recovery_pass; continue; } @@ -259,6 +258,12 @@ int bch2_run_recovery_passes(struct bch_fs *c) { int ret = 0; + /* + * We can't allow set_may_go_rw to be excluded; that would cause us to + * use the journal replay keys for updates where it's not expected. 
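[editor's note: recovery_passes_exclude is the option for skipping
recovery passes during debugging; stripping set_may_go_rw from the
exclude set here ensures recovery always reaches the point where btree
updates switch from being applied to the journal replay keys over to
normal transactions.]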
+ */ + c->opts.recovery_passes_exclude &= ~BCH_RECOVERY_PASS_set_may_go_rw; + while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) { spin_lock_irq(&c->recovery_pass_lock); unsigned pass = c->curr_recovery_pass; diff --git a/libbcachefs/recovery_passes_types.h b/libbcachefs/recovery_passes_types.h index 9d96c06e..94dc20ca 100644 --- a/libbcachefs/recovery_passes_types.h +++ b/libbcachefs/recovery_passes_types.h @@ -13,6 +13,7 @@ * must never change: */ #define BCH_RECOVERY_PASSES() \ + x(recovery_pass_empty, 41, PASS_SILENT) \ x(scan_for_btree_nodes, 37, 0) \ x(check_topology, 4, 0) \ x(accounting_read, 39, PASS_ALWAYS) \ diff --git a/libbcachefs/reflink.c b/libbcachefs/reflink.c index f457925f..e1911b9b 100644 --- a/libbcachefs/reflink.c +++ b/libbcachefs/reflink.c @@ -15,6 +15,17 @@ #include <linux/sched/signal.h> +static inline bool bkey_extent_is_reflink_data(const struct bkey *k) +{ + switch (k->type) { + case KEY_TYPE_reflink_v: + case KEY_TYPE_indirect_inline_data: + return true; + default: + return false; + } +} + static inline unsigned bkey_type_to_indirect(const struct bkey *k) { switch (k->type) { @@ -30,15 +41,15 @@ static inline unsigned bkey_type_to_indirect(const struct bkey *k) /* reflink pointers */ int bch2_reflink_p_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); int ret = 0; - bkey_fsck_err_on(le64_to_cpu(p.v->idx) < le32_to_cpu(p.v->front_pad), + bkey_fsck_err_on(REFLINK_P_IDX(p.v) < le32_to_cpu(p.v->front_pad), c, reflink_p_front_pad_bad, "idx < front_pad (%llu < %u)", - le64_to_cpu(p.v->idx), le32_to_cpu(p.v->front_pad)); + REFLINK_P_IDX(p.v), le32_to_cpu(p.v->front_pad)); fsck_err: return ret; } @@ -49,7 +60,7 @@ void bch2_reflink_p_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); prt_printf(out, "idx %llu front_pad %u back_pad %u", - le64_to_cpu(p.v->idx), + REFLINK_P_IDX(p.v), le32_to_cpu(p.v->front_pad), le32_to_cpu(p.v->back_pad)); } @@ -65,49 +76,250 @@ bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r */ return false; - if (le64_to_cpu(l.v->idx) + l.k->size != le64_to_cpu(r.v->idx)) + if (REFLINK_P_IDX(l.v) + l.k->size != REFLINK_P_IDX(r.v)) + return false; + + if (REFLINK_P_ERROR(l.v) != REFLINK_P_ERROR(r.v)) return false; bch2_key_resize(l.k, l.k->size + r.k->size); return true; } +/* indirect extents */ + +int bch2_reflink_v_validate(struct bch_fs *c, struct bkey_s_c k, + struct bkey_validate_context from) +{ + int ret = 0; + + bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, REFLINK_P_IDX_MAX)), + c, reflink_v_pos_bad, + "indirect extent above maximum position 0:%llu", + REFLINK_P_IDX_MAX); + + ret = bch2_bkey_ptrs_validate(c, k, from); +fsck_err: + return ret; +} + +void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c, + struct bkey_s_c k) +{ + struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k); + + prt_printf(out, "refcount: %llu ", le64_to_cpu(r.v->refcount)); + + bch2_bkey_ptrs_to_text(out, c, k); +} + +#if 0 +Currently disabled, needs to be debugged: + +bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r) +{ + struct bkey_s_reflink_v l = bkey_s_to_reflink_v(_l); + struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(_r); + + return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r); +} +#endif + +/* indirect inline data */ + +int bch2_indirect_inline_data_validate(struct bch_fs *c, 
struct bkey_s_c k, + struct bkey_validate_context from) +{ + return 0; +} + +void bch2_indirect_inline_data_to_text(struct printbuf *out, + struct bch_fs *c, struct bkey_s_c k) +{ + struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k); + unsigned datalen = bkey_inline_data_bytes(k.k); + + prt_printf(out, "refcount %llu datalen %u: %*phN", + le64_to_cpu(d.v->refcount), datalen, + min(datalen, 32U), d.v->data); +} + +/* lookup */ + +static int bch2_indirect_extent_not_missing(struct btree_trans *trans, struct bkey_s_c_reflink_p p, + bool should_commit) +{ + struct bkey_i_reflink_p *new = bch2_bkey_make_mut_noupdate_typed(trans, p.s_c, reflink_p); + int ret = PTR_ERR_OR_ZERO(new); + if (ret) + return ret; + + SET_REFLINK_P_ERROR(&new->v, false); + ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, &new->k_i, BTREE_TRIGGER_norun); + if (ret) + return ret; + + if (!should_commit) + return 0; + + return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: + -BCH_ERR_transaction_restart_nested; +} + +static int bch2_indirect_extent_missing_error(struct btree_trans *trans, + struct bkey_s_c_reflink_p p, + u64 missing_start, u64 missing_end, + bool should_commit) +{ + if (REFLINK_P_ERROR(p.v)) + return -BCH_ERR_missing_indirect_extent; + + struct bch_fs *c = trans->c; + u64 live_start = REFLINK_P_IDX(p.v); + u64 live_end = REFLINK_P_IDX(p.v) + p.k->size; + u64 refd_start = live_start - le32_to_cpu(p.v->front_pad); + u64 refd_end = live_end + le32_to_cpu(p.v->back_pad); + struct printbuf buf = PRINTBUF; + int ret = 0; + + BUG_ON(missing_start < refd_start); + BUG_ON(missing_end > refd_end); + + if (fsck_err(trans, reflink_p_to_missing_reflink_v, + "pointer to missing indirect extent\n" + " %s\n" + " missing range %llu-%llu", + (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf), + missing_start, missing_end)) { + struct bkey_i_reflink_p *new = bch2_bkey_make_mut_noupdate_typed(trans, p.s_c, reflink_p); + ret = PTR_ERR_OR_ZERO(new); + if (ret) + goto err; + + /* + * Is the missing range not actually needed? + * + * p.v->idx refers to the data that we actually want, but if the + * indirect extent we point to was bigger, front_pad and back_pad + * indicate the range we took a reference on. 
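[editor's note, with hypothetical numbers: idx == 100, size == 10 and
front_pad == back_pad == 5 give a live range of [100,110) and a
referenced range of [95,115). A missing range of [95,98) lies entirely in
the front pad, so the repair below just shrinks front_pad to
live_start - missing_end == 2; only a hole overlapping the live range
forces the pointer to be cut down and flagged with REFLINK_P_ERROR.]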
+ */ + + if (missing_end <= live_start) { + new->v.front_pad = cpu_to_le32(live_start - missing_end); + } else if (missing_start >= live_end) { + new->v.back_pad = cpu_to_le32(missing_start - live_end); + } else { + struct bpos new_start = bkey_start_pos(&new->k); + struct bpos new_end = new->k.p; + + if (missing_start > live_start) + new_start.offset += missing_start - live_start; + if (missing_end < live_end) + new_end.offset -= live_end - missing_end; + + bch2_cut_front(new_start, &new->k_i); + bch2_cut_back(new_end, &new->k_i); + + SET_REFLINK_P_ERROR(&new->v, true); + } + + ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, &new->k_i, BTREE_TRIGGER_norun); + if (ret) + goto err; + + if (should_commit) + ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: + -BCH_ERR_transaction_restart_nested; + } +err: +fsck_err: + printbuf_exit(&buf); + return ret; +} + +/* + * This is used from the read path, which doesn't expect to have to do a + * transaction commit, and from triggers, which should not be doing a commit: + */ +struct bkey_s_c bch2_lookup_indirect_extent(struct btree_trans *trans, + struct btree_iter *iter, + s64 *offset_into_extent, + struct bkey_s_c_reflink_p p, + bool should_commit, + unsigned iter_flags) +{ + BUG_ON(*offset_into_extent < -((s64) le32_to_cpu(p.v->front_pad))); + BUG_ON(*offset_into_extent >= p.k->size + le32_to_cpu(p.v->back_pad)); + + u64 reflink_offset = REFLINK_P_IDX(p.v) + *offset_into_extent; + + struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_reflink, + POS(0, reflink_offset), iter_flags); + if (bkey_err(k)) + return k; + + if (unlikely(!bkey_extent_is_reflink_data(k.k))) { + bch2_trans_iter_exit(trans, iter); + + unsigned size = min((u64) k.k->size, + REFLINK_P_IDX(p.v) + p.k->size + le32_to_cpu(p.v->back_pad) - + reflink_offset); + bch2_key_resize(&iter->k, size); + + int ret = bch2_indirect_extent_missing_error(trans, p, reflink_offset, + k.k->p.offset, should_commit); + if (ret) + return bkey_s_c_err(ret); + } else if (unlikely(REFLINK_P_ERROR(p.v))) { + bch2_trans_iter_exit(trans, iter); + + int ret = bch2_indirect_extent_not_missing(trans, p, should_commit); + if (ret) + return bkey_s_c_err(ret); + } + + *offset_into_extent = reflink_offset - bkey_start_offset(k.k); + return k; +} + +/* reflink pointer trigger */ + static int trans_trigger_reflink_p_segment(struct btree_trans *trans, struct bkey_s_c_reflink_p p, u64 *idx, enum btree_iter_update_trigger_flags flags) { struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_i *k; - __le64 *refcount; - int add = !(flags & BTREE_TRIGGER_overwrite) ? 
1 : -1; struct printbuf buf = PRINTBUF; - int ret; - k = bch2_bkey_get_mut_noupdate(trans, &iter, - BTREE_ID_reflink, POS(0, *idx), - BTREE_ITER_with_updates); - ret = PTR_ERR_OR_ZERO(k); + s64 offset_into_extent = *idx - REFLINK_P_IDX(p.v); + struct btree_iter iter; + struct bkey_s_c k = bch2_lookup_indirect_extent(trans, &iter, &offset_into_extent, p, false, + BTREE_ITER_intent| + BTREE_ITER_with_updates); + int ret = bkey_err(k); if (ret) - goto err; + return ret; - refcount = bkey_refcount(bkey_i_to_s(k)); - if (!refcount) { - bch2_bkey_val_to_text(&buf, c, p.s_c); - bch2_trans_inconsistent(trans, - "nonexistent indirect extent at %llu while marking\n %s", - *idx, buf.buf); - ret = -EIO; - goto err; + if (bkey_deleted(k.k)) { + if (!(flags & BTREE_TRIGGER_overwrite)) + ret = -BCH_ERR_missing_indirect_extent; + goto next; } + struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); + ret = PTR_ERR_OR_ZERO(new); + if (ret) + goto err; + + __le64 *refcount = bkey_refcount(bkey_i_to_s(new)); if (!*refcount && (flags & BTREE_TRIGGER_overwrite)) { bch2_bkey_val_to_text(&buf, c, p.s_c); - bch2_trans_inconsistent(trans, - "indirect extent refcount underflow at %llu while marking\n %s", - *idx, buf.buf); - ret = -EIO; - goto err; + prt_printf(&buf, "\n "); + bch2_bkey_val_to_text(&buf, c, k); + log_fsck_err(trans, reflink_refcount_underflow, + "indirect extent refcount underflow while marking\n %s", + buf.buf); + goto next; } if (flags & BTREE_TRIGGER_insert) { @@ -115,25 +327,26 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans, u64 pad; pad = max_t(s64, le32_to_cpu(v->front_pad), - le64_to_cpu(v->idx) - bkey_start_offset(&k->k)); + REFLINK_P_IDX(v) - bkey_start_offset(&new->k)); BUG_ON(pad > U32_MAX); v->front_pad = cpu_to_le32(pad); pad = max_t(s64, le32_to_cpu(v->back_pad), - k->k.p.offset - p.k->size - le64_to_cpu(v->idx)); + new->k.p.offset - p.k->size - REFLINK_P_IDX(v)); BUG_ON(pad > U32_MAX); v->back_pad = cpu_to_le32(pad); } - le64_add_cpu(refcount, add); + le64_add_cpu(refcount, !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1); bch2_btree_iter_set_pos_to_extent_start(&iter); - ret = bch2_trans_update(trans, &iter, k, 0); + ret = bch2_trans_update(trans, &iter, new, 0); if (ret) goto err; - - *idx = k->k.p.offset; +next: + *idx = k.k->p.offset; err: +fsck_err: bch2_trans_iter_exit(trans, &iter); printbuf_exit(&buf); return ret; @@ -147,9 +360,7 @@ static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans, struct bch_fs *c = trans->c; struct reflink_gc *r; int add = !(flags & BTREE_TRIGGER_overwrite) ? 
1 : -1; - u64 start = le64_to_cpu(p.v->idx); - u64 end = le64_to_cpu(p.v->idx) + p.k->size; - u64 next_idx = end + le32_to_cpu(p.v->back_pad); + u64 next_idx = REFLINK_P_IDX(p.v) + p.k->size + le32_to_cpu(p.v->back_pad); s64 ret = 0; struct printbuf buf = PRINTBUF; @@ -168,36 +379,14 @@ static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans, *idx = r->offset; return 0; not_found: - BUG_ON(!(flags & BTREE_TRIGGER_check_repair)); - - if (fsck_err(trans, reflink_p_to_missing_reflink_v, - "pointer to missing indirect extent\n" - " %s\n" - " missing range %llu-%llu", - (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf), - *idx, next_idx)) { - struct bkey_i *update = bch2_bkey_make_mut_noupdate(trans, p.s_c); - ret = PTR_ERR_OR_ZERO(update); + if (flags & BTREE_TRIGGER_check_repair) { + ret = bch2_indirect_extent_missing_error(trans, p, *idx, next_idx, false); if (ret) goto err; - - if (next_idx <= start) { - bkey_i_to_reflink_p(update)->v.front_pad = cpu_to_le32(start - next_idx); - } else if (*idx >= end) { - bkey_i_to_reflink_p(update)->v.back_pad = cpu_to_le32(*idx - end); - } else { - bkey_error_init(update); - update->k.p = p.k->p; - update->k.size = p.k->size; - set_bkey_val_u64s(&update->k, 0); - } - - ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_norun); } *idx = next_idx; err: -fsck_err: printbuf_exit(&buf); return ret; } @@ -210,8 +399,8 @@ static int __trigger_reflink_p(struct btree_trans *trans, struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); int ret = 0; - u64 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad); - u64 end = le64_to_cpu(p.v->idx) + p.k->size + le32_to_cpu(p.v->back_pad); + u64 idx = REFLINK_P_IDX(p.v) - le32_to_cpu(p.v->front_pad); + u64 end = REFLINK_P_IDX(p.v) + p.k->size + le32_to_cpu(p.v->back_pad); if (flags & BTREE_TRIGGER_transactional) { while (idx < end && !ret) @@ -253,35 +442,7 @@ int bch2_trigger_reflink_p(struct btree_trans *trans, return trigger_run_overwrite_then_insert(__trigger_reflink_p, trans, btree_id, level, old, new, flags); } -/* indirect extents */ - -int bch2_reflink_v_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) -{ - return bch2_bkey_ptrs_validate(c, k, flags); -} - -void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c, - struct bkey_s_c k) -{ - struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k); - - prt_printf(out, "refcount: %llu ", le64_to_cpu(r.v->refcount)); - - bch2_bkey_ptrs_to_text(out, c, k); -} - -#if 0 -Currently disabled, needs to be debugged: - -bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r) -{ - struct bkey_s_reflink_v l = bkey_s_to_reflink_v(_l); - struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(_r); - - return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r); -} -#endif +/* indirect extent trigger */ static inline void check_indirect_extent_deleting(struct bkey_s new, @@ -307,25 +468,6 @@ int bch2_trigger_reflink_v(struct btree_trans *trans, return bch2_trigger_extent(trans, btree_id, level, old, new, flags); } -/* indirect inline data */ - -int bch2_indirect_inline_data_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) -{ - return 0; -} - -void bch2_indirect_inline_data_to_text(struct printbuf *out, - struct bch_fs *c, struct bkey_s_c k) -{ - struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k); - unsigned datalen = bkey_inline_data_bytes(k.k); - - prt_printf(out, "refcount %llu datalen %u: %*phN", - 
le64_to_cpu(d.v->refcount), datalen, - min(datalen, 32U), d.v->data); -} - int bch2_trigger_indirect_inline_data(struct btree_trans *trans, enum btree_id btree_id, unsigned level, struct bkey_s_c old, struct bkey_s new, @@ -336,6 +478,8 @@ int bch2_trigger_indirect_inline_data(struct btree_trans *trans, return 0; } +/* create */ + static int bch2_make_extent_indirect(struct btree_trans *trans, struct btree_iter *extent_iter, struct bkey_i *orig) @@ -358,6 +502,14 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, if (ret) goto err; + /* + * XXX: we're assuming that 56 bits will be enough for the life of the + * filesystem: we need to implement wraparound, with a cursor in the + * logged ops btree: + */ + if (bkey_ge(reflink_iter.pos, POS(0, REFLINK_P_IDX_MAX - orig->k.size))) + return -ENOSPC; + r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k)); ret = PTR_ERR_OR_ZERO(r_v); if (ret) @@ -394,7 +546,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, memset(&r_p->v, 0, sizeof(r_p->v)); #endif - r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k)); + SET_REFLINK_P_IDX(&r_p->v, bkey_start_offset(&r_v->k)); ret = bch2_trans_update(trans, extent_iter, &r_p->k_i, BTREE_UPDATE_internal_snapshot_node); @@ -409,7 +561,7 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end) struct bkey_s_c k; int ret; - for_each_btree_key_upto_continue_norestart(*iter, end, 0, k, ret) { + for_each_btree_key_max_continue_norestart(*iter, end, 0, k, ret) { if (bkey_extent_is_unwritten(k)) continue; @@ -533,11 +685,11 @@ s64 bch2_remap_range(struct bch_fs *c, struct bkey_i_reflink_p *dst_p = bkey_reflink_p_init(new_dst.k); - u64 offset = le64_to_cpu(src_p.v->idx) + + u64 offset = REFLINK_P_IDX(src_p.v) + (src_want.offset - bkey_start_offset(src_k.k)); - dst_p->v.idx = cpu_to_le64(offset); + SET_REFLINK_P_IDX(&dst_p->v, offset); } else { BUG(); } @@ -547,7 +699,7 @@ s64 bch2_remap_range(struct bch_fs *c, min(src_k.k->p.offset - src_want.offset, dst_end.offset - dst_iter.pos.offset)); - ret = bch2_bkey_set_needs_rebalance(c, new_dst.k, &opts) ?: + ret = bch2_bkey_set_needs_rebalance(c, &opts, new_dst.k) ?: bch2_extent_update(trans, dst_inum, &dst_iter, new_dst.k, &disk_res, new_i_size, i_sectors_delta, @@ -591,3 +743,97 @@ err: return dst_done ?: ret ?: ret2; } + +/* fsck */ + +static int bch2_gc_write_reflink_key(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c k, + size_t *idx) +{ + struct bch_fs *c = trans->c; + const __le64 *refcount = bkey_refcount_c(k); + struct printbuf buf = PRINTBUF; + struct reflink_gc *r; + int ret = 0; + + if (!refcount) + return 0; + + while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) && + r->offset < k.k->p.offset) + ++*idx; + + if (!r || + r->offset != k.k->p.offset || + r->size != k.k->size) { + bch_err(c, "unexpected inconsistency walking reflink table at gc finish"); + return -EINVAL; + } + + if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), + trans, reflink_v_refcount_wrong, + "reflink key has wrong refcount:\n" + " %s\n" + " should be %u", + (bch2_bkey_val_to_text(&buf, c, k), buf.buf), + r->refcount)) { + struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); + ret = PTR_ERR_OR_ZERO(new); + if (ret) + goto out; + + if (!r->refcount) + new->k.type = KEY_TYPE_deleted; + else + *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount); + ret = bch2_trans_update(trans, iter, new, 0); + } +out: +fsck_err: + printbuf_exit(&buf); + return ret; +} + +int 
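/* [editor's note: per the LE64_BITMASK()s added in reflink_format.h
 * further down, bch_reflink_p now packs index and flags into one __le64:
 * bits 0-55 are REFLINK_P_IDX (so REFLINK_P_IDX_MAX is 2^56 - 1, matching
 * the 56-bit field and the -ENOSPC guard in bch2_make_extent_indirect()
 * above) and bit 56 is REFLINK_P_ERROR, set when the indirect extent is
 * known missing; hence SET_REFLINK_P_IDX(&r_p->v, ...) replaces the old
 * r_p->v.idx = cpu_to_le64(...).]
 */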
bch2_gc_reflink_done(struct bch_fs *c) +{ + size_t idx = 0; + + int ret = bch2_trans_run(c, + for_each_btree_key_commit(trans, iter, + BTREE_ID_reflink, POS_MIN, + BTREE_ITER_prefetch, k, + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, + bch2_gc_write_reflink_key(trans, &iter, k, &idx))); + c->reflink_gc_nr = 0; + return ret; +} + +int bch2_gc_reflink_start(struct bch_fs *c) +{ + c->reflink_gc_nr = 0; + + int ret = bch2_trans_run(c, + for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN, + BTREE_ITER_prefetch, k, ({ + const __le64 *refcount = bkey_refcount_c(k); + + if (!refcount) + continue; + + struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table, + c->reflink_gc_nr++, GFP_KERNEL); + if (!r) { + ret = -BCH_ERR_ENOMEM_gc_reflink_start; + break; + } + + r->offset = k.k->p.offset; + r->size = k.k->size; + r->refcount = 0; + 0; + }))); + + bch_err_fn(c, ret); + return ret; +} diff --git a/libbcachefs/reflink.h b/libbcachefs/reflink.h index 51afe11d..f119316a 100644 --- a/libbcachefs/reflink.h +++ b/libbcachefs/reflink.h @@ -2,9 +2,8 @@ #ifndef _BCACHEFS_REFLINK_H #define _BCACHEFS_REFLINK_H -enum bch_validate_flags; - -int bch2_reflink_p_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_reflink_p_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c); int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned, @@ -19,7 +18,8 @@ int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned, .min_val_size = 16, \ }) -int bch2_reflink_v_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_reflink_v_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, @@ -34,7 +34,7 @@ int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned, }) int bch2_indirect_inline_data_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_indirect_inline_data_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_trigger_indirect_inline_data(struct btree_trans *, @@ -73,7 +73,14 @@ static inline __le64 *bkey_refcount(struct bkey_s k) } } +struct bkey_s_c bch2_lookup_indirect_extent(struct btree_trans *, struct btree_iter *, + s64 *, struct bkey_s_c_reflink_p, + bool, unsigned); + s64 bch2_remap_range(struct bch_fs *, subvol_inum, u64, subvol_inum, u64, u64, u64, s64 *); +int bch2_gc_reflink_done(struct bch_fs *); +int bch2_gc_reflink_start(struct bch_fs *); + #endif /* _BCACHEFS_REFLINK_H */ diff --git a/libbcachefs/reflink_format.h b/libbcachefs/reflink_format.h index 6772eebb..53502627 100644 --- a/libbcachefs/reflink_format.h +++ b/libbcachefs/reflink_format.h @@ -4,7 +4,7 @@ struct bch_reflink_p { struct bch_val v; - __le64 idx; + __le64 idx_flags; /* * A reflink pointer might point to an indirect extent which is then * later split (by copygc or rebalance). 
diff --git a/libbcachefs/reflink.h b/libbcachefs/reflink.h
index 51afe11d..f119316a 100644
--- a/libbcachefs/reflink.h
+++ b/libbcachefs/reflink.h
@@ -2,9 +2,8 @@
 #ifndef _BCACHEFS_REFLINK_H
 #define _BCACHEFS_REFLINK_H
 
-enum bch_validate_flags;
-
-int bch2_reflink_p_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+int bch2_reflink_p_validate(struct bch_fs *, struct bkey_s_c,
+			    struct bkey_validate_context);
 void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
 int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned,
@@ -19,7 +18,8 @@ int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned,
 	.min_val_size	= 16,					\
 })
 
-int bch2_reflink_v_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+int bch2_reflink_v_validate(struct bch_fs *, struct bkey_s_c,
+			    struct bkey_validate_context);
 void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned,
 			   struct bkey_s_c, struct bkey_s,
@@ -34,7 +34,7 @@ int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned,
 })
 
 int bch2_indirect_inline_data_validate(struct bch_fs *, struct bkey_s_c,
-				       enum bch_validate_flags);
+				       struct bkey_validate_context);
 void bch2_indirect_inline_data_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 int bch2_trigger_indirect_inline_data(struct btree_trans *,
@@ -73,7 +73,14 @@ static inline __le64 *bkey_refcount(struct bkey_s k)
 	}
 }
 
+struct bkey_s_c bch2_lookup_indirect_extent(struct btree_trans *, struct btree_iter *,
+					    s64 *, struct bkey_s_c_reflink_p,
+					    bool, unsigned);
+
 s64 bch2_remap_range(struct bch_fs *, subvol_inum, u64,
 		     subvol_inum, u64, u64, u64, s64 *);
 
+int bch2_gc_reflink_done(struct bch_fs *);
+int bch2_gc_reflink_start(struct bch_fs *);
+
 #endif /* _BCACHEFS_REFLINK_H */
diff --git a/libbcachefs/reflink_format.h b/libbcachefs/reflink_format.h
index 6772eebb..53502627 100644
--- a/libbcachefs/reflink_format.h
+++ b/libbcachefs/reflink_format.h
@@ -4,7 +4,7 @@
 
 struct bch_reflink_p {
 	struct bch_val		v;
-	__le64			idx;
+	__le64			idx_flags;
 	/*
 	 * A reflink pointer might point to an indirect extent which is then
 	 * later split (by copygc or rebalance). If we only pointed to part of
@@ -17,6 +17,9 @@ struct bch_reflink_p {
 	__le32			back_pad;
 } __packed __aligned(8);
 
+LE64_BITMASK(REFLINK_P_IDX,	struct bch_reflink_p, idx_flags, 0, 56);
+LE64_BITMASK(REFLINK_P_ERROR,	struct bch_reflink_p, idx_flags, 56, 57);
+
 struct bch_reflink_v {
 	struct bch_val		v;
 	__le64			refcount;
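The format change above repurposes the high bits of the old idx field: LE64_BITMASK generates little-endian-safe accessors, so REFLINK_P_IDX now covers bits 0-55 of idx_flags and REFLINK_P_ERROR bit 56 — which is also where the REFLINK_P_IDX_MAX bound of 2^56 checked in bch2_make_extent_indirect comes from. A standalone sketch of the same packing, with hand-written accessors in place of the generated ones (the real macro additionally does the le64_to_cpu/cpu_to_le64 conversion; the shift/mask arithmetic is the point here):

#include <stdint.h>
#include <assert.h>

#define IDX_BITS	56
#define IDX_MASK	((UINT64_C(1) << IDX_BITS) - 1)
#define ERROR_BIT	(UINT64_C(1) << IDX_BITS)

/* bits 0-55: index into the reflink btree; bit 56: error flag */
static uint64_t get_idx(uint64_t idx_flags)   { return idx_flags & IDX_MASK; }
static int      get_error(uint64_t idx_flags) { return !!(idx_flags & ERROR_BIT); }

static void set_idx(uint64_t *idx_flags, uint64_t idx)
{
	*idx_flags = (*idx_flags & ~IDX_MASK) | (idx & IDX_MASK);
}

int main(void)
{
	uint64_t v = 0;

	set_idx(&v, 123456);
	v |= ERROR_BIT;		/* flag survives alongside the index */

	assert(get_idx(v) == 123456);
	assert(get_error(v) == 1);
	return 0;
}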
diff --git a/libbcachefs/sb-downgrade.c b/libbcachefs/sb-downgrade.c
index ae715ff6..fe453e17 100644
--- a/libbcachefs/sb-downgrade.c
+++ b/libbcachefs/sb-downgrade.c
@@ -81,7 +81,17 @@
 	  BCH_FSCK_ERR_accounting_mismatch)			\
 	x(inode_has_child_snapshots,				\
 	  BIT_ULL(BCH_RECOVERY_PASS_check_inodes),		\
-	  BCH_FSCK_ERR_inode_has_child_snapshots_wrong)
+	  BCH_FSCK_ERR_inode_has_child_snapshots_wrong)		\
+	x(backpointer_bucket_gen,				\
+	  BIT_ULL(BCH_RECOVERY_PASS_check_backpointers_to_extents)|\
+	  BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers),\
+	  BCH_FSCK_ERR_backpointer_to_missing_ptr,		\
+	  BCH_FSCK_ERR_ptr_to_missing_backpointer)		\
+	x(disk_accounting_big_endian,				\
+	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
+	  BCH_FSCK_ERR_accounting_mismatch,			\
+	  BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0,	\
+	  BCH_FSCK_ERR_accounting_key_junk_at_end)
 
 #define DOWNGRADE_TABLE()					\
 	x(bucket_stripe_sectors,				\
@@ -117,7 +127,19 @@
 	  BCH_FSCK_ERR_bkey_version_in_future)			\
 	x(rebalance_work_acct_fix,				\
 	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
-	  BCH_FSCK_ERR_accounting_mismatch)
+	  BCH_FSCK_ERR_accounting_mismatch,			\
+	  BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0,	\
+	  BCH_FSCK_ERR_accounting_key_junk_at_end)		\
+	x(backpointer_bucket_gen,				\
+	  BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers),\
+	  BCH_FSCK_ERR_backpointer_bucket_offset_wrong,		\
+	  BCH_FSCK_ERR_backpointer_to_missing_ptr,		\
+	  BCH_FSCK_ERR_ptr_to_missing_backpointer)		\
+	x(disk_accounting_big_endian,				\
+	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
+	  BCH_FSCK_ERR_accounting_mismatch,			\
+	  BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0,	\
+	  BCH_FSCK_ERR_accounting_key_junk_at_end)
 
 struct upgrade_downgrade_entry {
 	u64			recovery_passes;
@@ -143,6 +165,9 @@ UPGRADE_TABLE()
 
 static int have_stripes(struct bch_fs *c)
 {
+	if (IS_ERR_OR_NULL(c->btree_roots_known[BTREE_ID_stripes].b))
+		return 0;
+
 	return !btree_node_fake(c->btree_roots_known[BTREE_ID_stripes].b);
}
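Both tables here are x-macros: each x(version, passes, errors...) entry names a metadata version together with the recovery passes to run and the fsck errors to silence when crossing it, and the table is expanded more than once into different shapes (the UPGRADE_TABLE() expansion visible just above have_stripes() is one such site). A minimal sketch of the expansion pattern, with made-up entries:

#include <stdio.h>

struct entry { const char *name; unsigned long long passes; };

/* the table is written once as an x-macro... */
#define TABLE()						\
	x(backpointer_bucket_gen,	1ULL << 3)	\
	x(disk_accounting_big_endian,	1ULL << 5)

/* ...and expanded into whatever shape each user needs */
static const struct entry entries[] = {
#define x(n, p) { .name = #n, .passes = p },
	TABLE()
#undef x
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		printf("%s: passes 0x%llx\n", entries[i].name, entries[i].passes);
	return 0;
}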
diff --git a/libbcachefs/sb-errors_format.h b/libbcachefs/sb-errors_format.h
index 937275d0..3bbda181 100644
--- a/libbcachefs/sb-errors_format.h
+++ b/libbcachefs/sb-errors_format.h
@@ -5,9 +5,8 @@
 enum bch_fsck_flags {
 	FSCK_CAN_FIX		= 1 << 0,
 	FSCK_CAN_IGNORE		= 1 << 1,
-	FSCK_NEED_FSCK		= 1 << 2,
-	FSCK_NO_RATELIMIT	= 1 << 3,
-	FSCK_AUTOFIX		= 1 << 4,
+	FSCK_NO_RATELIMIT	= 1 << 2,
+	FSCK_AUTOFIX		= 1 << 3,
 };
 
 #define BCH_SB_ERRS()							\
@@ -68,17 +67,17 @@ enum bch_fsck_flags {
 	x(btree_node_bkey_past_bset_end,			 54,	0) \
 	x(btree_node_bkey_bad_format,				 55,	0) \
 	x(btree_node_bad_bkey,					 56,	0) \
-	x(btree_node_bkey_out_of_order,				 57,	0) \
-	x(btree_root_bkey_invalid,				 58,	0) \
-	x(btree_root_read_error,				 59,	0) \
+	x(btree_node_bkey_out_of_order,				 57,	FSCK_AUTOFIX) \
+	x(btree_root_bkey_invalid,				 58,	FSCK_AUTOFIX) \
+	x(btree_root_read_error,				 59,	FSCK_AUTOFIX) \
 	x(btree_root_bad_min_key,				 60,	0) \
 	x(btree_root_bad_max_key,				 61,	0) \
-	x(btree_node_read_error,				 62,	0) \
-	x(btree_node_topology_bad_min_key,			 63,	0) \
-	x(btree_node_topology_bad_max_key,			 64,	0) \
-	x(btree_node_topology_overwritten_by_prev_node,		 65,	0) \
-	x(btree_node_topology_overwritten_by_next_node,		 66,	0) \
-	x(btree_node_topology_interior_node_empty,		 67,	0) \
+	x(btree_node_read_error,				 62,	FSCK_AUTOFIX) \
+	x(btree_node_topology_bad_min_key,			 63,	FSCK_AUTOFIX) \
+	x(btree_node_topology_bad_max_key,			 64,	FSCK_AUTOFIX) \
+	x(btree_node_topology_overwritten_by_prev_node,		 65,	FSCK_AUTOFIX) \
+	x(btree_node_topology_overwritten_by_next_node,		 66,	FSCK_AUTOFIX) \
+	x(btree_node_topology_interior_node_empty,		 67,	FSCK_AUTOFIX) \
 	x(fs_usage_hidden_wrong,				 68,	FSCK_AUTOFIX) \
 	x(fs_usage_btree_wrong,					 69,	FSCK_AUTOFIX) \
 	x(fs_usage_data_wrong,					 70,	FSCK_AUTOFIX) \
@@ -123,11 +122,12 @@ enum bch_fsck_flags {
 	x(alloc_key_cached_sectors_wrong,			109,	FSCK_AUTOFIX) \
 	x(alloc_key_stripe_wrong,				110,	FSCK_AUTOFIX) \
 	x(alloc_key_stripe_redundancy_wrong,			111,	FSCK_AUTOFIX) \
+	x(alloc_key_journal_seq_in_future,			298,	FSCK_AUTOFIX) \
 	x(bucket_sector_count_overflow,				112,	0) \
 	x(bucket_metadata_type_mismatch,			113,	0) \
-	x(need_discard_key_wrong,				114,	0) \
-	x(freespace_key_wrong,					115,	0) \
-	x(freespace_hole_missing,				116,	0) \
+	x(need_discard_key_wrong,				114,	FSCK_AUTOFIX) \
+	x(freespace_key_wrong,					115,	FSCK_AUTOFIX) \
+	x(freespace_hole_missing,				116,	FSCK_AUTOFIX) \
 	x(bucket_gens_val_size_bad,				117,	0) \
 	x(bucket_gens_key_wrong,				118,	FSCK_AUTOFIX) \
 	x(bucket_gens_hole_wrong,				119,	FSCK_AUTOFIX) \
@@ -136,10 +136,13 @@ enum bch_fsck_flags {
 	x(bucket_gens_nonzero_for_invalid_buckets,		122,	FSCK_AUTOFIX) \
 	x(need_discard_freespace_key_to_invalid_dev_bucket,	123,	0) \
 	x(need_discard_freespace_key_bad,			124,	0) \
+	x(discarding_bucket_not_in_need_discard_btree,		291,	0) \
 	x(backpointer_bucket_offset_wrong,			125,	0) \
+	x(backpointer_level_bad,				294,	0) \
+	x(backpointer_dev_bad,					297,	0) \
 	x(backpointer_to_missing_device,			126,	0) \
 	x(backpointer_to_missing_alloc,				127,	0) \
-	x(backpointer_to_missing_ptr,				128,	0) \
+	x(backpointer_to_missing_ptr,				128,	FSCK_AUTOFIX) \
 	x(lru_entry_at_time_0,					129,	FSCK_AUTOFIX) \
 	x(lru_entry_to_invalid_bucket,				130,	FSCK_AUTOFIX) \
 	x(lru_entry_bad,					131,	FSCK_AUTOFIX) \
@@ -169,15 +172,18 @@ enum bch_fsck_flags {
 	x(ptr_bucket_data_type_mismatch,			155,	0) \
 	x(ptr_cached_and_erasure_coded,				156,	0) \
 	x(ptr_crc_uncompressed_size_too_small,			157,	0) \
+	x(ptr_crc_uncompressed_size_too_big,			161,	0) \
+	x(ptr_crc_uncompressed_size_mismatch,			300,	0) \
 	x(ptr_crc_csum_type_unknown,				158,	0) \
 	x(ptr_crc_compression_type_unknown,			159,	0) \
 	x(ptr_crc_redundant,					160,	0) \
-	x(ptr_crc_uncompressed_size_too_big,			161,	0) \
 	x(ptr_crc_nonce_mismatch,				162,	0) \
 	x(ptr_stripe_redundant,					163,	0) \
 	x(reservation_key_nr_replicas_invalid,			164,	0) \
 	x(reflink_v_refcount_wrong,				165,	0) \
+	x(reflink_v_pos_bad,					292,	0) \
 	x(reflink_p_to_missing_reflink_v,			166,	0) \
+	x(reflink_refcount_underflow,				293,	0) \
 	x(stripe_pos_bad,					167,	0) \
 	x(stripe_val_size_bad,					168,	0) \
 	x(stripe_csum_granularity_bad,				290,	0) \
@@ -228,6 +234,7 @@ enum bch_fsck_flags {
 	x(inode_wrong_nlink,					209,	FSCK_AUTOFIX) \
 	x(inode_has_child_snapshots_wrong,			287,	0) \
 	x(inode_unreachable,					210,	FSCK_AUTOFIX) \
+	x(inode_journal_seq_in_future,				299,	FSCK_AUTOFIX) \
 	x(deleted_inode_but_clean,				211,	FSCK_AUTOFIX) \
 	x(deleted_inode_missing,				212,	FSCK_AUTOFIX) \
 	x(deleted_inode_is_dir,					213,	FSCK_AUTOFIX) \
@@ -284,7 +291,7 @@ enum bch_fsck_flags {
 	x(btree_root_unreadable_and_scan_found_nothing,		263,	0) \
 	x(snapshot_node_missing,				264,	0) \
 	x(dup_backpointer_to_bad_csum_extent,			265,	0) \
-	x(btree_bitmap_not_marked,				266,	0) \
+	x(btree_bitmap_not_marked,				266,	FSCK_AUTOFIX) \
 	x(sb_clean_entry_overrun,				267,	0) \
 	x(btree_ptr_v2_written_0,				268,	0) \
 	x(subvol_snapshot_bad,					269,	0) \
@@ -302,7 +309,9 @@ enum bch_fsck_flags {
 	x(accounting_key_replicas_devs_unsorted,		280,	FSCK_AUTOFIX) \
 	x(accounting_key_version_0,				282,	FSCK_AUTOFIX) \
 	x(logged_op_but_clean,					283,	FSCK_AUTOFIX) \
-	x(MAX,							291,	0)
+	x(compression_opt_not_marked_in_sb,			295,	FSCK_AUTOFIX) \
+	x(compression_type_not_marked_in_sb,			296,	FSCK_AUTOFIX) \
+	x(MAX,							301,	0)
 
 enum bch_sb_error_id {
 #define x(t, n, ...) BCH_FSCK_ERR_##t = n,
diff --git a/libbcachefs/sb-members.c b/libbcachefs/sb-members.c
index fb08dd68..116131f9 100644
--- a/libbcachefs/sb-members.c
+++ b/libbcachefs/sb-members.c
@@ -163,7 +163,7 @@ static int validate_member(struct printbuf *err,
 		return -BCH_ERR_invalid_sb_members;
 	}
 
-	if (m.btree_bitmap_shift >= 64) {
+	if (m.btree_bitmap_shift >= BCH_MI_BTREE_BITMAP_SHIFT_MAX) {
 		prt_printf(err, "device %u: invalid btree_bitmap_shift %u", i, m.btree_bitmap_shift);
 		return -BCH_ERR_invalid_sb_members;
 	}
@@ -450,7 +450,7 @@ static void __bch2_dev_btree_bitmap_mark(struct bch_sb_field_members_v2 *mi, uns
 		m->btree_bitmap_shift += resize;
 	}
 
-	BUG_ON(m->btree_bitmap_shift > 57);
+	BUG_ON(m->btree_bitmap_shift >= BCH_MI_BTREE_BITMAP_SHIFT_MAX);
 	BUG_ON(end > 64ULL << m->btree_bitmap_shift);
 
 	for (unsigned bit = start >> m->btree_bitmap_shift;
diff --git a/libbcachefs/sb-members_format.h b/libbcachefs/sb-members_format.h
index d727d2df..2adf1221 100644
--- a/libbcachefs/sb-members_format.h
+++ b/libbcachefs/sb-members_format.h
@@ -66,6 +66,12 @@ struct bch_member {
 };
 
 /*
+ * btree_allocated_bitmap can represent sector addresses of a u64: it itself has
+ * 64 elements, so 64 - ilog2(64)
+ */
+#define BCH_MI_BTREE_BITMAP_SHIFT_MAX	58
+
+/*
 * This limit comes from the bucket_gens array - it's a single allocation, and
 * kernel allocation are limited to INT_MAX
 */
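The new BCH_MI_BTREE_BITMAP_SHIFT_MAX constant replaces two slightly inconsistent hard-coded bounds (>= 64 in validate_member(), > 57 in the mark path). The arithmetic behind 58: the per-device bitmap has 64 = 2^6 bits, each bit covers 2^shift sectors, so a shift of 64 - 6 = 58 already spans 64 << 58 = 2^64 sectors, the entire address space of a u64. A compile-time sketch of that bound:

#include <stdint.h>
#include <assert.h>

#define BITMAP_BITS	64	/* bits in the per-device bitmap */
#define SHIFT_MAX	58	/* 64 - log2(64) */

int main(void)
{
	/* one step below the max shift, the bitmap spans half the u64
	 * sector space: 64 << 57 == 2^6 * 2^57 == 2^63 */
	assert((uint64_t)BITMAP_BITS << (SHIFT_MAX - 1) == UINT64_C(1) << 63);

	/* at SHIFT_MAX itself, coverage is exactly 2^64 */
	static_assert(6 + SHIFT_MAX == 64, "bitmap covers the full u64 range");
	return 0;
}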
diff --git a/libbcachefs/siphash.c b/libbcachefs/siphash.c
index dc1a27cc..a1cc44e6 100644
--- a/libbcachefs/siphash.c
+++ b/libbcachefs/siphash.c
@@ -45,7 +45,7 @@
  */
 
 #include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/bitops.h>
 #include <linux/string.h>
diff --git a/libbcachefs/snapshot.c b/libbcachefs/snapshot.c
index 34e01bd8..f368270d 100644
--- a/libbcachefs/snapshot.c
+++ b/libbcachefs/snapshot.c
@@ -32,7 +32,7 @@ void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
 }
 
 int bch2_snapshot_tree_validate(struct bch_fs *c, struct bkey_s_c k,
-				enum bch_validate_flags flags)
+				struct bkey_validate_context from)
 {
 	int ret = 0;
 
@@ -225,7 +225,7 @@ void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
 }
 
 int bch2_snapshot_validate(struct bch_fs *c, struct bkey_s_c k,
-			   enum bch_validate_flags flags)
+			   struct bkey_validate_context from)
 {
 	struct bkey_s_c_snapshot s;
 	u32 i, id;
@@ -1733,8 +1733,12 @@ void bch2_delete_dead_snapshots_work(struct work_struct *work)
 
 void bch2_delete_dead_snapshots_async(struct bch_fs *c)
 {
-	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
-	    !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
+	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots))
+		return;
+
+	BUG_ON(!test_bit(BCH_FS_may_go_rw, &c->flags));
+
+	if (!queue_work(c->write_ref_wq, &c->snapshot_delete_work))
 		bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
 }
diff --git a/libbcachefs/snapshot.h b/libbcachefs/snapshot.h
index 29c94716..ae23d45f 100644
--- a/libbcachefs/snapshot.h
+++ b/libbcachefs/snapshot.h
@@ -2,11 +2,9 @@
 #ifndef _BCACHEFS_SNAPSHOT_H
 #define _BCACHEFS_SNAPSHOT_H
 
-enum bch_validate_flags;
-
 void bch2_snapshot_tree_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 int bch2_snapshot_tree_validate(struct bch_fs *, struct bkey_s_c,
-				enum bch_validate_flags);
+				struct bkey_validate_context);
 
 #define bch2_bkey_ops_snapshot_tree ((struct bkey_ops) {	\
 	.key_validate	= bch2_snapshot_tree_validate,		\
@@ -19,7 +17,8 @@ struct bkey_i_snapshot_tree *__bch2_snapshot_tree_create(struct btree_trans *);
 int bch2_snapshot_tree_lookup(struct btree_trans *, u32, struct bch_snapshot_tree *);
 
 void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-int bch2_snapshot_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+int bch2_snapshot_validate(struct bch_fs *, struct bkey_s_c,
+			   struct bkey_validate_context);
 int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
 		       struct bkey_s_c, struct bkey_s,
 		       enum btree_iter_update_trigger_flags);
diff --git a/libbcachefs/str_hash.h b/libbcachefs/str_hash.h
index ec2b1fee..00c78505 100644
--- a/libbcachefs/str_hash.h
+++ b/libbcachefs/str_hash.h
@@ -160,7 +160,7 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
 	struct bkey_s_c k;
 	int ret;
 
-	for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
+	for_each_btree_key_max_norestart(trans, *iter, desc.btree_id,
 			   SPOS(inum.inum, desc.hash_key(info, key), snapshot),
 			   POS(inum.inum, U64_MAX),
 			   BTREE_ITER_slots|flags, k, ret) {
@@ -210,7 +210,7 @@ bch2_hash_hole(struct btree_trans *trans,
 	if (ret)
 		return ret;
 
-	for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
+	for_each_btree_key_max_norestart(trans, *iter, desc.btree_id,
 			   SPOS(inum.inum, desc.hash_key(info, key), snapshot),
 			   POS(inum.inum, U64_MAX),
 			   BTREE_ITER_slots|BTREE_ITER_intent, k, ret)
@@ -265,7 +265,7 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
 	bool found = false;
 	int ret;
 
-	for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
+	for_each_btree_key_max_norestart(trans, *iter, desc.btree_id,
 			   SPOS(insert->k.p.inode,
 				desc.hash_bkey(info, bkey_i_to_s_c(insert)),
 				snapshot),
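Running through this whole commit is a tree-wide signature change: every key validate hook now takes a struct bkey_validate_context by value instead of the old enum bch_validate_flags, so validation can know where a key came from rather than just a flags word. The exact fields of the real struct are not shown in this diff; the following is only a sketch of what such a migration looks like for a hypothetical key type, with every field name an assumption:

/* hypothetical context -- the real bkey_validate_context lives in the
 * bcachefs headers and its members are not visible in this diff */
struct validate_context {
	unsigned	flags;		/* what the old enum carried */
	int		btree_id;	/* assumed: where the key was found */
	unsigned	level;		/* assumed: btree level */
};

struct key { int type; };

/* old shape: int key_validate(struct key *, enum validate_flags); */
static int key_validate(const struct key *k, struct validate_context from)
{
	/* checks can now vary by origin, not just by flags */
	return from.level == 0 && k->type >= 0 ? 0 : -1;
}

int main(void)
{
	struct key k = { .type = 1 };
	struct validate_context from = { .flags = 0, .btree_id = 0, .level = 0 };

	return key_validate(&k, from);
}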
diff --git a/libbcachefs/subvolume.c b/libbcachefs/subvolume.c
index 80e5efaf..5e5ae405 100644
--- a/libbcachefs/subvolume.c
+++ b/libbcachefs/subvolume.c
@@ -207,7 +207,7 @@ int bch2_check_subvol_children(struct bch_fs *c)
 /* Subvolumes: */
 
 int bch2_subvolume_validate(struct bch_fs *c, struct bkey_s_c k,
-			    enum bch_validate_flags flags)
+			    struct bkey_validate_context from)
 {
 	struct bkey_s_c_subvolume subvol = bkey_s_c_to_subvolume(k);
 	int ret = 0;
@@ -675,7 +675,7 @@ err:
 /* set bi_subvol on root inode */
 int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
 {
-	int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
+	int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 				__bch2_fs_upgrade_for_subvolumes(trans));
 	bch_err_fn(c, ret);
 	return ret;
diff --git a/libbcachefs/subvolume.h b/libbcachefs/subvolume.h
index f897d106..d53d292c 100644
--- a/libbcachefs/subvolume.h
+++ b/libbcachefs/subvolume.h
@@ -5,12 +5,11 @@
 #include "darray.h"
 #include "subvolume_types.h"
 
-enum bch_validate_flags;
-
 int bch2_check_subvols(struct bch_fs *);
 int bch2_check_subvol_children(struct bch_fs *);
 
-int bch2_subvolume_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+int bch2_subvolume_validate(struct bch_fs *, struct bkey_s_c,
+			    struct bkey_validate_context);
 void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 int bch2_subvolume_trigger(struct btree_trans *, enum btree_id, unsigned,
 			   struct bkey_s_c, struct bkey_s,
@@ -34,7 +33,7 @@ int bch2_subvol_is_ro_trans(struct btree_trans *, u32);
 int bch2_subvol_is_ro(struct bch_fs *, u32);
 
 static inline struct bkey_s_c
-bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos end,
+bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos end,
 				    u32 subvolid, unsigned flags)
 {
 	u32 snapshot;
@@ -43,10 +42,10 @@ bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos
 		return bkey_s_c_err(ret);
 
 	bch2_btree_iter_set_snapshot(iter, snapshot);
-	return bch2_btree_iter_peek_upto_type(iter, end, flags);
+	return bch2_btree_iter_peek_max_type(iter, end, flags);
 }
 
-#define for_each_btree_key_in_subvolume_upto_continue(_trans, _iter,		\
+#define for_each_btree_key_in_subvolume_max_continue(_trans, _iter,		\
 				_end, _subvolid, _flags, _k, _do)		\
 ({										\
 	struct bkey_s_c _k;							\
 	int _ret3 = 0;								\
@@ -54,7 +53,7 @@ bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos
 										\
 	do {									\
 		_ret3 = lockrestart_do(_trans, ({				\
-			(_k) = bch2_btree_iter_peek_in_subvolume_upto_type(&(_iter),\
+			(_k) = bch2_btree_iter_peek_in_subvolume_max_type(&(_iter),\
 						_end, _subvolid, (_flags));	\
 			if (!(_k).k)						\
 				break;						\
@@ -67,14 +66,14 @@ bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos
 	_ret3;									\
 })
 
-#define for_each_btree_key_in_subvolume_upto(_trans, _iter, _btree_id,		\
+#define for_each_btree_key_in_subvolume_max(_trans, _iter, _btree_id,		\
 				_start, _end, _subvolid, _flags, _k, _do)	\
 ({										\
 	struct btree_iter _iter;						\
 	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),			\
 			     (_start), (_flags));				\
 										\
-	for_each_btree_key_in_subvolume_upto_continue(_trans, _iter,		\
+	for_each_btree_key_in_subvolume_max_continue(_trans, _iter,		\
 				_end, _subvolid, _flags, _k, _do);		\
 })
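The upto → max rename seen here (and in str_hash.h, tests.c and the Rust bindings earlier in this diff) is purely about naming the convention: these iterators walk keys from a start position to an *inclusive* end bound, resolving the subvolume to a snapshot ID first and running a statement body per key — xattr.c later in this diff shows a real call site. A toy, self-contained illustration of the inclusive-bound iteration shape, over a plain integer keyspace rather than a btree:

#include <stdio.h>

/* standalone illustration of the "_max" convention: iterate from start
 * to an inclusive end bound, running a body per key */
#define for_each_key_max(_k, _start, _end)	\
	for (unsigned long long _k = (_start); _k <= (_end); _k++)

int main(void)
{
	/* analogous to walking BTREE_ID_xattrs from offset 10 to 13,
	 * where 13 itself is still visited */
	for_each_key_max(k, 10, 13)
		printf("key %llu\n", k);	/* prints 10..13 inclusive */
	return 0;
}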
"write" : "read", + str_write_read(bio_data_dir(bio)), bch2_blk_status_to_str(bio->bi_status))) ca->sb_write_error = 1; @@ -886,14 +893,15 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca) struct bch_sb *sb = ca->disk_sb.sb; struct bio *bio = ca->disk_sb.bio; + memset(ca->sb_read_scratch, 0, BCH_SB_READ_SCRATCH_BUF_SIZE); + bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ|REQ_SYNC|REQ_META); bio->bi_iter.bi_sector = le64_to_cpu(sb->layout.sb_offset[0]); bio->bi_end_io = write_super_endio; bio->bi_private = ca; - bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE); + bch2_bio_map(bio, ca->sb_read_scratch, BCH_SB_READ_SCRATCH_BUF_SIZE); - this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb], - bio_sectors(bio)); + this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb], bio_sectors(bio)); percpu_ref_get(&ca->io_ref); closure_bio_submit(bio, &c->sb_write); diff --git a/libbcachefs/super-io.h b/libbcachefs/super-io.h index fadd364e..90e7b176 100644 --- a/libbcachefs/super-io.h +++ b/libbcachefs/super-io.h @@ -10,6 +10,8 @@ #include <asm/byteorder.h> +#define BCH_SB_READ_SCRATCH_BUF_SIZE 4096 + static inline bool bch2_version_compatible(u16 version) { return BCH_VERSION_MAJOR(version) <= BCH_VERSION_MAJOR(bcachefs_metadata_version_current) && diff --git a/libbcachefs/super.c b/libbcachefs/super.c index d1b67e60..14157820 100644 --- a/libbcachefs/super.c +++ b/libbcachefs/super.c @@ -272,6 +272,7 @@ static void __bch2_fs_read_only(struct bch_fs *c) clean_passes++; if (bch2_btree_interior_updates_flush(c) || + bch2_btree_write_buffer_flush_going_ro(c) || bch2_journal_flush_all_pins(&c->journal) || bch2_btree_flush_all_writes(c) || seq != atomic64_read(&c->journal.seq)) { @@ -289,7 +290,7 @@ static void __bch2_fs_read_only(struct bch_fs *c) bch2_fs_journal_stop(&c->journal); - bch_info(c, "%sshutdown complete, journal seq %llu", + bch_info(c, "%sclean shutdown complete, journal seq %llu", test_bit(BCH_FS_clean_shutdown, &c->flags) ? 
"" : "un", c->journal.seq_ondisk); @@ -440,6 +441,8 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early) { int ret; + BUG_ON(!test_bit(BCH_FS_may_go_rw, &c->flags)); + if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) { bch_err(c, "cannot go rw, unfixed btree errors"); return -BCH_ERR_erofs_unfixed_errors; @@ -770,8 +773,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) init_rwsem(&c->gc_lock); mutex_init(&c->gc_gens_lock); - atomic_set(&c->journal_keys.ref, 1); - c->journal_keys.initial_ref_held = true; for (i = 0; i < BCH_TIME_STAT_NR; i++) bch2_time_stats_init(&c->times[i]); @@ -781,6 +782,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) bch2_fs_btree_key_cache_init_early(&c->btree_key_cache); bch2_fs_btree_iter_init_early(c); bch2_fs_btree_interior_update_init_early(c); + bch2_fs_journal_keys_init(c); bch2_fs_allocator_background_init(c); bch2_fs_allocator_foreground_init(c); bch2_fs_rebalance_init(c); @@ -809,9 +811,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) INIT_LIST_HEAD(&c->vfs_inodes_list); mutex_init(&c->vfs_inodes_lock); - c->copy_gc_enabled = 1; - c->rebalance.enabled = 1; - c->journal.flush_write_time = &c->times[BCH_TIME_journal_flush_write]; c->journal.noflush_write_time = &c->times[BCH_TIME_journal_noflush_write]; c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq]; @@ -1033,9 +1032,12 @@ int bch2_fs_start(struct bch_fs *c) bch2_dev_allocator_add(c, ca); bch2_recalc_capacity(c); + c->recovery_task = current; ret = BCH_SB_INITIALIZED(c->disk_sb.sb) ? bch2_fs_recovery(c) : bch2_fs_initialize(c); + c->recovery_task = NULL; + if (ret) goto err; @@ -1198,7 +1200,7 @@ static void bch2_dev_free(struct bch_dev *ca) free_percpu(ca->io_done); bch2_dev_buckets_free(ca); - free_page((unsigned long) ca->sb_read_scratch); + kfree(ca->sb_read_scratch); bch2_time_stats_quantiles_exit(&ca->io_latency[WRITE]); bch2_time_stats_quantiles_exit(&ca->io_latency[READ]); @@ -1337,7 +1339,7 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c, if (percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete, PERCPU_REF_INIT_DEAD, GFP_KERNEL) || - !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) || + !(ca->sb_read_scratch = kmalloc(BCH_SB_READ_SCRATCH_BUF_SIZE, GFP_KERNEL)) || bch2_dev_buckets_alloc(c, ca) || !(ca->io_done = alloc_percpu(*ca->io_done))) goto err; @@ -1366,7 +1368,6 @@ static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx) { struct bch_member member = bch2_sb_member_get(c->disk_sb.sb, dev_idx); struct bch_dev *ca = NULL; - int ret = 0; if (bch2_fs_init_fault("dev_alloc")) goto err; @@ -1378,10 +1379,8 @@ static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx) ca->fs = c; bch2_dev_attach(c, ca, dev_idx); - return ret; + return 0; err: - if (ca) - bch2_dev_free(ca); return -BCH_ERR_ENOMEM_dev_alloc; } @@ -1751,11 +1750,6 @@ int bch2_dev_add(struct bch_fs *c, const char *path) if (ret) goto err; - ret = bch2_dev_journal_alloc(ca, true); - bch_err_msg(c, ret, "allocating journal"); - if (ret) - goto err; - down_write(&c->state_lock); mutex_lock(&c->sb_lock); @@ -1806,11 +1800,14 @@ int bch2_dev_add(struct bch_fs *c, const char *path) if (ret) goto err_late; - ca->new_fs_bucket_idx = 0; - if (ca->mi.state == BCH_MEMBER_STATE_rw) __bch2_dev_read_write(c, ca); + ret = bch2_dev_journal_alloc(ca, false); + bch_err_msg(c, ret, "allocating journal"); + if (ret) + goto err_late; + up_write(&c->state_lock); return 0; diff --git 
diff --git a/libbcachefs/super.h b/libbcachefs/super.h
index dada0933..fa6d5221 100644
--- a/libbcachefs/super.h
+++ b/libbcachefs/super.h
@@ -34,16 +34,6 @@ void bch2_fs_read_only(struct bch_fs *);
 int bch2_fs_read_write(struct bch_fs *);
 int bch2_fs_read_write_early(struct bch_fs *);
 
-/*
- * Only for use in the recovery/fsck path:
- */
-static inline void bch2_fs_lazy_rw(struct bch_fs *c)
-{
-	if (!test_bit(BCH_FS_rw, &c->flags) &&
-	    !test_bit(BCH_FS_was_rw, &c->flags))
-		bch2_fs_read_write_early(c);
-}
-
 void __bch2_fs_stop(struct bch_fs *);
 void bch2_fs_free(struct bch_fs *);
 void bch2_fs_stop(struct bch_fs *);
diff --git a/libbcachefs/sysfs.c b/libbcachefs/sysfs.c
index 3270bfab..97733c76 100644
--- a/libbcachefs/sysfs.c
+++ b/libbcachefs/sysfs.c
@@ -146,7 +146,7 @@ write_attribute(trigger_journal_writes);
 write_attribute(trigger_btree_cache_shrink);
 write_attribute(trigger_btree_key_cache_shrink);
 write_attribute(trigger_freelist_wakeup);
-rw_attribute(gc_gens_pos);
+read_attribute(gc_gens_pos);
 
 read_attribute(uuid);
 read_attribute(minor);
@@ -211,12 +211,11 @@ BCH_PERSISTENT_COUNTERS()
 #undef x
 
 rw_attribute(discard);
+read_attribute(state);
 rw_attribute(label);
 
-rw_attribute(copy_gc_enabled);
 read_attribute(copy_gc_wait);
 
-rw_attribute(rebalance_enabled);
 sysfs_pd_controller_attribute(rebalance);
 read_attribute(rebalance_status);
 
@@ -237,11 +236,6 @@ write_attribute(perf_test);
 BCH_TIME_STATS()
 #undef x
 
-static struct attribute sysfs_state_rw = {
-	.name = "state",
-	.mode =  0444,
-};
-
 static size_t bch2_btree_cache_size(struct bch_fs *c)
 {
 	struct btree_cache *bc = &c->btree_cache;
@@ -340,9 +334,6 @@ SHOW(bch2_fs)
 	if (attr == &sysfs_gc_gens_pos)
 		bch2_gc_gens_pos_to_text(out, c);
 
-	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
-
-	sysfs_printf(rebalance_enabled,		"%i", c->rebalance.enabled);
 	sysfs_pd_controller_show(rebalance,	&c->rebalance.pd); /* XXX */
 
 	if (attr == &sysfs_copy_gc_wait)
@@ -419,23 +410,6 @@ STORE(bch2_fs)
 {
 	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
 
-	if (attr == &sysfs_copy_gc_enabled) {
-		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
-			?: (ssize_t) size;
-
-		if (c->copygc_thread)
-			wake_up_process(c->copygc_thread);
-		return ret;
-	}
-
-	if (attr == &sysfs_rebalance_enabled) {
-		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
-			?: (ssize_t) size;
-
-		rebalance_wakeup(c);
-		return ret;
-	}
-
 	sysfs_pd_controller_store(rebalance,	&c->rebalance.pd);
 
 	/* Debugging: */
@@ -611,10 +585,8 @@ struct attribute *bch2_fs_internal_files[] = {
 
 	&sysfs_gc_gens_pos,
 
-	&sysfs_copy_gc_enabled,
 	&sysfs_copy_gc_wait,
 
-	&sysfs_rebalance_enabled,
 	sysfs_pd_controller_files(rebalance),
 
 	&sysfs_moving_ctxts,
@@ -683,6 +655,13 @@ STORE(bch2_fs_opts_dir)
 	     (id == Opt_compression && !c->opts.background_compression)))
 		bch2_set_rebalance_needs_scan(c, 0);
 
+	if (v && id == Opt_rebalance_enabled)
+		rebalance_wakeup(c);
+
+	if (v && id == Opt_copygc_enabled &&
+	    c->copygc_thread)
+		wake_up_process(c->copygc_thread);
+
 	ret = size;
 err:
 	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
@@ -791,7 +770,7 @@ SHOW(bch2_dev)
 		prt_char(out, '\n');
 	}
 
-	if (attr == &sysfs_state_rw) {
+	if (attr == &sysfs_state) {
 		prt_string_option(out, bch2_member_states, ca->mi.state);
 		prt_char(out, '\n');
 	}
@@ -871,7 +850,7 @@ struct attribute *bch2_dev_files[] = {
 
 	/* settings: */
 	&sysfs_discard,
-	&sysfs_state_rw,
+	&sysfs_state,
 	&sysfs_label,
 
 	&sysfs_has_data,
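copy_gc_enabled and rebalance_enabled stop being ad-hoc sysfs attributes and become regular filesystem options (the Opt_copygc_enabled / Opt_rebalance_enabled checks in the opts-dir store hook above wake the relevant thread when they are flipped on), and the hand-rolled sysfs_state_rw attribute becomes an ordinary read_attribute(state). The read_attribute()/rw_attribute() helpers expand to struct attribute definitions with a matching mode; a simplified sketch of that pattern — these macro bodies are illustrative, not the kernel's actual definitions:

#include <stdio.h>

struct attribute { const char *name; unsigned short mode; };

/* simplified versions of the helper macros used throughout sysfs.c */
#define read_attribute(n)	\
	static struct attribute sysfs_##n = { .name = #n, .mode = 0444 }
#define rw_attribute(n)		\
	static struct attribute sysfs_##n = { .name = #n, .mode = 0644 }

read_attribute(state);		/* replaces the hand-rolled sysfs_state_rw */
rw_attribute(label);

int main(void)
{
	printf("%s: %o\n", sysfs_state.name, sysfs_state.mode);
	printf("%s: %o\n", sysfs_label.name, sysfs_label.mode);
	return 0;
}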
diff --git a/libbcachefs/tests.c b/libbcachefs/tests.c
index 315038a0..6c646981 100644
--- a/libbcachefs/tests.c
+++ b/libbcachefs/tests.c
@@ -131,7 +131,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
 	i = 0;
 
 	ret = bch2_trans_run(c,
-		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
+		for_each_btree_key_max(trans, iter, BTREE_ID_xattrs,
 					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
 					0, k, ({
 			BUG_ON(k.k->p.offset != i++);
@@ -186,7 +186,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
 	i = 0;
 
 	ret = bch2_trans_run(c,
-		for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
+		for_each_btree_key_max(trans, iter, BTREE_ID_extents,
 					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
 					0, k, ({
 			BUG_ON(bkey_start_offset(k.k) != i);
@@ -242,7 +242,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
 	i = 0;
 
 	ret = bch2_trans_run(c,
-		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
+		for_each_btree_key_max(trans, iter, BTREE_ID_xattrs,
 					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
 					0, k, ({
 			BUG_ON(k.k->p.offset != i);
@@ -259,7 +259,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
 	i = 0;
 
 	ret = bch2_trans_run(c,
-		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
+		for_each_btree_key_max(trans, iter, BTREE_ID_xattrs,
 					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
 					BTREE_ITER_slots, k, ({
 			if (i >= nr * 2)
@@ -302,7 +302,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 	i = 0;
 
 	ret = bch2_trans_run(c,
-		for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
+		for_each_btree_key_max(trans, iter, BTREE_ID_extents,
 					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
 					0, k, ({
 			BUG_ON(bkey_start_offset(k.k) != i + 8);
@@ -320,7 +320,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 	i = 0;
 
 	ret = bch2_trans_run(c,
-		for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
+		for_each_btree_key_max(trans, iter, BTREE_ID_extents,
 					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
 					BTREE_ITER_slots, k, ({
 			if (i == nr)
@@ -349,10 +349,10 @@ static int test_peek_end(struct bch_fs *c, u64 nr)
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
 			     SPOS(0, 0, U32_MAX), 0);
 
-	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
+	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
-	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
+	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
 	bch2_trans_iter_exit(trans, &iter);
@@ -369,10 +369,10 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
 			     SPOS(0, 0, U32_MAX), 0);
 
-	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
+	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
-	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
+	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
 	bch2_trans_iter_exit(trans, &iter);
@@ -488,7 +488,7 @@ static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
 	trans = bch2_trans_get(c);
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
 			     SPOS(0, 0, snapid_lo), 0);
-	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
+	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
 
 	BUG_ON(k.k->p.snapshot != U32_MAX);
 
@@ -672,7 +672,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
 			     BTREE_ITER_intent);
-	k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX));
+	k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX));
 	ret = bkey_err(k);
 	if (ret)
 		goto err;
@@ -726,7 +726,7 @@ static int seq_insert(struct bch_fs *c, u64 nr)
 static int seq_lookup(struct bch_fs *c, u64 nr)
 {
 	return bch2_trans_run(c,
-		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
+		for_each_btree_key_max(trans, iter, BTREE_ID_xattrs,
 				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
 				  0, k,
 		0));
@@ -809,6 +809,11 @@ int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
 	unsigned i;
 	u64 time;
 
+	if (nr == 0 || nr_threads == 0) {
+		pr_err("nr of iterations or threads is not allowed to be 0");
+		return -EINVAL;
+	}
+
 	atomic_set(&j.ready, nr_threads);
 	init_waitqueue_head(&j.ready_wait);
diff --git a/libbcachefs/trace.h b/libbcachefs/trace.h
index 5597b9d6..2d5932d2 100644
--- a/libbcachefs/trace.h
+++ b/libbcachefs/trace.h
@@ -848,8 +848,8 @@ TRACE_EVENT(move_data,
 TRACE_EVENT(evacuate_bucket,
 	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
 		 unsigned sectors, unsigned bucket_size,
-		 u64 fragmentation, int ret),
-	TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),
+		 int ret),
+	TP_ARGS(c, bucket, sectors, bucket_size, ret),
 
 	TP_STRUCT__entry(
 		__field(dev_t,		dev		)
@@ -857,7 +857,6 @@ TRACE_EVENT(evacuate_bucket,
 		__field(u64,		bucket		)
 		__field(u32,		sectors		)
 		__field(u32,		bucket_size	)
-		__field(u64,		fragmentation	)
 		__field(int,		ret		)
 	),
 
@@ -867,15 +866,14 @@ TRACE_EVENT(evacuate_bucket,
 		__entry->bucket		= bucket->offset;
 		__entry->sectors	= sectors;
 		__entry->bucket_size	= bucket_size;
-		__entry->fragmentation	= fragmentation;
 		__entry->ret		= ret;
 	),
 
-	TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
+	TP_printk("%d,%d %llu:%llu sectors %u/%u ret %i",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->member, __entry->bucket,
 		  __entry->sectors, __entry->bucket_size,
-		  __entry->fragmentation, __entry->ret)
+		  __entry->ret)
 );
 
 TRACE_EVENT(copygc,
diff --git a/libbcachefs/util.h b/libbcachefs/util.h
index fb02c1c3..c292b9ce 100644
--- a/libbcachefs/util.h
+++ b/libbcachefs/util.h
@@ -317,6 +317,19 @@ do {									\
 	_ptr ? container_of(_ptr, type, member) : NULL;			\
 })
 
+static inline struct list_head *list_pop(struct list_head *head)
+{
+	if (list_empty(head))
+		return NULL;
+
+	struct list_head *ret = head->next;
+	list_del_init(ret);
+	return ret;
+}
+
+#define list_pop_entry(head, type, member)				\
+	container_of_or_null(list_pop(head), type, member)
+
 /* Does linear interpolation between powers of two */
 static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
 {
@@ -696,4 +709,13 @@ static inline bool test_bit_le64(size_t bit, __le64 *addr)
 	return (addr[bit / 64] & cpu_to_le64(BIT_ULL(bit % 64))) != 0;
 }
 
+static inline void memcpy_swab(void *_dst, void *_src, size_t len)
+{
+	u8 *dst = _dst + len;
+	u8 *src = _src;
+
+	while (len--)
+		*--dst = *src++;
+}
+
 #endif /* _BCACHEFS_UTIL_H */
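Two small helpers land in util.h: list_pop()/list_pop_entry() detach and return the first element of a list, and memcpy_swab() copies a buffer while reversing its byte order — the primitive behind the disk_accounting_big_endian metadata version this commit introduces, which changes how accounting keys are byte-ordered on disk so they sort correctly. A userspace sketch of memcpy_swab's behaviour:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* copy len bytes from src to dst, reversing their order (as in util.h) */
static void memcpy_swab(void *_dst, const void *_src, size_t len)
{
	uint8_t *dst = (uint8_t *)_dst + len;
	const uint8_t *src = _src;

	while (len--)
		*--dst = *src++;
}

int main(void)
{
	uint8_t le[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t be[8];

	/* a little-endian u64 becomes its big-endian representation */
	memcpy_swab(be, le, sizeof(le));
	assert(be[0] == 8 && be[7] == 1);
	return 0;
}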
diff --git a/libbcachefs/varint.c b/libbcachefs/varint.c
index a9ebcd82..6a78553d 100644
--- a/libbcachefs/varint.c
+++ b/libbcachefs/varint.c
@@ -3,7 +3,7 @@
 #include <linux/bitops.h>
 #include <linux/math.h>
 #include <linux/string.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 
 #ifdef CONFIG_VALGRIND
 #include <valgrind/memcheck.h>
diff --git a/libbcachefs/xattr.c b/libbcachefs/xattr.c
index bf3c6bb5..aed7c698 100644
--- a/libbcachefs/xattr.c
+++ b/libbcachefs/xattr.c
@@ -71,7 +71,7 @@ const struct bch_hash_desc bch2_xattr_hash_desc = {
 };
 
 int bch2_xattr_validate(struct bch_fs *c, struct bkey_s_c k,
-			enum bch_validate_flags flags)
+			struct bkey_validate_context from)
 {
 	struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
 	unsigned val_u64s = xattr_val_u64s(xattr.v->x_name_len,
@@ -309,7 +309,7 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer,
 	u64 offset = 0, inum = inode->ei_inode.bi_inum;
 
 	int ret = bch2_trans_run(c,
-		for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_xattrs,
+		for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_xattrs,
 				   POS(inum, offset),
 				   POS(inum, U64_MAX),
 				   inode->ei_inum.subvol, 0, k, ({
@@ -565,13 +565,6 @@ static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler,
 	ret = bch2_write_inode(c, inode, inode_opt_set_fn, &s, 0);
 err:
 	mutex_unlock(&inode->ei_update_lock);
-
-	if (value &&
-	    (opt_id == Opt_background_target ||
-	     opt_id == Opt_background_compression ||
-	     (opt_id == Opt_compression && !inode_opt_get(c, &inode->ei_inode, background_compression))))
-		bch2_set_rebalance_needs_scan(c, inode->ei_inode.bi_inum);
-
 err_class_exit:
 	return bch2_err_class(ret);
 }
diff --git a/libbcachefs/xattr.h b/libbcachefs/xattr.h
index 2c96de05..132fbbd1 100644
--- a/libbcachefs/xattr.h
+++ b/libbcachefs/xattr.h
@@ -6,7 +6,8 @@
 
 extern const struct bch_hash_desc bch2_xattr_hash_desc;
 
-int bch2_xattr_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+int bch2_xattr_validate(struct bch_fs *, struct bkey_s_c,
+			struct bkey_validate_context);
 void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
 #define bch2_bkey_ops_xattr ((struct bkey_ops) {		\
diff --git a/linux/fs_parser.c b/linux/fs_parser.c
new file mode 100644
index 00000000..b1cd0c72
--- /dev/null
+++ b/linux/fs_parser.c
@@ -0,0 +1,36 @@
+
+#include <linux/kernel.h>
+#include <linux/fs_parser.h>
+#include <string.h>
+
+const struct constant_table bool_names[] = {
+	{ "0",		false },
+	{ "1",		true },
+	{ "false",	false },
+	{ "no",		false },
+	{ "true",	true },
+	{ "yes",	true },
+	{ },
+};
+
+static const struct constant_table *
+__lookup_constant(const struct constant_table *tbl, const char *name)
+{
+	for ( ; tbl->name; tbl++)
+		if (strcmp(name, tbl->name) == 0)
+			return tbl;
+	return NULL;
+}
+
+/**
+ * lookup_constant - Look up a constant by name in an ordered table
+ * @tbl: The table of constants to search.
+ * @name: The name to look up.
+ * @not_found: The value to return if the name is not found.
+ */
+int lookup_constant(const struct constant_table *tbl, const char *name, int not_found)
+{
+	const struct constant_table *p = __lookup_constant(tbl, name);
+
+	return p ? p->value : not_found;
+}
diff --git a/linux/xxhash.c b/linux/xxhash.c
index d5bb9ff1..b5bd567a 100644
--- a/linux/xxhash.c
+++ b/linux/xxhash.c
@@ -38,7 +38,7 @@
  *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */
 
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
 #include <linux/errno.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
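The new linux/fs_parser.c shim ports the kernel's lookup_constant() and bool_names table into the userspace build (matching the include/linux/fs_parser.h header added earlier in this diff), so option parsing can share code with the kernel. Usage is straightforward: probe a NULL-terminated table and fall back to a sentinel. A self-contained sketch mirroring the same contract:

#include <stdio.h>
#include <string.h>

struct constant_table { const char *name; int value; };

static const struct constant_table bool_names[] = {
	{ "0", 0 }, { "1", 1 },
	{ "false", 0 }, { "no", 0 },
	{ "true", 1 }, { "yes", 1 },
	{ },
};

/* same contract as lookup_constant(): return the value, or not_found */
static int lookup_constant(const struct constant_table *tbl,
			   const char *name, int not_found)
{
	for (; tbl->name; tbl++)
		if (!strcmp(name, tbl->name))
			return tbl->value;
	return not_found;
}

int main(void)
{
	printf("%d\n", lookup_constant(bool_names, "yes", -1));	  /* 1 */
	printf("%d\n", lookup_constant(bool_names, "maybe", -1)); /* -1 */
	return 0;
}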