103 files changed, 2500 insertions, 3472 deletions
diff --git a/.bcachefs_revision b/.bcachefs_revision
index 1d699419..7abb0304 100644
--- a/.bcachefs_revision
+++ b/.bcachefs_revision
@@ -1 +1 @@
-4b5105c627f4f1490e9bc4267c8096926de367b5
+deeffbdc52f1092dadb3d523c4429e002c7fc485
diff --git a/c_src/cmd_image.c b/c_src/cmd_image.c
index 58268b4b..d00d85cf 100644
--- a/c_src/cmd_image.c
+++ b/c_src/cmd_image.c
@@ -477,7 +477,7 @@ static void image_create(struct bch_opt_strs fs_opt_strs,
 	if (ret)
 		goto err;
 
-	struct copy_fs_state s = {};
+	struct copy_fs_state s = { .verbosity = verbosity };
 	ret = copy_fs(c, &s, src_fd, src_path) ?:
 		finish_image(c, keep_alloc, verbosity);
 	if (ret)
@@ -635,7 +635,7 @@ static int image_update(const char *src_path, const char *dst_image,
 
 	u64 input_bytes = count_input_size(src_fd);
 
-	if (truncate(dst_image, input_bytes * 2))
+	if (truncate(dst_image, xstat(dst_image).st_size + input_bytes * 2))
 		die("truncate error: %m");
 
 	darray_const_str device_paths = {};
@@ -705,7 +705,7 @@ static int image_update(const char *src_path, const char *dst_image,
 		goto err_stop;
 
 	bch_verbose(c, "Syncing data");
-	struct copy_fs_state s = {};
+	struct copy_fs_state s = { .verbosity = verbosity };
 
 	ret = copy_fs(c, &s, src_fd, src_path) ?:
 		finish_image(c, keep_alloc, verbosity);
diff --git a/c_src/posix_to_bcachefs.c b/c_src/posix_to_bcachefs.c
index 8cb1c7c8..0e7d4c29 100644
--- a/c_src/posix_to_bcachefs.c
+++ b/c_src/posix_to_bcachefs.c
@@ -282,7 +282,7 @@ static void write_data(struct bch_fs *c,
 	closure_call(&op.cl, bch2_write, NULL, NULL);
 	BUG_ON(!(op.flags & BCH_WRITE_submitted));
 
-	dst_inode->bi_sectors += len >> 9;
+	dst_inode->bi_sectors += op.i_sectors_delta;
 
 	if (op.error)
 		die("write error: %s", bch2_err_str(op.error));
@@ -371,6 +371,8 @@ static void copy_link(struct bch_fs *c,
 	if (ret)
 		die("bch2_fpunch error: %s", bch2_err_str(ret));
 
+	dst->bi_sectors += i_sectors_delta;
+
 	ret = readlink(src, src_buf, sizeof(src_buf));
 	if (ret < 0)
 		die("readlink error: %m");
@@ -669,6 +671,7 @@ static int recursive_remove(struct bch_fs *c,
 }
 
 static int delete_non_matching_dirents(struct bch_fs *c,
+				       struct copy_fs_state *s,
 				       subvol_inum dst_dir_inum,
 				       struct bch_inode_unpacked *dst_dir,
 				       dirents src_dirents)
@@ -692,7 +695,8 @@ static int delete_non_matching_dirents(struct bch_fs *c,
 		    !strcmp(dst_d->d_name, "lost+found"))
 			continue;
 
-		printf("deleting %s type %u\n", dst_d->d_name, dst_d->d_type);
+		if (s->verbosity > 1)
+			printf("deleting %s\n", dst_d->d_name);
 
 		ret = recursive_remove(c, dst_dir_inum, dst_dir, dst_d);
 		if (ret)
@@ -724,7 +728,7 @@ static int copy_dir(struct bch_fs *c,
 	sort(dirents.data, dirents.nr,
 	     sizeof(dirents.data[0]), dirent_cmp, NULL);
 
 	subvol_inum dir_inum = { 1, dst->bi_inum };
-	int ret = delete_non_matching_dirents(c, dir_inum, dst, dirents);
+	int ret = delete_non_matching_dirents(c, s, dir_inum, dst, dirents);
 	if (ret)
 		goto err;
diff --git a/c_src/posix_to_bcachefs.h b/c_src/posix_to_bcachefs.h
index 542ae171..3fc586d8 100644
--- a/c_src/posix_to_bcachefs.h
+++ b/c_src/posix_to_bcachefs.h
@@ -36,6 +36,7 @@ struct copy_fs_state {
 	GENRADIX(u64)		hardlinks;
 	ranges			extents;
 	enum bch_migrate_type	type;
+	unsigned		verbosity;
 
 	u64			reserve_start;
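Note on the image_update() change above: the old code truncated the destination image to an absolute size (input_bytes * 2), which could shrink an existing image; the new code grows it relative to its current size. A minimal stand-alone sketch of the same sizing logic, using plain stat()/truncate() in place of the tool's xstat()/die() helpers (the command-line interface here is illustrative, not cmd_image's):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/stat.h>
	#include <unistd.h>

	static off_t file_size(const char *path)
	{
		struct stat st;

		if (stat(path, &st)) {
			perror("stat");
			exit(1);
		}
		return st.st_size;
	}

	int main(int argc, char **argv)
	{
		if (argc != 3) {
			fprintf(stderr, "usage: %s <image> <input-bytes>\n", argv[0]);
			return 1;
		}

		off_t input_bytes = atoll(argv[2]);

		/* grow the image relative to its current size, never shrink it */
		if (truncate(argv[1], file_size(argv[1]) + input_bytes * 2)) {
			perror("truncate");
			return 1;
		}
		return 0;
	}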
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 72056043..b4495886 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -265,6 +265,7 @@ static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
 
 #define REQ_FUA		(1ULL << __REQ_FUA)
 #define REQ_PREFLUSH	(1ULL << __REQ_PREFLUSH)
+#define REQ_IDLE	(1ULL << __REQ_IDLE)
 
 #define RW_MASK		REQ_OP_WRITE
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 153251c0..20fa19a0 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -4,6 +4,7 @@
 #define _LINUX_PERCPU_RWSEM_H
 
 #include <pthread.h>
+#include <linux/cleanup.h>
 #include <linux/preempt.h>
 
 struct percpu_rw_semaphore {
@@ -55,4 +56,11 @@ static inline int percpu_init_rwsem(struct percpu_rw_semaphore *sem)
 
 #define percpu_rwsem_assert_held(sem)	do {} while (0)
 
+DEFINE_GUARD(percpu_read, struct percpu_rw_semaphore *,
+	     percpu_down_read(_T), percpu_up_read(_T))
+DEFINE_GUARD_COND(percpu_read, _try, percpu_down_read_trylock(_T))
+
+DEFINE_GUARD(percpu_write, struct percpu_rw_semaphore *,
+	     percpu_down_write(_T), percpu_up_write(_T))
+
 #endif
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index dbc7c24d..534932f6 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -1,6 +1,8 @@
 #ifndef __LINUX_PREEMPT_H
 #define __LINUX_PREEMPT_H
 
+#include <linux/cleanup.h>
+
 extern void preempt_disable(void);
 extern void preempt_enable(void);
 
@@ -13,4 +15,7 @@ extern void preempt_enable(void);
 #define preempt_enable_notrace()	preempt_enable()
 #define preemptible()			0
 
+DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
+DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
+
 #endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index f851d6a2..4dd30b71 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -2,6 +2,7 @@
 #define __TOOLS_LINUX_RWSEM_H
 
 #include <pthread.h>
+#include <linux/cleanup.h>
 
 struct rw_semaphore {
 	pthread_rwlock_t	lock;
@@ -18,12 +19,53 @@ static inline void init_rwsem(struct rw_semaphore *lock)
 	pthread_rwlock_init(&lock->lock, NULL);
 }
 
-#define down_read(l)		pthread_rwlock_rdlock(&(l)->lock)
-#define down_read_killable(l)	(pthread_rwlock_rdlock(&(l)->lock), 0)
-#define down_read_trylock(l)	(!pthread_rwlock_tryrdlock(&(l)->lock))
-#define up_read(l)		pthread_rwlock_unlock(&(l)->lock)
+static inline void down_read(struct rw_semaphore *sem)
+{
+	pthread_rwlock_rdlock(&sem->lock);
+}
+
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+	return !pthread_rwlock_tryrdlock(&sem->lock);
+}
+
+static inline int down_read_interruptible(struct rw_semaphore *sem)
+{
+	pthread_rwlock_rdlock(&sem->lock);
+	return 0;
+}
+
+static inline int down_read_killable(struct rw_semaphore *sem)
+{
+	pthread_rwlock_rdlock(&sem->lock);
+	return 0;
+}
+
+static inline void up_read(struct rw_semaphore *sem)
+{
+	pthread_rwlock_unlock(&sem->lock);
+}
+
+static inline void down_write(struct rw_semaphore *sem)
+{
+	pthread_rwlock_wrlock(&sem->lock);
+}
+
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+	return !pthread_rwlock_trywrlock(&sem->lock);
+}
+
+static inline void up_write(struct rw_semaphore *sem)
+{
+	pthread_rwlock_unlock(&sem->lock);
+}
+
+DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
+DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
+DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
 
-#define down_write(l)		pthread_rwlock_wrlock(&(l)->lock)
-#define up_write(l)		pthread_rwlock_unlock(&(l)->lock)
+DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
 
 #endif /* __TOOLS_LINUX_RWSEM_H */
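The DEFINE_GUARD()/DEFINE_LOCK_GUARD_0() lines added above generate scope-based lock guards in the style of the kernel's cleanup.h, which everything below relies on. A minimal userspace sketch of the underlying mechanism, the compiler's cleanup attribute, applied to a pthread rwlock; guard_rwsem_read is an illustrative name, not the kernel macro:

	#include <pthread.h>

	static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
	static int shared_val;

	/* called automatically when the guard variable goes out of scope */
	static void read_unlock_cleanup(pthread_rwlock_t **l)
	{
		pthread_rwlock_unlock(*l);
	}

	#define guard_rwsem_read(l) \
		__attribute__((cleanup(read_unlock_cleanup))) \
		pthread_rwlock_t *_guard = (pthread_rwlock_rdlock(l), (l))

	int read_shared(void)
	{
		guard_rwsem_read(&lock);
		return shared_val;	/* unlock runs automatically on return */
	}

This is why the conversions below can delete the explicit up_read()/up_write()/mutex_unlock() calls: the release is tied to scope exit rather than to a statement that every return path must reach.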
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 5c83c766..20cb1756 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -42,11 +42,11 @@ DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
 		    spin_unlock(_T->lock))
 DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
 
-#if 0
 DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
 		    spin_lock_irq(_T->lock),
 		    spin_unlock_irq(_T->lock))
+#if 0
 DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
 			 spin_trylock_irq(_T->lock))
diff --git a/libbcachefs/acl.c b/libbcachefs/acl.c
index d03adc36..307824d6 100644
--- a/libbcachefs/acl.c
+++ b/libbcachefs/acl.c
@@ -279,7 +279,7 @@ struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu)
 	if (rcu)
 		return ERR_PTR(-ECHILD);
 
-	struct btree_trans *trans = bch2_trans_get(c);
+	CLASS(btree_trans, trans)(c);
 retry:
 	bch2_trans_begin(trans);
 
@@ -304,7 +304,6 @@ err:
 	set_cached_acl(&inode->v, type, acl);
 
 	bch2_trans_iter_exit(trans, &iter);
-	bch2_trans_put(trans);
 
 	return acl;
 }
@@ -350,8 +349,8 @@ int bch2_set_acl(struct mnt_idmap *idmap,
 	umode_t mode;
 	int ret;
 
-	mutex_lock(&inode->ei_update_lock);
-	struct btree_trans *trans = bch2_trans_get(c);
+	guard(mutex)(&inode->ei_update_lock);
+	CLASS(btree_trans, trans)(c);
retry:
 	bch2_trans_begin(trans);
 	acl = _acl;
@@ -385,17 +384,13 @@ btree_err:
 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 		goto retry;
 	if (unlikely(ret))
-		goto err;
+		return ret;
 
 	bch2_inode_update_after_write(trans, inode, &inode_u,
 				      ATTR_CTIME|ATTR_MODE);
 
 	set_cached_acl(&inode->v, type, acl);
-err:
-	bch2_trans_put(trans);
-	mutex_unlock(&inode->ei_update_lock);
-
-	return ret;
+	return 0;
 }
 
 int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c
index d64839c7..4c1604fd 100644
--- a/libbcachefs/alloc_background.c
+++ b/libbcachefs/alloc_background.c
@@ -565,11 +565,11 @@ void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bke
 
 int bch2_bucket_gens_init(struct bch_fs *c)
 {
-	struct btree_trans *trans = bch2_trans_get(c);
 	struct bkey_i_bucket_gens g;
 	bool have_bucket_gens_key = false;
 	int ret;
 
+	CLASS(btree_trans, trans)(c);
 	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
 				 BTREE_ITER_prefetch, k, ({
 		/*
@@ -609,17 +609,14 @@ iter_err:
 				BCH_TRANS_COMMIT_no_enospc,
 			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
 
-	bch2_trans_put(trans);
-
-	bch_err_fn(c, ret);
 	return ret;
 }
 
 int bch2_alloc_read(struct bch_fs *c)
 {
-	down_read(&c->state_lock);
+	guard(rwsem_read)(&c->state_lock);
 
-	struct btree_trans *trans = bch2_trans_get(c);
+	CLASS(btree_trans, trans)(c);
 	struct bch_dev *ca = NULL;
 	int ret;
 
@@ -680,10 +677,6 @@ int bch2_alloc_read(struct bch_fs *c)
 	}
 
 	bch2_dev_put(ca);
-	bch2_trans_put(trans);
-
-	up_read(&c->state_lock);
-	bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -699,7 +692,7 @@ static int __need_discard_or_freespace_err(struct btree_trans *trans,
 		? BCH_FSCK_ERR_need_discard_key_wrong
 		: BCH_FSCK_ERR_freespace_key_wrong;
 	enum btree_id btree = discard ? BTREE_ID_need_discard : BTREE_ID_freespace;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 
 	bch2_bkey_val_to_text(&buf, c, alloc_k);
 
@@ -711,8 +704,6 @@ static int __need_discard_or_freespace_err(struct btree_trans *trans,
 	if (bch2_err_matches(ret, BCH_ERR_fsck_ignore) ||
 	    bch2_err_matches(ret, BCH_ERR_fsck_errors_not_fixed))
 		ret = 0;
-
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -860,10 +851,10 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 			enum btree_iter_update_trigger_flags flags)
 {
 	struct bch_fs *c = trans->c;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	int ret = 0;
 
-	struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
+	CLASS(bch2_dev_bucket_tryget, ca)(c, new.k->p);
 	if (!ca)
 		return bch_err_throw(c, trigger_alloc);
 
@@ -879,7 +870,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 		struct bkey_i_alloc_v4 *new_ka = bch2_alloc_to_v4_mut_inlined(trans, new.s_c);
 		ret = PTR_ERR_OR_ZERO(new_ka);
 		if (unlikely(ret))
-			goto err;
+			return ret;
 		new_a = &new_ka->v;
 	}
 
@@ -913,7 +904,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 		ret =   bch2_bucket_do_index(trans, ca, old, old_a, false) ?:
 			bch2_bucket_do_index(trans, ca, new.s_c, new_a, true);
 		if (ret)
-			goto err;
+			return ret;
 	}
 
 	if (new_a->data_type == BCH_DATA_cached &&
@@ -925,7 +916,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 				      alloc_lru_idx_read(*old_a),
 				      alloc_lru_idx_read(*new_a));
 		if (ret)
-			goto err;
+			return ret;
 
 		ret = bch2_lru_change(trans,
 				      BCH_LRU_BUCKET_FRAGMENTATION,
@@ -933,17 +924,17 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 				      alloc_lru_idx_fragmentation(*old_a, ca),
 				      alloc_lru_idx_fragmentation(*new_a, ca));
 		if (ret)
-			goto err;
+			return ret;
 
 		if (old_a->gen != new_a->gen) {
 			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
 			if (ret)
-				goto err;
+				return ret;
 		}
 
 		ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags);
 		if (ret)
-			goto err;
+			return ret;
 	}
 
 	if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
@@ -994,7 +985,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 			if (bch2_fs_fatal_err_on(ret, c,
 					"setting bucket_needs_journal_commit: %s",
 					bch2_err_str(ret)))
-				goto err;
+				return ret;
 		}
 	}
 
@@ -1036,16 +1027,12 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 			g->gen_valid = 1;
 			g->gen = new_a->gen;
 		}
-err:
fsck_err:
-	printbuf_exit(&buf);
-	bch2_dev_put(ca);
 	return ret;
invalid_bucket:
 	bch2_fs_inconsistent(c, "reference to invalid bucket\n%s",
 			     (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
-	ret = bch_err_throw(c, trigger_alloc);
-	goto err;
+	return bch_err_throw(c, trigger_alloc);
 }
 
 /*
@@ -1164,10 +1151,10 @@ int bch2_check_alloc_key(struct btree_trans *trans,
 	const struct bch_alloc_v4 *a;
 	unsigned gens_offset;
 	struct bkey_s_c k;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	int ret = 0;
 
-	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
+	CLASS(bch2_dev_bucket_tryget_noerror, ca)(c, alloc_k.k->p);
 	if (fsck_err_on(!ca,
 			trans, alloc_key_to_missing_dev_bucket,
 			"alloc key for invalid device:bucket %llu:%llu",
 			alloc_k.k->p.inode, alloc_k.k->p.offset))
@@ -1177,7 +1164,7 @@ int bch2_check_alloc_key(struct btree_trans *trans,
 		return ret;
 
 	if (!ca->mi.freespace_initialized)
-		goto out;
+		return 0;
 
 	a = bch2_alloc_to_v4(alloc_k, &a_convert);
 
@@ -1185,35 +1172,35 @@ int bch2_check_alloc_key(struct btree_trans *trans,
 	k = bch2_btree_iter_peek_slot(trans, discard_iter);
 	ret = bkey_err(k);
 	if (ret)
-		goto err;
+		return ret;
 
 	bool is_discarded = a->data_type == BCH_DATA_need_discard;
 	if (need_discard_or_freespace_err_on(!!k.k->type != is_discarded,
 					     trans, alloc_k, !is_discarded, true, true)) {
 		ret = bch2_btree_bit_mod_iter(trans, discard_iter, is_discarded);
 		if (ret)
-			goto err;
+			return ret;
 	}
 
 	bch2_btree_iter_set_pos(trans, freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
 	k = bch2_btree_iter_peek_slot(trans, freespace_iter);
 	ret = bkey_err(k);
 	if (ret)
-		goto err;
+		return ret;
 
 	bool is_free = a->data_type == BCH_DATA_free;
 	if (need_discard_or_freespace_err_on(!!k.k->type != is_free,
 					     trans, alloc_k, !is_free, false, true)) {
 		ret = bch2_btree_bit_mod_iter(trans, freespace_iter, is_free);
 		if (ret)
-			goto err;
+			return ret;
 	}
 
 	bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
 	k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
 	ret = bkey_err(k);
 	if (ret)
-		goto err;
+		return ret;
 
 	if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
 			trans, bucket_gens_key_wrong,
@@ -1226,7 +1213,7 @@ int bch2_check_alloc_key(struct btree_trans *trans,
 
 		ret = PTR_ERR_OR_ZERO(g);
 		if (ret)
-			goto err;
+			return ret;
 
 		if (k.k->type == KEY_TYPE_bucket_gens) {
 			bkey_reassemble(&g->k_i, k);
@@ -1239,13 +1226,9 @@ int bch2_check_alloc_key(struct btree_trans *trans,
 
 		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
 		if (ret)
-			goto err;
+			return ret;
 	}
-out:
-err:
fsck_err:
-	bch2_dev_put(ca);
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -1257,7 +1240,7 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
 				    struct btree_iter *freespace_iter)
 {
 	struct bkey_s_c k;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	int ret;
 
 	if (!ca->mi.freespace_initialized)
@@ -1268,7 +1251,7 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
 	k = bch2_btree_iter_peek_slot(trans, freespace_iter);
 	ret = bkey_err(k);
 	if (ret)
-		goto err;
+		return ret;
 
 	*end = bkey_min(k.k->p, *end);
 
@@ -1281,10 +1264,9 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
 			end->offset)) {
 		struct bkey_i *update =
 			bch2_trans_kmalloc(trans, sizeof(*update));
-
 		ret = PTR_ERR_OR_ZERO(update);
 		if (ret)
-			goto err;
+			return ret;
 
 		bkey_init(&update->k);
 		update->k.type	= KEY_TYPE_set;
@@ -1295,11 +1277,9 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
 
 		ret = bch2_trans_update(trans, freespace_iter, update, 0);
 		if (ret)
-			goto err;
+			return ret;
 	}
-err:
fsck_err:
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -1310,7 +1290,7 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
 				      struct btree_iter *bucket_gens_iter)
 {
 	struct bkey_s_c k;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	unsigned i, gens_offset, gens_end_offset;
 	int ret;
 
@@ -1319,7 +1299,7 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
 	k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
 	ret = bkey_err(k);
 	if (ret)
-		goto err;
+		return ret;
 
 	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
 		     alloc_gens_pos(*end,  &gens_end_offset)))
@@ -1345,23 +1325,20 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
 
 		if (need_update) {
 			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
-
 			ret = PTR_ERR_OR_ZERO(u);
 			if (ret)
-				goto err;
+				return ret;
 
 			memcpy(u, &g, sizeof(g));
 
 			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
 			if (ret)
-				goto err;
+				return ret;
 		}
 	}
 
 	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
-err:
fsck_err:
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -1404,7 +1381,7 @@ int __bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_i
 	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
 		? BCH_DATA_need_discard
 		: BCH_DATA_free;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 
 	bool async_repair = fsck_flags & FSCK_ERR_NO_LOG;
 	fsck_flags |= FSCK_CAN_FIX|FSCK_CAN_IGNORE;
@@ -1456,7 +1433,6 @@ out:
fsck_err:
 	bch2_set_btree_iter_dontneed(trans, &alloc_iter);
 	bch2_trans_iter_exit(trans, &alloc_iter);
-	printbuf_exit(&buf);
 	return ret;
delete:
 	if (!async_repair) {
@@ -1513,19 +1489,19 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
 	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
 	u64 b;
 	bool need_update = false;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	int ret = 0;
 
 	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
 	bkey_reassemble(&g.k_i, k);
 
-	struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
+	CLASS(bch2_dev_tryget_noerror, ca)(c, k.k->p.inode);
 	if (!ca) {
 		if (fsck_err(trans, bucket_gens_to_invalid_dev,
 			     "bucket_gens key for invalid device:\n%s",
 			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
-			ret = bch2_btree_delete_at(trans, iter, 0);
-		goto out;
+			return bch2_btree_delete_at(trans, iter, 0);
+		return 0;
 	}
 
 	if (fsck_err_on(end <= ca->mi.first_bucket ||
@@ -1533,8 +1509,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
 			trans, bucket_gens_to_invalid_buckets,
 			"bucket_gens key for invalid buckets:\n%s",
 			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
-		ret = bch2_btree_delete_at(trans, iter, 0);
-		goto out;
+		return bch2_btree_delete_at(trans, iter, 0);
 	}
 
 	for (b = start; b < ca->mi.first_bucket; b++)
@@ -1555,30 +1530,26 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
 
 	if (need_update) {
 		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
-
 		ret = PTR_ERR_OR_ZERO(u);
 		if (ret)
-			goto out;
+			return ret;
 
 		memcpy(u, &g, sizeof(g));
-		ret = bch2_trans_update(trans, iter, u, 0);
+		return bch2_trans_update(trans, iter, u, 0);
 	}
-out:
fsck_err:
-	bch2_dev_put(ca);
-	printbuf_exit(&buf);
 	return ret;
 }
 
 int bch2_check_alloc_info(struct bch_fs *c)
 {
-	struct btree_trans *trans = bch2_trans_get(c);
 	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
 	struct bch_dev *ca = NULL;
 	struct bkey hole;
 	struct bkey_s_c k;
 	int ret = 0;
 
+	CLASS(btree_trans, trans)(c);
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
 			     BTREE_ITER_prefetch);
 	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
@@ -1646,14 +1617,14 @@ bkey_err:
 	ca = NULL;
 
 	if (ret < 0)
-		goto err;
+		return ret;
 
 	ret = for_each_btree_key(trans, iter,
 			BTREE_ID_need_discard, POS_MIN,
 			BTREE_ITER_prefetch, k,
 		bch2_check_discard_freespace_key(trans, &iter));
 	if (ret)
-		goto err;
+		return ret;
 
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
 			     BTREE_ITER_prefetch);
@@ -1670,11 +1641,9 @@ bkey_err:
 			continue;
 		}
 		if (ret) {
-			struct printbuf buf = PRINTBUF;
+			CLASS(printbuf, buf)();
 			bch2_bkey_val_to_text(&buf, c, k);
-
 			bch_err(c, "while checking %s", buf.buf);
-			printbuf_exit(&buf);
 			break;
 		}
 
@@ -1682,16 +1651,14 @@ bkey_err:
 	}
 	bch2_trans_iter_exit(trans, &iter);
 	if (ret)
-		goto err;
+		return ret;
 
 	ret = for_each_btree_key_commit(trans, iter,
 			BTREE_ID_bucket_gens, POS_MIN,
 			BTREE_ITER_prefetch, k,
 			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 		bch2_check_bucket_gens_key(trans, &iter, k));
-err:
-	bch2_trans_put(trans);
-	bch_err_fn(c, ret);
+
 	return ret;
 }
 
@@ -1703,7 +1670,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
 	struct bch_alloc_v4 a_convert;
 	const struct bch_alloc_v4 *a;
 	struct bkey_s_c alloc_k;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	int ret;
 
 	alloc_k = bch2_btree_iter_peek(trans, alloc_iter);
@@ -1714,7 +1681,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
 	if (ret)
 		return ret;
 
-	struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode);
+	CLASS(bch2_dev_tryget_noerror, ca)(c, alloc_k.k->p.inode);
 	if (!ca)
 		return 0;
 
@@ -1726,95 +1693,80 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
 				  bucket_to_u64(alloc_k.k->p),
 				  lru_idx, alloc_k, last_flushed);
 		if (ret)
-			goto err;
+			return ret;
 	}
 
-	if (a->data_type != BCH_DATA_cached)
-		goto err;
+	if (a->data_type == BCH_DATA_cached) {
+		if (fsck_err_on(!a->io_time[READ],
+				trans, alloc_key_cached_but_read_time_zero,
+				"cached bucket with read_time 0\n%s",
+				(printbuf_reset(&buf),
+				 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+			struct bkey_i_alloc_v4 *a_mut =
+				bch2_alloc_to_v4_mut(trans, alloc_k);
+			ret = PTR_ERR_OR_ZERO(a_mut);
+			if (ret)
+				return ret;
 
-	if (fsck_err_on(!a->io_time[READ],
-			trans, alloc_key_cached_but_read_time_zero,
-			"cached bucket with read_time 0\n%s",
-		(printbuf_reset(&buf),
-		 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
-		struct bkey_i_alloc_v4 *a_mut =
-			bch2_alloc_to_v4_mut(trans, alloc_k);
-		ret = PTR_ERR_OR_ZERO(a_mut);
-		if (ret)
-			goto err;
+			a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
+			ret = bch2_trans_update(trans, alloc_iter,
+						&a_mut->k_i, BTREE_TRIGGER_norun);
+			if (ret)
+				return ret;
 
-		a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
-		ret = bch2_trans_update(trans, alloc_iter,
-					&a_mut->k_i, BTREE_TRIGGER_norun);
-		if (ret)
-			goto err;
+			a = &a_mut->v;
+		}
 
-		a = &a_mut->v;
+		ret = bch2_lru_check_set(trans, alloc_k.k->p.inode,
+					 bucket_to_u64(alloc_k.k->p),
+					 a->io_time[READ],
+					 alloc_k, last_flushed);
 	}
-
-	ret = bch2_lru_check_set(trans, alloc_k.k->p.inode,
-				 bucket_to_u64(alloc_k.k->p),
-				 a->io_time[READ],
-				 alloc_k, last_flushed);
-	if (ret)
-		goto err;
-err:
fsck_err:
-	bch2_dev_put(ca);
-	printbuf_exit(&buf);
 	return ret;
 }
 
 int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
 {
 	struct bkey_buf last_flushed;
-
 	bch2_bkey_buf_init(&last_flushed);
 	bkey_init(&last_flushed.k->k);
 
-	int ret = bch2_trans_run(c,
-		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+	CLASS(btree_trans, trans)(c);
+	int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
 				POS_MIN, BTREE_ITER_prefetch, k,
 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
-			bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed))) ?:
-		bch2_check_stripe_to_lru_refs(c);
+			bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed)) ?:
+		bch2_check_stripe_to_lru_refs(trans);
 
 	bch2_bkey_buf_exit(&last_flushed, c);
-	bch_err_fn(c, ret);
 	return ret;
 }
 
 static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
 {
 	struct bch_fs *c = ca->fs;
-	int ret;
 
-	mutex_lock(&ca->discard_buckets_in_flight_lock);
+	guard(mutex)(&ca->discard_buckets_in_flight_lock);
 	struct discard_in_flight *i =
 		darray_find_p(ca->discard_buckets_in_flight, i, i->bucket == bucket);
-	if (i) {
-		ret = bch_err_throw(c, EEXIST_discard_in_flight_add);
-		goto out;
-	}
+	if (i)
+		return bch_err_throw(c, EEXIST_discard_in_flight_add);
 
-	ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
+	return darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
			   .in_progress = in_progress,
			   .bucket	= bucket,
	}));
-out:
-	mutex_unlock(&ca->discard_buckets_in_flight_lock);
-	return ret;
 }
 
 static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket)
 {
-	mutex_lock(&ca->discard_buckets_in_flight_lock);
+	guard(mutex)(&ca->discard_buckets_in_flight_lock);
 	struct discard_in_flight *i =
 		darray_find_p(ca->discard_buckets_in_flight, i, i->bucket == bucket);
 	BUG_ON(!i || !i->in_progress);
 
 	darray_remove_item(&ca->discard_buckets_in_flight, i);
-	mutex_unlock(&ca->discard_buckets_in_flight_lock);
 }
 
 struct discard_buckets_state {
@@ -1836,7 +1788,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 	struct btree_iter iter = {};
 	struct bkey_s_c k;
 	struct bkey_i_alloc_v4 *a;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	bool discard_locked = false;
 	int ret = 0;
 
@@ -1927,7 +1879,6 @@ fsck_err:
 	if (!ret)
 		s->seen++;
 	bch2_trans_iter_exit(trans, &iter);
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -2024,17 +1975,16 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
 		bool got_bucket = false;
 		u64 bucket;
 
-		mutex_lock(&ca->discard_buckets_in_flight_lock);
-		darray_for_each(ca->discard_buckets_in_flight, i) {
-			if (i->in_progress)
-				continue;
+		scoped_guard(mutex, &ca->discard_buckets_in_flight_lock)
+			darray_for_each(ca->discard_buckets_in_flight, i) {
+				if (i->in_progress)
+					continue;
 
-			got_bucket = true;
-			bucket = i->bucket;
-			i->in_progress = true;
-			break;
-		}
-		mutex_unlock(&ca->discard_buckets_in_flight_lock);
+				got_bucket = true;
+				bucket = i->bucket;
+				i->in_progress = true;
+				break;
+			}
 
 		if (!got_bucket)
 			break;
@@ -2142,7 +2092,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
 				 s64 *nr_to_invalidate)
 {
 	struct bch_fs *c = trans->c;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
 	struct btree_iter alloc_iter = {};
 	int ret = 0;
 
@@ -2203,7 +2153,6 @@ static int invalidate_one_bucket(struct btree_trans *trans,
out:
fsck_err:
 	bch2_trans_iter_exit(trans, &alloc_iter);
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -2226,7 +2175,7 @@ static void bch2_do_invalidates_work(struct work_struct *work)
{
 	struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
 	struct bch_fs *c = ca->fs;
-	struct btree_trans *trans = bch2_trans_get(c);
+	CLASS(btree_trans, trans)(c);
 	int ret = 0;
 
 	struct bkey_buf last_flushed;
@@ -2268,7 +2217,6 @@ restart_err:
 	}
 	bch2_trans_iter_exit(trans, &iter);
err:
-	bch2_trans_put(trans);
 	bch2_bkey_buf_exit(&last_flushed, c);
 	enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates);
 	enumerated_ref_put(&c->writes, BCH_WRITE_REF_invalidate);
@@ -2301,18 +2249,17 @@ void bch2_do_invalidates(struct bch_fs *c)
 int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
			    u64 bucket_start, u64 bucket_end)
 {
-	struct btree_trans *trans = bch2_trans_get(c);
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	struct bkey hole;
 	struct bpos end = POS(ca->dev_idx, bucket_end);
-	struct bch_member *m;
 	unsigned long last_updated = jiffies;
 	int ret;
 
 	BUG_ON(bucket_start > bucket_end);
 	BUG_ON(bucket_end > ca->mi.nbuckets);
 
+	CLASS(btree_trans, trans)(c);
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
		POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
		BTREE_ITER_prefetch);
@@ -2383,17 +2330,16 @@ bkey_err:
 	}
 
 	bch2_trans_iter_exit(trans, &iter);
-	bch2_trans_put(trans);
 
 	if (ret < 0) {
 		bch_err_msg(ca, ret, "initializing free space");
 		return ret;
 	}
 
-	mutex_lock(&c->sb_lock);
-	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
-	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
-	mutex_unlock(&c->sb_lock);
+	scoped_guard(mutex, &c->sb_lock) {
+		struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb,
+							       ca->dev_idx);
+		SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
+	}
 
 	return 0;
 }
@@ -2403,7 +2349,6 @@ int bch2_fs_freespace_init(struct bch_fs *c)
 	if (c->sb.features & BIT_ULL(BCH_FEATURE_small_image))
 		return 0;
 
-
 	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */
@@ -2428,9 +2373,8 @@ int bch2_fs_freespace_init(struct bch_fs *c)
 	}
 
 	if (doing_init) {
-		mutex_lock(&c->sb_lock);
+		guard(mutex)(&c->sb_lock);
 		bch2_write_super(c);
-		mutex_unlock(&c->sb_lock);
 
 		bch_verbose(c, "done initializing freespace");
 	}
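Most of the alloc_background.c conversions above replace mutex_lock()/mutex_unlock() pairs with guard(mutex)(...) or scoped_guard(mutex, ...), which unlock automatically at scope exit and so allow direct returns where the old code needed goto out. A portable sketch of a scoped_guard-style macro built on the cleanup attribute (scoped_mutex and the helper are illustrative names; the kernel's real versions live in include/linux/cleanup.h):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int nr_items;

	static void mutex_unlock_cleanup(pthread_mutex_t **m)
	{
		pthread_mutex_unlock(*m);
	}

	/* one-iteration for loop whose guard variable unlocks on scope exit */
	#define scoped_mutex(m)						\
		for (__attribute__((cleanup(mutex_unlock_cleanup)))	\
		     pthread_mutex_t *_g = (pthread_mutex_lock(m), (m)); \
		     _g; _g = NULL)

	bool try_take_item(void)
	{
		bool taken = false;

		scoped_mutex(&lock) {
			if (!nr_items)
				break;	/* _g goes out of scope: unlock still runs */
			nr_items--;
			taken = true;
		}

		return taken;
	}

Because the unlock is attached to the guard variable rather than written at the end of the block, both break and early return leave the critical section correctly, which is what makes the goto-out error paths above removable.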
diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index 77406394..fd141552 100644
--- a/libbcachefs/alloc_foreground.c
+++ b/libbcachefs/alloc_foreground.c
@@ -106,20 +106,20 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 		return;
 	}
 
-	spin_lock(&ob->lock);
-	ob->valid = false;
-	ob->data_type = 0;
-	spin_unlock(&ob->lock);
+	scoped_guard(spinlock, &ob->lock) {
+		ob->valid = false;
+		ob->data_type = 0;
+	}
 
-	spin_lock(&c->freelist_lock);
-	bch2_open_bucket_hash_remove(c, ob);
+	scoped_guard(spinlock, &c->freelist_lock) {
+		bch2_open_bucket_hash_remove(c, ob);
 
-	ob->freelist = c->open_buckets_freelist;
-	c->open_buckets_freelist = ob - c->open_buckets;
+		ob->freelist = c->open_buckets_freelist;
+		c->open_buckets_freelist = ob - c->open_buckets;
 
-	c->open_buckets_nr_free++;
-	ca->nr_open_buckets--;
-	spin_unlock(&c->freelist_lock);
+		c->open_buckets_nr_free++;
+		ca->nr_open_buckets--;
+	}
 
 	closure_wake_up(&c->open_buckets_wait);
 }
@@ -164,14 +164,14 @@ static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
 	BUG_ON(c->open_buckets_partial_nr >=
	       ARRAY_SIZE(c->open_buckets_partial));
 
-	spin_lock(&c->freelist_lock);
-	scoped_guard(rcu)
+	scoped_guard(spinlock, &c->freelist_lock) {
+		guard(rcu)();
 		bch2_dev_rcu(c, ob->dev)->nr_partial_buckets++;
 
-	ob->on_partial_list = true;
-	c->open_buckets_partial[c->open_buckets_partial_nr++] =
-		ob - c->open_buckets;
-	spin_unlock(&c->freelist_lock);
+		ob->on_partial_list = true;
+		c->open_buckets_partial[c->open_buckets_partial_nr++] =
+			ob - c->open_buckets;
+	}
 
 	closure_wake_up(&c->open_buckets_wait);
 	closure_wake_up(&c->freelist_wait);
@@ -219,33 +219,31 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c,
 		return NULL;
 	}
 
-	spin_lock(&c->freelist_lock);
+	guard(spinlock)(&c->freelist_lock);
 
 	if (unlikely(c->open_buckets_nr_free <= bch2_open_buckets_reserved(req->watermark))) {
 		if (cl)
 			closure_wait(&c->open_buckets_wait, cl);
 
 		track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
-		spin_unlock(&c->freelist_lock);
 		return ERR_PTR(bch_err_throw(c, open_buckets_empty));
 	}
 
 	/* Recheck under lock: */
 	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
-		spin_unlock(&c->freelist_lock);
 		req->counters.skipped_open++;
 		return NULL;
 	}
 
 	struct open_bucket *ob = bch2_open_bucket_alloc(c);
 
-	spin_lock(&ob->lock);
-	ob->valid	= true;
-	ob->sectors_free = ca->mi.bucket_size;
-	ob->dev		= ca->dev_idx;
-	ob->gen		= gen;
-	ob->bucket	= bucket;
-	spin_unlock(&ob->lock);
+	scoped_guard(spinlock, &ob->lock) {
+		ob->valid	= true;
+		ob->sectors_free = ca->mi.bucket_size;
+		ob->dev		= ca->dev_idx;
+		ob->gen		= gen;
+		ob->bucket	= bucket;
+	}
 
 	ca->nr_open_buckets++;
 	bch2_open_bucket_hash_add(c, ob);
@@ -253,7 +251,6 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c,
 	track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], false);
 	track_event_change(&c->times[BCH_TIME_blocked_allocate], false);
 
-	spin_unlock(&c->freelist_lock);
 	return ob;
 }
 
@@ -453,7 +450,7 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c,
					 struct closure *cl,
					 struct open_bucket *ob)
 {
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 
 	printbuf_tabstop_push(&buf, 24);
 
@@ -480,8 +477,6 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c,
 		prt_printf(&buf, "err\t%s\n", bch2_err_str(PTR_ERR(ob)));
 		trace_bucket_alloc_fail(c, buf.buf);
 	}
-
-	printbuf_exit(&buf);
 }
 
 /**
@@ -589,7 +584,8 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
 		.ca		= ca,
 	};
 
-	bch2_trans_do(c,
+	CLASS(btree_trans, trans)(c);
+	lockrestart_do(trans,
		PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, &req, cl, false)));
 	return ob;
 }
@@ -848,17 +844,15 @@ static int bucket_alloc_set_writepoint(struct bch_fs *c,
 static int bucket_alloc_set_partial(struct bch_fs *c,
				    struct alloc_request *req)
 {
-	int i, ret = 0;
-
 	if (!c->open_buckets_partial_nr)
 		return 0;
 
-	spin_lock(&c->freelist_lock);
+	guard(spinlock)(&c->freelist_lock);
 
 	if (!c->open_buckets_partial_nr)
-		goto unlock;
+		return 0;
 
-	for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
+	for (int i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
 		struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
 
 		if (want_bucket(c, req, ob)) {
@@ -878,14 +872,13 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 			scoped_guard(rcu)
 				bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
 
-			ret = add_new_bucket(c, req, ob);
+			int ret = add_new_bucket(c, req, ob);
 			if (ret)
-				break;
+				return ret;
 		}
 	}
-unlock:
-	spin_unlock(&c->freelist_lock);
-	return ret;
+
+	return 0;
 }
 
 static int __open_bucket_add_buckets(struct btree_trans *trans,
@@ -981,23 +974,18 @@ static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
 		return ob->ec != NULL;
 	} else if (ca) {
 		bool drop = ob->dev == ca->dev_idx;
-		struct open_bucket *ob2;
-		unsigned i;
 
 		if (!drop && ob->ec) {
-			unsigned nr_blocks;
+			guard(mutex)(&ob->ec->lock);
+			unsigned nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
 
-			mutex_lock(&ob->ec->lock);
-			nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
-
-			for (i = 0; i < nr_blocks; i++) {
+			for (unsigned i = 0; i < nr_blocks; i++) {
 				if (!ob->ec->blocks[i])
 					continue;
 
-				ob2 = c->open_buckets + ob->ec->blocks[i];
+				struct open_bucket *ob2 = c->open_buckets + ob->ec->blocks[i];
 				drop |= ob2->dev == ca->dev_idx;
 			}
-			mutex_unlock(&ob->ec->lock);
 		}
 
 		return drop;
@@ -1013,14 +1001,13 @@ static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
 	struct open_bucket *ob;
 	unsigned i;
 
-	mutex_lock(&wp->lock);
+	guard(mutex)(&wp->lock);
 	open_bucket_for_each(c, &wp->ptrs, ob, i)
 		if (should_drop_bucket(ob, c, ca, ec))
 			bch2_open_bucket_put(c, ob);
 		else
 			ob_push(c, &ptrs, ob);
 	wp->ptrs = ptrs;
-	mutex_unlock(&wp->lock);
 }
 
 void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
@@ -1036,39 +1023,37 @@ void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
 	bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
 	bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);
 
-	mutex_lock(&c->btree_reserve_cache_lock);
-	while (c->btree_reserve_cache_nr) {
-		struct btree_alloc *a =
-			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];
+	scoped_guard(mutex, &c->btree_reserve_cache_lock)
+		while (c->btree_reserve_cache_nr) {
+			struct btree_alloc *a =
+				&c->btree_reserve_cache[--c->btree_reserve_cache_nr];
 
-		bch2_open_buckets_put(c, &a->ob);
-	}
-	mutex_unlock(&c->btree_reserve_cache_lock);
+			bch2_open_buckets_put(c, &a->ob);
+		}
 
-	spin_lock(&c->freelist_lock);
 	i = 0;
-	while (i < c->open_buckets_partial_nr) {
-		struct open_bucket *ob =
-			c->open_buckets + c->open_buckets_partial[i];
-
-		if (should_drop_bucket(ob, c, ca, ec)) {
-			--c->open_buckets_partial_nr;
-			swap(c->open_buckets_partial[i],
-			     c->open_buckets_partial[c->open_buckets_partial_nr]);
-
-			ob->on_partial_list = false;
-
-			scoped_guard(rcu)
-				bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
-
-			spin_unlock(&c->freelist_lock);
-			bch2_open_bucket_put(c, ob);
-			spin_lock(&c->freelist_lock);
-		} else {
-			i++;
+	scoped_guard(spinlock, &c->freelist_lock)
+		while (i < c->open_buckets_partial_nr) {
+			struct open_bucket *ob =
+				c->open_buckets + c->open_buckets_partial[i];
+
+			if (should_drop_bucket(ob, c, ca, ec)) {
+				--c->open_buckets_partial_nr;
+				swap(c->open_buckets_partial[i],
+				     c->open_buckets_partial[c->open_buckets_partial_nr]);
+
+				ob->on_partial_list = false;
+
+				scoped_guard(rcu)
+					bch2_dev_rcu(c, ob->dev)->nr_partial_buckets--;
+
+				spin_unlock(&c->freelist_lock);
+				bch2_open_bucket_put(c, ob);
+				spin_lock(&c->freelist_lock);
+			} else {
+				i++;
+			}
 		}
-	}
-	spin_unlock(&c->freelist_lock);
 
 	bch2_ec_stop_dev(c, ca);
 }
@@ -1122,22 +1107,17 @@ static noinline bool try_decrease_writepoints(struct btree_trans *trans, unsigne
 	struct open_bucket *ob;
 	unsigned i;
 
-	mutex_lock(&c->write_points_hash_lock);
-	if (c->write_points_nr < old_nr) {
-		mutex_unlock(&c->write_points_hash_lock);
-		return true;
-	}
-
-	if (c->write_points_nr == 1 ||
-	    !too_many_writepoints(c, 8)) {
-		mutex_unlock(&c->write_points_hash_lock);
-		return false;
-	}
+	scoped_guard(mutex, &c->write_points_hash_lock) {
+		if (c->write_points_nr < old_nr)
+			return true;
 
-	wp = c->write_points + --c->write_points_nr;
+		if (c->write_points_nr == 1 ||
+		    !too_many_writepoints(c, 8))
+			return false;
 
-	hlist_del_rcu(&wp->node);
-	mutex_unlock(&c->write_points_hash_lock);
+		wp = c->write_points + --c->write_points_nr;
+		hlist_del_rcu(&wp->node);
+	}
 
 	bch2_trans_mutex_lock_norelock(trans, &wp->lock);
 	open_bucket_for_each(c, &wp->ptrs, ob, i)
@@ -1471,35 +1451,25 @@ void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct ope
 
 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bch_dev *ca)
 {
-	struct open_bucket *ob;
-
-	out->atomic++;
+	guard(printbuf_atomic)(out);
 
-	for (ob = c->open_buckets;
+	for (struct open_bucket *ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
-		spin_lock(&ob->lock);
+		guard(spinlock)(&ob->lock);
 		if (ob->valid && (!ca || ob->dev == ca->dev_idx))
 			bch2_open_bucket_to_text(out, c, ob);
-		spin_unlock(&ob->lock);
 	}
-
-	--out->atomic;
 }
 
 void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
 {
-	unsigned i;
-
-	out->atomic++;
-	spin_lock(&c->freelist_lock);
+	guard(printbuf_atomic)(out);
+	guard(spinlock)(&c->freelist_lock);
 
-	for (i = 0; i < c->open_buckets_partial_nr; i++)
+	for (unsigned i = 0; i < c->open_buckets_partial_nr; i++)
 		bch2_open_bucket_to_text(out, c,
				c->open_buckets + c->open_buckets_partial[i]);
-
-	spin_unlock(&c->freelist_lock);
-	--out->atomic;
 }
 
 static const char * const bch2_write_point_states[] = {
@@ -1515,7 +1485,7 @@ static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
 	struct open_bucket *ob;
 	unsigned i;
 
-	mutex_lock(&wp->lock);
+	guard(mutex)(&wp->lock);
 	prt_printf(out, "%lu: ", wp->write_point);
 	prt_human_readable_u64(out, wp->sectors_allocated << 9);
@@ -1534,8 +1504,6 @@ static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
 	open_bucket_for_each(c, &wp->ptrs, ob, i)
 		bch2_open_bucket_to_text(out, c, ob);
 	printbuf_indent_sub(out, 2);
-
-	mutex_unlock(&wp->lock);
 }
 
 void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
@@ -1622,7 +1590,7 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
 
 static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
 {
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 
 	prt_printf(&buf, "Allocator stuck? Waited for %u seconds\n",
		   c->opts.allocator_stuck_timeout);
@@ -1635,8 +1603,8 @@ static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
 
 	bch2_printbuf_make_room(&buf, 4096);
 
-	buf.atomic++;
-	scoped_guard(rcu)
+	scoped_guard(rcu) {
+		guard(printbuf_atomic)(&buf);
 		for_each_online_member_rcu(c, ca) {
 			prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
 			printbuf_indent_add(&buf, 2);
@@ -1644,7 +1612,7 @@ static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
 			printbuf_indent_sub(&buf, 2);
 			prt_newline(&buf);
 		}
-	--buf.atomic;
+	}
 
 	prt_printf(&buf, "Copygc debug:\n");
 	printbuf_indent_add(&buf, 2);
@@ -1658,7 +1626,6 @@ static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
 	printbuf_indent_sub(&buf, 2);
 
 	bch2_print_str(c, KERN_ERR, buf.buf);
-	printbuf_exit(&buf);
 }
 
 static inline unsigned allocator_wait_timeout(struct bch_fs *c)
diff --git a/libbcachefs/alloc_foreground.h b/libbcachefs/alloc_foreground.h
index 1b3fc846..02aef668 100644
--- a/libbcachefs/alloc_foreground.h
+++ b/libbcachefs/alloc_foreground.h
@@ -210,16 +210,11 @@ static inline bool bch2_bucket_is_open(struct bch_fs *c, unsigned dev, u64 bucke
 
 static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64 bucket)
 {
-	bool ret;
-
 	if (bch2_bucket_is_open(c, dev, bucket))
 		return true;
 
-	spin_lock(&c->freelist_lock);
-	ret = bch2_bucket_is_open(c, dev, bucket);
-	spin_unlock(&c->freelist_lock);
-
-	return ret;
+	guard(spinlock)(&c->freelist_lock);
+	return bch2_bucket_is_open(c, dev, bucket);
 }
 
 enum bch_write_flags;
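The bch2_bucket_is_open_safe() rewrite just above is the familiar unlocked-fast-path-then-recheck-under-the-lock pattern; with a guard, the locked slow path collapses to a single return. A compilable sketch with illustrative names and a stubbed-out lookup (the real check walks the open-bucket hash):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;

	static void unlock_cleanup(pthread_mutex_t **m)
	{
		pthread_mutex_unlock(*m);
	}

	#define guard_mutex(m) \
		__attribute__((cleanup(unlock_cleanup))) \
		pthread_mutex_t *_g = (pthread_mutex_lock(m), (m))

	/* stand-in for the real open-bucket hash lookup */
	static bool bucket_is_open(unsigned dev, unsigned long bucket)
	{
		(void) dev; (void) bucket;
		return false;
	}

	bool bucket_is_open_safe(unsigned dev, unsigned long bucket)
	{
		if (bucket_is_open(dev, bucket))	/* racy fast path */
			return true;

		guard_mutex(&freelist_lock);		/* recheck under the lock */
		return bucket_is_open(dev, bucket);
	}

The fast path may see a stale answer, which is fine for a "probably open" check; only the locked recheck is authoritative, and the guard means the function can return its result directly from inside the critical section.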
diff --git a/libbcachefs/backpointers.c b/libbcachefs/backpointers.c
index bc277f42..bd26ab3e 100644
--- a/libbcachefs/backpointers.c
+++ b/libbcachefs/backpointers.c
@@ -108,7 +108,7 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
					bool insert)
 {
 	struct bch_fs *c = trans->c;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	bool will_check = c->recovery.passes_to_run &
		BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
 	int ret = 0;
@@ -146,7 +146,6 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
 	if (buf.buf)
 		bch_err(c, "%s", buf.buf);
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -209,7 +208,7 @@ static int backpointer_target_not_found(struct btree_trans *trans,
					bool commit)
 {
 	struct bch_fs *c = trans->c;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	int ret = 0;
 
 	/*
@@ -245,7 +244,7 @@ static int backpointer_target_not_found(struct btree_trans *trans,
			"%s", buf.buf)) {
 		ret = bch2_backpointer_del(trans, bp.k->p);
 		if (ret || !commit)
-			goto out;
+			return ret;
 
 		/*
@@ -263,9 +262,7 @@ static int backpointer_target_not_found(struct btree_trans *trans,
		 */
 		ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
 	}
-out:
fsck_err:
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -389,7 +386,7 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st
 	struct bch_fs *c = trans->c;
 	struct btree_iter alloc_iter = {};
 	struct bkey_s_c alloc_k;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	int ret = 0;
 
 	struct bpos bucket;
@@ -424,7 +421,6 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st
out:
fsck_err:
 	bch2_trans_iter_exit(trans, &alloc_iter);
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -435,14 +431,13 @@ int bch2_check_btree_backpointers(struct bch_fs *c)
 	bch2_bkey_buf_init(&last_flushed);
 	bkey_init(&last_flushed.k->k);
 
-	int ret = bch2_trans_run(c,
-		for_each_btree_key_commit(trans, iter,
+	CLASS(btree_trans, trans)(c);
+	int ret = for_each_btree_key_commit(trans, iter,
			BTREE_ID_backpointers, POS_MIN, 0, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
-		  bch2_check_backpointer_has_valid_bucket(trans, k, &last_flushed)));
+		  bch2_check_backpointer_has_valid_bucket(trans, k, &last_flushed));
 
 	bch2_bkey_buf_exit(&last_flushed, c);
-	bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -472,7 +467,7 @@ static int check_extent_checksum(struct btree_trans *trans,
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(extent);
 	const union bch_extent_entry *entry;
 	struct extent_ptr_decoded p;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	void *data_buf = NULL;
 	struct bio *bio = NULL;
 	size_t bytes;
@@ -531,7 +526,6 @@ err:
 	kvfree(data_buf);
 	enumerated_ref_put(&ca->io_ref[READ],
			   BCH_DEV_READ_REF_check_extent_checksums);
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -542,7 +536,7 @@ static int check_bp_exists(struct btree_trans *trans,
{
 	struct bch_fs *c = trans->c;
 	struct btree_iter other_extent_iter = {};
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 
 	if (bpos_lt(bp->k.p, s->bp_start) ||
	    bpos_gt(bp->k.p, s->bp_end))
@@ -567,7 +561,6 @@ err:
fsck_err:
 	bch2_trans_iter_exit(trans, &other_extent_iter);
 	bch2_trans_iter_exit(trans, &bp_iter);
-	printbuf_exit(&buf);
 	return ret;
check_existing_bp:
 	/* Do we have a backpointer for a different extent? */
@@ -896,7 +889,7 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
 	u32 sectors[ALLOC_SECTORS_NR];
 	memset(sectors, 0, sizeof(sectors));
 
-	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(trans->c, alloc_k.k->p);
+	CLASS(bch2_dev_bucket_tryget_noerror, ca)(trans->c, alloc_k.k->p);
 	if (!ca)
 		return 0;
 
@@ -933,12 +926,12 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
 	};
 	bch2_trans_iter_exit(trans, &iter);
 	if (ret)
-		goto err;
+		return ret;
 
 	if (need_commit) {
 		ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
 		if (ret)
-			goto err;
+			return ret;
 	}
 
@@ -947,15 +940,14 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
 	if (sectors[ALLOC_dirty] != a->dirty_sectors ||
 		if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_backpointer_bucket_gen) {
 			ret = bch2_backpointers_maybe_flush(trans, alloc_k, last_flushed);
 			if (ret)
-				goto err;
+				return ret;
 		}
 
 		if (sectors[ALLOC_dirty]  > a->dirty_sectors ||
		    sectors[ALLOC_cached] > a->cached_sectors ||
		    sectors[ALLOC_stripe] > a->stripe_sectors) {
-			ret = check_bucket_backpointers_to_extents(trans, ca, alloc_k.k->p) ?:
+			return check_bucket_backpointers_to_extents(trans, ca, alloc_k.k->p) ?:
				bch_err_throw(c, transaction_restart_nested);
-			goto err;
 		}
 
 		bool empty = (sectors[ALLOC_dirty] +
@@ -971,9 +963,8 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
 		*had_mismatch = true;
 	}
-err:
-	bch2_dev_put(ca);
-	return ret;
+
+	return 0;
 }
 
 static bool backpointer_node_has_missing(struct bch_fs *c, struct bkey_s_c k)
@@ -1108,7 +1099,7 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
{
 	int ret = 0;
 
-	struct btree_trans *trans = bch2_trans_get(c);
+	CLASS(btree_trans, trans)(c);
 	struct extents_to_bp_state s = { .bp_start = POS_MIN };
 
 	bch2_bkey_buf_init(&s.last_flushed);
@@ -1147,7 +1138,7 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
 
 		if (!bpos_eq(s.bp_start, POS_MIN) ||
		    !bpos_eq(s.bp_end, SPOS_MAX)) {
-			struct printbuf buf = PRINTBUF;
+			CLASS(printbuf, buf)();
 
 			prt_str(&buf, "check_extents_to_backpointers(): ");
 			bch2_bpos_to_text(&buf, s.bp_start);
@@ -1155,7 +1146,6 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
 			bch2_bpos_to_text(&buf, s.bp_end);
 
 			bch_verbose(c, "%s", buf.buf);
-			printbuf_exit(&buf);
 		}
 
 		ret = bch2_check_extents_to_backpointers_pass(trans, &s);
@@ -1170,11 +1160,8 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
 		bch2_bucket_bitmap_free(&ca->bucket_backpointer_empty);
 	}
err:
-	bch2_trans_put(trans);
 	bch2_bkey_buf_exit(&s.last_flushed, c);
 	bch2_btree_cache_unpin(c);
-
-	bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -1212,7 +1199,7 @@ int bch2_check_bucket_backpointer_mismatch(struct btree_trans *trans,
 	u64 nr = ca->bucket_backpointer_mismatch.nr;
 	u64 allowed = copygc ? ca->mi.nbuckets >> 7 : 0;
 
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	__bch2_log_msg_start(ca->name, &buf);
 
 	prt_printf(&buf, "Detected missing backpointers in bucket %llu, now have %llu/%llu with missing\n",
@@ -1223,7 +1210,6 @@ int bch2_check_bucket_backpointer_mismatch(struct btree_trans *trans,
			     nr < allowed ? RUN_RECOVERY_PASS_ratelimit : 0);
 
 	bch2_print_str(c, KERN_ERR, buf.buf);
-	printbuf_exit(&buf);
 	return 0;
 }
 
@@ -1300,7 +1286,7 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
 
 int bch2_check_backpointers_to_extents(struct bch_fs *c)
 {
-	struct btree_trans *trans = bch2_trans_get(c);
+	CLASS(btree_trans, trans)(c);
 	struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
 	int ret;
 
@@ -1320,7 +1306,7 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
 
 		if (bbpos_cmp(start, BBPOS_MIN) ||
		    bbpos_cmp(end, BBPOS_MAX)) {
-			struct printbuf buf = PRINTBUF;
+			CLASS(printbuf, buf)();
 
 			prt_str(&buf, "check_backpointers_to_extents(): ");
 			bch2_bbpos_to_text(&buf, start);
@@ -1328,7 +1314,6 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
 			bch2_bbpos_to_text(&buf, end);
 
 			bch_verbose(c, "%s", buf.buf);
-			printbuf_exit(&buf);
 		}
 
 		ret = bch2_check_backpointers_to_extents_pass(trans, start, end);
@@ -1337,11 +1322,8 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
 		start = bbpos_successor(end);
 	}
 
-	bch2_trans_put(trans);
 	bch2_btree_cache_unpin(c);
-
-	bch_err_fn(c, ret);
 	return ret;
 }
diff --git a/libbcachefs/bcachefs.h b/libbcachefs/bcachefs.h
index 4f1ac7aa..8a6f886b 100644
--- a/libbcachefs/bcachefs.h
+++ b/libbcachefs/bcachefs.h
@@ -1165,7 +1165,7 @@ static inline bool bch2_ro_ref_tryget(struct bch_fs *c)
 
 static inline void bch2_ro_ref_put(struct bch_fs *c)
 {
-	if (refcount_dec_and_test(&c->ro_ref))
+	if (c && refcount_dec_and_test(&c->ro_ref))
 		wake_up(&c->ro_ref_wait);
 }
 
diff --git a/libbcachefs/bkey.c b/libbcachefs/bkey.c
index ee823c64..67e39f83 100644
--- a/libbcachefs/bkey.c
+++ b/libbcachefs/bkey.c
@@ -624,10 +624,8 @@ struct bkey_format bch2_bkey_format_done(struct bkey_format_state *s)
 	}
 
 	if (static_branch_unlikely(&bch2_debug_check_bkey_unpack)) {
-		struct printbuf buf = PRINTBUF;
-
+		CLASS(printbuf, buf)();
 		BUG_ON(bch2_bkey_format_invalid(NULL, &ret, 0, &buf));
-		printbuf_exit(&buf);
 	}
 
 	return ret;
diff --git a/libbcachefs/bset.c b/libbcachefs/bset.c
index 90fd1574..72698c0d 100644
--- a/libbcachefs/bset.c
+++ b/libbcachefs/bset.c
@@ -58,7 +58,7 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 	struct bkey_packed *_k, *_n;
 	struct bkey uk, n;
 	struct bkey_s_c k;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 
 	if (!i->u64s)
 		return;
@@ -97,8 +97,6 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 		if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
 			printk(KERN_ERR "Duplicate keys\n");
 	}
-
-	printbuf_exit(&buf);
 }
 
 void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
@@ -113,7 +111,7 @@ void bch2_dump_btree_node_iter(struct btree *b,
				struct btree_node_iter *iter)
 {
 	struct btree_node_iter_set *set;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 
 	printk(KERN_ERR "btree node iter with %u/%u sets:\n",
	       __btree_node_iter_used(iter), b->nsets);
@@ -128,8 +126,6 @@ void bch2_dump_btree_node_iter(struct btree *b,
 		printk(KERN_ERR "set %zu key %u: %s\n",
		       t - b->set, set->k, buf.buf);
 	}
-
-	printbuf_exit(&buf);
 }
 
 struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b)
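The recurring PRINTBUF to CLASS(printbuf, buf)() conversion throughout these files deletes every printbuf_exit() call: the buffer is destroyed when it goes out of scope, early returns included. A portable sketch of the same auto-cleanup idea with a plain heap buffer (struct strbuf and CLASS_strbuf are illustrative stand-ins, not the bcachefs printbuf API):

	#include <stdio.h>
	#include <stdlib.h>

	struct strbuf { char *buf; };

	static void strbuf_exit(struct strbuf *s)
	{
		free(s->buf);	/* free(NULL) is a no-op, so the empty case is safe */
	}

	#define CLASS_strbuf(name) \
		__attribute__((cleanup(strbuf_exit))) struct strbuf name = { NULL }

	int report(int err)
	{
		CLASS_strbuf(buf);

		buf.buf = malloc(64);
		if (!buf.buf)
			return -1;		/* strbuf_exit() runs here too */
		snprintf(buf.buf, 64, "error %i", err);

		fprintf(stderr, "%s\n", buf.buf);
		return 0;			/* ...and here */
	}

Tying the free to the declaration is what lets the diff turn goto-out error paths into plain returns without leaking the buffer.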
diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index 49505653..23ed7393 100644
--- a/libbcachefs/btree_cache.c
+++ b/libbcachefs/btree_cache.c
@@ -78,9 +78,8 @@ void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b)
 {
 	struct btree_cache *bc = &c->btree_cache;
 
-	mutex_lock(&bc->lock);
-	__bch2_btree_node_to_freelist(bc, b);
-	mutex_unlock(&bc->lock);
+	scoped_guard(mutex, &bc->lock)
+		__bch2_btree_node_to_freelist(bc, b);
 
 	six_unlock_write(&b->c.lock);
 	six_unlock_intent(&b->c.lock);
@@ -215,14 +214,13 @@ void bch2_node_pin(struct bch_fs *c, struct btree *b)
 {
 	struct btree_cache *bc = &c->btree_cache;
 
-	mutex_lock(&bc->lock);
+	guard(mutex)(&bc->lock);
 	if (b != btree_node_root(c, b) && !btree_node_pinned(b)) {
 		set_btree_node_pinned(b);
 		list_move(&b->list, &bc->live[1].list);
 		bc->live[0].nr--;
 		bc->live[1].nr++;
 	}
-	mutex_unlock(&bc->lock);
 }
 
 void bch2_btree_cache_unpin(struct bch_fs *c)
@@ -230,7 +228,7 @@ void bch2_btree_cache_unpin(struct bch_fs *c)
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b, *n;
 
-	mutex_lock(&bc->lock);
+	guard(mutex)(&bc->lock);
 	c->btree_cache.pinned_nodes_mask[0] = 0;
 	c->btree_cache.pinned_nodes_mask[1] = 0;
 
@@ -240,8 +238,6 @@ void bch2_btree_cache_unpin(struct bch_fs *c)
 		bc->live[0].nr++;
 		bc->live[1].nr--;
 	}
-
-	mutex_unlock(&bc->lock);
 }
 
 /* Btree in memory cache - hash table */
@@ -296,11 +292,8 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
 	b->c.level	= level;
 	b->c.btree_id	= id;
 
-	mutex_lock(&bc->lock);
-	int ret = __bch2_btree_node_hash_insert(bc, b);
-	mutex_unlock(&bc->lock);
-
-	return ret;
+	guard(mutex)(&bc->lock);
+	return __bch2_btree_node_hash_insert(bc, b);
 }
 
 void bch2_btree_node_update_key_early(struct btree_trans *trans,
@@ -317,7 +310,7 @@ void bch2_btree_node_update_key_early(struct btree_trans *trans,
 
 	b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true);
 	if (!IS_ERR_OR_NULL(b)) {
-		mutex_lock(&c->btree_cache.lock);
+		guard(mutex)(&c->btree_cache.lock);
 
 		__bch2_btree_node_hash_remove(&c->btree_cache, b);
 
@@ -325,7 +318,6 @@ void bch2_btree_node_update_key_early(struct btree_trans *trans,
 		ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
 		BUG_ON(ret);
-		mutex_unlock(&c->btree_cache.lock);
 
 		six_unlock_read(&b->c.lock);
 	}
@@ -930,20 +922,18 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 	}
 
 	if (unlikely(!bkey_is_btree_ptr(&k->k))) {
-		struct printbuf buf = PRINTBUF;
+		CLASS(printbuf, buf)();
 		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
 
 		int ret = bch2_fs_topology_error(c, "attempting to get btree node with non-btree key %s", buf.buf);
-		printbuf_exit(&buf);
 		return ERR_PTR(ret);
 	}
 
 	if (unlikely(k->k.u64s > BKEY_BTREE_PTR_U64s_MAX)) {
-		struct printbuf buf = PRINTBUF;
+		CLASS(printbuf, buf)();
 		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
 
 		int ret = bch2_fs_topology_error(c, "attempting to get btree node with too big key %s", buf.buf);
-		printbuf_exit(&buf);
 		return ERR_PTR(ret);
 	}
 
@@ -1018,11 +1008,10 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 
 static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
 {
-	struct printbuf buf = PRINTBUF;
-
 	if (c->recovery.pass_done < BCH_RECOVERY_PASS_check_allocations)
 		return;
 
+	CLASS(printbuf, buf)();
 	prt_printf(&buf,
		   "btree node header doesn't match ptr: ");
 	bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
@@ -1038,8 +1027,6 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
 	bch2_bpos_to_text(&buf, b->data->max_key);
 
 	bch2_fs_topology_error(c, "%s", buf.buf);
-
-	printbuf_exit(&buf);
 }
 
 static inline void btree_check_header(struct bch_fs *c, struct btree *b)
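Likewise, CLASS(btree_trans, trans)(c) in the surrounding files replaces bch2_trans_get()/bch2_trans_put() pairs, pairing a constructor with a destructor that runs on every return path. A minimal sketch of a constructor-style CLASS() wrapper in portable C (struct trans and the helpers are illustrative, not the bcachefs types):

	#include <stdio.h>
	#include <stdlib.h>

	struct trans { int id; };

	static struct trans *trans_get(int id)
	{
		struct trans *t = calloc(1, sizeof(*t));
		if (!t)
			abort();
		t->id = id;
		printf("get %i\n", id);
		return t;
	}

	static void trans_put(struct trans **t)
	{
		printf("put %i\n", (*t)->id);
		free(*t);
	}

	#define CLASS_trans(name, id) \
		__attribute__((cleanup(trans_put))) struct trans *name = trans_get(id)

	int main(void)
	{
		CLASS_trans(trans, 1);

		if (trans->id != 1)
			return 1;	/* trans_put() still runs */

		return 0;		/* ...and here */
	}

This is also why several functions above lose their bch_err_fn()/error-label epilogues: with acquisition and release bound to the declaration, the function body can simply return.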
diff --git a/libbcachefs/btree_gc.c b/libbcachefs/btree_gc.c
index 7269490a..34cb8a43 100644
--- a/libbcachefs/btree_gc.c
+++ b/libbcachefs/btree_gc.c
@@ -95,11 +95,10 @@ static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
 
 static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
 {
-	preempt_disable();
+	guard(preempt)();
 	write_seqcount_begin(&c->gc_pos_lock);
 	c->gc_pos = new_pos;
 	write_seqcount_end(&c->gc_pos_lock);
-	preempt_enable();
 }
 
 static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
@@ -138,14 +137,13 @@ static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
 	int ret;
 
 	if (c->opts.verbose) {
-		struct printbuf buf = PRINTBUF;
+		CLASS(printbuf, buf)();
 
 		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
 		prt_str(&buf, " -> ");
 		bch2_bpos_to_text(&buf, new_min);
 
 		bch_info(c, "%s(): %s", __func__, buf.buf);
-		printbuf_exit(&buf);
 	}
 
 	new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
@@ -174,14 +172,13 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
 	int ret;
 
 	if (c->opts.verbose) {
-		struct printbuf buf = PRINTBUF;
+		CLASS(printbuf, buf)();
 
 		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
 		prt_str(&buf, " -> ");
 		bch2_bpos_to_text(&buf, new_max);
 
 		bch_info(c, "%s(): %s", __func__, buf.buf);
-		printbuf_exit(&buf);
 	}
 
 	ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
@@ -205,13 +202,12 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
 
 	bch2_btree_node_drop_keys_outside_node(b);
 
-	mutex_lock(&c->btree_cache.lock);
+	guard(mutex)(&c->btree_cache.lock);
 	__bch2_btree_node_hash_remove(&c->btree_cache, b);
 
 	bkey_copy(&b->key, &new->k_i);
 	ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
 	BUG_ON(ret);
-	mutex_unlock(&c->btree_cache.lock);
 	return 0;
 }
 
@@ -223,7 +219,7 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
 	struct bpos expected_start = !prev
		? b->data->min_key
		: bpos_successor(prev->key.k.p);
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	int ret = 0;
 
 	BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
@@ -253,7 +249,7 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
						     expected_start,
						     bpos_predecessor(cur->data->min_key));
 			if (ret)
-				goto err;
+				return ret;
 
 			*pulled_from_scan = cur->data->min_key;
 			ret = DID_FILL_FROM_SCAN;
@@ -286,9 +282,7 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
 			}
 		}
 	}
-err:
fsck_err:
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -296,7 +290,7 @@ static int btree_repair_node_end(struct btree_trans *trans, struct btree *b,
				 struct btree *child, struct bpos *pulled_from_scan)
 {
 	struct bch_fs *c = trans->c;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	int ret = 0;
 
 	if (bpos_eq(child->key.k.p, b->key.k.p))
@@ -317,7 +311,7 @@ static int btree_repair_node_end(struct btree_trans *trans, struct btree *b,
 			ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
						     bpos_successor(child->key.k.p), b->key.k.p);
 			if (ret)
-				goto err;
+				return ret;
 
 			*pulled_from_scan = b->key.k.p;
 			ret = DID_FILL_FROM_SCAN;
@@ -325,9 +319,7 @@ static int btree_repair_node_end(struct btree_trans *trans, struct btree *b,
 			ret = set_node_max(c, child, b->key.k.p);
 		}
 	}
-err:
fsck_err:
-	printbuf_exit(&buf);
 	return ret;
 }
 
@@ -340,7 +332,7 @@ static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct
 	struct bkey_buf prev_k, cur_k;
 	struct btree *prev = NULL, *cur = NULL;
 	bool have_child, new_pass = false;
-	struct printbuf buf = PRINTBUF;
+	CLASS(printbuf, buf)();
 	int ret = 0;
 
 	if (!b->c.level)
@@ -529,7 +521,6 @@ fsck_err:
 	bch2_bkey_buf_exit(&prev_k, c);
 	bch2_bkey_buf_exit(&cur_k, c);
-	printbuf_exit(&buf);
 	bch_err_fn(c, ret);
 	return ret;
} @@ -539,7 +530,7 @@ static int bch2_check_root(struct btree_trans *trans, enum btree_id btree, { struct bch_fs *c = trans->c; struct btree_root *r = bch2_btree_id_root(c, btree); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; bch2_btree_id_to_text(&buf, btree); @@ -568,21 +559,20 @@ static int bch2_check_root(struct btree_trans *trans, enum btree_id btree, bch2_shoot_down_journal_keys(c, btree, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX); ret = bch2_get_scanned_nodes(c, btree, 0, POS_MIN, SPOS_MAX); if (ret) - goto err; + return ret; } *reconstructed_root = true; } err: fsck_err: - printbuf_exit(&buf); bch_err_fn(c, ret); return ret; } int bch2_check_topology(struct bch_fs *c) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct bpos pulled_from_scan = POS_MIN; int ret = 0; @@ -603,9 +593,8 @@ recover: six_unlock_read(&b->c.lock); if (ret == DROP_THIS_NODE) { - mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, b); - mutex_unlock(&c->btree_cache.lock); + scoped_guard(mutex, &c->btree_cache.lock) + bch2_btree_node_hash_remove(&c->btree_cache, b); r->b = NULL; @@ -614,17 +603,15 @@ recover: goto recover; } - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_btree_id_to_text(&buf, i); bch_err(c, "empty btree root %s", buf.buf); - printbuf_exit(&buf); bch2_btree_root_alloc_fake_trans(trans, i, 0); r->alive = false; ret = 0; } } - bch2_trans_put(trans); return ret; } @@ -651,7 +638,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id, struct bkey deleted = KEY(0, 0, 0); struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL }; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; deleted.p = k.k->p; @@ -675,10 +662,9 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id, (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); bch2_dev_btree_bitmap_mark(c, k); bch2_write_super(c); - mutex_unlock(&c->sb_lock); } /* @@ -703,7 +689,6 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id, BTREE_TRIGGER_gc|BTREE_TRIGGER_insert|flags); out: fsck_err: - printbuf_exit(&buf); bch_err_fn(c, ret); return ret; } @@ -771,8 +756,8 @@ static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r) static int bch2_gc_btrees(struct bch_fs *c) { - struct btree_trans *trans = bch2_trans_get(c); - struct printbuf buf = PRINTBUF; + CLASS(btree_trans, trans)(c); + CLASS(printbuf, buf)(); int ret = 0; struct progress_indicator_state progress; @@ -792,8 +777,6 @@ static int bch2_gc_btrees(struct bch_fs *c) ret = bch2_gc_btree(trans, &progress, btree, true); } - printbuf_exit(&buf); - bch2_trans_put(trans); bch_err_fn(c, ret); return ret; } @@ -945,16 +928,16 @@ fsck_err: static int bch2_gc_alloc_done(struct bch_fs *c) { + CLASS(btree_trans, trans)(c); int ret = 0; for_each_member_device(c, ca) { - ret = bch2_trans_run(c, - for_each_btree_key_max_commit(trans, iter, BTREE_ID_alloc, + ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, ca->mi.first_bucket), POS(ca->dev_idx, ca->mi.nbuckets - 1), BTREE_ITER_slots|BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_alloc_write_key(trans, &iter, ca, k))); + bch2_alloc_write_key(trans, &iter, ca, k)); if (ret) { bch2_dev_put(ca); break; @@ -987,7 +970,7 @@ static int bch2_gc_write_stripes_key(struct btree_trans *trans, struct bkey_s_c k) { 
struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); const struct bch_stripe *s; struct gc_stripe *m; bool bad = false; @@ -1032,18 +1015,17 @@ static int bch2_gc_write_stripes_key(struct btree_trans *trans, ret = bch2_trans_update(trans, iter, &new->k_i, 0); } fsck_err: - printbuf_exit(&buf); return ret; } static int bch2_gc_stripes_done(struct bch_fs *c) { - return bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_commit(trans, iter, BTREE_ID_stripes, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_gc_write_stripes_key(trans, &iter, k))); + bch2_gc_write_stripes_key(trans, &iter, k)); } /** @@ -1072,8 +1054,8 @@ int bch2_check_allocations(struct bch_fs *c) { int ret; - down_read(&c->state_lock); - down_write(&c->gc_lock); + guard(rwsem_read)(&c->state_lock); + guard(rwsem_write)(&c->gc_lock); bch2_btree_interior_updates_flush(c); @@ -1102,15 +1084,11 @@ int bch2_check_allocations(struct bch_fs *c) bch2_gc_stripes_done(c) ?: bch2_gc_reflink_done(c); out: - percpu_down_write(&c->mark_lock); - /* Indicates that gc is no longer in progress: */ - __gc_pos_set(c, gc_phase(GC_PHASE_not_running)); - - bch2_gc_free(c); - percpu_up_write(&c->mark_lock); - - up_write(&c->gc_lock); - up_read(&c->state_lock); + scoped_guard(percpu_write, &c->mark_lock) { + /* Indicates that gc is no longer in progress: */ + __gc_pos_set(c, gc_phase(GC_PHASE_not_running)); + bch2_gc_free(c); + } /* * At startup, allocations can happen directly instead of via the @@ -1121,7 +1099,6 @@ out: if (!ret && !test_bit(BCH_FS_errors_not_fixed, &c->flags)) bch2_sb_members_clean_deleted(c); - bch_err_fn(c, ret); return ret; } diff --git a/libbcachefs/btree_io.c b/libbcachefs/btree_io.c index 064627a2..bd86dd71 100644 --- a/libbcachefs/btree_io.c +++ b/libbcachefs/btree_io.c @@ -592,7 +592,7 @@ static int __btree_err(int ret, !(test_bit(BCH_FS_in_fsck, &c->flags) && c->opts.fix_errors == FSCK_FIX_ask); - struct printbuf out = PRINTBUF; + CLASS(printbuf, out)(); bch2_log_msg_start(c, &out); if (!print_deferred) @@ -619,13 +619,13 @@ static int __btree_err(int ret, if (!have_retry) ret = bch_err_throw(c, fsck_fix); - goto out; + return ret; case -BCH_ERR_btree_node_read_err_bad_node: prt_str(&out, ", "); break; } - goto out; + return ret; } if (rw == WRITE) { @@ -647,16 +647,14 @@ static int __btree_err(int ret, if (!have_retry) ret = bch_err_throw(c, fsck_fix); - goto out; + return ret; case -BCH_ERR_btree_node_read_err_bad_node: prt_str(&out, ", "); break; } print: bch2_print_str(c, KERN_ERR, out.buf); -out: fsck_err: - printbuf_exit(&out); return ret; } @@ -735,8 +733,8 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca, struct printbuf *err_msg) { unsigned version = le16_to_cpu(i->version); - struct printbuf buf1 = PRINTBUF; - struct printbuf buf2 = PRINTBUF; + CLASS(printbuf, buf1)(); + CLASS(printbuf, buf2)(); int ret = 0; btree_err_on(!bch2_version_compatible(version), @@ -755,10 +753,9 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca, "bset version %u older than superblock version_min %u", version, c->sb.version_min)) { if (bch2_version_compatible(version)) { - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); c->disk_sb.sb->version_min = cpu_to_le16(version); bch2_write_super(c); - mutex_unlock(&c->sb_lock); } else { /* We have no idea what's going on: */ i->version = cpu_to_le16(c->sb.version); @@ -772,10 +769,9 @@ static int validate_bset(struct bch_fs 
*c, struct bch_dev *ca, btree_node_bset_newer_than_sb, "bset version %u newer than superblock version %u", version, c->sb.version)) { - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); c->disk_sb.sb->version = cpu_to_le16(version); bch2_write_super(c); - mutex_unlock(&c->sb_lock); } btree_err_on(BSET_SEPARATE_WHITEOUTS(i), @@ -875,8 +871,6 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca, &bn->format); } fsck_err: - printbuf_exit(&buf2); - printbuf_exit(&buf1); return ret; } @@ -946,7 +940,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b, { unsigned version = le16_to_cpu(i->version); struct bkey_packed *k, *prev = NULL; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 && BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v); int ret = 0; @@ -1051,7 +1045,6 @@ got_good_key: set_btree_node_need_rewrite_error(b); } fsck_err: - printbuf_exit(&buf); return ret; } @@ -1070,7 +1063,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v); unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)); u64 max_journal_seq = 0; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0, write = READ; u64 start_time = local_clock(); @@ -1385,7 +1378,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, } fsck_err: mempool_free(iter, &c->fill_iter); - printbuf_exit(&buf); bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time); return ret; } @@ -1401,7 +1393,7 @@ static void btree_node_read_work(struct work_struct *work) struct bch_io_failures failed = { .nr = 0 }; int ret = 0; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "btree node read error at btree "); @@ -1493,7 +1485,6 @@ start: bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read], rb->start_time); bio_put(&rb->bio); - printbuf_exit(&buf); clear_btree_node_read_in_flight(b); smp_mb__after_atomic(); wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); @@ -1575,7 +1566,7 @@ static CLOSURE_CALLBACK(btree_node_read_all_replicas_done) closure_type(ra, struct btree_node_read_all, cl); struct bch_fs *c = ra->c; struct btree *b = ra->b; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bool dump_bset_maps = false; int ret = 0, best = -1, write = READ; unsigned i, written = 0, written2 = 0; @@ -1684,11 +1675,10 @@ fsck_err: if (ret) { set_btree_node_read_error(b); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_btree_lost_data(c, &buf, b->c.btree_id); if (buf.pos) bch_err(c, "%s", buf.buf); - printbuf_exit(&buf); } else if (*saw_error) bch2_btree_node_rewrite_async(c, b); @@ -1699,7 +1689,6 @@ fsck_err: closure_debug_destroy(&ra->cl); kfree(ra); - printbuf_exit(&buf); clear_btree_node_read_in_flight(b); smp_mb__after_atomic(); @@ -1819,7 +1808,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b, if (ret <= 0) { bool ratelimit = true; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_str(&buf, "btree node read error: no device to read from\n at "); @@ -1836,7 +1825,6 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b, DEFAULT_RATELIMIT_BURST); if (!ratelimit || __ratelimit(&rs)) bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); set_btree_node_read_error(b); clear_btree_node_read_in_flight(b); @@ -1918,9 +1906,8 @@ 
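The __btree_err()/validate_bset() conversions above work because CLASS(printbuf, name)() ties a constructor and a destructor to a variable declaration: the buffer is released on every return, so the goto out/printbuf_exit() pairs disappear. A rough standalone analogue of such a class (sketch_buf and its helpers are invented names, not the real bcachefs printbuf API):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct sketch_buf { char *s; size_t len; };

	static void sketch_buf_exit(struct sketch_buf *b)
	{
		free(b->s);	/* destructor: runs on any exit from the scope */
	}

	/* rough analogue of CLASS(): declaration + constructor + destructor */
	#define SKETCH_BUF(name) \
		struct sketch_buf name __attribute__((cleanup(sketch_buf_exit))) = { NULL, 0 }

	static void sketch_append(struct sketch_buf *b, const char *s)
	{
		size_t n = strlen(s);	/* no realloc error handling: sketch only */
		b->s = realloc(b->s, b->len + n + 1);
		memcpy(b->s + b->len, s, n + 1);
		b->len += n;
	}

	static int report(int err)
	{
		SKETCH_BUF(buf);

		sketch_append(&buf, "status: ");
		sketch_append(&buf, err ? "error" : "ok");
		fprintf(stderr, "%s\n", buf.s);
		return err;	/* buf.s freed automatically, no explicit exit call */
	}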
static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id, bch2_btree_node_read(trans, b, true); if (btree_node_read_error(b)) { - mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, b); - mutex_unlock(&c->btree_cache.lock); + scoped_guard(mutex, &c->btree_cache.lock) + bch2_btree_node_hash_remove(&c->btree_cache, b); ret = bch_err_throw(c, btree_node_read_error); goto err; @@ -1937,7 +1924,8 @@ err: int bch2_btree_root_read(struct bch_fs *c, enum btree_id id, const struct bkey_i *k, unsigned level) { - return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level)); + CLASS(btree_trans, trans)(c); + return __bch2_btree_root_read(trans, id, k, level); } struct btree_node_scrub { @@ -2016,7 +2004,7 @@ static void btree_node_scrub_work(struct work_struct *work) { struct btree_node_scrub *scrub = container_of(work, struct btree_node_scrub, work); struct bch_fs *c = scrub->c; - struct printbuf err = PRINTBUF; + CLASS(printbuf, err)(); __bch2_btree_pos_to_text(&err, c, scrub->btree, scrub->level, bkey_i_to_s_c(scrub->key.k)); @@ -2031,7 +2019,6 @@ static void btree_node_scrub_work(struct work_struct *work) bch_err_fn_ratelimited(c, ret); } - printbuf_exit(&err); bch2_bkey_buf_exit(&scrub->key, c);; btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf); enumerated_ref_put(&scrub->ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub); @@ -2212,7 +2199,8 @@ static void btree_node_write_work(struct work_struct *work) } } else { - ret = bch2_trans_do(c, + CLASS(btree_trans, trans)(c); + ret = lockrestart_do(trans, bch2_btree_node_update_key_get_iter(trans, b, &wbio->key, BCH_WATERMARK_interior_updates| BCH_TRANS_COMMIT_journal_reclaim| @@ -2231,11 +2219,10 @@ err: set_btree_node_noevict(b); if (!bch2_err_matches(ret, EROFS)) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_printf(&buf, "writing btree node: %s\n ", bch2_err_str(ret)); bch2_btree_pos_to_text(&buf, c, b); bch2_fs_fatal_error(c, "%s", buf.buf); - printbuf_exit(&buf); } goto out; } @@ -2254,13 +2241,12 @@ static void btree_node_write_endio(struct bio *bio) wbio->submit_time, !bio->bi_status); if (ca && bio->bi_status) { - struct printbuf buf = PRINTBUF; - buf.atomic++; + CLASS(printbuf, buf)(); + guard(printbuf_atomic)(&buf); prt_printf(&buf, "btree write error: %s\n ", bch2_blk_status_to_str(bio->bi_status)); bch2_btree_pos_to_text(&buf, c, b); bch_err_dev_ratelimited(ca, "%s", buf.buf); - printbuf_exit(&buf); } if (bio->bi_status) { @@ -2554,9 +2540,14 @@ do_write: } count_event(c, btree_node_write); + /* + * blk-wbt.c throttles all writes except those that have both REQ_SYNC + * and REQ_IDLE set... 
+ */ + wbio = container_of(bio_alloc_bioset(NULL, buf_pages(data, sectors_to_write << 9), - REQ_OP_WRITE|REQ_META, + REQ_OP_WRITE|REQ_META|REQ_SYNC|REQ_IDLE, GFP_NOFS, &c->btree_bio), struct btree_write_bio, wbio.bio); diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c index 74639468..cc771aff 100644 --- a/libbcachefs/btree_iter.c +++ b/libbcachefs/btree_iter.c @@ -903,7 +903,7 @@ static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans, k = bch2_btree_and_journal_iter_peek(&jiter); if (!k.k) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "node not found at pos "); bch2_bpos_to_text(&buf, path->pos); @@ -911,7 +911,6 @@ static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans, bch2_btree_pos_to_text(&buf, c, l->b); ret = bch2_fs_topology_error(c, "%s", buf.buf); - printbuf_exit(&buf); goto err; } @@ -930,7 +929,7 @@ static noinline_for_stack int btree_node_missing_err(struct btree_trans *trans, struct btree_path *path) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "node not found at pos "); bch2_bpos_to_text(&buf, path->pos); @@ -1451,7 +1450,7 @@ void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_ static void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans) { #ifdef CONFIG_BCACHEFS_DEBUG - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_prt_backtrace(&buf, &trans->last_restarted_trace); panic("in transaction restart: %s, last restarted by\n%s", bch2_err_str(trans->restarted), @@ -1601,13 +1600,13 @@ void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans) static noinline __cold void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); + bch2_log_msg_start(trans->c, &buf); __bch2_trans_paths_to_text(&buf, trans, nosort); bch2_trans_updates_to_text(&buf, trans); bch2_print_str(trans->c, KERN_ERR, buf.buf); - printbuf_exit(&buf); } noinline __cold @@ -1620,22 +1619,19 @@ noinline __cold static void bch2_trans_update_max_paths(struct btree_trans *trans) { struct btree_transaction_stats *s = btree_trans_stats(trans); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths); bch2_trans_paths_to_text(&buf, trans); if (!buf.allocation_failure) { - mutex_lock(&s->lock); + guard(mutex)(&s->lock); if (nr > s->nr_max_paths) { s->nr_max_paths = nr; swap(s->max_paths_text, buf.buf); } - mutex_unlock(&s->lock); } - printbuf_exit(&buf); - trans->nr_paths_max = nr; } @@ -1643,11 +1639,10 @@ noinline __cold int __bch2_btree_trans_too_many_iters(struct btree_trans *trans) { if (trace_trans_restart_too_many_iters_enabled()) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_trans_paths_to_text(&buf, trans); trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf); - printbuf_exit(&buf); } count_event(trans->c, trans_restart_too_many_iters); @@ -3196,14 +3191,13 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size, unsigned long if (WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX)) { #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "bump allocator exceeded BTREE_TRANS_MEM_MAX (%u)\n", BTREE_TRANS_MEM_MAX); bch2_trans_kmalloc_trace_to_text(&buf, &trans->trans_kmalloc_trace); bch2_print_str(c, KERN_ERR, 
buf.buf); - printbuf_exit(&buf); #endif } @@ -3213,7 +3207,7 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size, unsigned long struct btree_transaction_stats *s = btree_trans_stats(trans); if (new_bytes > s->max_mem) { - mutex_lock(&s->lock); + guard(mutex)(&s->lock); #ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE darray_resize(&s->trans_kmalloc_trace, trans->trans_kmalloc_trace.nr); s->trans_kmalloc_trace.nr = min(s->trans_kmalloc_trace.size, @@ -3225,7 +3219,6 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size, unsigned long s->trans_kmalloc_trace.nr); #endif s->max_mem = new_bytes; - mutex_unlock(&s->lock); } if (trans->used_mempool || new_bytes > BTREE_TRANS_MEM_MAX) { @@ -3535,7 +3528,7 @@ static void check_btree_paths_leaked(struct btree_trans *trans) struct btree_path *path; unsigned i; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "btree paths leaked from %s!\n", trans->fn); @@ -3547,7 +3540,6 @@ static void check_btree_paths_leaked(struct btree_trans *trans) bch2_fs_emergency_read_only2(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); } } #else @@ -3672,11 +3664,11 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans) /* trans->paths is rcu protected vs. freeing */ guard(rcu)(); - out->atomic++; + guard(printbuf_atomic)(out); struct btree_path *paths = rcu_dereference(trans->paths); if (!paths) - goto out; + return; unsigned long *paths_allocated = trans_paths_allocated(paths); @@ -3712,8 +3704,6 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans) bch2_btree_bkey_cached_common_to_text(out, b); prt_newline(out); } -out: - --out->atomic; } void bch2_fs_btree_iter_exit(struct bch_fs *c) diff --git a/libbcachefs/btree_iter.h b/libbcachefs/btree_iter.h index cc2c6bb6..53074ed6 100644 --- a/libbcachefs/btree_iter.h +++ b/libbcachefs/btree_iter.h @@ -1007,13 +1007,19 @@ static inline void class_btree_trans_destructor(struct btree_trans **p) #define class_btree_trans_constructor(_c) bch2_trans_get(_c) +/* deprecated, prefer CLASS(btree_trans) */ #define bch2_trans_run(_c, _do) \ ({ \ CLASS(btree_trans, trans)(_c); \ (_do); \ }) -#define bch2_trans_do(_c, _do) bch2_trans_run(_c, lockrestart_do(trans, _do)) +/* deprecated, prefer CLASS(btree_trans) */ +#define bch2_trans_do(_c, _do) \ +({ \ + CLASS(btree_trans, trans)(_c); \ + lockrestart_do(trans, _do); \ +}) void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *); diff --git a/libbcachefs/btree_journal_iter.c b/libbcachefs/btree_journal_iter.c index 39ecd95c..24f2fbe8 100644 --- a/libbcachefs/btree_journal_iter.c +++ b/libbcachefs/btree_journal_iter.c @@ -462,9 +462,8 @@ void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree, keys->data[idx].level == level && bpos_eq(keys->data[idx].k->k.p, pos) && !keys->data[idx].overwritten) { - mutex_lock(&keys->overwrite_lock); + guard(mutex)(&keys->overwrite_lock); __bch2_journal_key_overwritten(keys, idx); - mutex_unlock(&keys->overwrite_lock); } } @@ -815,7 +814,7 @@ void bch2_shoot_down_journal_keys(struct bch_fs *c, enum btree_id btree, void bch2_journal_keys_dump(struct bch_fs *c) { struct journal_keys *keys = &c->journal_keys; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); pr_info("%zu keys:", keys->nr); @@ -829,7 +828,6 @@ void bch2_journal_keys_dump(struct bch_fs *c) bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k)); pr_err("%s", buf.buf); } - printbuf_exit(&buf); } void 
bch2_fs_journal_keys_init(struct bch_fs *c) diff --git a/libbcachefs/btree_key_cache.c b/libbcachefs/btree_key_cache.c index 19d1bb80..ebba14da 100644 --- a/libbcachefs/btree_key_cache.c +++ b/libbcachefs/btree_key_cache.c @@ -301,13 +301,12 @@ static noinline_for_stack void do_trace_key_cache_fill(struct btree_trans *trans struct btree_path *ck_path, struct bkey_s_c k) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_bpos_to_text(&buf, ck_path->pos); prt_char(&buf, ' '); bch2_bkey_val_to_text(&buf, trans->c, k); trace_key_cache_fill(trans, buf.buf); - printbuf_exit(&buf); } static noinline int btree_key_cache_fill(struct btree_trans *trans, @@ -540,10 +539,10 @@ int bch2_btree_key_cache_journal_flush(struct journal *j, struct bkey_cached *ck = container_of(pin, struct bkey_cached, journal); struct bkey_cached_key key; - struct btree_trans *trans = bch2_trans_get(c); int srcu_idx = srcu_read_lock(&c->btree_trans_barrier); int ret = 0; + CLASS(btree_trans, trans)(c); btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read); key = ck->key; @@ -566,8 +565,6 @@ int bch2_btree_key_cache_journal_flush(struct journal *j, BCH_TRANS_COMMIT_journal_reclaim, false)); unlock: srcu_read_unlock(&c->btree_trans_barrier, srcu_idx); - - bch2_trans_put(trans); return ret; } diff --git a/libbcachefs/btree_locking.c b/libbcachefs/btree_locking.c index bed2b4b6..38c5643e 100644 --- a/libbcachefs/btree_locking.c +++ b/libbcachefs/btree_locking.c @@ -159,13 +159,11 @@ static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans count_event(c, trans_restart_would_deadlock); if (trace_trans_restart_would_deadlock_enabled()) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); + guard(printbuf_atomic)(&buf); - buf.atomic++; print_cycle(&buf, g); - trace_trans_restart_would_deadlock(trans, buf.buf); - printbuf_exit(&buf); } } @@ -196,8 +194,8 @@ static int btree_trans_abort_preference(struct btree_trans *trans) static noinline __noreturn void break_cycle_fail(struct lock_graph *g) { - struct printbuf buf = PRINTBUF; - buf.atomic++; + CLASS(printbuf, buf)(); + guard(printbuf_atomic)(&buf); prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks")); @@ -214,7 +212,6 @@ static noinline __noreturn void break_cycle_fail(struct lock_graph *g) } bch2_print_str(g->g->trans->c, KERN_ERR, buf.buf); - printbuf_exit(&buf); BUG(); } @@ -692,7 +689,7 @@ int __bch2_btree_path_upgrade(struct btree_trans *trans, count_event(trans->c, trans_restart_upgrade); if (trace_trans_restart_upgrade_enabled()) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_printf(&buf, "%s %pS\n", trans->fn, (void *) _RET_IP_); prt_printf(&buf, "btree %s pos\n", bch2_btree_id_str(path->btree_id)); @@ -708,7 +705,6 @@ int __bch2_btree_path_upgrade(struct btree_trans *trans, path->l[f.l].lock_seq); trace_trans_restart_upgrade(trans->c, buf.buf); - printbuf_exit(&buf); } out: bch2_trans_verify_locks(trans); @@ -777,7 +773,7 @@ static noinline __cold void bch2_trans_relock_fail(struct btree_trans *trans, st goto out; if (trace_trans_restart_relock_enabled()) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_bpos_to_text(&buf, path->pos); prt_printf(&buf, " %s l=%u seq=%u node seq=", @@ -797,7 +793,6 @@ static noinline __cold void bch2_trans_relock_fail(struct btree_trans *trans, st } trace_trans_restart_relock(trans, ip, buf.buf); - printbuf_exit(&buf); } count_event(trans->c, trans_restart_relock); diff --git a/libbcachefs/btree_node_scan.c 
b/libbcachefs/btree_node_scan.c index 42c9eb2c..d997e381 100644 --- a/libbcachefs/btree_node_scan.c +++ b/libbcachefs/btree_node_scan.c @@ -65,16 +65,6 @@ static void found_btree_node_to_key(struct bkey_i *k, const struct found_btree_n memcpy(bp->v.start, f->ptrs, sizeof(struct bch_extent_ptr) * f->nr_ptrs); } -static inline u64 bkey_journal_seq(struct bkey_s_c k) -{ - switch (k.k->type) { - case KEY_TYPE_inode_v3: - return le64_to_cpu(bkey_s_c_to_inode_v3(k).v->bi_journal_seq); - default: - return 0; - } -} - static int found_btree_node_cmp_cookie(const void *_l, const void *_r) { const struct found_btree_node *l = _l; @@ -206,17 +196,15 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca, n.journal_seq = le64_to_cpu(bn->keys.journal_seq), n.sectors_written = b->written; - mutex_lock(&f->lock); + guard(mutex)(&f->lock); if (BSET_BIG_ENDIAN(&bn->keys) != CPU_BIG_ENDIAN) { bch_err(c, "try_read_btree_node() can't handle endian conversion"); f->ret = -EINVAL; - goto unlock; + return; } if (darray_push(&f->nodes, n)) f->ret = -ENOMEM; -unlock: - mutex_unlock(&f->lock); } } @@ -371,7 +359,7 @@ static int handle_overwrites(struct bch_fs *c, int bch2_scan_for_btree_nodes(struct bch_fs *c) { struct find_btree_nodes *f = &c->found_btree_nodes; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); found_btree_nodes nodes_heap = {}; size_t dst; int ret = 0; @@ -478,7 +466,6 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c) eytzinger0_sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL); err: darray_exit(&nodes_heap); - printbuf_exit(&buf); return ret; } @@ -550,7 +537,7 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree, return ret; if (c->opts.verbose) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "recovery "); bch2_btree_id_level_to_text(&buf, btree, level); @@ -560,7 +547,6 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree, bch2_bpos_to_text(&buf, node_max); bch_info(c, "%s(): %s", __func__, buf.buf); - printbuf_exit(&buf); } struct found_btree_node search = { @@ -584,10 +570,9 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree, found_btree_node_to_key(&tmp.k, &n); if (c->opts.verbose) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&tmp.k)); bch_verbose(c, "%s(): recovering %s", __func__, buf.buf); - printbuf_exit(&buf); } BUG_ON(bch2_bkey_validate(c, bkey_i_to_s_c(&tmp.k), diff --git a/libbcachefs/btree_trans_commit.c b/libbcachefs/btree_trans_commit.c index 4afe59f3..1f9965ae 100644 --- a/libbcachefs/btree_trans_commit.c +++ b/libbcachefs/btree_trans_commit.c @@ -235,10 +235,10 @@ static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin, struct bch_fs *c = container_of(j, struct bch_fs, journal); struct btree_write *w = container_of(pin, struct btree_write, journal); struct btree *b = container_of(w, struct btree, writes[i]); - struct btree_trans *trans = bch2_trans_get(c); unsigned long old, new; unsigned idx = w - b->writes; + CLASS(btree_trans, trans)(c); btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); old = READ_ONCE(b->flags); @@ -257,8 +257,6 @@ static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin, btree_node_write_if_need(trans, b, SIX_LOCK_read); six_unlock_read(&b->c.lock); - - bch2_trans_put(trans); return 0; } @@ -674,16 +672,20 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, struct bkey_i 
*accounting; - percpu_down_read(&c->mark_lock); - for (accounting = btree_trans_subbuf_base(trans, &trans->accounting); - accounting != btree_trans_subbuf_top(trans, &trans->accounting); - accounting = bkey_next(accounting)) { - ret = bch2_accounting_trans_commit_hook(trans, - bkey_i_to_accounting(accounting), flags); - if (ret) - goto revert_fs_usage; - } - percpu_up_read(&c->mark_lock); + scoped_guard(percpu_read, &c->mark_lock) + for (accounting = btree_trans_subbuf_base(trans, &trans->accounting); + accounting != btree_trans_subbuf_top(trans, &trans->accounting); + accounting = bkey_next(accounting)) { + ret = bch2_accounting_trans_commit_hook(trans, + bkey_i_to_accounting(accounting), flags); + if (unlikely(ret)) { + for (struct bkey_i *i = btree_trans_subbuf_base(trans, &trans->accounting); + i != accounting; + i = bkey_next(i)) + bch2_accounting_trans_commit_revert(trans, bkey_i_to_accounting(i), flags); + return ret; + } + } /* XXX: we only want to run this if deltas are nonzero */ bch2_trans_account_disk_usage_change(trans); @@ -795,13 +797,6 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, return 0; fatal_err: bch2_fs_fatal_error(c, "fatal error in transaction commit: %s", bch2_err_str(ret)); - percpu_down_read(&c->mark_lock); -revert_fs_usage: - for (struct bkey_i *i = btree_trans_subbuf_base(trans, &trans->accounting); - i != accounting; - i = bkey_next(i)) - bch2_accounting_trans_commit_revert(trans, bkey_i_to_accounting(i), flags); - percpu_up_read(&c->mark_lock); return ret; } diff --git a/libbcachefs/btree_update.c b/libbcachefs/btree_update.c index 7983c494..f514a8ad 100644 --- a/libbcachefs/btree_update.c +++ b/libbcachefs/btree_update.c @@ -671,8 +671,9 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k, enum bch_trans_commit_flags commit_flags, enum btree_iter_update_trigger_flags iter_flags) { - return bch2_trans_commit_do(c, disk_res, NULL, commit_flags, - bch2_btree_insert_trans(trans, id, k, iter_flags)); + CLASS(btree_trans, trans)(c); + return commit_do(trans, disk_res, NULL, commit_flags, + bch2_btree_insert_trans(trans, id, k, iter_flags)); } int bch2_btree_delete_at(struct btree_trans *trans, struct btree_iter *iter, @@ -781,9 +782,8 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id, enum btree_iter_update_trigger_flags flags, u64 *journal_seq) { - int ret = bch2_trans_run(c, - bch2_btree_delete_range_trans(trans, id, start, end, - flags, journal_seq)); + CLASS(btree_trans, trans)(c); + int ret = bch2_btree_delete_range_trans(trans, id, start, end, flags, journal_seq); if (ret == -BCH_ERR_transaction_restart_nested) ret = 0; return ret; @@ -877,31 +877,31 @@ static int __bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt, va_list args) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_vprintf(&buf, fmt, args); unsigned u64s = DIV_ROUND_UP(buf.pos, sizeof(u64)); int ret = buf.allocation_failure ? 
-BCH_ERR_ENOMEM_trans_log_msg : 0; if (ret) - goto err; + return ret; if (!test_bit(JOURNAL_running, &c->journal.flags)) { ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s)); if (ret) - goto err; + return ret; struct jset_entry_log *l = (void *) &darray_top(c->journal.early_journal_entries); journal_entry_init(&l->entry, BCH_JSET_ENTRY_log, 0, 1, u64s); memcpy_and_pad(l->d, u64s * sizeof(u64), buf.buf, buf.pos, 0); c->journal.early_journal_entries.nr += jset_u64s(u64s); } else { - ret = bch2_trans_commit_do(c, NULL, NULL, commit_flags, - bch2_trans_log_msg(trans, &buf)); + CLASS(btree_trans, trans)(c); + ret = commit_do(trans, NULL, NULL, commit_flags, + bch2_trans_log_msg(trans, &buf)); } -err: - printbuf_exit(&buf); - return ret; + + return ret; } __printf(2, 3) diff --git a/libbcachefs/btree_update.h index 8e91b9f1..633de3b3 100644 --- a/libbcachefs/btree_update.h +++ b/libbcachefs/btree_update.h @@ -278,6 +278,7 @@ static inline int bch2_trans_commit(struct btree_trans *trans, nested_lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\ (_journal_seq), (_flags))) +/* deprecated, prefer CLASS(btree_trans) */ #define bch2_trans_commit_do(_c, _disk_res, _journal_seq, _flags, _do) \ bch2_trans_run(_c, commit_do(trans, _disk_res, _journal_seq, _flags, _do)) void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *); diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c index ebdb4d2f..312ef203 100644 --- a/libbcachefs/btree_update_interior.c +++ b/libbcachefs/btree_update_interior.c @@ -53,7 +53,7 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) : b->data->min_key; struct btree_and_journal_iter iter; struct bkey_s_c k; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); struct bkey_buf prev; int ret = 0; @@ -133,7 +133,6 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) out: bch2_btree_and_journal_iter_exit(&iter); bch2_bkey_buf_exit(&prev, c); - printbuf_exit(&buf); return ret; err: bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); @@ -240,9 +239,8 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans, __btree_node_free(trans, b); - mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, b); - mutex_unlock(&c->btree_cache.lock); + scoped_guard(mutex, &c->btree_cache.lock) + bch2_btree_node_hash_remove(&c->btree_cache, b); six_unlock_write(&b->c.lock); mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); @@ -268,9 +266,8 @@ static void bch2_btree_node_free_never_used(struct btree_update *as, clear_btree_node_dirty_acct(c, b); clear_btree_node_need_write(b); - mutex_lock(&c->btree_cache.lock); - __bch2_btree_node_hash_remove(&c->btree_cache, b); - mutex_unlock(&c->btree_cache.lock); + scoped_guard(mutex, &c->btree_cache.lock) + __bch2_btree_node_hash_remove(&c->btree_cache, b); BUG_ON(p->nr >= ARRAY_SIZE(p->b)); p->b[p->nr++] = b; @@ -560,7 +557,8 @@ static void bch2_btree_update_free(struct btree_update *as, struct btree_trans * bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_total], as->start_time); - mutex_lock(&c->btree_interior_update_lock); + guard(mutex)(&c->btree_interior_update_lock); + list_del(&as->unwritten_list); list_del(&as->list); @@ -572,8 +570,6 @@ static void bch2_btree_update_free(struct btree_update *as, struct btree_trans * * since being on btree_interior_update_list is our ref on @c: */ closure_wake_up(&c->btree_interior_update_wait); - -
mutex_unlock(&c->btree_interior_update_lock); } static void btree_update_add_key(struct btree_update *as, @@ -602,12 +598,11 @@ static void btree_update_new_nodes_mark_sb(struct btree_update *as) { struct bch_fs *c = as->c; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); for_each_keylist_key(&as->new_keys, k) bch2_dev_btree_bitmap_mark(c, bkey_i_to_s_c(k)); bch2_write_super(c); - mutex_unlock(&c->sb_lock); } /* @@ -659,7 +654,7 @@ static void btree_update_nodes_written(struct btree_update *as) { struct bch_fs *c = as->c; struct btree *b; - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); u64 journal_seq = 0; unsigned i; int ret; @@ -818,15 +813,15 @@ err: bch2_journal_pin_drop(&c->journal, &as->journal); - mutex_lock(&c->btree_interior_update_lock); - for (i = 0; i < as->nr_new_nodes; i++) { - b = as->new_nodes[i]; + scoped_guard(mutex, &c->btree_interior_update_lock) { + for (i = 0; i < as->nr_new_nodes; i++) { + b = as->new_nodes[i]; - BUG_ON(b->will_make_reachable != (unsigned long) as); - b->will_make_reachable = 0; - clear_btree_node_will_make_reachable(b); + BUG_ON(b->will_make_reachable != (unsigned long) as); + b->will_make_reachable = 0; + clear_btree_node_will_make_reachable(b); + } } - mutex_unlock(&c->btree_interior_update_lock); for (i = 0; i < as->nr_new_nodes; i++) { b = as->new_nodes[i]; @@ -840,7 +835,6 @@ err: bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]); bch2_btree_update_free(as, trans); - bch2_trans_put(trans); } static void btree_interior_update_work(struct work_struct *work) @@ -850,12 +844,12 @@ static void btree_interior_update_work(struct work_struct *work) struct btree_update *as; while (1) { - mutex_lock(&c->btree_interior_update_lock); - as = list_first_entry_or_null(&c->btree_interior_updates_unwritten, - struct btree_update, unwritten_list); - if (as && !as->nodes_written) - as = NULL; - mutex_unlock(&c->btree_interior_update_lock); + scoped_guard(mutex, &c->btree_interior_update_lock) { + as = list_first_entry_or_null(&c->btree_interior_updates_unwritten, + struct btree_update, unwritten_list); + if (as && !as->nodes_written) + as = NULL; + } if (!as) break; @@ -869,9 +863,8 @@ static CLOSURE_CALLBACK(btree_update_set_nodes_written) closure_type(as, struct btree_update, cl); struct bch_fs *c = as->c; - mutex_lock(&c->btree_interior_update_lock); - as->nodes_written = true; - mutex_unlock(&c->btree_interior_update_lock); + scoped_guard(mutex, &c->btree_interior_update_lock) + as->nodes_written = true; queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work); } @@ -889,7 +882,7 @@ static void btree_update_updated_node(struct btree_update *as, struct btree *b) BUG_ON(!btree_node_dirty(b)); BUG_ON(!b->c.level); - mutex_lock(&c->btree_interior_update_lock); + guard(mutex)(&c->btree_interior_update_lock); list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten); as->mode = BTREE_UPDATE_node; @@ -898,8 +891,6 @@ static void btree_update_updated_node(struct btree_update *as, struct btree *b) set_btree_node_write_blocked(b); list_add(&as->write_blocked_list, &b->write_blocked); - - mutex_unlock(&c->btree_interior_update_lock); } static int bch2_update_reparent_journal_pin_flush(struct journal *j, @@ -938,11 +929,11 @@ static void btree_update_updated_root(struct btree_update *as, struct btree *b) b->c.btree_id, b->c.level, insert, insert->k.u64s); - mutex_lock(&c->btree_interior_update_lock); - list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten); + 
scoped_guard(mutex, &c->btree_interior_update_lock) { + list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten); - as->mode = BTREE_UPDATE_root; - mutex_unlock(&c->btree_interior_update_lock); + as->mode = BTREE_UPDATE_root; + } } /* @@ -963,7 +954,8 @@ static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree closure_get(&as->cl); - mutex_lock(&c->btree_interior_update_lock); + guard(mutex)(&c->btree_interior_update_lock); + BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes)); BUG_ON(b->will_make_reachable); @@ -971,8 +963,6 @@ static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree b->will_make_reachable = 1UL|(unsigned long) as; set_btree_node_will_make_reachable(b); - mutex_unlock(&c->btree_interior_update_lock); - btree_update_add_key(as, &as->new_keys, b); if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { @@ -991,31 +981,29 @@ static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b) { struct btree_update *as; unsigned long v; - unsigned i; - mutex_lock(&c->btree_interior_update_lock); - /* - * When b->will_make_reachable != 0, it owns a ref on as->cl that's - * dropped when it gets written by bch2_btree_complete_write - the - * xchg() is for synchronization with bch2_btree_complete_write: - */ - v = xchg(&b->will_make_reachable, 0); - clear_btree_node_will_make_reachable(b); - as = (struct btree_update *) (v & ~1UL); + scoped_guard(mutex, &c->btree_interior_update_lock) { + /* + * When b->will_make_reachable != 0, it owns a ref on as->cl that's + * dropped when it gets written by bch2_btree_complete_write - the + * xchg() is for synchronization with bch2_btree_complete_write: + */ + v = xchg(&b->will_make_reachable, 0); + clear_btree_node_will_make_reachable(b); + as = (struct btree_update *) (v & ~1UL); - if (!as) { - mutex_unlock(&c->btree_interior_update_lock); - return; - } + if (!as) + return; - for (i = 0; i < as->nr_new_nodes; i++) - if (as->new_nodes[i] == b) - goto found; + unsigned i; + for (i = 0; i < as->nr_new_nodes; i++) + if (as->new_nodes[i] == b) + goto found; - BUG(); -found: - array_remove_item(as->new_nodes, as->nr_new_nodes, i); - mutex_unlock(&c->btree_interior_update_lock); + BUG(); + found: + array_remove_item(as->new_nodes, as->nr_new_nodes, i); + } if (v & 1) closure_put(&as->cl); @@ -1232,9 +1220,8 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, bch2_keylist_init(&as->new_keys, as->_new_keys); bch2_keylist_init(&as->parent_keys, as->inline_keys); - mutex_lock(&c->btree_interior_update_lock); - list_add_tail(&as->list, &c->btree_interior_update_list); - mutex_unlock(&c->btree_interior_update_lock); + scoped_guard(mutex, &c->btree_interior_update_lock) + list_add_tail(&as->list, &c->btree_interior_update_list); struct btree *b = btree_path_node(path, path->level); as->node_start = b->data->min_key; @@ -1318,13 +1305,11 @@ err: static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b) { /* Root nodes cannot be reaped */ - mutex_lock(&c->btree_cache.lock); - list_del_init(&b->list); - mutex_unlock(&c->btree_cache.lock); + scoped_guard(mutex, &c->btree_cache.lock) + list_del_init(&b->list); - mutex_lock(&c->btree_root_lock); - bch2_btree_id_root(c, b->c.btree_id)->b = b; - mutex_unlock(&c->btree_root_lock); + scoped_guard(mutex, &c->btree_root_lock) + bch2_btree_id_root(c, b->c.btree_id)->b = b; bch2_recalc_btree_reserve(c); } @@ -1379,7 +1364,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, { struct bch_fs *c = 
as->c; struct bkey_packed *k; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); unsigned long old, new; BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 && @@ -1424,8 +1409,6 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, new |= BTREE_WRITE_interior; new |= 1 << BTREE_NODE_need_write; } while (!try_cmpxchg(&b->flags, &old, new)); - - printbuf_exit(&buf); } static int @@ -1452,7 +1435,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, int ret = bch2_btree_node_check_topology(trans, b); if (ret) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); for (struct bkey_i *k = keys->keys; k != insert; @@ -1839,7 +1822,7 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t bch2_verify_keylist_sorted(keys); if (!btree_node_intent_locked(path, b->c.level)) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "%s(): node not locked at level %u\n", __func__, b->c.level); @@ -1848,7 +1831,6 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t bch2_fs_emergency_read_only2(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); return -EIO; } @@ -1971,9 +1953,8 @@ static void __btree_increase_depth(struct btree_update *as, struct btree_trans * bch2_trans_node_add(trans, path, n); six_unlock_intent(&n->c.lock); - mutex_lock(&c->btree_cache.lock); - list_add_tail(&b->list, &c->btree_cache.live[btree_node_pinned(b)].list); - mutex_unlock(&c->btree_cache.lock); + scoped_guard(mutex, &c->btree_cache.lock) + list_add_tail(&b->list, &c->btree_cache.live[btree_node_pinned(b)].list); bch2_trans_verify_locks(trans); } @@ -2073,7 +2054,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, } if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); printbuf_indent_add_nextline(&buf, 2); prt_printf(&buf, "%s(): ", __func__); @@ -2088,7 +2069,6 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, bch2_bpos_to_text(&buf, next->data->min_key); bch_err(c, "%s", buf.buf); - printbuf_exit(&buf); goto err; } @@ -2371,9 +2351,8 @@ static void async_btree_node_rewrite_work(struct work_struct *work) !bch2_err_matches(ret, EROFS)) bch_err_fn_ratelimited(c, ret); - spin_lock(&c->btree_node_rewrites_lock); - list_del(&a->list); - spin_unlock(&c->btree_node_rewrites_lock); + scoped_guard(spinlock, &c->btree_node_rewrites_lock) + list_del(&a->list); closure_wake_up(&c->btree_node_rewrites_wait); @@ -2398,16 +2377,16 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b) bool now = false, pending = false; - spin_lock(&c->btree_node_rewrites_lock); - if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_journal_replay) && - enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_node_rewrite)) { - list_add(&a->list, &c->btree_node_rewrites); - now = true; - } else if (!test_bit(BCH_FS_may_go_rw, &c->flags)) { - list_add(&a->list, &c->btree_node_rewrites_pending); - pending = true; + scoped_guard(spinlock, &c->btree_node_rewrites_lock) { + if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_journal_replay) && + enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_node_rewrite)) { + list_add(&a->list, &c->btree_node_rewrites); + now = true; + } else if (!test_bit(BCH_FS_may_go_rw, &c->flags)) { + list_add(&a->list, &c->btree_node_rewrites_pending); + pending = true; + } } - spin_unlock(&c->btree_node_rewrites_lock); if (now) { 
queue_work(c->btree_node_rewrite_worker, &a->work); @@ -2428,13 +2407,14 @@ void bch2_async_btree_node_rewrites_flush(struct bch_fs *c) void bch2_do_pending_node_rewrites(struct bch_fs *c) { while (1) { - spin_lock(&c->btree_node_rewrites_lock); - struct async_btree_rewrite *a = - list_pop_entry(&c->btree_node_rewrites_pending, - struct async_btree_rewrite, list); - if (a) - list_add(&a->list, &c->btree_node_rewrites); - spin_unlock(&c->btree_node_rewrites_lock); + struct async_btree_rewrite *a; + + scoped_guard(spinlock, &c->btree_node_rewrites_lock) { + a = list_pop_entry(&c->btree_node_rewrites_pending, + struct async_btree_rewrite, list); + if (a) + list_add(&a->list, &c->btree_node_rewrites); + } if (!a) break; @@ -2447,11 +2427,11 @@ void bch2_do_pending_node_rewrites(struct bch_fs *c) void bch2_free_pending_node_rewrites(struct bch_fs *c) { while (1) { - spin_lock(&c->btree_node_rewrites_lock); - struct async_btree_rewrite *a = - list_pop_entry(&c->btree_node_rewrites_pending, - struct async_btree_rewrite, list); - spin_unlock(&c->btree_node_rewrites_lock); + struct async_btree_rewrite *a; + + scoped_guard(spinlock, &c->btree_node_rewrites_lock) + a = list_pop_entry(&c->btree_node_rewrites_pending, + struct async_btree_rewrite, list); if (!a) break; @@ -2533,7 +2513,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, bch2_btree_node_lock_write_nofail(trans, btree_iter_path(trans, iter), &b->c); if (new_hash) { - mutex_lock(&c->btree_cache.lock); + guard(mutex)(&c->btree_cache.lock); bch2_btree_node_hash_remove(&c->btree_cache, new_hash); __bch2_btree_node_hash_remove(&c->btree_cache, b); @@ -2541,7 +2521,6 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, bkey_copy(&b->key, new_key); ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); BUG_ON(ret); - mutex_unlock(&c->btree_cache.lock); } else { bkey_copy(&b->key, new_key); } @@ -2552,9 +2531,8 @@ out: return ret; err: if (new_hash) { - mutex_lock(&c->btree_cache.lock); + guard(mutex)(&c->btree_cache.lock); bch2_btree_node_hash_remove(&c->btree_cache, b); - mutex_unlock(&c->btree_cache.lock); } goto out; } @@ -2689,7 +2667,8 @@ int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level) { - bch2_trans_run(c, lockrestart_do(trans, bch2_btree_root_alloc_fake_trans(trans, id, level))); + CLASS(btree_trans, trans)(c); + lockrestart_do(trans, bch2_btree_root_alloc_fake_trans(trans, id, level)); } static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as) @@ -2722,21 +2701,15 @@ void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c) { struct btree_update *as; - mutex_lock(&c->btree_interior_update_lock); + guard(mutex)(&c->btree_interior_update_lock); list_for_each_entry(as, &c->btree_interior_update_list, list) bch2_btree_update_to_text(out, as); - mutex_unlock(&c->btree_interior_update_lock); } static bool bch2_btree_interior_updates_pending(struct bch_fs *c) { - bool ret; - - mutex_lock(&c->btree_interior_update_lock); - ret = !list_empty(&c->btree_interior_update_list); - mutex_unlock(&c->btree_interior_update_lock); - - return ret; + guard(mutex)(&c->btree_interior_update_lock); + return !list_empty(&c->btree_interior_update_list); } bool bch2_btree_interior_updates_flush(struct bch_fs *c) @@ -2753,13 +2726,11 @@ void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry { struct btree_root *r = bch2_btree_id_root(c, 
entry->btree_id); - mutex_lock(&c->btree_root_lock); + guard(mutex)(&c->btree_interior_update_lock); r->level = entry->level; r->alive = true; bkey_copy(&r->key, (struct bkey_i *) entry->start); - - mutex_unlock(&c->btree_root_lock); } struct jset_entry * @@ -2767,11 +2738,9 @@ bch2_btree_roots_to_journal_entries(struct bch_fs *c, struct jset_entry *end, unsigned long skip) { - unsigned i; - - mutex_lock(&c->btree_root_lock); + guard(mutex)(&c->btree_interior_update_lock); - for (i = 0; i < btree_id_nr_alive(c); i++) { + for (unsigned i = 0; i < btree_id_nr_alive(c); i++) { struct btree_root *r = bch2_btree_id_root(c, i); if (r->alive && !test_bit(i, &skip)) { @@ -2781,8 +2750,6 @@ bch2_btree_roots_to_journal_entries(struct bch_fs *c, } } - mutex_unlock(&c->btree_root_lock); - return end; } diff --git a/libbcachefs/btree_write_buffer.c b/libbcachefs/btree_write_buffer.c index c8971678..9cfc3edc 100644 --- a/libbcachefs/btree_write_buffer.c +++ b/libbcachefs/btree_write_buffer.c @@ -259,9 +259,8 @@ out: bch2_btree_write_buffer_journal_flush); if (j->watermark) { - spin_lock(&j->lock); + guard(spinlock)(&j->lock); bch2_journal_set_watermark(j); - spin_unlock(&j->lock); } BUG_ON(wb->sorted.size < wb->flushing.keys.nr); @@ -270,7 +269,7 @@ out: int bch2_btree_write_buffer_insert_err(struct bch_fs *c, enum btree_id btree, struct bkey_i *k) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_printf(&buf, "attempting to do write buffer update on non wb btree="); bch2_btree_id_to_text(&buf, btree); @@ -278,7 +277,6 @@ int bch2_btree_write_buffer_insert_err(struct bch_fs *c, bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k)); bch2_fs_inconsistent(c, "%s", buf.buf); - printbuf_exit(&buf); return -EROFS; } @@ -300,9 +298,8 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) bch2_trans_unlock(trans); bch2_trans_begin(trans); - mutex_lock(&wb->inc.lock); - move_keys_from_inc_to_flushing(wb); - mutex_unlock(&wb->inc.lock); + scoped_guard(mutex, &wb->inc.lock) + move_keys_from_inc_to_flushing(wb); for (size_t i = 0; i < wb->flushing.keys.nr; i++) { wb->sorted.data[i].idx = i; @@ -533,9 +530,8 @@ static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 max_seq) ret = bch2_journal_keys_to_write_buffer(c, buf); if (!blocked && !ret) { - spin_lock(&j->lock); + guard(spinlock)(&j->lock); buf->need_flush_to_write_buffer = false; - spin_unlock(&j->lock); } mutex_unlock(&j->buf_lock); @@ -567,9 +563,8 @@ static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 max_seq, * On memory allocation failure, bch2_btree_write_buffer_flush_locked() * is not guaranteed to empty wb->inc: */ - mutex_lock(&wb->flushing.lock); - ret = bch2_btree_write_buffer_flush_locked(trans); - mutex_unlock(&wb->flushing.lock); + scoped_guard(mutex, &wb->flushing.lock) + ret = bch2_btree_write_buffer_flush_locked(trans); } while (!ret && (fetch_from_journal_err || (wb->inc.pin.seq && wb->inc.pin.seq <= max_seq) || @@ -582,9 +577,10 @@ static int bch2_btree_write_buffer_journal_flush(struct journal *j, struct journal_entry_pin *_pin, u64 seq) { struct bch_fs *c = container_of(j, struct bch_fs, journal); + CLASS(btree_trans, trans)(c); bool did_work = false; - return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq, &did_work)); + return btree_write_buffer_flush_seq(trans, seq, &did_work); } int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans) @@ -606,9 +602,9 @@ bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *c) if (bch2_journal_error(&c->journal)) return 
false; + CLASS(btree_trans, trans)(c); bool did_work = false; - bch2_trans_run(c, btree_write_buffer_flush_seq(trans, - journal_cur_seq(&c->journal), &did_work)); + btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal), &did_work); return did_work; } @@ -655,11 +651,10 @@ int bch2_btree_write_buffer_maybe_flush(struct btree_trans *trans, if (!bkey_and_val_eq(referring_k, bkey_i_to_s_c(last_flushed->k))) { if (trace_write_buffer_maybe_flush_enabled()) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, referring_k); trace_write_buffer_maybe_flush(trans, _RET_IP_, buf.buf); - printbuf_exit(&buf); } bch2_bkey_buf_reassemble(&tmp, c, referring_k); @@ -690,11 +685,12 @@ static void bch2_btree_write_buffer_flush_work(struct work_struct *work) struct btree_write_buffer *wb = &c->btree_write_buffer; int ret; - mutex_lock(&wb->flushing.lock); - do { - ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans)); - } while (!ret && bch2_btree_write_buffer_should_flush(c)); - mutex_unlock(&wb->flushing.lock); + scoped_guard(mutex, &wb->flushing.lock) { + CLASS(btree_trans, trans)(c); + do { + ret = bch2_btree_write_buffer_flush_locked(trans); + } while (!ret && bch2_btree_write_buffer_should_flush(c)); + } enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_write_buffer); } diff --git a/libbcachefs/buckets.c b/libbcachefs/buckets.c index f25903c1..5aab527e 100644 --- a/libbcachefs/buckets.c +++ b/libbcachefs/buckets.c @@ -71,13 +71,8 @@ __bch2_fs_usage_read_short(struct bch_fs *c) struct bch_fs_usage_short bch2_fs_usage_read_short(struct bch_fs *c) { - struct bch_fs_usage_short ret; - - percpu_down_read(&c->mark_lock); - ret = __bch2_fs_usage_read_short(c); - percpu_up_read(&c->mark_lock); - - return ret; + guard(percpu_read)(&c->mark_lock); + return __bch2_fs_usage_read_short(c); } void bch2_dev_usage_to_text(struct printbuf *out, @@ -113,10 +108,10 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, bool *do_update) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; - struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev); + CLASS(bch2_dev_tryget, ca)(c, p.ptr.dev); if (!ca) { if (fsck_err_on(p.ptr.dev != BCH_SB_MEMBER_INVALID, trans, ptr_to_invalid_device, @@ -138,7 +133,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) *do_update = true; - goto out; + return 0; } enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry); @@ -158,7 +153,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, } else { /* this pointer will be dropped */ *do_update = true; - goto out; + return 0; } } @@ -208,7 +203,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, *do_update = true; if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen) - goto out; + return 0; if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type), trans, ptr_bucket_data_type_mismatch, @@ -224,14 +219,13 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, switch (g->data_type) { case BCH_DATA_sb: bch_err(c, "btree and superblock in the same bucket - cannot repair"); - ret = bch_err_throw(c, fsck_repair_unimplemented); - goto out; + return bch_err_throw(c, fsck_repair_unimplemented); case BCH_DATA_journal: ret = bch2_dev_journal_bucket_delete(ca, PTR_BUCKET_NR(ca, &p.ptr)); bch_err_msg(c, ret, "error deleting journal bucket %zu", PTR_BUCKET_NR(ca, &p.ptr)); if (ret) - goto out; + return ret; break; } @@ 
-265,10 +259,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, bch2_bkey_val_to_text(&buf, c, k), buf.buf))) *do_update = true; } -out: fsck_err: - bch2_dev_put(ca); - printbuf_exit(&buf); return ret; } @@ -281,7 +272,7 @@ int bch2_check_fix_ptrs(struct btree_trans *trans, const union bch_extent_entry *entry_c; struct extent_ptr_decoded p = { 0 }; bool do_update = false; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; /* We don't yet do btree key updates correctly for when we're RW */ @@ -290,14 +281,14 @@ int bch2_check_fix_ptrs(struct btree_trans *trans, bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) { ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update); if (ret) - goto err; + return ret; } if (do_update) { struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); ret = PTR_ERR_OR_ZERO(new); if (ret) - goto err; + return ret; scoped_guard(rcu) bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev)); @@ -387,7 +378,7 @@ found: BTREE_TRIGGER_norun); bch2_trans_iter_exit(trans, &iter); if (ret) - goto err; + return ret; if (level) bch2_btree_node_update_key_early(trans, btree, level - 1, k, new); @@ -396,7 +387,7 @@ found: jset_u64s(new->k.u64s)); ret = PTR_ERR_OR_ZERO(e); if (ret) - goto err; + return ret; journal_entry_set(e, BCH_JSET_ENTRY_btree_root, @@ -413,9 +404,8 @@ found: bkey_copy(&b->key, new); } } -err: - printbuf_exit(&buf); - return ret; + + return 0; } static int bucket_ref_update_err(struct btree_trans *trans, struct printbuf *buf, @@ -460,9 +450,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, { struct bch_fs *c = trans->c; size_t bucket_nr = PTR_BUCKET_NR(ca, ptr); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bool inserting = sectors > 0; - int ret = 0; BUG_ON(!sectors); @@ -474,9 +463,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, bch2_data_type_str(bucket_data_type ?: ptr_data_type), ptr->gen); - ret = bucket_ref_update_err(trans, &buf, k, inserting, - BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen); - goto out; + return bucket_ref_update_err(trans, &buf, k, inserting, + BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen); } if (unlikely(gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX)) { @@ -487,15 +475,12 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, bch2_data_type_str(bucket_data_type ?: ptr_data_type), ptr->gen); - ret = bucket_ref_update_err(trans, &buf, k, inserting, - BCH_FSCK_ERR_ptr_too_stale); - goto out; + return bucket_ref_update_err(trans, &buf, k, inserting, + BCH_FSCK_ERR_ptr_too_stale); } - if (b_gen != ptr->gen && ptr->cached) { - ret = 1; - goto out; - } + if (b_gen != ptr->gen && ptr->cached) + return 1; if (unlikely(b_gen != ptr->gen)) { bch2_log_msg_start(c, &buf); @@ -506,9 +491,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, bch2_data_type_str(bucket_data_type ?: ptr_data_type), ptr->gen); - ret = bucket_ref_update_err(trans, &buf, k, inserting, - BCH_FSCK_ERR_stale_dirty_ptr); - goto out; + return bucket_ref_update_err(trans, &buf, k, inserting, + BCH_FSCK_ERR_stale_dirty_ptr); } if (unlikely(bucket_data_type_mismatch(bucket_data_type, ptr_data_type))) { @@ -518,9 +502,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, bch2_data_type_str(bucket_data_type), bch2_data_type_str(ptr_data_type)); - ret = bucket_ref_update_err(trans, &buf, k, inserting, + return bucket_ref_update_err(trans, &buf, k, inserting, 
BCH_FSCK_ERR_ptr_bucket_data_type_mismatch); - goto out; } if (unlikely((u64) *bucket_sectors + sectors > U32_MAX)) { @@ -531,16 +514,13 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, bch2_data_type_str(bucket_data_type ?: ptr_data_type), *bucket_sectors, sectors); - ret = bucket_ref_update_err(trans, &buf, k, inserting, - BCH_FSCK_ERR_bucket_sector_count_overflow); sectors = -*bucket_sectors; - goto out; + return bucket_ref_update_err(trans, &buf, k, inserting, + BCH_FSCK_ERR_bucket_sector_count_overflow); } *bucket_sectors += sectors; -out: - printbuf_exit(&buf); - return ret; + return 0; } void bch2_trans_account_disk_usage_change(struct btree_trans *trans) @@ -550,7 +530,7 @@ void bch2_trans_account_disk_usage_change(struct btree_trans *trans) static int warned_disk_usage = 0; bool warn = false; - percpu_down_read(&c->mark_lock); + guard(percpu_read)(&c->mark_lock); struct bch_fs_usage_base *src = &trans->fs_usage_delta; s64 added = src->btree + src->data + src->reserved; @@ -578,11 +558,10 @@ void bch2_trans_account_disk_usage_change(struct btree_trans *trans) this_cpu_sub(*c->online_reserved, added); } - preempt_disable(); - struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage); - acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64)); - preempt_enable(); - percpu_up_read(&c->mark_lock); + scoped_guard(preempt) { + struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage); + acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64)); + } if (unlikely(warn) && !xchg(&warned_disk_usage, 1)) bch2_trans_inconsistent(trans, @@ -621,40 +600,34 @@ static int bch2_trigger_pointer(struct btree_trans *trans, { struct bch_fs *c = trans->c; bool insert = !(flags & BTREE_TRIGGER_overwrite); - struct printbuf buf = PRINTBUF; - int ret = 0; + CLASS(printbuf, buf)(); struct bkey_i_backpointer bp; bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bp); *sectors = insert ? 
bp.v.bucket_len : -(s64) bp.v.bucket_len; - struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev); + CLASS(bch2_dev_tryget, ca)(c, p.ptr.dev); if (unlikely(!ca)) { if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID) - ret = bch_err_throw(c, trigger_pointer); - goto err; + return bch_err_throw(c, trigger_pointer); + return 0; } struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr); if (!bucket_valid(ca, bucket.offset)) { if (insert) { bch2_dev_bucket_missing(ca, bucket.offset); - ret = bch_err_throw(c, trigger_pointer); + return bch_err_throw(c, trigger_pointer); } - goto err; + return 0; } if (flags & BTREE_TRIGGER_transactional) { struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0); - ret = PTR_ERR_OR_ZERO(a) ?: - __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v, insert); - if (ret) - goto err; - - ret = bch2_bucket_backpointer_mod(trans, k, &bp, insert); - if (ret) - goto err; + return PTR_ERR_OR_ZERO(a) ?: + __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v, insert) ?: + bch2_bucket_backpointer_mod(trans, k, &bp, insert); } if (flags & BTREE_TRIGGER_gc) { @@ -662,23 +635,22 @@ static int bch2_trigger_pointer(struct btree_trans *trans, if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s", p.ptr.dev, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - ret = bch_err_throw(c, trigger_pointer); - goto err; + return bch_err_throw(c, trigger_pointer); } bucket_lock(g); struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old; - ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &new, insert); + int ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &new, insert); alloc_to_bucket(g, new); bucket_unlock(g); - if (!ret) - ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags); + if (ret) + return ret; + + return bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags); } -err: - bch2_dev_put(ca); - printbuf_exit(&buf); - return ret; + + return 0; } static int bch2_trigger_stripe_ptr(struct btree_trans *trans, @@ -738,14 +710,13 @@ err: if (!m || !m->alive) { gc_stripe_unlock(m); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "pointer to nonexistent stripe %llu\n while marking ", (u64) p.ec.idx); bch2_bkey_val_to_text(&buf, c, k); __bch2_inconsistent_error(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); return bch_err_throw(c, trigger_stripe_pointer); } @@ -996,7 +967,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, return PTR_ERR(a); if (a->v.data_type && type && a->v.data_type != type) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n" "while marking %s\n", @@ -1012,7 +983,6 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, /* Always print, this is always fatal */ bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); if (!ret) ret = bch_err_throw(c, metadata_bucket_inconsistency); goto err; @@ -1034,7 +1004,6 @@ static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev * enum btree_iter_update_trigger_flags flags) { struct bch_fs *c = trans->c; - int ret = 0; struct bucket *g = gc_bucket(ca, b); if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s", @@ -1062,8 +1031,7 @@ static int bch2_mark_metadata_bucket(struct btree_trans *trans, 
struct bch_dev * g->dirty_sectors += sectors; struct bch_alloc_v4 new = bucket_m_to_alloc(*g); bucket_unlock(g); - ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags); - return ret; + return bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags); err_unlock: bucket_unlock(g); err: @@ -1125,10 +1093,10 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *c enum btree_iter_update_trigger_flags flags) { struct bch_fs *c = trans->c; + struct bch_sb_layout layout; - mutex_lock(&c->sb_lock); - struct bch_sb_layout layout = ca->disk_sb.sb->layout; - mutex_unlock(&c->sb_lock); + scoped_guard(mutex, &c->sb_lock) + layout = ca->disk_sb.sb->layout; u64 bucket = 0; unsigned i, bucket_sectors = 0; @@ -1173,8 +1141,8 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *c int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca, enum btree_iter_update_trigger_flags flags) { - int ret = bch2_trans_run(c, - __bch2_trans_mark_dev_sb(trans, ca, flags)); + CLASS(btree_trans, trans)(c); + int ret = __bch2_trans_mark_dev_sb(trans, ca, flags); bch_err_fn(c, ret); return ret; } @@ -1227,15 +1195,38 @@ bool bch2_is_superblock_bucket(struct bch_dev *ca, u64 b) #define SECTORS_CACHE 1024 +static int disk_reservation_recalc_sectors_available(struct bch_fs *c, + struct disk_reservation *res, + u64 sectors, enum bch_reservation_flags flags) +{ + guard(mutex)(&c->sectors_available_lock); + + percpu_u64_set(&c->pcpu->sectors_available, 0); + u64 sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free); + + if (sectors_available && (flags & BCH_DISK_RESERVATION_PARTIAL)) + sectors = min(sectors, sectors_available); + + if (sectors <= sectors_available || + (flags & BCH_DISK_RESERVATION_NOFAIL)) { + atomic64_set(&c->sectors_available, + max_t(s64, 0, sectors_available - sectors)); + this_cpu_add(*c->online_reserved, sectors); + res->sectors += sectors; + return 0; + } else { + atomic64_set(&c->sectors_available, sectors_available); + return bch_err_throw(c, ENOSPC_disk_reservation); + } +} + int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res, u64 sectors, enum bch_reservation_flags flags) { struct bch_fs_pcpu *pcpu; u64 old, get; - u64 sectors_available; - int ret; - percpu_down_read(&c->mark_lock); + guard(percpu_read)(&c->mark_lock); preempt_disable(); pcpu = this_cpu_ptr(c->pcpu); @@ -1246,9 +1237,10 @@ int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res, do { get = min((u64) sectors + SECTORS_CACHE, old); - if (get < sectors) { + if (unlikely(get < sectors)) { preempt_enable(); - goto recalculate; + return disk_reservation_recalc_sectors_available(c, + res, sectors, flags); } } while (!atomic64_try_cmpxchg(&c->sectors_available, &old, old - get)); @@ -1259,36 +1251,8 @@ out: pcpu->sectors_available -= sectors; this_cpu_add(*c->online_reserved, sectors); res->sectors += sectors; - preempt_enable(); - percpu_up_read(&c->mark_lock); return 0; - -recalculate: - mutex_lock(&c->sectors_available_lock); - - percpu_u64_set(&c->pcpu->sectors_available, 0); - sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free); - - if (sectors_available && (flags & BCH_DISK_RESERVATION_PARTIAL)) - sectors = min(sectors, sectors_available); - - if (sectors <= sectors_available || - (flags & BCH_DISK_RESERVATION_NOFAIL)) { - atomic64_set(&c->sectors_available, - max_t(s64, 0, sectors_available - sectors)); - this_cpu_add(*c->online_reserved, sectors); - res->sectors += sectors; - 
ret = 0; - } else { - atomic64_set(&c->sectors_available, sectors_available); - ret = bch_err_throw(c, ENOSPC_disk_reservation); - } - - mutex_unlock(&c->sectors_available_lock); - percpu_up_read(&c->mark_lock); - - return ret; } /* Startup/shutdown: */ diff --git a/libbcachefs/buckets_waiting_for_journal.c b/libbcachefs/buckets_waiting_for_journal.c index 832eff93..ca341586 100644 --- a/libbcachefs/buckets_waiting_for_journal.c +++ b/libbcachefs/buckets_waiting_for_journal.c @@ -25,25 +25,20 @@ static void bucket_table_init(struct buckets_waiting_for_journal_table *t, size_ u64 bch2_bucket_journal_seq_ready(struct buckets_waiting_for_journal *b, unsigned dev, u64 bucket) { - struct buckets_waiting_for_journal_table *t; u64 dev_bucket = (u64) dev << 56 | bucket; - u64 ret = 0; - mutex_lock(&b->lock); - t = b->t; + guard(mutex)(&b->lock); + + struct buckets_waiting_for_journal_table *t = b->t; for (unsigned i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) { struct bucket_hashed *h = bucket_hash(t, i, dev_bucket); - if (h->dev_bucket == dev_bucket) { - ret = h->journal_seq; - break; - } + if (h->dev_bucket == dev_bucket) + return h->journal_seq; } - mutex_unlock(&b->lock); - - return ret; + return 0; } static bool bucket_table_insert(struct buckets_waiting_for_journal_table *t, @@ -92,12 +87,11 @@ int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b, .journal_seq = journal_seq, }; size_t i, size, new_bits, nr_elements = 1, nr_rehashes = 0, nr_rehashes_this_size = 0; - int ret = 0; - mutex_lock(&b->lock); + guard(mutex)(&b->lock); if (likely(bucket_table_insert(b->t, &new, flushed_seq))) - goto out; + return 0; t = b->t; size = 1UL << t->bits; @@ -109,8 +103,7 @@ realloc: n = kvmalloc(sizeof(*n) + (sizeof(n->d[0]) << new_bits), GFP_KERNEL); if (!n) { struct bch_fs *c = container_of(b, struct bch_fs, buckets_waiting_for_journal); - ret = bch_err_throw(c, ENOMEM_buckets_waiting_for_journal_set); - goto out; + return bch_err_throw(c, ENOMEM_buckets_waiting_for_journal_set); } retry_rehash: @@ -143,10 +136,7 @@ retry_rehash: pr_debug("took %zu rehashes, table at %zu/%lu elements", nr_rehashes, nr_elements, 1UL << b->t->bits); -out: - mutex_unlock(&b->lock); - - return ret; + return 0; } void bch2_fs_buckets_waiting_for_journal_exit(struct bch_fs *c) diff --git a/libbcachefs/chardev.c b/libbcachefs/chardev.c index 5ea89aa2..467fc45e 100644 --- a/libbcachefs/chardev.c +++ b/libbcachefs/chardev.c @@ -52,6 +52,11 @@ static struct bch_dev *bch2_device_lookup(struct bch_fs *c, u64 dev, return ca; } +DEFINE_CLASS(bch2_device_lookup, struct bch_dev *, + bch2_dev_put(_T), + bch2_device_lookup(c, dev, flags), + struct bch_fs *c, u64 dev, unsigned flags); + #if 0 static long bch2_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg) { @@ -207,8 +212,6 @@ static long bch2_ioctl_disk_add(struct bch_fs *c, struct bch_ioctl_disk arg) static long bch2_ioctl_disk_remove(struct bch_fs *c, struct bch_ioctl_disk arg) { - struct bch_dev *ca; - if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -219,7 +222,7 @@ static long bch2_ioctl_disk_remove(struct bch_fs *c, struct bch_ioctl_disk arg) arg.pad) return -EINVAL; - ca = bch2_device_lookup(c, arg.dev, arg.flags); + struct bch_dev *ca = bch2_device_lookup(c, arg.dev, arg.flags); if (IS_ERR(ca)) return PTR_ERR(ca); @@ -249,9 +252,6 @@ static long bch2_ioctl_disk_online(struct bch_fs *c, struct bch_ioctl_disk arg) static long bch2_ioctl_disk_offline(struct bch_fs *c, struct bch_ioctl_disk arg) { - struct bch_dev *ca; - int ret; - if 
(!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -262,21 +262,16 @@ static long bch2_ioctl_disk_offline(struct bch_fs *c, struct bch_ioctl_disk arg) arg.pad) return -EINVAL; - ca = bch2_device_lookup(c, arg.dev, arg.flags); + CLASS(bch2_device_lookup, ca)(c, arg.dev, arg.flags); if (IS_ERR(ca)) return PTR_ERR(ca); - ret = bch2_dev_offline(c, ca, arg.flags); - bch2_dev_put(ca); - return ret; + return bch2_dev_offline(c, ca, arg.flags); } static long bch2_ioctl_disk_set_state(struct bch_fs *c, struct bch_ioctl_disk_set_state arg) { - struct bch_dev *ca; - int ret; - if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -288,15 +283,12 @@ static long bch2_ioctl_disk_set_state(struct bch_fs *c, arg.new_state >= BCH_MEMBER_STATE_NR) return -EINVAL; - ca = bch2_device_lookup(c, arg.dev, arg.flags); + CLASS(bch2_device_lookup, ca)(c, arg.dev, arg.flags); if (IS_ERR(ca)) return PTR_ERR(ca); - ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags); - if (ret) - bch_err(c, "Error setting device state: %s", bch2_err_str(ret)); - - bch2_dev_put(ca); + int ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags); + bch_err_msg(ca, ret, "setting device state"); return ret; } @@ -312,7 +304,7 @@ static int bch2_data_thread(void *arg) { struct bch_data_ctx *ctx = container_of(arg, struct bch_data_ctx, thr); - ctx->thr.ret = bch2_data_job(ctx->c, &ctx->stats, ctx->arg); + ctx->thr.ret = bch2_data_job(ctx->c, &ctx->stats, &ctx->arg); if (ctx->thr.ret == -BCH_ERR_device_offline) ctx->stats.ret = BCH_IOCTL_DATA_EVENT_RET_device_offline; else { @@ -349,14 +341,13 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf, }; if (ctx->arg.op == BCH_DATA_OP_scrub) { - struct bch_dev *ca = bch2_dev_tryget(c, ctx->arg.scrub.dev); + CLASS(bch2_dev_tryget_noerror, ca)(c, ctx->arg.scrub.dev); if (ca) { struct bch_dev_usage_full u; bch2_dev_usage_full_read_fast(ca, &u); for (unsigned i = BCH_DATA_btree; i < ARRAY_SIZE(u.d); i++) if (ctx->arg.scrub.data_types & BIT(i)) e.p.sectors_total += u.d[i].sectors; - bch2_dev_put(ca); } } else { e.p.sectors_total = bch2_fs_usage_read_short(c).used; @@ -418,9 +409,8 @@ static noinline_for_stack long bch2_ioctl_fs_usage(struct bch_fs *c, struct bch_ioctl_fs_usage __user *user_arg) { struct bch_ioctl_fs_usage arg = {}; - darray_char replicas = {}; + CLASS(darray_char, replicas)(); u32 replica_entries_bytes; - int ret = 0; if (!test_bit(BCH_FS_started, &c->flags)) return -EINVAL; @@ -428,11 +418,11 @@ static noinline_for_stack long bch2_ioctl_fs_usage(struct bch_fs *c, if (get_user(replica_entries_bytes, &user_arg->replica_entries_bytes)) return -EFAULT; - ret = bch2_fs_replicas_usage_read(c, &replicas) ?: + int ret = bch2_fs_replicas_usage_read(c, &replicas) ?: (replica_entries_bytes < replicas.nr ? 
-ERANGE : 0) ?: copy_to_user_errcode(&user_arg->replicas, replicas.data, replicas.nr); if (ret) - goto err; + return ret; struct bch_fs_usage_short u = bch2_fs_usage_read_short(c); arg.capacity = c->capacity; @@ -449,52 +439,41 @@ static noinline_for_stack long bch2_ioctl_fs_usage(struct bch_fs *c, &arg.persistent_reserved[i], 1); } - ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg)); -err: - darray_exit(&replicas); - return ret; + return copy_to_user_errcode(user_arg, &arg, sizeof(arg)); } static long bch2_ioctl_query_accounting(struct bch_fs *c, struct bch_ioctl_query_accounting __user *user_arg) { struct bch_ioctl_query_accounting arg; - darray_char accounting = {}; - int ret = 0; + CLASS(darray_char, accounting)(); if (!test_bit(BCH_FS_started, &c->flags)) return -EINVAL; - ret = copy_from_user_errcode(&arg, user_arg, sizeof(arg)) ?: + int ret = copy_from_user_errcode(&arg, user_arg, sizeof(arg)) ?: bch2_fs_accounting_read(c, &accounting, arg.accounting_types_mask) ?: (arg.accounting_u64s * sizeof(u64) < accounting.nr ? -ERANGE : 0) ?: copy_to_user_errcode(&user_arg->accounting, accounting.data, accounting.nr); if (ret) - goto err; + return ret; arg.capacity = c->capacity; arg.used = bch2_fs_usage_read_short(c).used; arg.online_reserved = percpu_u64_get(c->online_reserved); arg.accounting_u64s = accounting.nr / sizeof(u64); - ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg)); -err: - darray_exit(&accounting); - return ret; + return copy_to_user_errcode(user_arg, &arg, sizeof(arg)); } /* obsolete, didn't allow for new data types: */ static noinline_for_stack long bch2_ioctl_dev_usage(struct bch_fs *c, struct bch_ioctl_dev_usage __user *user_arg) { - struct bch_ioctl_dev_usage arg; - struct bch_dev_usage_full src; - struct bch_dev *ca; - unsigned i; - if (!test_bit(BCH_FS_started, &c->flags)) return -EINVAL; + struct bch_ioctl_dev_usage arg; if (copy_from_user(&arg, user_arg, sizeof(arg))) return -EFAULT; @@ -504,38 +483,32 @@ static noinline_for_stack long bch2_ioctl_dev_usage(struct bch_fs *c, arg.pad[2]) return -EINVAL; - ca = bch2_device_lookup(c, arg.dev, arg.flags); + CLASS(bch2_device_lookup, ca)(c, arg.dev, arg.flags); if (IS_ERR(ca)) return PTR_ERR(ca); - src = bch2_dev_usage_full_read(ca); + struct bch_dev_usage_full src = bch2_dev_usage_full_read(ca); arg.state = ca->mi.state; arg.bucket_size = ca->mi.bucket_size; arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket; - for (i = 0; i < ARRAY_SIZE(arg.d); i++) { + for (unsigned i = 0; i < ARRAY_SIZE(arg.d); i++) { arg.d[i].buckets = src.d[i].buckets; arg.d[i].sectors = src.d[i].sectors; arg.d[i].fragmented = src.d[i].fragmented; } - bch2_dev_put(ca); - return copy_to_user_errcode(user_arg, &arg, sizeof(arg)); } static long bch2_ioctl_dev_usage_v2(struct bch_fs *c, struct bch_ioctl_dev_usage_v2 __user *user_arg) { - struct bch_ioctl_dev_usage_v2 arg; - struct bch_dev_usage_full src; - struct bch_dev *ca; - int ret = 0; - if (!test_bit(BCH_FS_started, &c->flags)) return -EINVAL; + struct bch_ioctl_dev_usage_v2 arg; if (copy_from_user(&arg, user_arg, sizeof(arg))) return -EFAULT; @@ -545,20 +518,20 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c, arg.pad[2]) return -EINVAL; - ca = bch2_device_lookup(c, arg.dev, arg.flags); + CLASS(bch2_device_lookup, ca)(c, arg.dev, arg.flags); if (IS_ERR(ca)) return PTR_ERR(ca); - src = bch2_dev_usage_full_read(ca); + struct bch_dev_usage_full src = bch2_dev_usage_full_read(ca); arg.state = ca->mi.state; arg.bucket_size = ca->mi.bucket_size; arg.nr_data_types = 
min(arg.nr_data_types, BCH_DATA_NR); arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket; - ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg)); + int ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg)); if (ret) - goto err; + return ret; for (unsigned i = 0; i < arg.nr_data_types; i++) { struct bch_ioctl_dev_usage_type t = { @@ -569,11 +542,10 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c, ret = copy_to_user_errcode(&user_arg->d[i], &t, sizeof(t)); if (ret) - goto err; + return ret; } -err: - bch2_dev_put(ca); - return ret; + + return 0; } static long bch2_ioctl_read_super(struct bch_fs *c, @@ -590,13 +562,13 @@ static long bch2_ioctl_read_super(struct bch_fs *c, arg.pad) return -EINVAL; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); if (arg.flags & BCH_READ_DEV) { ca = bch2_device_lookup(c, arg.dev, arg.flags); ret = PTR_ERR_OR_ZERO(ca); if (ret) - goto err_unlock; + return ret; sb = ca->disk_sb.sb; } else { @@ -612,8 +584,6 @@ static long bch2_ioctl_read_super(struct bch_fs *c, vstruct_bytes(sb)); err: bch2_dev_put(ca); -err_unlock: - mutex_unlock(&c->sb_lock); return ret; } @@ -639,9 +609,6 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c, static long bch2_ioctl_disk_resize(struct bch_fs *c, struct bch_ioctl_disk_resize arg) { - struct bch_dev *ca; - int ret; - if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -649,22 +616,16 @@ static long bch2_ioctl_disk_resize(struct bch_fs *c, arg.pad) return -EINVAL; - ca = bch2_device_lookup(c, arg.dev, arg.flags); + CLASS(bch2_device_lookup, ca)(c, arg.dev, arg.flags); if (IS_ERR(ca)) return PTR_ERR(ca); - ret = bch2_dev_resize(c, ca, arg.nbuckets); - - bch2_dev_put(ca); - return ret; + return bch2_dev_resize(c, ca, arg.nbuckets); } static long bch2_ioctl_disk_resize_journal(struct bch_fs *c, struct bch_ioctl_disk_resize_journal arg) { - struct bch_dev *ca; - int ret; - if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -675,14 +636,11 @@ static long bch2_ioctl_disk_resize_journal(struct bch_fs *c, if (arg.nbuckets > U32_MAX) return -EINVAL; - ca = bch2_device_lookup(c, arg.dev, arg.flags); + CLASS(bch2_device_lookup, ca)(c, arg.dev, arg.flags); if (IS_ERR(ca)) return PTR_ERR(ca); - ret = bch2_set_nr_journal_buckets(c, ca, arg.nbuckets); - - bch2_dev_put(ca); - return ret; + return bch2_set_nr_journal_buckets(c, ca, arg.nbuckets); } #define BCH_IOCTL(_name, _argtype) \ diff --git a/libbcachefs/checksum.c b/libbcachefs/checksum.c index a6795e73..b1ec3899 100644 --- a/libbcachefs/checksum.c +++ b/libbcachefs/checksum.c @@ -361,7 +361,7 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio, extent_nonce(version, crc_old), bio); if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_printf(&buf, "checksum error in %s() (memory corruption or bug?)\n" " expected %0llx:%0llx got %0llx:%0llx (old type ", __func__, @@ -374,7 +374,6 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio, bch2_prt_csum_type(&buf, new_csum_type); prt_str(&buf, ")"); WARN_RATELIMIT(1, "%s", buf.buf); - printbuf_exit(&buf); return bch_err_throw(c, recompute_checksum); } @@ -438,23 +437,21 @@ const struct bch_sb_field_ops bch_sb_field_ops_crypt = { #ifdef __KERNEL__ static int __bch2_request_key(char *key_description, struct bch_key *key) { - struct key *keyring_key; - const struct user_key_payload *ukp; int ret; - keyring_key = request_key(&key_type_user, key_description, NULL); + struct key *keyring_key = request_key(&key_type_user, key_description, NULL); if 
(IS_ERR(keyring_key)) return PTR_ERR(keyring_key); - down_read(&keyring_key->sem); - ukp = dereference_key_locked(keyring_key); - if (ukp->datalen == sizeof(*key)) { - memcpy(key, ukp->data, ukp->datalen); - ret = 0; - } else { - ret = -EINVAL; + scoped_guard(rwsem_read, &keyring_key->sem) { + const struct user_key_payload *ukp = dereference_key_locked(keyring_key); + if (ukp->datalen == sizeof(*key)) { + memcpy(key, ukp->data, ukp->datalen); + ret = 0; + } else { + ret = -EINVAL; + } } - up_read(&keyring_key->sem); key_put(keyring_key); return ret; @@ -495,14 +492,13 @@ got_key: int bch2_request_key(struct bch_sb *sb, struct bch_key *key) { - struct printbuf key_description = PRINTBUF; + CLASS(printbuf, key_description)(); int ret; prt_printf(&key_description, "bcachefs:"); pr_uuid(&key_description, sb->user_uuid.b); ret = __bch2_request_key(key_description.buf, key); - printbuf_exit(&key_description); #ifndef __KERNEL__ if (ret) { @@ -524,13 +520,12 @@ int bch2_request_key(struct bch_sb *sb, struct bch_key *key) int bch2_revoke_key(struct bch_sb *sb) { key_serial_t key_id; - struct printbuf key_description = PRINTBUF; + CLASS(printbuf, key_description)(); prt_printf(&key_description, "bcachefs:"); pr_uuid(&key_description, sb->user_uuid.b); key_id = request_key("user", key_description.buf, NULL, KEY_SPEC_USER_KEYRING); - printbuf_exit(&key_description); if (key_id < 0) return errno; @@ -584,34 +579,28 @@ err: */ int bch2_disable_encryption(struct bch_fs *c) { - struct bch_sb_field_crypt *crypt; - struct bch_key key; - int ret = -EINVAL; - - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); - crypt = bch2_sb_field_get(c->disk_sb.sb, crypt); + struct bch_sb_field_crypt *crypt = bch2_sb_field_get(c->disk_sb.sb, crypt); if (!crypt) - goto out; + return -EINVAL; /* is key encrypted? */ ret = 0; if (bch2_key_is_encrypted(&crypt->key)) - goto out; + return 0; - ret = bch2_decrypt_sb_key(c, crypt, &key); + struct bch_key key; + int ret = bch2_decrypt_sb_key(c, crypt, &key); if (ret) - goto out; + return ret; crypt->key.magic = cpu_to_le64(BCH_KEY_MAGIC); crypt->key.key = key; SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0); bch2_write_super(c); -out: - mutex_unlock(&c->sb_lock); - - return ret; + return 0; } /* @@ -625,7 +614,7 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed) struct bch_sb_field_crypt *crypt; int ret = -EINVAL; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); /* Do we already have an encryption key? 
*/ if (bch2_sb_field_get(c->disk_sb.sb, crypt)) @@ -669,7 +658,6 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed) SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1); bch2_write_super(c); err: - mutex_unlock(&c->sb_lock); memzero_explicit(&user_key, sizeof(user_key)); memzero_explicit(&key, sizeof(key)); return ret; diff --git a/libbcachefs/clock.c b/libbcachefs/clock.c index 8e9264b5..5185794f 100644 --- a/libbcachefs/clock.c +++ b/libbcachefs/clock.c @@ -21,7 +21,7 @@ static const struct min_heap_callbacks callbacks = { void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer) { - spin_lock(&clock->timer_lock); + guard(spinlock)(&clock->timer_lock); if (time_after_eq64((u64) atomic64_read(&clock->now), timer->expire)) { spin_unlock(&clock->timer_lock); @@ -31,24 +31,20 @@ void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer) for (size_t i = 0; i < clock->timers.nr; i++) if (clock->timers.data[i] == timer) - goto out; + return; BUG_ON(!min_heap_push(&clock->timers, &timer, &callbacks, NULL)); -out: - spin_unlock(&clock->timer_lock); } void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer) { - spin_lock(&clock->timer_lock); + guard(spinlock)(&clock->timer_lock); for (size_t i = 0; i < clock->timers.nr; i++) if (clock->timers.data[i] == timer) { min_heap_del(&clock->timers, i, &callbacks, NULL); - break; + return; } - - spin_unlock(&clock->timer_lock); } struct io_clock_wait { @@ -133,28 +129,27 @@ void __bch2_increment_clock(struct io_clock *clock, u64 sectors) struct io_timer *timer; u64 now = atomic64_add_return(sectors, &clock->now); - spin_lock(&clock->timer_lock); + guard(spinlock)(&clock->timer_lock); + while ((timer = get_expired_timer(clock, now))) timer->fn(timer); - spin_unlock(&clock->timer_lock); } void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock) { - out->atomic++; - spin_lock(&clock->timer_lock); u64 now = atomic64_read(&clock->now); printbuf_tabstop_push(out, 40); prt_printf(out, "current time:\t%llu\n", now); + guard(printbuf_atomic)(out); + guard(spinlock)(&clock->timer_lock); + for (unsigned i = 0; i < clock->timers.nr; i++) prt_printf(out, "%ps %ps:\t%llu\n", clock->timers.data[i]->fn, clock->timers.data[i]->fn2, clock->timers.data[i]->expire); - spin_unlock(&clock->timer_lock); - --out->atomic; } void bch2_io_clock_exit(struct io_clock *clock) diff --git a/libbcachefs/compress.c b/libbcachefs/compress.c index 5f74de92..aeb9b9bd 100644 --- a/libbcachefs/compress.c +++ b/libbcachefs/compress.c @@ -579,23 +579,17 @@ static int __bch2_check_set_has_compressed_data(struct bch_fs *c, u64 f) if ((c->sb.features & f) == f) return 0; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); - if ((c->sb.features & f) == f) { - mutex_unlock(&c->sb_lock); + if ((c->sb.features & f) == f) return 0; - } ret = __bch2_fs_compress_init(c, c->sb.features|f); - if (ret) { - mutex_unlock(&c->sb_lock); + if (ret) return ret; - } c->disk_sb.sb->features[0] |= cpu_to_le64(f); bch2_write_super(c); - mutex_unlock(&c->sb_lock); - return 0; } diff --git a/libbcachefs/data_update.c b/libbcachefs/data_update.c index 954d5323..ccedc93f 100644 --- a/libbcachefs/data_update.c +++ b/libbcachefs/data_update.c @@ -115,7 +115,7 @@ static void trace_io_move_finish2(struct data_update *u, struct bkey_i *insert) { struct bch_fs *c = u->op.c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_newline(&buf); @@ -131,7 +131,6 @@ static void trace_io_move_finish2(struct data_update *u, prt_newline(&buf); trace_io_move_finish(c, 
buf.buf); - printbuf_exit(&buf); } noinline_for_stack @@ -143,7 +142,7 @@ static void trace_io_move_fail2(struct data_update *m, { struct bch_fs *c = m->op.c; struct bkey_s_c old = bkey_i_to_s_c(m->k.k); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); unsigned rewrites_found = 0; if (!trace_io_move_fail_enabled()) @@ -187,7 +186,6 @@ static void trace_io_move_fail2(struct data_update *m, } trace_io_move_fail(c, buf.buf); - printbuf_exit(&buf); } noinline_for_stack @@ -196,7 +194,7 @@ static void trace_data_update2(struct data_update *m, struct bkey_i *insert) { struct bch_fs *c = m->op.c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "\nold: "); bch2_bkey_val_to_text(&buf, c, old); @@ -206,7 +204,6 @@ static void trace_data_update2(struct data_update *m, bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); trace_data_update(c, buf.buf); - printbuf_exit(&buf); } noinline_for_stack @@ -215,7 +212,7 @@ static void trace_io_move_created_rebalance2(struct data_update *m, struct bkey_i *insert) { struct bch_fs *c = m->op.c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_data_update_opts_to_text(&buf, c, &m->op.opts, &m->data_opts); @@ -227,7 +224,6 @@ static void trace_io_move_created_rebalance2(struct data_update *m, bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); trace_io_move_created_rebalance(c, buf.buf); - printbuf_exit(&buf); this_cpu_inc(c->counters[BCH_COUNTER_io_move_created_rebalance]); } @@ -238,7 +234,7 @@ static int data_update_invalid_bkey(struct data_update *m, struct bkey_i *insert) { struct bch_fs *c = m->op.c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_str(&buf, "about to insert invalid key in data update path"); @@ -254,7 +250,6 @@ static int data_update_invalid_bkey(struct data_update *m, bch2_fs_emergency_read_only2(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); return bch_err_throw(c, invalid_bkey); } @@ -499,7 +494,8 @@ out: int bch2_data_update_index_update(struct bch_write_op *op) { - return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op)); + CLASS(btree_trans, trans)(op->c); + return __bch2_data_update_index_update(trans, op); } void bch2_data_update_read_done(struct data_update *m) @@ -784,8 +780,8 @@ static int can_write_extent(struct bch_fs *c, struct data_update *m) __clear_bit(*i, devs.d); CLASS(printbuf, buf)(); - buf.atomic++; + guard(printbuf_atomic)(&buf); guard(rcu)(); unsigned nr_replicas = 0, i; diff --git a/libbcachefs/debug.c b/libbcachefs/debug.c index 07c2a0f7..97d7655a 100644 --- a/libbcachefs/debug.c +++ b/libbcachefs/debug.c @@ -141,7 +141,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b) return; bch2_btree_node_io_lock(b); - mutex_lock(&c->verify_lock); + guard(mutex)(&c->verify_lock); if (!c->verify_ondisk) { c->verify_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL); @@ -172,14 +172,11 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b) failed |= bch2_btree_verify_replica(c, b, p); if (failed) { - struct printbuf buf = PRINTBUF; - + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); bch2_fs_fatal_error(c, ": btree node verify failed for: %s\n", buf.buf); - printbuf_exit(&buf); } out: - mutex_unlock(&c->verify_lock); bch2_btree_node_io_unlock(b); } @@ -367,17 +364,17 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf, i->size = size; i->ret = 0; + CLASS(btree_trans, trans)(i->c); return bch2_debugfs_flush_buf(i) ?: - 
bch2_trans_run(i->c, - for_each_btree_key(trans, iter, i->id, i->from, - BTREE_ITER_prefetch| - BTREE_ITER_all_snapshots, k, ({ - bch2_bkey_val_to_text(&i->buf, i->c, k); - prt_newline(&i->buf); - bch2_trans_unlock(trans); - i->from = bpos_successor(iter.pos); - bch2_debugfs_flush_buf(i); - }))) ?: + for_each_btree_key(trans, iter, i->id, i->from, + BTREE_ITER_prefetch| + BTREE_ITER_all_snapshots, k, ({ + bch2_bkey_val_to_text(&i->buf, i->c, k); + prt_newline(&i->buf); + bch2_trans_unlock(trans); + i->from = bpos_successor(iter.pos); + bch2_debugfs_flush_buf(i); + })) ?: i->ret; } @@ -404,15 +401,15 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf, if (bpos_eq(SPOS_MAX, i->from)) return i->ret; - return bch2_trans_run(i->c, - for_each_btree_node(trans, iter, i->id, i->from, 0, b, ({ - bch2_btree_node_to_text(&i->buf, i->c, b); - i->from = !bpos_eq(SPOS_MAX, b->key.k.p) - ? bpos_successor(b->key.k.p) - : b->key.k.p; + CLASS(btree_trans, trans)(i->c); + return for_each_btree_node(trans, iter, i->id, i->from, 0, b, ({ + bch2_btree_node_to_text(&i->buf, i->c, b); + i->from = !bpos_eq(SPOS_MAX, b->key.k.p) + ? bpos_successor(b->key.k.p) + : b->key.k.p; - drop_locks_do(trans, bch2_debugfs_flush_buf(i)); - }))) ?: i->ret; + drop_locks_do(trans, bch2_debugfs_flush_buf(i)); + })) ?: i->ret; } static const struct file_operations btree_format_debug_ops = { @@ -431,27 +428,27 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf, i->size = size; i->ret = 0; + CLASS(btree_trans, trans)(i->c); return bch2_debugfs_flush_buf(i) ?: - bch2_trans_run(i->c, - for_each_btree_key(trans, iter, i->id, i->from, - BTREE_ITER_prefetch| - BTREE_ITER_all_snapshots, k, ({ - struct btree_path_level *l = - &btree_iter_path(trans, &iter)->l[0]; - struct bkey_packed *_k = - bch2_btree_node_iter_peek(&l->iter, l->b); - - if (bpos_gt(l->b->key.k.p, i->prev_node)) { - bch2_btree_node_to_text(&i->buf, i->c, l->b); - i->prev_node = l->b->key.k.p; - } - - bch2_bfloat_to_text(&i->buf, l->b, _k); - bch2_trans_unlock(trans); - i->from = bpos_successor(iter.pos); - bch2_debugfs_flush_buf(i); - }))) ?: - i->ret; + for_each_btree_key(trans, iter, i->id, i->from, + BTREE_ITER_prefetch| + BTREE_ITER_all_snapshots, k, ({ + struct btree_path_level *l = + &btree_iter_path(trans, &iter)->l[0]; + struct bkey_packed *_k = + bch2_btree_node_iter_peek(&l->iter, l->b); + + if (bpos_gt(l->b->key.k.p, i->prev_node)) { + bch2_btree_node_to_text(&i->buf, i->c, l->b); + i->prev_node = l->b->key.k.p; + } + + bch2_bfloat_to_text(&i->buf, l->b, _k); + bch2_trans_unlock(trans); + i->from = bpos_successor(iter.pos); + bch2_debugfs_flush_buf(i); + })) ?: + i->ret; } static const struct file_operations bfloat_failed_debug_ops = { @@ -512,8 +509,8 @@ static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf, if (ret) return ret; - i->buf.atomic++; scoped_guard(rcu) { + guard(printbuf_atomic)(&i->buf); struct bucket_table *tbl = rht_dereference_rcu(c->btree_cache.table.tbl, &c->btree_cache.table); @@ -528,7 +525,6 @@ static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf, done = true; } } - --i->buf.atomic; } while (!done); if (i->buf.allocation_failure) @@ -771,7 +767,7 @@ static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf, prt_printf(&i->buf, "%s:\n", bch2_btree_transaction_fns[i->iter]); printbuf_indent_add(&i->buf, 2); - mutex_lock(&s->lock); + guard(mutex)(&s->lock); prt_printf(&i->buf, "Max mem used: %u\n", s->max_mem); #ifdef 
CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE @@ -802,8 +798,6 @@ static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf, printbuf_indent_sub(&i->buf, 2); } - mutex_unlock(&s->lock); - printbuf_indent_sub(&i->buf, 2); prt_newline(&i->buf); i->iter++; diff --git a/libbcachefs/dirent.c b/libbcachefs/dirent.c index ccbb0127..dd60c475 100644 --- a/libbcachefs/dirent.c +++ b/libbcachefs/dirent.c @@ -13,6 +13,7 @@ #include <linux/dcache.h> +#if IS_ENABLED(CONFIG_UNICODE) int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info, const struct qstr *str, struct qstr *out_cf) { @@ -34,6 +35,7 @@ int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info, *out_cf = (struct qstr) QSTR_INIT(buf, ret); return 0; } +#endif static unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d) { @@ -256,9 +258,11 @@ int bch2_dirent_init_name(struct bch_fs *c, if (ret) return ret; +#if IS_ENABLED(CONFIG_UNICODE) memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len); char *cf_out = &dirent->v.d_cf_name_block.d_names[name->len]; + void *val_end = bkey_val_end(bkey_i_to_s(&dirent->k_i)); if (cf_name) { cf_len = cf_name->len; @@ -266,21 +270,20 @@ int bch2_dirent_init_name(struct bch_fs *c, memcpy(cf_out, cf_name->name, cf_name->len); } else { cf_len = utf8_casefold(hash_info->cf_encoding, name, - cf_out, - bkey_val_end(bkey_i_to_s(&dirent->k_i)) - (void *) cf_out); + cf_out, val_end - (void *) cf_out); if (cf_len <= 0) return cf_len; } - memset(&dirent->v.d_cf_name_block.d_names[name->len + cf_len], 0, - bkey_val_bytes(&dirent->k) - - offsetof(struct bch_dirent, d_cf_name_block.d_names) - - name->len + cf_len); + void *name_end = &dirent->v.d_cf_name_block.d_names[name->len + cf_len]; + BUG_ON(name_end > val_end); + memset(name_end, 0, val_end - name_end); dirent->v.d_cf_name_block.d_name_len = cpu_to_le16(name->len); dirent->v.d_cf_name_block.d_cf_name_len = cpu_to_le16(cf_len); EBUG_ON(bch2_dirent_get_casefold_name(dirent_i_to_s_c(dirent)).len != cf_len); +#endif } unsigned u64s = dirent_val_u64s(name->len, cf_len); @@ -617,13 +620,12 @@ u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir, const struct bch_hash_info *hash_info, const struct qstr *name, subvol_inum *inum) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct btree_iter iter = {}; int ret = lockrestart_do(trans, bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, name, inum, 0)); bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); return ret; } @@ -683,8 +685,8 @@ int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct bkey_buf sk; bch2_bkey_buf_init(&sk); - int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_dirents, + CLASS(btree_trans, trans)(c); + int ret = for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_dirents, POS(inum.inum, ctx->pos), POS(inum.inum, U64_MAX), inum.subvol, 0, k, ({ @@ -705,7 +707,7 @@ int bch2_readdir(struct bch_fs *c, subvol_inum inum, continue; ret2 ?: (bch2_trans_unlock(trans), bch2_dir_emit(ctx, dirent, target)); - }))); + })); bch2_bkey_buf_exit(&sk, c); diff --git a/libbcachefs/dirent.h b/libbcachefs/dirent.h index 1e17199c..373d382b 100644 --- a/libbcachefs/dirent.h +++ b/libbcachefs/dirent.h @@ -23,6 +23,7 @@ struct bch_fs; struct bch_hash_info; struct bch_inode_info; +#if IS_ENABLED(CONFIG_UNICODE) int bch2_casefold(struct btree_trans *, const struct bch_hash_info *, const struct qstr *, struct qstr *); @@ -37,6 +38,14 @@ static inline int 
bch2_maybe_casefold(struct btree_trans *trans, return bch2_casefold(trans, info, str, out_cf); } } +#else +static inline int bch2_maybe_casefold(struct btree_trans *trans, + const struct bch_hash_info *info, + const struct qstr *str, struct qstr *out_cf) +{ + return bch_err_throw(trans->c, no_casefolding_without_utf8); +} +#endif struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent); diff --git a/libbcachefs/disk_accounting.c b/libbcachefs/disk_accounting.c index 2591b4f4..219e3773 100644 --- a/libbcachefs/disk_accounting.c +++ b/libbcachefs/disk_accounting.c @@ -380,11 +380,10 @@ static int __bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accoun accounting_pos_cmp, NULL); if (trace_accounting_mem_insert_enabled()) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_accounting_to_text(&buf, c, a.s_c); trace_accounting_mem_insert(c, buf.buf); - printbuf_exit(&buf); } return 0; err: @@ -404,9 +403,9 @@ int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a, return bch_err_throw(c, btree_insert_need_mark_replicas); percpu_up_read(&c->mark_lock); - percpu_down_write(&c->mark_lock); - int ret = __bch2_accounting_mem_insert(c, a); - percpu_up_write(&c->mark_lock); + int ret; + scoped_guard(percpu_write, &c->mark_lock) + ret = __bch2_accounting_mem_insert(c, a); percpu_down_read(&c->mark_lock); return ret; } @@ -438,7 +437,7 @@ void bch2_accounting_mem_gc(struct bch_fs *c) { struct bch_accounting_mem *acc = &c->accounting; - percpu_down_write(&c->mark_lock); + guard(percpu_write)(&c->mark_lock); struct accounting_mem_entry *dst = acc->k.data; darray_for_each(acc->k, src) { @@ -453,7 +452,6 @@ void bch2_accounting_mem_gc(struct bch_fs *c) acc->k.nr = dst - acc->k.data; eytzinger0_sort(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), accounting_pos_cmp, NULL); - percpu_up_write(&c->mark_lock); } /* @@ -471,7 +469,7 @@ int bch2_fs_replicas_usage_read(struct bch_fs *c, darray_char *usage) darray_init(usage); - percpu_down_read(&c->mark_lock); + guard(percpu_read)(&c->mark_lock); darray_for_each(acc->k, i) { union { u8 bytes[struct_size_t(struct bch_replicas_usage, r.devs, @@ -494,7 +492,6 @@ int bch2_fs_replicas_usage_read(struct bch_fs *c, darray_char *usage) memcpy(&darray_top(*usage), &u.r, replicas_usage_bytes(&u.r)); usage->nr += replicas_usage_bytes(&u.r); } - percpu_up_read(&c->mark_lock); if (ret) darray_exit(usage); @@ -509,7 +506,7 @@ int bch2_fs_accounting_read(struct bch_fs *c, darray_char *out_buf, unsigned acc darray_init(out_buf); - percpu_down_read(&c->mark_lock); + guard(percpu_read)(&c->mark_lock); darray_for_each(acc->k, i) { struct disk_accounting_pos a_p; bpos_to_disk_accounting_pos(&a_p, i->pos); @@ -533,8 +530,6 @@ int bch2_fs_accounting_read(struct bch_fs *c, darray_char *out_buf, unsigned acc out_buf->nr += bkey_bytes(&a_out->k); } - percpu_up_read(&c->mark_lock); - if (ret) darray_exit(out_buf); return ret; @@ -553,7 +548,7 @@ int bch2_gc_accounting_start(struct bch_fs *c) struct bch_accounting_mem *acc = &c->accounting; int ret = 0; - percpu_down_write(&c->mark_lock); + guard(percpu_write)(&c->mark_lock); darray_for_each(acc->k, e) { e->v[1] = __alloc_percpu_gfp(e->nr_counters * sizeof(u64), sizeof(u64), GFP_KERNEL); @@ -565,20 +560,18 @@ int bch2_gc_accounting_start(struct bch_fs *c) } acc->gc_running = !ret; - percpu_up_write(&c->mark_lock); - return ret; } int bch2_gc_accounting_done(struct bch_fs *c) { struct bch_accounting_mem *acc = &c->accounting; - struct btree_trans *trans = bch2_trans_get(c); - struct printbuf 
buf = PRINTBUF; + CLASS(btree_trans, trans)(c); + CLASS(printbuf, buf)(); struct bpos pos = POS_MIN; int ret = 0; - percpu_down_write(&c->mark_lock); + guard(percpu_write)(&c->mark_lock); while (1) { unsigned idx = eytzinger0_find_ge(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), accounting_pos_cmp, &pos); @@ -638,20 +631,16 @@ int bch2_gc_accounting_done(struct bch_fs *c) bkey_i_to_s_c_accounting(&k_i.k), BCH_ACCOUNTING_normal, true); - preempt_disable(); + guard(preempt)(); struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage); struct bch_fs_usage_base *src = &trans->fs_usage_delta; acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64)); - preempt_enable(); } } } } err: fsck_err: - percpu_up_write(&c->mark_lock); - printbuf_exit(&buf); - bch2_trans_put(trans); bch_err_fn(c, ret); return ret; } @@ -663,11 +652,9 @@ static int accounting_read_key(struct btree_trans *trans, struct bkey_s_c k) if (k.k->type != KEY_TYPE_accounting) return 0; - percpu_down_read(&c->mark_lock); - int ret = bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k), - BCH_ACCOUNTING_read, false); - percpu_up_read(&c->mark_lock); - return ret; + guard(percpu_read)(&c->mark_lock); + return bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k), + BCH_ACCOUNTING_read, false); } static int bch2_disk_accounting_validate_late(struct btree_trans *trans, @@ -675,7 +662,7 @@ static int bch2_disk_accounting_validate_late(struct btree_trans *trans, u64 *v, unsigned nr) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0, invalid_dev = -1; switch (acc->type) { @@ -724,7 +711,6 @@ static int bch2_disk_accounting_validate_late(struct btree_trans *trans, } fsck_err: - printbuf_exit(&buf); return ret; invalid_device: if (fsck_err(trans, accounting_to_invalid_device, @@ -752,8 +738,8 @@ invalid_device: int bch2_accounting_read(struct bch_fs *c) { struct bch_accounting_mem *acc = &c->accounting; - struct btree_trans *trans = bch2_trans_get(c); - struct printbuf buf = PRINTBUF; + CLASS(btree_trans, trans)(c); + CLASS(printbuf, buf)(); /* * We might run more than once if we rewind to start topology repair or @@ -762,13 +748,13 @@ int bch2_accounting_read(struct bch_fs *c) * * Instead, zero out any accounting we have: */ - percpu_down_write(&c->mark_lock); - darray_for_each(acc->k, e) - percpu_memset(e->v[0], 0, sizeof(u64) * e->nr_counters); - for_each_member_device(c, ca) - percpu_memset(ca->usage, 0, sizeof(*ca->usage)); - percpu_memset(c->usage, 0, sizeof(*c->usage)); - percpu_up_write(&c->mark_lock); + scoped_guard(percpu_write, &c->mark_lock) { + darray_for_each(acc->k, e) + percpu_memset(e->v[0], 0, sizeof(u64) * e->nr_counters); + for_each_member_device(c, ca) + percpu_memset(ca->usage, 0, sizeof(*ca->usage)); + percpu_memset(c->usage, 0, sizeof(*c->usage)); + } struct btree_iter iter; bch2_trans_iter_init(trans, &iter, BTREE_ID_accounting, POS_MIN, @@ -799,7 +785,7 @@ int bch2_accounting_read(struct bch_fs *c) accounting_read_key(trans, k); })); if (ret) - goto err; + return ret; struct journal_keys *keys = &c->journal_keys; struct journal_key *dst = keys->data; @@ -838,14 +824,14 @@ int bch2_accounting_read(struct bch_fs *c) ret = accounting_read_key(trans, k); if (ret) - goto err; + return ret; } *dst++ = *i; } keys->gap = keys->nr = dst - keys->data; - percpu_down_write(&c->mark_lock); + guard(percpu_write)(&c->mark_lock); darray_for_each_reverse(acc->k, i) { struct disk_accounting_pos acc_k; @@ -877,60 +863,55 @@ int bch2_accounting_read(struct 
bch_fs *c) } if (ret) - goto fsck_err; + return ret; } eytzinger0_sort(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), accounting_pos_cmp, NULL); - preempt_disable(); - struct bch_fs_usage_base *usage = this_cpu_ptr(c->usage); + scoped_guard(preempt) { + struct bch_fs_usage_base *usage = this_cpu_ptr(c->usage); - for (unsigned i = 0; i < acc->k.nr; i++) { - struct disk_accounting_pos k; - bpos_to_disk_accounting_pos(&k, acc->k.data[i].pos); + for (unsigned i = 0; i < acc->k.nr; i++) { + struct disk_accounting_pos k; + bpos_to_disk_accounting_pos(&k, acc->k.data[i].pos); - u64 v[BCH_ACCOUNTING_MAX_COUNTERS]; - bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false); + u64 v[BCH_ACCOUNTING_MAX_COUNTERS]; + bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false); - switch (k.type) { - case BCH_DISK_ACCOUNTING_persistent_reserved: - usage->reserved += v[0] * k.persistent_reserved.nr_replicas; - break; - case BCH_DISK_ACCOUNTING_replicas: - fs_usage_data_type_to_base(usage, k.replicas.data_type, v[0]); - break; - case BCH_DISK_ACCOUNTING_dev_data_type: { - guard(rcu)(); - struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.dev_data_type.dev); - if (ca) { - struct bch_dev_usage_type __percpu *d = &ca->usage->d[k.dev_data_type.data_type]; - percpu_u64_set(&d->buckets, v[0]); - percpu_u64_set(&d->sectors, v[1]); - percpu_u64_set(&d->fragmented, v[2]); - - if (k.dev_data_type.data_type == BCH_DATA_sb || - k.dev_data_type.data_type == BCH_DATA_journal) - usage->hidden += v[0] * ca->mi.bucket_size; + switch (k.type) { + case BCH_DISK_ACCOUNTING_persistent_reserved: + usage->reserved += v[0] * k.persistent_reserved.nr_replicas; + break; + case BCH_DISK_ACCOUNTING_replicas: + fs_usage_data_type_to_base(usage, k.replicas.data_type, v[0]); + break; + case BCH_DISK_ACCOUNTING_dev_data_type: { + guard(rcu)(); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.dev_data_type.dev); + if (ca) { + struct bch_dev_usage_type __percpu *d = &ca->usage->d[k.dev_data_type.data_type]; + percpu_u64_set(&d->buckets, v[0]); + percpu_u64_set(&d->sectors, v[1]); + percpu_u64_set(&d->fragmented, v[2]); + + if (k.dev_data_type.data_type == BCH_DATA_sb || + k.dev_data_type.data_type == BCH_DATA_journal) + usage->hidden += v[0] * ca->mi.bucket_size; + } + break; + } } - break; - } } } - preempt_enable(); -fsck_err: - percpu_up_write(&c->mark_lock); -err: - printbuf_exit(&buf); - bch2_trans_put(trans); - bch_err_fn(c, ret); + return ret; } int bch2_dev_usage_remove(struct bch_fs *c, unsigned dev) { - return bch2_trans_run(c, - bch2_btree_write_buffer_flush_sync(trans) ?: + CLASS(btree_trans, trans)(c); + return bch2_btree_write_buffer_flush_sync(trans) ?: for_each_btree_key_commit(trans, iter, BTREE_ID_accounting, POS_MIN, BTREE_ITER_all_snapshots, k, NULL, NULL, 0, ({ struct disk_accounting_pos acc; @@ -941,15 +922,16 @@ int bch2_dev_usage_remove(struct bch_fs *c, unsigned dev) ? 
bch2_btree_bit_mod_buffered(trans, BTREE_ID_accounting, k.k->p, 0) : 0; })) ?: - bch2_btree_write_buffer_flush_sync(trans)); + bch2_btree_write_buffer_flush_sync(trans); } int bch2_dev_usage_init(struct bch_dev *ca, bool gc) { struct bch_fs *c = ca->fs; + CLASS(btree_trans, trans)(c); u64 v[3] = { ca->mi.nbuckets - ca->mi.first_bucket, 0, 0 }; - int ret = bch2_trans_do(c, ({ + int ret = lockrestart_do(trans, ({ bch2_disk_accounting_mod2(trans, gc, v, dev_data_type, .dev = ca->dev_idx, @@ -965,78 +947,77 @@ void bch2_verify_accounting_clean(struct bch_fs *c) bool mismatch = false; struct bch_fs_usage_base base = {}, base_inmem = {}; - bch2_trans_run(c, - for_each_btree_key(trans, iter, - BTREE_ID_accounting, POS_MIN, - BTREE_ITER_all_snapshots, k, ({ - u64 v[BCH_ACCOUNTING_MAX_COUNTERS]; - struct bkey_s_c_accounting a = bkey_s_c_to_accounting(k); - unsigned nr = bch2_accounting_counters(k.k); + CLASS(btree_trans, trans)(c); + for_each_btree_key(trans, iter, + BTREE_ID_accounting, POS_MIN, + BTREE_ITER_all_snapshots, k, ({ + u64 v[BCH_ACCOUNTING_MAX_COUNTERS]; + struct bkey_s_c_accounting a = bkey_s_c_to_accounting(k); + unsigned nr = bch2_accounting_counters(k.k); - struct disk_accounting_pos acc_k; - bpos_to_disk_accounting_pos(&acc_k, k.k->p); + struct disk_accounting_pos acc_k; + bpos_to_disk_accounting_pos(&acc_k, k.k->p); - if (acc_k.type >= BCH_DISK_ACCOUNTING_TYPE_NR) - break; + if (acc_k.type >= BCH_DISK_ACCOUNTING_TYPE_NR) + break; - if (!bch2_accounting_is_mem(&acc_k)) { - struct disk_accounting_pos next; - memset(&next, 0, sizeof(next)); - next.type = acc_k.type + 1; - bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next)); - continue; - } + if (!bch2_accounting_is_mem(&acc_k)) { + struct disk_accounting_pos next; + memset(&next, 0, sizeof(next)); + next.type = acc_k.type + 1; + bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next)); + continue; + } - bch2_accounting_mem_read(c, k.k->p, v, nr); + bch2_accounting_mem_read(c, k.k->p, v, nr); - if (memcmp(a.v->d, v, nr * sizeof(u64))) { - struct printbuf buf = PRINTBUF; + if (memcmp(a.v->d, v, nr * sizeof(u64))) { + CLASS(printbuf, buf)(); - bch2_bkey_val_to_text(&buf, c, k); - prt_str(&buf, " !="); - for (unsigned j = 0; j < nr; j++) - prt_printf(&buf, " %llu", v[j]); + bch2_bkey_val_to_text(&buf, c, k); + prt_str(&buf, " !="); + for (unsigned j = 0; j < nr; j++) + prt_printf(&buf, " %llu", v[j]); - pr_err("%s", buf.buf); - printbuf_exit(&buf); - mismatch = true; - } + pr_err("%s", buf.buf); + mismatch = true; + } - switch (acc_k.type) { - case BCH_DISK_ACCOUNTING_persistent_reserved: - base.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0]; - break; - case BCH_DISK_ACCOUNTING_replicas: - fs_usage_data_type_to_base(&base, acc_k.replicas.data_type, a.v->d[0]); - break; - case BCH_DISK_ACCOUNTING_dev_data_type: - { - guard(rcu)(); /* scoped guard is a loop, and doesn't play nicely with continue */ - struct bch_dev *ca = bch2_dev_rcu_noerror(c, acc_k.dev_data_type.dev); - if (!ca) - continue; - - v[0] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].buckets); - v[1] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].sectors); - v[2] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].fragmented); - } + switch (acc_k.type) { + case BCH_DISK_ACCOUNTING_persistent_reserved: + base.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0]; + break; + case BCH_DISK_ACCOUNTING_replicas: + fs_usage_data_type_to_base(&base, acc_k.replicas.data_type, 
a.v->d[0]); + break; + case BCH_DISK_ACCOUNTING_dev_data_type: { + { + guard(rcu)(); /* scoped guard is a loop, and doesn't play nicely with continue */ + struct bch_dev *ca = bch2_dev_rcu_noerror(c, acc_k.dev_data_type.dev); + if (!ca) + continue; + + v[0] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].buckets); + v[1] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].sectors); + v[2] = percpu_u64_get(&ca->usage->d[acc_k.dev_data_type.data_type].fragmented); + } - if (memcmp(a.v->d, v, 3 * sizeof(u64))) { - struct printbuf buf = PRINTBUF; + if (memcmp(a.v->d, v, 3 * sizeof(u64))) { + CLASS(printbuf, buf)(); - bch2_bkey_val_to_text(&buf, c, k); - prt_str(&buf, " in mem"); - for (unsigned j = 0; j < nr; j++) - prt_printf(&buf, " %llu", v[j]); + bch2_bkey_val_to_text(&buf, c, k); + prt_str(&buf, " in mem"); + for (unsigned j = 0; j < nr; j++) + prt_printf(&buf, " %llu", v[j]); - pr_err("dev accounting mismatch: %s", buf.buf); - printbuf_exit(&buf); - mismatch = true; - } + pr_err("dev accounting mismatch: %s", buf.buf); + mismatch = true; } + } + } - 0; - }))); + 0; + })); acc_u64s_percpu(&base_inmem.hidden, &c->usage->hidden, sizeof(base_inmem) / sizeof(u64)); diff --git a/libbcachefs/disk_accounting.h b/libbcachefs/disk_accounting.h index d61abebf..43f4b21d 100644 --- a/libbcachefs/disk_accounting.h +++ b/libbcachefs/disk_accounting.h @@ -211,10 +211,8 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, static inline int bch2_accounting_mem_add(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc) { - percpu_down_read(&trans->c->mark_lock); - int ret = bch2_accounting_mem_mod_locked(trans, a, gc ? BCH_ACCOUNTING_gc : BCH_ACCOUNTING_normal, false); - percpu_up_read(&trans->c->mark_lock); - return ret; + guard(percpu_read)(&trans->c->mark_lock); + return bch2_accounting_mem_mod_locked(trans, a, gc ? 
BCH_ACCOUNTING_gc : BCH_ACCOUNTING_normal, false); } static inline void bch2_accounting_mem_read_counters(struct bch_accounting_mem *acc, @@ -236,13 +234,12 @@ static inline void bch2_accounting_mem_read_counters(struct bch_accounting_mem * static inline void bch2_accounting_mem_read(struct bch_fs *c, struct bpos p, u64 *v, unsigned nr) { - percpu_down_read(&c->mark_lock); + guard(percpu_read)(&c->mark_lock); struct bch_accounting_mem *acc = &c->accounting; unsigned idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), accounting_pos_cmp, &p); bch2_accounting_mem_read_counters(acc, idx, v, nr, false); - percpu_up_read(&c->mark_lock); } static inline struct bversion journal_pos_to_bversion(struct journal_res *res, unsigned offset) diff --git a/libbcachefs/disk_groups.c b/libbcachefs/disk_groups.c index cde842ac..293e4726 100644 --- a/libbcachefs/disk_groups.c +++ b/libbcachefs/disk_groups.c @@ -375,7 +375,7 @@ void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c) { bch2_printbuf_make_room(out, 4096); - out->atomic++; + guard(printbuf_atomic)(out); guard(rcu)(); struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups); @@ -396,16 +396,13 @@ void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c) next: prt_newline(out); } - - out->atomic--; } void bch2_disk_path_to_text(struct printbuf *out, struct bch_fs *c, unsigned v) { - out->atomic++; + guard(printbuf_atomic)(out); guard(rcu)(); - __bch2_disk_path_to_text(out, rcu_dereference(c->disk_groups), v), - --out->atomic; + __bch2_disk_path_to_text(out, rcu_dereference(c->disk_groups), v); } void bch2_disk_path_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsigned v) @@ -471,14 +468,9 @@ int __bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name) int bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name) { - int ret; - - mutex_lock(&c->sb_lock); - ret = __bch2_dev_group_set(c, ca, name) ?: + guard(mutex)(&c->sb_lock); + return __bch2_dev_group_set(c, ca, name) ?: bch2_write_super(c); - mutex_unlock(&c->sb_lock); - - return ret; } int bch2_opt_target_parse(struct bch_fs *c, const char *val, u64 *res, @@ -506,9 +498,8 @@ int bch2_opt_target_parse(struct bch_fs *c, const char *val, u64 *res, return 0; } - mutex_lock(&c->sb_lock); - g = bch2_disk_path_find(&c->disk_sb, val); - mutex_unlock(&c->sb_lock); + scoped_guard(mutex, &c->sb_lock) + g = bch2_disk_path_find(&c->disk_sb, val); if (g >= 0) { *res = group_to_target(g); @@ -527,7 +518,7 @@ void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v) prt_printf(out, "none"); return; case TARGET_DEV: { - out->atomic++; + guard(printbuf_atomic)(out); guard(rcu)(); struct bch_dev *ca = t.dev < c->sb.nr_devices ? rcu_dereference(c->devs[t.dev]) @@ -539,8 +530,6 @@ void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v) prt_printf(out, "offline device %u", t.dev); else prt_printf(out, "invalid device %u", t.dev); - - out->atomic--; return; } case TARGET_GROUP: diff --git a/libbcachefs/ec.c b/libbcachefs/ec.c index 687c3ba9..62dda821 100644 --- a/libbcachefs/ec.c +++ b/libbcachefs/ec.c @@ -197,8 +197,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans, bool parity = ptr_idx >= nr_data; enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe; s64 sectors = parity ? 
le16_to_cpu(s.v->sectors) : 0; - struct printbuf buf = PRINTBUF; - int ret = 0; + CLASS(printbuf, buf)(); struct bch_fs *c = trans->c; if (deleting) @@ -212,10 +211,8 @@ static int __mark_stripe_bucket(struct btree_trans *trans, bch2_data_type_str(a->data_type), a->dirty_sectors, a->stripe, s.k->p.offset, - (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { - ret = bch_err_throw(c, mark_stripe); - goto err; - } + (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) + return bch_err_throw(c, mark_stripe); if (bch2_trans_inconsistent_on(parity && bch2_bucket_sectors_total(*a), trans, "bucket %llu:%llu gen %u data type %s dirty_sectors %u cached_sectors %u: data already in parity bucket\n%s", @@ -223,30 +220,24 @@ static int __mark_stripe_bucket(struct btree_trans *trans, bch2_data_type_str(a->data_type), a->dirty_sectors, a->cached_sectors, - (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { - ret = bch_err_throw(c, mark_stripe); - goto err; - } + (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) + return bch_err_throw(c, mark_stripe); } else { if (bch2_trans_inconsistent_on(a->stripe != s.k->p.offset || a->stripe_redundancy != s.v->nr_redundant, trans, "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe (got %u)\n%s", bucket.inode, bucket.offset, a->gen, a->stripe, - (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { - ret = bch_err_throw(c, mark_stripe); - goto err; - } + (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) + return bch_err_throw(c, mark_stripe); if (bch2_trans_inconsistent_on(a->data_type != data_type, trans, "bucket %llu:%llu gen %u data type %s: wrong data type when stripe, should be %s\n%s", bucket.inode, bucket.offset, a->gen, bch2_data_type_str(a->data_type), bch2_data_type_str(data_type), - (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { - ret = bch_err_throw(c, mark_stripe); - goto err; - } + (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) + return bch_err_throw(c, mark_stripe); if (bch2_trans_inconsistent_on(parity && (a->dirty_sectors != -sectors || @@ -255,17 +246,15 @@ static int __mark_stripe_bucket(struct btree_trans *trans, bucket.inode, bucket.offset, a->gen, a->dirty_sectors, a->cached_sectors, - (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { - ret = bch_err_throw(c, mark_stripe); - goto err; - } + (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) + return bch_err_throw(c, mark_stripe); } if (sectors) { - ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type, - a->gen, a->data_type, &a->dirty_sectors); + int ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type, + a->gen, a->data_type, &a->dirty_sectors); if (ret) - goto err; + return ret; } if (!deleting) { @@ -277,9 +266,8 @@ static int __mark_stripe_bucket(struct btree_trans *trans, a->stripe_redundancy = 0; alloc_data_type_set(a, BCH_DATA_user); } -err: - printbuf_exit(&buf); - return ret; + + return 0; } static int mark_stripe_bucket(struct btree_trans *trans, @@ -289,14 +277,13 @@ static int mark_stripe_bucket(struct btree_trans *trans, { struct bch_fs *c = trans->c; const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx; - struct printbuf buf = PRINTBUF; - int ret = 0; + CLASS(printbuf, buf)(); - struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev); + CLASS(bch2_dev_tryget, ca)(c, ptr->dev); if (unlikely(!ca)) { if (ptr->dev != BCH_SB_MEMBER_INVALID && !(flags & BTREE_TRIGGER_overwrite)) - ret = bch_err_throw(c, mark_stripe); - goto err; + return bch_err_throw(c, mark_stripe); + return 0; } struct bpos bucket = PTR_BUCKET_POS(ca, 
ptr);

@@ -312,36 +299,33 @@ static int mark_stripe_bucket(struct btree_trans *trans,
 		struct bkey_i_alloc_v4 *a =
 			bch2_trans_start_alloc_update(trans, bucket, 0);
-		ret = PTR_ERR_OR_ZERO(a) ?:
+		int ret = PTR_ERR_OR_ZERO(a) ?:
 			__mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags) ?:
 			bch2_bucket_backpointer_mod(trans, s.s_c, &bp,
 					!(flags & BTREE_TRIGGER_overwrite));
 		if (ret)
-			goto err;
+			return ret;
 	}
 
 	if (flags & BTREE_TRIGGER_gc) {
 		struct bucket *g = gc_bucket(ca, bucket.offset);
 		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n%s",
 					    ptr->dev,
-					    (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
-			ret = bch_err_throw(c, mark_stripe);
-			goto err;
-		}
+					    (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf)))
+			return bch_err_throw(c, mark_stripe);
 
 		bucket_lock(g);
 		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
-		ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
+		int ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
 		alloc_to_bucket(g, new);
 		bucket_unlock(g);
 
 		if (!ret)
 			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
+		return ret;
 	}
-err:
-	bch2_dev_put(ca);
-	printbuf_exit(&buf);
-	return ret;
+
+	return 0;
 }
 
 static int mark_stripe_buckets(struct btree_trans *trans,
@@ -630,16 +613,15 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
 			struct bch_csum got = ec_block_checksum(buf, i, offset);
 
 			if (bch2_crc_cmp(want, got)) {
-				struct bch_dev *ca = bch2_dev_tryget(c, v->ptrs[i].dev);
+				CLASS(bch2_dev_tryget, ca)(c, v->ptrs[i].dev);
 				if (ca) {
-					struct printbuf err = PRINTBUF;
+					CLASS(printbuf, err)();
 
 					prt_str(&err, "stripe ");
 					bch2_csum_err_msg(&err, v->csum_type, want, got);
 					prt_printf(&err, " for %ps at %u of\n ", (void *) _RET_IP_, i);
 					bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
 					bch_err_ratelimited(ca, "%s", err.buf);
-					printbuf_exit(&err);
 
 					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
 				}
@@ -703,8 +685,8 @@ static void ec_block_endio(struct bio *bio)
 	struct closure *cl = bio->bi_private;
 	int rw = ec_bio->rw;
 	unsigned ref = rw == READ
-		? BCH_DEV_READ_REF_ec_block
-		: BCH_DEV_WRITE_REF_ec_block;
+		? (unsigned) BCH_DEV_READ_REF_ec_block
+		: (unsigned) BCH_DEV_WRITE_REF_ec_block;
 
 	bch2_account_io_completion(ca, bio_data_dir(bio),
 				   ec_bio->submit_time, !bio->bi_status);
@@ -741,8 +723,8 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
 		: BCH_DATA_parity;
 	int rw = op_is_write(opf);
 	unsigned ref = rw == READ
-		? BCH_DEV_READ_REF_ec_block
-		: BCH_DEV_WRITE_REF_ec_block;
+		? 
(unsigned) BCH_DEV_READ_REF_ec_block + : (unsigned) BCH_DEV_WRITE_REF_ec_block; struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw, ref); if (!ca) { @@ -832,7 +814,7 @@ int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio, struct bch_stripe *v; unsigned i, offset; const char *msg = NULL; - struct printbuf msgbuf = PRINTBUF; + CLASS(printbuf, msgbuf)(); int ret = 0; closure_init_stack(&cl); @@ -894,7 +876,6 @@ err: bch2_bkey_val_to_text(&msgbuf, c, orig_k); bch_err_ratelimited(c, "error doing reconstruct read: %s\n %s", msg, msgbuf.buf); - printbuf_exit(&msgbuf); ret = bch_err_throw(c, stripe_reconstruct); goto out; } @@ -936,31 +917,22 @@ static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx) static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx) { - bool ret = false; - - spin_lock(&c->ec_stripes_new_lock); - ret = __bch2_stripe_is_open(c, idx); - spin_unlock(&c->ec_stripes_new_lock); - - return ret; + guard(spinlock)(&c->ec_stripes_new_lock); + return __bch2_stripe_is_open(c, idx); } static bool bch2_try_open_stripe(struct bch_fs *c, struct ec_stripe_new *s, u64 idx) { - bool ret; - - spin_lock(&c->ec_stripes_new_lock); - ret = !__bch2_stripe_is_open(c, idx); + guard(spinlock)(&c->ec_stripes_new_lock); + bool ret = !__bch2_stripe_is_open(c, idx); if (ret) { unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new))); s->idx = idx; hlist_add_head(&s->hash, &c->ec_stripes_new[hash]); } - spin_unlock(&c->ec_stripes_new_lock); - return ret; } @@ -968,9 +940,8 @@ static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s) { BUG_ON(!s->idx); - spin_lock(&c->ec_stripes_new_lock); + guard(spinlock)(&c->ec_stripes_new_lock); hlist_del_init(&s->hash); - spin_unlock(&c->ec_stripes_new_lock); s->idx = 0; } @@ -1063,7 +1034,7 @@ static int ec_stripe_key_update(struct btree_trans *trans, unsigned sectors = stripe_blockcount_get(v, i); if (!bch2_extent_ptr_eq(old->v.ptrs[i], new->v.ptrs[i]) && sectors) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_printf(&buf, "stripe changed nonempty block %u", i); prt_str(&buf, "\nold: "); @@ -1071,7 +1042,6 @@ static int ec_stripe_key_update(struct btree_trans *trans, prt_str(&buf, "\nnew: "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new->k_i)); bch2_fs_inconsistent(c, "%s", buf.buf); - printbuf_exit(&buf); ret = -EINVAL; goto err; } @@ -1115,21 +1085,18 @@ static int ec_stripe_update_extent(struct btree_trans *trans, int ret, dev, block; if (bp.v->level) { - struct printbuf buf = PRINTBUF; struct btree_iter node_iter; - struct btree *b; - - b = bch2_backpointer_get_node(trans, bp, &node_iter, last_flushed); + struct btree *b = bch2_backpointer_get_node(trans, bp, &node_iter, last_flushed); bch2_trans_iter_exit(trans, &node_iter); if (!b) return 0; + CLASS(printbuf, buf)(); prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b); bch2_bkey_val_to_text(&buf, c, bp.s_c); bch2_fs_inconsistent(c, "%s", buf.buf); - printbuf_exit(&buf); return bch_err_throw(c, erasure_coding_found_btree_node); } @@ -1194,7 +1161,7 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b struct bch_extent_ptr ptr = v->ptrs[block]; int ret = 0; - struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev); + CLASS(bch2_dev_tryget, ca)(c, ptr.dev); if (!ca) return bch_err_throw(c, ENOENT_dev_not_found); @@ -1225,28 +1192,26 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b })); bch2_bkey_buf_exit(&last_flushed, c); - bch2_dev_put(ca); return 
ret; } static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; unsigned nr_data = v->nr_blocks - v->nr_redundant; int ret = bch2_btree_write_buffer_flush_sync(trans); if (ret) - goto err; + return ret; for (unsigned i = 0; i < nr_data; i++) { ret = ec_stripe_update_bucket(trans, s, i); if (ret) - break; + return ret; } -err: - bch2_trans_put(trans); - return ret; + + return 0; } static void zero_out_rest_of_ec_bucket(struct bch_fs *c, @@ -1385,9 +1350,8 @@ err: } } - mutex_lock(&c->ec_stripe_new_lock); - list_del(&s->list); - mutex_unlock(&c->ec_stripe_new_lock); + scoped_guard(mutex, &c->ec_stripe_new_lock) + list_del(&s->list); wake_up(&c->ec_stripe_new_wait); ec_stripe_buf_exit(&s->existing_stripe); @@ -1401,15 +1365,11 @@ static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c) { struct ec_stripe_new *s; - mutex_lock(&c->ec_stripe_new_lock); + guard(mutex)(&c->ec_stripe_new_lock); list_for_each_entry(s, &c->ec_stripe_new_list, list) if (!atomic_read(&s->ref[STRIPE_REF_io])) - goto out; - s = NULL; -out: - mutex_unlock(&c->ec_stripe_new_lock); - - return s; + return s; + return NULL; } static void ec_stripe_create_work(struct work_struct *work) @@ -1443,9 +1403,8 @@ static void ec_stripe_new_set_pending(struct bch_fs *c, struct ec_stripe_head *h h->s = NULL; s->pending = true; - mutex_lock(&c->ec_stripe_new_lock); - list_add(&s->list, &c->ec_stripe_new_list); - mutex_unlock(&c->ec_stripe_new_lock); + scoped_guard(mutex, &c->ec_stripe_new_lock) + list_add(&s->list, &c->ec_stripe_new_list); ec_stripe_new_put(c, s, STRIPE_REF_io); } @@ -2199,13 +2158,13 @@ static int bch2_invalidate_stripe_to_dev_from_alloc(struct btree_trans *trans, s int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx, unsigned flags) { - int ret = bch2_trans_run(c, - for_each_btree_key_max_commit(trans, iter, + CLASS(btree_trans, trans)(c); + int ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_alloc, POS(dev_idx, 0), POS(dev_idx, U64_MAX), BTREE_ITER_intent, k, NULL, NULL, 0, ({ bch2_invalidate_stripe_to_dev_from_alloc(trans, k, flags); - }))); + })); bch_err_fn(c, ret); return ret; } @@ -2215,33 +2174,28 @@ int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx, unsigned flags) static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca) { struct ec_stripe_head *h; - struct open_bucket *ob; - unsigned i; - mutex_lock(&c->ec_stripe_head_lock); + guard(mutex)(&c->ec_stripe_head_lock); list_for_each_entry(h, &c->ec_stripe_head_list, list) { - mutex_lock(&h->lock); + guard(mutex)(&h->lock); if (!h->s) - goto unlock; + continue; if (!ca) goto found; - for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) { + for (unsigned i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) { if (!h->s->blocks[i]) continue; - ob = c->open_buckets + h->s->blocks[i]; + struct open_bucket *ob = c->open_buckets + h->s->blocks[i]; if (ob->dev == ca->dev_idx) goto found; } - goto unlock; + continue; found: ec_stripe_new_cancel(c, h, -BCH_ERR_erofs_no_writes); -unlock: - mutex_unlock(&h->lock); } - mutex_unlock(&c->ec_stripe_head_lock); } void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca) @@ -2258,11 +2212,8 @@ static bool bch2_fs_ec_flush_done(struct bch_fs *c) { sched_annotate_sleep(); - mutex_lock(&c->ec_stripe_new_lock); - bool ret = list_empty(&c->ec_stripe_new_list); - 
mutex_unlock(&c->ec_stripe_new_lock); - - return ret; + guard(mutex)(&c->ec_stripe_new_lock); + return list_empty(&c->ec_stripe_new_list); } void bch2_fs_ec_flush(struct bch_fs *c) @@ -2299,41 +2250,40 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c) struct ec_stripe_head *h; struct ec_stripe_new *s; - mutex_lock(&c->ec_stripe_head_lock); - list_for_each_entry(h, &c->ec_stripe_head_list, list) { - prt_printf(out, "disk label %u algo %u redundancy %u %s nr created %llu:\n", - h->disk_label, h->algo, h->redundancy, - bch2_watermarks[h->watermark], - h->nr_created); + scoped_guard(mutex, &c->ec_stripe_head_lock) + list_for_each_entry(h, &c->ec_stripe_head_list, list) { + prt_printf(out, "disk label %u algo %u redundancy %u %s nr created %llu:\n", + h->disk_label, h->algo, h->redundancy, + bch2_watermarks[h->watermark], + h->nr_created); - if (h->s) - bch2_new_stripe_to_text(out, c, h->s); - } - mutex_unlock(&c->ec_stripe_head_lock); + if (h->s) + bch2_new_stripe_to_text(out, c, h->s); + } prt_printf(out, "in flight:\n"); - mutex_lock(&c->ec_stripe_new_lock); - list_for_each_entry(s, &c->ec_stripe_new_list, list) - bch2_new_stripe_to_text(out, c, s); - mutex_unlock(&c->ec_stripe_new_lock); + scoped_guard(mutex, &c->ec_stripe_new_lock) + list_for_each_entry(s, &c->ec_stripe_new_list, list) + bch2_new_stripe_to_text(out, c, s); } void bch2_fs_ec_exit(struct bch_fs *c) { - struct ec_stripe_head *h; - unsigned i; while (1) { - mutex_lock(&c->ec_stripe_head_lock); - h = list_pop_entry(&c->ec_stripe_head_list, struct ec_stripe_head, list); - mutex_unlock(&c->ec_stripe_head_lock); + struct ec_stripe_head *h; + + scoped_guard(mutex, &c->ec_stripe_head_lock) + h = list_pop_entry(&c->ec_stripe_head_list, struct ec_stripe_head, list); if (!h) break; if (h->s) { - for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) + for (unsigned i = 0; + i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; + i++) BUG_ON(h->s->blocks[i]); kfree(h->s); @@ -2386,20 +2336,18 @@ static int bch2_check_stripe_to_lru_ref(struct btree_trans *trans, return 0; } -int bch2_check_stripe_to_lru_refs(struct bch_fs *c) +int bch2_check_stripe_to_lru_refs(struct btree_trans *trans) { struct bkey_buf last_flushed; - bch2_bkey_buf_init(&last_flushed); bkey_init(&last_flushed.k->k); - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, BTREE_ID_stripes, + int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_stripes, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_check_stripe_to_lru_ref(trans, k, &last_flushed))); + bch2_check_stripe_to_lru_ref(trans, k, &last_flushed)); - bch2_bkey_buf_exit(&last_flushed, c); - bch_err_fn(c, ret); + bch2_bkey_buf_exit(&last_flushed, trans->c); + bch_err_fn(trans->c, ret); return ret; } diff --git a/libbcachefs/ec.h b/libbcachefs/ec.h index 548048ad..e807e702 100644 --- a/libbcachefs/ec.h +++ b/libbcachefs/ec.h @@ -304,6 +304,6 @@ void bch2_fs_ec_exit(struct bch_fs *); void bch2_fs_ec_init_early(struct bch_fs *); int bch2_fs_ec_init(struct bch_fs *); -int bch2_check_stripe_to_lru_refs(struct bch_fs *); +int bch2_check_stripe_to_lru_refs(struct btree_trans *); #endif /* _BCACHEFS_EC_H */ diff --git a/libbcachefs/enumerated_ref.c b/libbcachefs/enumerated_ref.c index 56ab430f..2ded7413 100644 --- a/libbcachefs/enumerated_ref.c +++ b/libbcachefs/enumerated_ref.c @@ -75,13 +75,11 @@ void enumerated_ref_stop(struct enumerated_ref *ref, { enumerated_ref_stop_async(ref); while 
(!wait_for_completion_timeout(&ref->stop_complete, HZ * 10)) { - struct printbuf buf = PRINTBUF; - + CLASS(printbuf, buf)(); prt_str(&buf, "Waited for 10 seconds to shutdown enumerated ref\n"); prt_str(&buf, "Outstanding refs:\n"); enumerated_ref_to_text(&buf, ref, names); printk(KERN_ERR "%s", buf.buf); - printbuf_exit(&buf); } } diff --git a/libbcachefs/error.c b/libbcachefs/error.c index c7ee81b7..32a286b3 100644 --- a/libbcachefs/error.c +++ b/libbcachefs/error.c @@ -42,15 +42,14 @@ bool __bch2_inconsistent_error(struct bch_fs *c, struct printbuf *out) bool bch2_inconsistent_error(struct bch_fs *c) { - struct printbuf buf = PRINTBUF; - buf.atomic++; + CLASS(printbuf, buf)(); + guard(printbuf_atomic)(&buf); printbuf_indent_add_nextline(&buf, 2); bool ret = __bch2_inconsistent_error(c, &buf); if (ret) bch_err(c, "%s", buf.buf); - printbuf_exit(&buf); return ret; } @@ -58,8 +57,8 @@ __printf(3, 0) static bool bch2_fs_trans_inconsistent(struct bch_fs *c, struct btree_trans *trans, const char *fmt, va_list args) { - struct printbuf buf = PRINTBUF; - buf.atomic++; + CLASS(printbuf, buf)(); + guard(printbuf_atomic)(&buf); bch2_log_msg_start(c, &buf); @@ -70,8 +69,6 @@ static bool bch2_fs_trans_inconsistent(struct bch_fs *c, struct btree_trans *tra bch2_trans_updates_to_text(&buf, trans); bool ret = __bch2_inconsistent_error(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); - - printbuf_exit(&buf); return ret; } @@ -109,8 +106,7 @@ int __bch2_topology_error(struct bch_fs *c, struct printbuf *out) int bch2_fs_topology_error(struct bch_fs *c, const char *fmt, ...) { - struct printbuf buf = PRINTBUF; - + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); va_list args; @@ -120,8 +116,6 @@ int bch2_fs_topology_error(struct bch_fs *c, const char *fmt, ...) int ret = __bch2_topology_error(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); - - printbuf_exit(&buf); return ret; } @@ -138,18 +132,18 @@ void bch2_io_error_work(struct work_struct *work) /* XXX: if it's reads or checksums that are failing, set it to failed */ - down_write(&c->state_lock); + guard(rwsem_write)(&c->state_lock); unsigned long write_errors_start = READ_ONCE(ca->write_errors_start); if (write_errors_start && time_after(jiffies, write_errors_start + c->opts.write_error_timeout * HZ)) { if (ca->mi.state >= BCH_MEMBER_STATE_ro) - goto out; + return; bool dev = !__bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_ro, BCH_FORCE_IF_DEGRADED); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); __bch2_log_msg_start(ca->name, &buf); prt_printf(&buf, "writes erroring for %u seconds, setting %s ro", @@ -159,10 +153,7 @@ void bch2_io_error_work(struct work_struct *work) bch2_fs_emergency_read_only2(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); } -out: - up_write(&c->state_lock); } void bch2_io_error(struct bch_dev *ca, enum bch_member_error_type type) @@ -382,11 +373,10 @@ bool __bch2_count_fsck_err(struct bch_fs *c, { bch2_sb_error_count(c, id); - mutex_lock(&c->fsck_error_msgs_lock); bool print = true, repeat = false, suppress = false; - count_fsck_err_locked(c, id, msg->buf, &repeat, &print, &suppress); - mutex_unlock(&c->fsck_error_msgs_lock); + scoped_guard(mutex, &c->fsck_error_msgs_lock) + count_fsck_err_locked(c, id, msg->buf, &repeat, &print, &suppress); if (suppress) prt_printf(msg, "Ratelimiting new instances of previous error\n"); @@ -444,7 +434,8 @@ int __bch2_fsck_err(struct bch_fs *c, const char *fmt, ...) 
{ va_list args; - struct printbuf buf = PRINTBUF, *out = &buf; + CLASS(printbuf, buf)(); + struct printbuf *out = &buf; int ret = 0; const char *action_orig = "fix?", *action = action_orig; @@ -648,7 +639,6 @@ err: if (action != action_orig) kfree(action); - printbuf_exit(&buf); BUG_ON(!ret); return ret; @@ -680,7 +670,7 @@ int __bch2_bkey_fsck_err(struct bch_fs *c, if (!WARN_ON(err >= ARRAY_SIZE(fsck_flags_extra))) fsck_flags |= fsck_flags_extra[err]; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_printf(&buf, "invalid bkey in %s", bch2_bkey_validate_contexts[from.from]); @@ -701,7 +691,6 @@ int __bch2_bkey_fsck_err(struct bch_fs *c, va_end(args); int ret = __bch2_fsck_err(c, NULL, fsck_flags, err, "%s, delete?", buf.buf); - printbuf_exit(&buf); return ret; } @@ -709,7 +698,7 @@ static void __bch2_flush_fsck_errs(struct bch_fs *c, bool print) { struct fsck_err_state *s, *n; - mutex_lock(&c->fsck_error_msgs_lock); + guard(mutex)(&c->fsck_error_msgs_lock); list_for_each_entry_safe(s, n, &c->fsck_error_msgs, list) { if (print && s->ratelimited && s->last_msg) @@ -719,8 +708,6 @@ static void __bch2_flush_fsck_errs(struct bch_fs *c, bool print) kfree(s->last_msg); kfree(s); } - - mutex_unlock(&c->fsck_error_msgs_lock); } void bch2_flush_fsck_errs(struct bch_fs *c) @@ -754,7 +741,8 @@ int bch2_inum_offset_err_msg_trans(struct btree_trans *trans, struct printbuf *o void bch2_inum_offset_err_msg(struct bch_fs *c, struct printbuf *out, subvol_inum inum, u64 offset) { - bch2_trans_do(c, bch2_inum_offset_err_msg_trans(trans, out, inum, offset)); + CLASS(btree_trans, trans)(c); + lockrestart_do(trans, bch2_inum_offset_err_msg_trans(trans, out, inum, offset)); } int bch2_inum_snap_offset_err_msg_trans(struct btree_trans *trans, struct printbuf *out, @@ -771,5 +759,6 @@ int bch2_inum_snap_offset_err_msg_trans(struct btree_trans *trans, struct printb void bch2_inum_snap_offset_err_msg(struct bch_fs *c, struct printbuf *out, struct bpos pos) { - bch2_trans_do(c, bch2_inum_snap_offset_err_msg_trans(trans, out, pos)); + CLASS(btree_trans, trans)(c); + lockrestart_do(trans, bch2_inum_snap_offset_err_msg_trans(trans, out, pos)); } diff --git a/libbcachefs/extents.c b/libbcachefs/extents.c index a286bd99..b36ecfc0 100644 --- a/libbcachefs/extents.c +++ b/libbcachefs/extents.c @@ -63,15 +63,14 @@ void bch2_io_failures_to_text(struct printbuf *out, ((!!f->failed_ec) << 3); bch2_printbuf_make_room(out, 1024); - out->atomic++; scoped_guard(rcu) { + guard(printbuf_atomic)(out); struct bch_dev *ca = bch2_dev_rcu_noerror(c, f->dev); if (ca) prt_str(out, ca->name); else prt_printf(out, "(invalid device %u)", f->dev); } - --out->atomic; prt_char(out, ' '); @@ -1237,7 +1236,7 @@ restart_drop_ptrs: void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr) { - out->atomic++; + guard(printbuf_atomic)(out); guard(rcu)(); struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev); if (!ca) { @@ -1262,7 +1261,6 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struc else if (stale) prt_printf(out, " invalid"); } - --out->atomic; } void bch2_extent_crc_unpacked_to_text(struct printbuf *out, struct bch_extent_crc_unpacked *crc) diff --git a/libbcachefs/fs-io-buffered.c b/libbcachefs/fs-io-buffered.c index 4e82dfa6..f2389054 100644 --- a/libbcachefs/fs-io-buffered.c +++ b/libbcachefs/fs-io-buffered.c @@ -254,12 +254,11 @@ err: bch2_trans_iter_exit(trans, &iter); if (ret) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); 
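/*
 * [Editor's sketch, illustrative only] The CLASS(printbuf, ...) conversions
 * in the hunks above rely on the scope-based cleanup macros from
 * <linux/cleanup.h>, assuming the printbuf class is declared roughly like
 * this (the exact declaration lives in the bcachefs printbuf headers):
 */
DEFINE_CLASS(printbuf, struct printbuf,
	     printbuf_exit(&_T),	/* destructor: runs at every scope exit */
	     PRINTBUF,			/* constructor expression */
	     void)			/* no constructor arguments */

/* a converted function then reduces to: */
static void log_example(struct bch_fs *c)
{
	CLASS(printbuf, buf)();		/* struct printbuf buf = PRINTBUF,
					 * with __attribute__((cleanup)) attached */
	prt_printf(&buf, "example message");
	bch_err(c, "%s", buf.buf);
	/* printbuf_exit(&buf) is emitted on this and every other return path,
	 * which is why the explicit exit calls and err:/out: labels could be
	 * deleted throughout */
}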
lockrestart_do(trans, bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter.pos.offset << 9)); prt_printf(&buf, "read error %s from btree lookup", bch2_err_str(ret)); bch_err_ratelimited(c, "%s", buf.buf); - printbuf_exit(&buf); rbio->bio.bi_status = BLK_STS_IOERR; bio_endio(&rbio->bio); @@ -425,27 +424,23 @@ static void bch2_writepage_io_done(struct bch_write_op *op) set_bit(EI_INODE_ERROR, &io->inode->ei_flags); bio_for_each_folio_all(fi, bio) { - struct bch_folio *s; - mapping_set_error(fi.folio->mapping, -EIO); - s = __bch2_folio(fi.folio); - spin_lock(&s->lock); + struct bch_folio *s = __bch2_folio(fi.folio); + guard(spinlock)(&s->lock); + for (i = 0; i < folio_sectors(fi.folio); i++) s->s[i].nr_replicas = 0; - spin_unlock(&s->lock); } } if (io->op.flags & BCH_WRITE_wrote_data_inline) { bio_for_each_folio_all(fi, bio) { - struct bch_folio *s; + struct bch_folio *s = __bch2_folio(fi.folio); + guard(spinlock)(&s->lock); - s = __bch2_folio(fi.folio); - spin_lock(&s->lock); for (i = 0; i < folio_sectors(fi.folio); i++) s->s[i].nr_replicas = 0; - spin_unlock(&s->lock); } } @@ -571,30 +566,30 @@ do_io: BUG_ON(ret); /* Before unlocking the page, get copy of reservations: */ - spin_lock(&s->lock); - memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors); + scoped_guard(spinlock, &s->lock) { + memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors); - for (i = 0; i < f_sectors; i++) { - if (s->s[i].state < SECTOR_dirty) - continue; + for (i = 0; i < f_sectors; i++) { + if (s->s[i].state < SECTOR_dirty) + continue; - nr_replicas_this_write = - min_t(unsigned, nr_replicas_this_write, - s->s[i].nr_replicas + - s->s[i].replicas_reserved); - } + nr_replicas_this_write = + min_t(unsigned, nr_replicas_this_write, + s->s[i].nr_replicas + + s->s[i].replicas_reserved); + } - for (i = 0; i < f_sectors; i++) { - if (s->s[i].state < SECTOR_dirty) - continue; + for (i = 0; i < f_sectors; i++) { + if (s->s[i].state < SECTOR_dirty) + continue; - s->s[i].nr_replicas = w->opts.compression - ? 0 : nr_replicas_this_write; + s->s[i].nr_replicas = w->opts.compression + ? 
0 : nr_replicas_this_write; - s->s[i].replicas_reserved = 0; - bch2_folio_sector_set(folio, s, i, SECTOR_allocated); + s->s[i].replicas_reserved = 0; + bch2_folio_sector_set(folio, s, i, SECTOR_allocated); + } } - spin_unlock(&s->lock); BUG_ON(atomic_read(&s->write_count)); atomic_set(&s->write_count, 1); @@ -780,10 +775,9 @@ int bch2_write_end(struct file *file, struct address_space *mapping, copied = 0; } - spin_lock(&inode->v.i_lock); - if (pos + copied > inode->v.i_size) - i_size_write(&inode->v, pos + copied); - spin_unlock(&inode->v.i_lock); + scoped_guard(spinlock, &inode->v.i_lock) + if (pos + copied > inode->v.i_size) + i_size_write(&inode->v, pos + copied); if (copied) { if (!folio_test_uptodate(folio)) @@ -942,10 +936,9 @@ static int __bch2_buffered_write(struct bch_inode_info *inode, end = pos + copied; - spin_lock(&inode->v.i_lock); - if (end > inode->v.i_size) - i_size_write(&inode->v, end); - spin_unlock(&inode->v.i_lock); + scoped_guard(spinlock, &inode->v.i_lock) + if (end > inode->v.i_size) + i_size_write(&inode->v, end); f_pos = pos; f_offset = pos - folio_pos(darray_first(fs)); diff --git a/libbcachefs/fs-io-direct.c b/libbcachefs/fs-io-direct.c index 1f5154d9..73d44875 100644 --- a/libbcachefs/fs-io-direct.c +++ b/libbcachefs/fs-io-direct.c @@ -252,7 +252,7 @@ static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum, u64 offset, u64 size, unsigned nr_replicas, bool compressed) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct btree_iter iter; struct bkey_s_c k; u64 end = offset + size; @@ -285,7 +285,6 @@ retry: err: if (bch2_err_matches(err, BCH_ERR_transaction_restart)) goto retry; - bch2_trans_put(trans); return err ? false : ret; } @@ -428,17 +427,15 @@ static __always_inline void bch2_dio_write_end(struct dio_write *dio) dio->written += dio->op.written; if (dio->extending) { - spin_lock(&inode->v.i_lock); + guard(spinlock)(&inode->v.i_lock); if (req->ki_pos > inode->v.i_size) i_size_write(&inode->v, req->ki_pos); - spin_unlock(&inode->v.i_lock); } if (dio->op.i_sectors_delta || dio->quota_res.sectors) { - mutex_lock(&inode->ei_quota_lock); + guard(mutex)(&inode->ei_quota_lock); __bch2_i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta); __bch2_quota_reservation_put(c, inode, &dio->quota_res); - mutex_unlock(&inode->ei_quota_lock); } bio_release_pages(bio, false); diff --git a/libbcachefs/fs-io-pagecache.c b/libbcachefs/fs-io-pagecache.c index c2cc4058..2a670518 100644 --- a/libbcachefs/fs-io-pagecache.c +++ b/libbcachefs/fs-io-pagecache.c @@ -125,11 +125,9 @@ folio_sector_reserve(enum bch_folio_sector_state state) /* for newly allocated folios: */ struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp) { - struct bch_folio *s; - - s = kzalloc(sizeof(*s) + - sizeof(struct bch_folio_sector) * - folio_sectors(folio), gfp); + struct bch_folio *s = kzalloc(sizeof(*s) + + sizeof(struct bch_folio_sector) * + folio_sectors(folio), gfp); if (!s) return NULL; @@ -162,7 +160,7 @@ static void __bch2_folio_set(struct folio *folio, BUG_ON(pg_offset >= sectors); BUG_ON(pg_offset + pg_len > sectors); - spin_lock(&s->lock); + guard(spinlock)(&s->lock); for (i = pg_offset; i < pg_offset + pg_len; i++) { s->s[i].nr_replicas = nr_ptrs; @@ -171,8 +169,6 @@ static void __bch2_folio_set(struct folio *folio, if (i == sectors) s->uptodate = true; - - spin_unlock(&s->lock); } /* @@ -276,10 +272,9 @@ void bch2_mark_pagecache_unallocated(struct bch_inode_info *inode, s = bch2_folio(folio); if (s) { - 
spin_lock(&s->lock); + guard(spinlock)(&s->lock); for (j = folio_offset; j < folio_offset + folio_len; j++) s->s[j].nr_replicas = 0; - spin_unlock(&s->lock); } folio_unlock(folio); @@ -330,13 +325,12 @@ int bch2_mark_pagecache_reserved(struct bch_inode_info *inode, unsigned folio_offset = max(*start, folio_start) - folio_start; unsigned folio_len = min(end, folio_end) - folio_offset - folio_start; - spin_lock(&s->lock); + guard(spinlock)(&s->lock); for (unsigned j = folio_offset; j < folio_offset + folio_len; j++) { i_sectors_delta -= s->s[j].state == SECTOR_dirty; bch2_folio_sector_set(folio, s, j, folio_sector_reserve(s->s[j].state)); } - spin_unlock(&s->lock); } folio_unlock(folio); @@ -529,29 +523,26 @@ void bch2_set_folio_dirty(struct bch_fs *c, BUG_ON(!s->uptodate); - spin_lock(&s->lock); - - for (i = round_down(offset, block_bytes(c)) >> 9; - i < round_up(offset + len, block_bytes(c)) >> 9; - i++) { - unsigned sectors = sectors_to_reserve(&s->s[i], - res->disk.nr_replicas); - - /* - * This can happen if we race with the error path in - * bch2_writepage_io_done(): - */ - sectors = min_t(unsigned, sectors, res->disk.sectors); + scoped_guard(spinlock, &s->lock) + for (i = round_down(offset, block_bytes(c)) >> 9; + i < round_up(offset + len, block_bytes(c)) >> 9; + i++) { + unsigned sectors = sectors_to_reserve(&s->s[i], + res->disk.nr_replicas); - s->s[i].replicas_reserved += sectors; - res->disk.sectors -= sectors; + /* + * This can happen if we race with the error path in + * bch2_writepage_io_done(): + */ + sectors = min_t(unsigned, sectors, res->disk.sectors); - dirty_sectors += s->s[i].state == SECTOR_unallocated; + s->s[i].replicas_reserved += sectors; + res->disk.sectors -= sectors; - bch2_folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state)); - } + dirty_sectors += s->s[i].state == SECTOR_unallocated; - spin_unlock(&s->lock); + bch2_folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state)); + } bch2_i_sectors_acct(c, inode, &res->quota, dirty_sectors); diff --git a/libbcachefs/fs-io.c b/libbcachefs/fs-io.c index dc5f713e..cc203752 100644 --- a/libbcachefs/fs-io.c +++ b/libbcachefs/fs-io.c @@ -148,7 +148,7 @@ void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *quota_res, s64 sectors) { if (unlikely((s64) inode->v.i_blocks + sectors < 0)) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)", inode->v.i_ino, (u64) inode->v.i_blocks, sectors, @@ -157,7 +157,6 @@ void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, bool print = bch2_count_fsck_err(c, vfs_inode_i_blocks_underflow, &buf); if (print) bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); if (sectors < 0) sectors = -inode->v.i_blocks; @@ -187,7 +186,6 @@ void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, static int bch2_get_inode_journal_seq_trans(struct btree_trans *trans, subvol_inum inum, u64 *seq) { - struct printbuf buf = PRINTBUF; struct bch_inode_unpacked u; struct btree_iter iter; int ret = bch2_inode_peek(trans, &iter, &u, inum, 0); @@ -197,6 +195,7 @@ static int bch2_get_inode_journal_seq_trans(struct btree_trans *trans, subvol_in u64 cur_seq = journal_cur_seq(&trans->c->journal); *seq = min(cur_seq, u.bi_journal_seq); + CLASS(printbuf, buf)(); if (fsck_err_on(u.bi_journal_seq > cur_seq, trans, inode_journal_seq_in_future, "inode journal seq in future (currently at %llu)\n%s", @@ -208,7 
+207,6 @@ static int bch2_get_inode_journal_seq_trans(struct btree_trans *trans, subvol_in } fsck_err: bch2_trans_iter_exit(trans, &iter); - printbuf_exit(&buf); return ret; } @@ -225,9 +223,10 @@ static int bch2_flush_inode(struct bch_fs *c, if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_fsync)) return -EROFS; + CLASS(btree_trans, trans)(c); u64 seq; - int ret = bch2_trans_commit_do(c, NULL, NULL, 0, - bch2_get_inode_journal_seq_trans(trans, inode_inum(inode), &seq)) ?: + int ret = commit_do(trans, NULL, NULL, 0, + bch2_get_inode_journal_seq_trans(trans, inode_inum(inode), &seq)) ?: bch2_journal_flush_seq(&c->journal, seq, TASK_INTERRUPTIBLE) ?: bch2_inode_flush_nocow_writes(c, inode); enumerated_ref_put(&c->writes, BCH_WRITE_REF_fsync); @@ -267,11 +266,11 @@ static inline int range_has_data(struct bch_fs *c, u32 subvol, struct bpos start, struct bpos end) { - return bch2_trans_run(c, - for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, start, end, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, start, end, subvol, 0, k, ({ - bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k); - }))); + bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k); + })); } static int __bch2_truncate_folio(struct bch_inode_info *inode, @@ -521,7 +520,7 @@ int bchfs_truncate(struct mnt_idmap *idmap, if (unlikely(!inode->v.i_size && inode->v.i_blocks && !bch2_journal_error(&c->journal))) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)", @@ -531,7 +530,6 @@ int bchfs_truncate(struct mnt_idmap *idmap, bool print = bch2_count_fsck_err(c, vfs_inode_i_blocks_not_zero_at_truncate, &buf); if (print) bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); } ret = bch2_setattr_nonsize(idmap, inode, iattr); @@ -559,11 +557,10 @@ static noinline long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, l u64 block_start = round_up(offset, block_bytes(c)); u64 block_end = round_down(end, block_bytes(c)); bool truncated_last_page; - int ret = 0; - ret = bch2_truncate_folios(inode, offset, end); + int ret = bch2_truncate_folios(inode, offset, end); if (unlikely(ret < 0)) - goto err; + return ret; truncated_last_page = ret; @@ -576,19 +573,18 @@ static noinline long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, l block_start >> 9, block_end >> 9, &i_sectors_delta); bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta); + + if (ret) + return ret; } - mutex_lock(&inode->ei_update_lock); - if (end >= inode->v.i_size && !truncated_last_page) { - ret = bch2_write_inode_size(c, inode, inode->v.i_size, - ATTR_MTIME|ATTR_CTIME); - } else { - ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL, + guard(mutex)(&inode->ei_update_lock); + if (end >= inode->v.i_size && !truncated_last_page) + return bch2_write_inode_size(c, inode, inode->v.i_size, + ATTR_MTIME|ATTR_CTIME); + else + return bch2_write_inode(c, inode, inode_update_times_fn, NULL, ATTR_MTIME|ATTR_CTIME); - } - mutex_unlock(&inode->ei_update_lock); -err: - return ret; } static noinline long bchfs_fcollapse_finsert(struct bch_inode_info *inode, @@ -631,7 +627,7 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode, u64 start_sector, u64 end_sector) { struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct btree_iter iter; struct bpos end_pos = 
POS(inode->v.i_ino, end_sector); struct bch_io_opts opts; @@ -753,7 +749,6 @@ bkey_err: } bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); return ret; } @@ -802,13 +797,11 @@ static noinline long bchfs_fallocate(struct bch_inode_info *inode, int mode, if (end >= inode->v.i_size && (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) || !(mode & FALLOC_FL_KEEP_SIZE))) { - spin_lock(&inode->v.i_lock); - i_size_write(&inode->v, end); - spin_unlock(&inode->v.i_lock); + scoped_guard(spinlock, &inode->v.i_lock) + i_size_write(&inode->v, end); - mutex_lock(&inode->ei_update_lock); - ret2 = bch2_write_inode_size(c, inode, end, 0); - mutex_unlock(&inode->ei_update_lock); + scoped_guard(mutex, &inode->ei_update_lock) + ret2 = bch2_write_inode_size(c, inode, end, 0); } return ret ?: ret2; @@ -861,8 +854,8 @@ static int quota_reserve_range(struct bch_inode_info *inode, struct bch_fs *c = inode->v.i_sb->s_fs_info; u64 sectors = end - start; - int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_max(trans, iter, + CLASS(btree_trans, trans)(c); + int ret = for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, POS(inode->v.i_ino, start), POS(inode->v.i_ino, end - 1), @@ -875,7 +868,7 @@ static int quota_reserve_range(struct bch_inode_info *inode, } 0; - }))); + })); return ret ?: bch2_quota_reservation_add(c, inode, res, sectors, true); } @@ -955,10 +948,9 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src, bch2_i_sectors_acct(c, dst, "a_res, i_sectors_delta); - spin_lock(&dst->v.i_lock); - if (pos_dst + ret > dst->v.i_size) - i_size_write(&dst->v, pos_dst + ret); - spin_unlock(&dst->v.i_lock); + scoped_guard(spinlock, &dst->v.i_lock) + if (pos_dst + ret > dst->v.i_size) + i_size_write(&dst->v, pos_dst + ret); if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) || IS_SYNC(file_inode(file_dst))) @@ -1020,38 +1012,38 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) if (offset >= isize) return -ENXIO; - int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, + CLASS(btree_trans, trans)(c); + int ret = for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, POS(inode->v.i_ino, offset >> 9), POS(inode->v.i_ino, U64_MAX), inum.subvol, BTREE_ITER_slots, k, ({ - if (k.k->p.inode != inode->v.i_ino || - !bkey_extent_is_data(k.k)) { - loff_t start_offset = k.k->p.inode == inode->v.i_ino - ? max(offset, bkey_start_offset(k.k) << 9) - : offset; - loff_t end_offset = k.k->p.inode == inode->v.i_ino - ? MAX_LFS_FILESIZE - : k.k->p.offset << 9; - - /* - * Found a hole in the btree, now make sure it's - * a hole in the pagecache. We might have to - * keep searching if this hole is entirely dirty - * in the page cache: - */ - bch2_trans_unlock(trans); - loff_t pagecache_hole = bch2_seek_pagecache_hole(&inode->v, - start_offset, end_offset, 0, false); - if (pagecache_hole < end_offset) { - next_hole = pagecache_hole; - break; - } - } else { - offset = max(offset, bkey_start_offset(k.k) << 9); + if (k.k->p.inode != inode->v.i_ino || + !bkey_extent_is_data(k.k)) { + loff_t start_offset = k.k->p.inode == inode->v.i_ino + ? max(offset, bkey_start_offset(k.k) << 9) + : offset; + loff_t end_offset = k.k->p.inode == inode->v.i_ino + ? MAX_LFS_FILESIZE + : k.k->p.offset << 9; + + /* + * Found a hole in the btree, now make sure it's + * a hole in the pagecache. 
We might have to + * keep searching if this hole is entirely dirty + * in the page cache: + */ + bch2_trans_unlock(trans); + loff_t pagecache_hole = bch2_seek_pagecache_hole(&inode->v, + start_offset, end_offset, 0, false); + if (pagecache_hole < end_offset) { + next_hole = pagecache_hole; + break; } - 0; - }))); + } else { + offset = max(offset, bkey_start_offset(k.k) << 9); + } + 0; + })); if (ret) return ret; diff --git a/libbcachefs/fs-io.h b/libbcachefs/fs-io.h index ca70346e..d229f722 100644 --- a/libbcachefs/fs-io.h +++ b/libbcachefs/fs-io.h @@ -77,9 +77,8 @@ static inline void bch2_quota_reservation_put(struct bch_fs *c, struct quota_res *res) { if (res->sectors) { - mutex_lock(&inode->ei_quota_lock); + guard(mutex)(&inode->ei_quota_lock); __bch2_quota_reservation_put(c, inode, res); - mutex_unlock(&inode->ei_quota_lock); } } @@ -94,16 +93,15 @@ static inline int bch2_quota_reservation_add(struct bch_fs *c, if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags)) return 0; - mutex_lock(&inode->ei_quota_lock); + guard(mutex)(&inode->ei_quota_lock); ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK); - if (likely(!ret)) { - inode->ei_quota_reserved += sectors; - res->sectors += sectors; - } - mutex_unlock(&inode->ei_quota_lock); + if (ret) + return ret; - return ret; + inode->ei_quota_reserved += sectors; + res->sectors += sectors; + return 0; } #else @@ -134,9 +132,8 @@ static inline void bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info * struct quota_res *quota_res, s64 sectors) { if (sectors) { - mutex_lock(&inode->ei_quota_lock); + guard(mutex)(&inode->ei_quota_lock); __bch2_i_sectors_acct(c, inode, quota_res, sectors); - mutex_unlock(&inode->ei_quota_lock); } } diff --git a/libbcachefs/fs-ioctl.c b/libbcachefs/fs-ioctl.c index 4e72e654..8b9d3c7d 100644 --- a/libbcachefs/fs-ioctl.c +++ b/libbcachefs/fs-ioctl.c @@ -111,9 +111,8 @@ static int bch2_ioc_getlabel(struct bch_fs *c, char __user *user_label) BUILD_BUG_ON(BCH_SB_LABEL_SIZE >= FSLABEL_MAX); - mutex_lock(&c->sb_lock); - memcpy(label, c->disk_sb.sb->label, BCH_SB_LABEL_SIZE); - mutex_unlock(&c->sb_lock); + scoped_guard(mutex, &c->sb_lock) + memcpy(label, c->disk_sb.sb->label, BCH_SB_LABEL_SIZE); len = strnlen(label, BCH_SB_LABEL_SIZE); if (len == BCH_SB_LABEL_SIZE) { @@ -152,10 +151,10 @@ static int bch2_ioc_setlabel(struct bch_fs *c, if (ret) return ret; - mutex_lock(&c->sb_lock); - strscpy(c->disk_sb.sb->label, label, BCH_SB_LABEL_SIZE); - ret = bch2_write_super(c); - mutex_unlock(&c->sb_lock); + scoped_guard(mutex, &c->sb_lock) { + strscpy(c->disk_sb.sb->label, label, BCH_SB_LABEL_SIZE); + ret = bch2_write_super(c); + } mnt_drop_write_file(file); return ret; @@ -172,7 +171,7 @@ static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg) if (get_user(flags, arg)) return -EFAULT; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "shutdown by ioctl type %u", flags); @@ -193,13 +192,10 @@ static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg) bch2_fs_emergency_read_only2(c, &buf); break; default: - ret = -EINVAL; - goto noprint; + return -EINVAL; } bch2_print_str(c, KERN_ERR, buf.buf); -noprint: - printbuf_exit(&buf); return ret; } @@ -234,9 +230,8 @@ static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, if (arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) { /* sync_inodes_sb enforce s_umount is locked */ - down_read(&c->vfs_sb->s_umount); + 
guard(rwsem_read)(&c->vfs_sb->s_umount); sync_inodes_sb(c->vfs_sb); - up_read(&c->vfs_sb->s_umount); } if (arg.src_ptr) { @@ -301,12 +296,10 @@ static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, !arg.src_ptr) snapshot_src.subvol = inode_inum(to_bch_ei(dir)).subvol; - down_write(&c->snapshot_create_lock); - inode = __bch2_create(file_mnt_idmap(filp), to_bch_ei(dir), - dst_dentry, arg.mode|S_IFDIR, - 0, snapshot_src, create_flags); - up_write(&c->snapshot_create_lock); - + scoped_guard(rwsem_write, &c->snapshot_create_lock) + inode = __bch2_create(file_mnt_idmap(filp), to_bch_ei(dir), + dst_dentry, arg.mode|S_IFDIR, + 0, snapshot_src, create_flags); error = PTR_ERR_OR_ZERO(inode); if (error) goto err3; diff --git a/libbcachefs/fs.c b/libbcachefs/fs.c index 3b0783f1..2789b30a 100644 --- a/libbcachefs/fs.c +++ b/libbcachefs/fs.c @@ -106,14 +106,13 @@ int __must_check bch2_write_inode(struct bch_fs *c, inode_set_fn set, void *p, unsigned fields) { - struct btree_trans *trans = bch2_trans_get(c); - struct btree_iter iter = {}; - struct bch_inode_unpacked inode_u; - int ret; + CLASS(btree_trans, trans)(c); retry: bch2_trans_begin(trans); - ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode), BTREE_ITER_intent); + struct btree_iter iter = {}; + struct bch_inode_unpacked inode_u; + int ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode), BTREE_ITER_intent); if (ret) goto err; @@ -156,7 +155,6 @@ err: inode_inum(inode).subvol, inode_inum(inode).inum); - bch2_trans_put(trans); return ret < 0 ? ret : 0; } @@ -166,32 +164,27 @@ int bch2_fs_quota_transfer(struct bch_fs *c, unsigned qtypes, enum quota_acct_mode mode) { - unsigned i; - int ret; - qtypes &= enabled_qtypes(c); - for (i = 0; i < QTYP_NR; i++) + for (unsigned i = 0; i < QTYP_NR; i++) if (new_qid.q[i] == inode->ei_qid.q[i]) qtypes &= ~(1U << i); if (!qtypes) return 0; - mutex_lock(&inode->ei_quota_lock); + guard(mutex)(&inode->ei_quota_lock); - ret = bch2_quota_transfer(c, qtypes, new_qid, + int ret = bch2_quota_transfer(c, qtypes, new_qid, inode->ei_qid, inode->v.i_blocks + inode->ei_quota_reserved, mode); if (!ret) - for (i = 0; i < QTYP_NR; i++) + for (unsigned i = 0; i < QTYP_NR; i++) if (qtypes & (1 << i)) inode->ei_qid.q[i] = new_qid.q[i]; - mutex_unlock(&inode->ei_quota_lock); - return ret; } @@ -241,7 +234,7 @@ int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p) struct bch_fs *c = trans->c; struct rhltable *ht = &c->vfs_inodes_by_inum_table; u64 inum = p.offset; - DARRAY(u32) subvols; + CLASS(darray_u32, subvols)(); int ret = 0; if (!test_bit(BCH_FS_started, &c->flags)) @@ -280,7 +273,7 @@ restart: rcu_read_unlock(); ret = darray_make_room(&subvols, 1); if (ret) - goto err; + return ret; subvols.nr = 0; goto restart_from_top; } @@ -303,14 +296,13 @@ restart: u32 snap; ret = bch2_subvolume_get_snapshot(trans, *i, &snap); if (ret) - goto err; + return ret; ret = bch2_snapshot_is_ancestor(c, snap, p.snapshot); if (ret) break; } -err: - darray_exit(&subvols); + return ret; } @@ -367,9 +359,9 @@ repeat: static void bch2_inode_hash_remove(struct bch_fs *c, struct bch_inode_info *inode) { - spin_lock(&inode->v.i_lock); - bool remove = test_and_clear_bit(EI_INODE_HASHED, &inode->ei_flags); - spin_unlock(&inode->v.i_lock); + bool remove; + scoped_guard(spinlock, &inode->v.i_lock) + remove = test_and_clear_bit(EI_INODE_HASHED, &inode->ei_flags); if (remove) { int ret = rhltable_remove(&c->vfs_inodes_by_inum_table, @@ -430,9 +422,8 @@ retry: 
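/*
 * [Editor's sketch, illustrative only] The lock conversions in these files
 * trade explicit lock/unlock pairs for guards from <linux/cleanup.h>. The
 * two forms used in this series, shown with a made-up mutex and list:
 */
#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/list.h>

static void held_to_end_of_scope(struct mutex *lock,
				 struct list_head *item, struct list_head *list)
{
	guard(mutex)(lock);		/* released on every path out of scope */
	list_add(item, list);
}

static void held_for_one_statement(struct mutex *lock,
				   struct list_head *item, struct list_head *list)
{
	scoped_guard(mutex, lock)	/* held only for the block that follows */
		list_add(item, list);
	/* lock already dropped here, so e.g. a wake_up() placed after the
	 * scoped_guard still runs unlocked, preserving the original
	 * unlock-before-wakeup ordering */
}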
inode_sb_list_add(&inode->v); - mutex_lock(&c->vfs_inodes_lock); - list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list); - mutex_unlock(&c->vfs_inodes_lock); + scoped_guard(mutex, &c->vfs_inodes_lock) + list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list); return inode; } } @@ -514,7 +505,7 @@ struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum) if (inode) return &inode->v; - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct bch_inode_unpacked inode_u; struct bch_subvolume subvol; @@ -522,7 +513,6 @@ struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum) bch2_subvolume_get(trans, inum.subvol, true, &subvol) ?: bch2_inode_find_by_inum_trans(trans, inum, &inode_u)) ?: PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol)); - bch2_trans_put(trans); return ret ? ERR_PTR(ret) : &inode->v; } @@ -534,7 +524,6 @@ __bch2_create(struct mnt_idmap *idmap, unsigned flags) { struct bch_fs *c = dir->v.i_sb->s_fs_info; - struct btree_trans *trans; struct bch_inode_unpacked dir_u; struct bch_inode_info *inode; struct bch_inode_unpacked inode_u; @@ -555,18 +544,23 @@ __bch2_create(struct mnt_idmap *idmap, if (ret) return ERR_PTR(ret); #endif + inode = __bch2_new_inode(c, GFP_NOFS); if (unlikely(!inode)) { - inode = ERR_PTR(-ENOMEM); - goto err; + posix_acl_release(default_acl); + posix_acl_release(acl); + return ERR_PTR(-ENOMEM); } bch2_inode_init_early(c, &inode_u); if (!(flags & BCH_CREATE_TMPFILE)) mutex_lock(&dir->ei_update_lock); - - trans = bch2_trans_get(c); + /* + * posix_acl_create() calls get_acl -> btree transaction, don't start + * ours until after, ei->update_lock must also be taken first: + */ + CLASS(btree_trans, trans)(c); retry: bch2_trans_begin(trans); @@ -625,7 +619,6 @@ err_before_quota: * restart here. 
*/ inode = bch2_inode_hash_insert(c, NULL, inode); - bch2_trans_put(trans); err: posix_acl_release(default_acl); posix_acl_release(acl); @@ -634,7 +627,6 @@ err_trans: if (!(flags & BCH_CREATE_TMPFILE)) mutex_unlock(&dir->ei_update_lock); - bch2_trans_put(trans); make_bad_inode(&inode->v); iput(&inode->v); inode = ERR_PTR(ret); @@ -649,7 +641,7 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans, { struct bch_fs *c = trans->c; subvol_inum inum = {}; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); struct qstr lookup_name; int ret = bch2_maybe_casefold(trans, dir_hash_info, name, &lookup_name); @@ -701,7 +693,6 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans, goto err; out: bch2_trans_iter_exit(trans, &dirent_iter); - printbuf_exit(&buf); return inode; err: inode = ERR_PTR(ret); @@ -770,8 +761,8 @@ static int __bch2_link(struct bch_fs *c, struct bch_inode_unpacked dir_u, inode_u; int ret; - mutex_lock(&inode->ei_update_lock); - struct btree_trans *trans = bch2_trans_get(c); + guard(mutex)(&inode->ei_update_lock); + CLASS(btree_trans, trans)(c); ret = commit_do(trans, NULL, NULL, 0, bch2_link_trans(trans, @@ -785,8 +776,6 @@ static int __bch2_link(struct bch_fs *c, bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME); } - bch2_trans_put(trans); - mutex_unlock(&inode->ei_update_lock); return ret; } @@ -821,8 +810,7 @@ int __bch2_unlink(struct inode *vdir, struct dentry *dentry, int ret; bch2_lock_inodes(INODE_UPDATE_LOCK, dir, inode); - - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, @@ -849,7 +837,6 @@ int __bch2_unlink(struct inode *vdir, struct dentry *dentry, if (IS_CASEFOLDED(vdir)) d_invalidate(dentry); err: - bch2_trans_put(trans); bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode); return ret; @@ -918,7 +905,6 @@ static int bch2_rename2(struct mnt_idmap *idmap, struct bch_inode_info *dst_inode = to_bch_ei(dst_dentry->d_inode); struct bch_inode_unpacked dst_dir_u, src_dir_u; struct bch_inode_unpacked src_inode_u, dst_inode_u, *whiteout_inode_u; - struct btree_trans *trans; enum bch_rename_mode mode = flags & RENAME_EXCHANGE ? 
BCH_RENAME_EXCHANGE : dst_dentry->d_inode @@ -942,7 +928,7 @@ static int bch2_rename2(struct mnt_idmap *idmap, src_inode, dst_inode); - trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); ret = bch2_subvol_is_ro_trans(trans, src_dir->ei_inum.subvol) ?: bch2_subvol_is_ro_trans(trans, dst_dir->ei_inum.subvol); @@ -1028,8 +1014,6 @@ err_tx_restart: bch2_inode_update_after_write(trans, dst_inode, &dst_inode_u, ATTR_CTIME); err: - bch2_trans_put(trans); - bch2_fs_quota_transfer(c, src_inode, bch_qid(&src_inode->ei_inode), 1 << QTYP_PRJ, @@ -1097,7 +1081,6 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap, { struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch_qid qid; - struct btree_trans *trans; struct btree_iter inode_iter = {}; struct bch_inode_unpacked inode_u; struct posix_acl *acl = NULL; @@ -1105,7 +1088,7 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap, kgid_t kgid; int ret; - mutex_lock(&inode->ei_update_lock); + guard(mutex)(&inode->ei_update_lock); qid = inode->ei_qid; @@ -1122,9 +1105,9 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap, ret = bch2_fs_quota_transfer(c, inode, qid, ~0, KEY_TYPE_QUOTA_PREALLOC); if (ret) - goto err; + return ret; - trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); retry: bch2_trans_begin(trans); kfree(acl); @@ -1153,18 +1136,13 @@ btree_err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; if (unlikely(ret)) - goto err_trans; + return ret; bch2_inode_update_after_write(trans, inode, &inode_u, attr->ia_valid); if (acl) set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl); -err_trans: - bch2_trans_put(trans); -err: - mutex_unlock(&inode->ei_update_lock); - - return bch2_err_class(ret); + return 0; } static int bch2_getattr(struct mnt_idmap *idmap, @@ -1228,18 +1206,16 @@ static int bch2_setattr(struct mnt_idmap *idmap, { struct bch_inode_info *inode = to_bch_ei(dentry->d_inode); struct bch_fs *c = inode->v.i_sb->s_fs_info; - int ret; lockdep_assert_held(&inode->v.i_rwsem); - ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?: - setattr_prepare(idmap, dentry, iattr); - if (ret) - return ret; + int ret = bch2_subvol_is_ro(c, inode->ei_inum.subvol) ?: + setattr_prepare(idmap, dentry, iattr) ?: + (iattr->ia_valid & ATTR_SIZE + ? bchfs_truncate(idmap, inode, iattr) + : bch2_setattr_nonsize(idmap, inode, iattr)); - return iattr->ia_valid & ATTR_SIZE - ? 
bchfs_truncate(idmap, inode, iattr) - : bch2_setattr_nonsize(idmap, inode, iattr); + return bch2_err_class(ret); } static int bch2_tmpfile(struct mnt_idmap *idmap, @@ -1487,7 +1463,6 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, { struct bch_fs *c = vinode->i_sb->s_fs_info; struct bch_inode_info *ei = to_bch_ei(vinode); - struct btree_trans *trans; struct bch_fiemap_extent cur, prev; int ret = 0; @@ -1505,7 +1480,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, bch2_bkey_buf_init(&prev.kbuf); bkey_init(&prev.kbuf.k->k); - trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); while (start < end) { ret = lockrestart_do(trans, @@ -1538,7 +1513,6 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, ret = bch2_fill_extent(c, info, &prev); } err: - bch2_trans_put(trans); bch2_bkey_buf_exit(&cur.kbuf, c); bch2_bkey_buf_exit(&prev.kbuf, c); @@ -1968,7 +1942,6 @@ static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child struct bch_inode_info *inode = to_bch_ei(child->d_inode); struct bch_inode_info *dir = to_bch_ei(parent->d_inode); struct bch_fs *c = inode->v.i_sb->s_fs_info; - struct btree_trans *trans; struct btree_iter iter1; struct btree_iter iter2; struct bkey_s_c k; @@ -1983,8 +1956,7 @@ static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child if (!S_ISDIR(dir->v.i_mode)) return -EINVAL; - trans = bch2_trans_get(c); - + CLASS(btree_trans, trans)(c); bch2_trans_iter_init(trans, &iter1, BTREE_ID_dirents, POS(dir->ei_inode.bi_inum, 0), 0); bch2_trans_iter_init(trans, &iter2, BTREE_ID_dirents, @@ -2063,8 +2035,6 @@ err: bch2_trans_iter_exit(trans, &iter1); bch2_trans_iter_exit(trans, &iter2); - bch2_trans_put(trans); - return ret; } @@ -2148,12 +2118,11 @@ static int bch2_vfs_write_inode(struct inode *vinode, { struct bch_fs *c = vinode->i_sb->s_fs_info; struct bch_inode_info *inode = to_bch_ei(vinode); - int ret; - mutex_lock(&inode->ei_update_lock); - ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL, - ATTR_ATIME|ATTR_MTIME|ATTR_CTIME); - mutex_unlock(&inode->ei_update_lock); + guard(mutex)(&inode->ei_update_lock); + + int ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL, + ATTR_ATIME|ATTR_MTIME|ATTR_CTIME); return bch2_err_class(ret); } @@ -2200,9 +2169,8 @@ static void bch2_evict_inode(struct inode *vinode) bch2_inode_hash_remove(c, inode); } - mutex_lock(&c->vfs_inodes_lock); - list_del_init(&inode->ei_vfs_inode_list); - mutex_unlock(&c->vfs_inodes_lock); + scoped_guard(mutex, &c->vfs_inodes_lock) + list_del_init(&inode->ei_vfs_inode_list); } void bch2_evict_subvolume_inodes(struct bch_fs *c, snapshot_id_list *s) @@ -2352,16 +2320,14 @@ static int bch2_show_devname(struct seq_file *seq, struct dentry *root) static int bch2_show_options(struct seq_file *seq, struct dentry *root) { struct bch_fs *c = root->d_sb->s_fs_info; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_opts_to_text(&buf, c->opts, c, c->disk_sb.sb, OPT_MOUNT, OPT_HIDDEN, OPT_SHOW_MOUNT_STYLE); printbuf_nul_terminate(&buf); seq_printf(seq, ",%s", buf.buf); - int ret = buf.allocation_failure ? -ENOMEM : 0; - printbuf_exit(&buf); - return ret; + return buf.allocation_failure ? 
-ENOMEM : 0; } static void bch2_put_super(struct super_block *sb) @@ -2383,24 +2349,20 @@ static int bch2_freeze(struct super_block *sb) { struct bch_fs *c = sb->s_fs_info; - down_write(&c->state_lock); + guard(rwsem_write)(&c->state_lock); bch2_fs_read_only(c); - up_write(&c->state_lock); return 0; } static int bch2_unfreeze(struct super_block *sb) { struct bch_fs *c = sb->s_fs_info; - int ret; if (test_bit(BCH_FS_emergency_ro, &c->flags)) return 0; - down_write(&c->state_lock); - ret = bch2_fs_read_write(c); - up_write(&c->state_lock); - return ret; + guard(rwsem_write)(&c->state_lock); + return bch2_fs_read_write(c); } static const struct super_operations bch_super_operations = { @@ -2671,7 +2633,7 @@ static int bch2_fs_reconfigure(struct fs_context *fc) opt_set(opts->opts, read_only, (fc->sb_flags & SB_RDONLY) != 0); if (opts->opts.read_only != c->opts.read_only) { - down_write(&c->state_lock); + guard(rwsem_write)(&c->state_lock); if (opts->opts.read_only) { bch2_fs_read_only(c); @@ -2681,22 +2643,18 @@ static int bch2_fs_reconfigure(struct fs_context *fc) ret = bch2_fs_read_write(c); if (ret) { bch_err(c, "error going rw: %i", ret); - up_write(&c->state_lock); - ret = -EINVAL; - goto err; + return -EINVAL; } sb->s_flags &= ~SB_RDONLY; } c->opts.read_only = opts->opts.read_only; - - up_write(&c->state_lock); } if (opt_defined(opts->opts, errors)) c->opts.errors = opts->opts.errors; -err: + return bch2_err_class(ret); } diff --git a/libbcachefs/fsck.c b/libbcachefs/fsck.c index 9d06f32b..df0aa252 100644 --- a/libbcachefs/fsck.c +++ b/libbcachefs/fsck.c @@ -53,10 +53,9 @@ static int dirent_points_to_inode(struct bch_fs *c, { int ret = dirent_points_to_inode_nowarn(c, dirent, inode); if (ret) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); dirent_inode_mismatch_msg(&buf, c, dirent, inode); bch_warn(c, "%s", buf.buf); - printbuf_exit(&buf); } return ret; } @@ -253,14 +252,13 @@ create_lostfound: * XXX: we could have a nicer log message here if we had a nice way to * walk backpointers to print a path */ - struct printbuf path = PRINTBUF; + CLASS(printbuf, path)(); ret = bch2_inum_to_path(trans, root_inum, &path); if (ret) goto err; bch_notice(c, "creating %s/lost+found in subvol %llu snapshot %u", path.buf, root_inum.subvol, snapshot); - printbuf_exit(&path); u64 now = bch2_current_time(c); u64 cpu = raw_smp_processor_id(); @@ -455,7 +453,7 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * * whiteouts for the dirent we just created. 
*/ if (!inode->bi_subvol && bch2_snapshot_is_leaf(c, inode->bi_snapshot) <= 0) { - snapshot_id_list whiteouts_done; + CLASS(snapshot_id_list, whiteouts_done)(); struct btree_iter iter; struct bkey_s_c k; @@ -499,7 +497,6 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * break; } } - darray_exit(&whiteouts_done); bch2_trans_iter_exit(trans, &iter); } @@ -683,11 +680,15 @@ static inline void snapshots_seen_exit(struct snapshots_seen *s) darray_exit(&s->ids); } -static inline void snapshots_seen_init(struct snapshots_seen *s) +static inline struct snapshots_seen snapshots_seen_init(void) { - memset(s, 0, sizeof(*s)); + return (struct snapshots_seen) {}; } +DEFINE_CLASS(snapshots_seen, struct snapshots_seen, + snapshots_seen_exit(&_T), + snapshots_seen_init(), void) + static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id) { u32 *i; @@ -815,9 +816,13 @@ static void inode_walker_exit(struct inode_walker *w) static struct inode_walker inode_walker_init(void) { - return (struct inode_walker) { 0, }; + return (struct inode_walker) {}; } +DEFINE_CLASS(inode_walker, struct inode_walker, + inode_walker_exit(&_T), + inode_walker_init(), void) + static int add_inode(struct bch_fs *c, struct inode_walker *w, struct bkey_s_c inode) { @@ -917,7 +922,7 @@ lookup_inode_for_snapshot(struct btree_trans *trans, struct inode_walker *w, str if (!i) return NULL; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; if (fsck_err_on(k.k->p.snapshot != i->inode.bi_snapshot, @@ -967,10 +972,8 @@ lookup_inode_for_snapshot(struct btree_trans *trans, struct inode_walker *w, str goto fsck_err; } - printbuf_exit(&buf); return i; fsck_err: - printbuf_exit(&buf); return ERR_PTR(ret); } @@ -1004,27 +1007,25 @@ int bch2_fsck_update_backpointers(struct btree_trans *trans, return 0; struct bkey_i_dirent *d = bkey_i_to_dirent(new); - struct inode_walker target = inode_walker_init(); - int ret = 0; + CLASS(inode_walker, target)(); if (d->v.d_type == DT_SUBVOL) { bch_err(trans->c, "%s does not support DT_SUBVOL", __func__); - ret = -BCH_ERR_fsck_repair_unimplemented; + return bch_err_throw(trans->c, fsck_repair_unimplemented); } else { - ret = get_visible_inodes(trans, &target, s, le64_to_cpu(d->v.d_inum)); + int ret = get_visible_inodes(trans, &target, s, le64_to_cpu(d->v.d_inum)); if (ret) - goto err; + return ret; darray_for_each(target.inodes, i) { i->inode.bi_dir_offset = d->k.p.offset; ret = __bch2_fsck_write_inode(trans, &i->inode); if (ret) - goto err; + return ret; } + + return 0; } -err: - inode_walker_exit(&target); - return ret; } static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans, @@ -1056,7 +1057,7 @@ static int check_inode_dirent_inode(struct btree_trans *trans, bool *write_inode) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); u32 inode_snapshot = inode->bi_snapshot; struct btree_iter dirent_iter = {}; @@ -1106,7 +1107,6 @@ out: ret = 0; fsck_err: bch2_trans_iter_exit(trans, &dirent_iter); - printbuf_exit(&buf); bch_err_fn(c, ret); return ret; } @@ -1118,7 +1118,7 @@ static int check_inode(struct btree_trans *trans, struct snapshots_seen *s) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); struct bch_inode_unpacked u; bool do_update = false; int ret; @@ -1234,7 +1234,7 @@ static int check_inode(struct btree_trans *trans, */ ret = check_inode_deleted_list(trans, k.k->p); if (ret < 0) - goto err_noprint; + return ret; 
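/*
 * [Editor's sketch] The DEFINE_CLASS()/CLASS() pairs introduced in these
 * fsck hunks come from the kernel's <linux/cleanup.h>. Assuming it matches
 * the upstream header (simplified from memory - check cleanup.h for the
 * authoritative definition), the machinery is roughly:
 *
 *	#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...)	\
 *	typedef _type class_##_name##_t;				\
 *	static inline void class_##_name##_destructor(_type *p)	\
 *	{ _type _T = *p; _exit; }					\
 *	static inline _type class_##_name##_constructor(_init_args)	\
 *	{ _type t = _init; return t; }
 *
 *	#define CLASS(_name, var)					\
 *		class_##_name##_t var					\
 *			__cleanup(class_##_name##_destructor) =		\
 *			class_##_name##_constructor
 *
 * So CLASS(snapshots_seen, s)() declares s, initializes it with
 * snapshots_seen_init(), and arranges for snapshots_seen_exit(&s) to run
 * automatically on every scope exit - which is what lets the converted
 * functions drop their manual *_exit() calls and goto-based unwinding.
 */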
fsck_err_on(!ret, trans, unlinked_inode_not_on_deleted_list, @@ -1255,7 +1255,7 @@ static int check_inode(struct btree_trans *trans, u.bi_inum, u.bi_snapshot)) { ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot); bch_err_msg(c, ret, "in fsck deleting inode"); - goto err_noprint; + return ret; } ret = 0; } @@ -1316,33 +1316,26 @@ do_update: ret = __bch2_fsck_write_inode(trans, &u); bch_err_msg(c, ret, "in fsck updating inode"); if (ret) - goto err_noprint; + return ret; } err: fsck_err: bch_err_fn(c, ret); -err_noprint: - printbuf_exit(&buf); return ret; } int bch2_check_inodes(struct bch_fs *c) { struct bch_inode_unpacked snapshot_root = {}; - struct snapshots_seen s; - snapshots_seen_init(&s); + CLASS(btree_trans, trans)(c); + CLASS(snapshots_seen, s)(); - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, + return for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_inode(trans, &iter, k, &snapshot_root, &s))); - - snapshots_seen_exit(&s); - bch_err_fn(c, ret); - return ret; + check_inode(trans, &iter, k, &snapshot_root, &s)); } static int find_oldest_inode_needs_reattach(struct btree_trans *trans, @@ -1390,7 +1383,7 @@ static int check_unreachable_inode(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; if (!bkey_is_inode(k.k)) @@ -1414,7 +1407,6 @@ static int check_unreachable_inode(struct btree_trans *trans, buf.buf))) ret = reattach_inode(trans, &inode); fsck_err: - printbuf_exit(&buf); return ret; } @@ -1430,14 +1422,12 @@ fsck_err: */ int bch2_check_unreachable_inodes(struct bch_fs *c) { - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_unreachable_inode(trans, &iter, k))); - bch_err_fn(c, ret); - return ret; + check_unreachable_inode(trans, &iter, k)); } static inline bool btree_matches_i_mode(enum btree_id btree, unsigned mode) @@ -1461,7 +1451,7 @@ static int check_key_has_inode(struct btree_trans *trans, struct bkey_s_c k) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); struct btree_iter iter2 = {}; int ret = PTR_ERR_OR_ZERO(i); if (ret) @@ -1557,7 +1547,6 @@ out: err: fsck_err: bch2_trans_iter_exit(trans, &iter2); - printbuf_exit(&buf); bch_err_fn(c, ret); return ret; delete: @@ -1627,23 +1616,28 @@ static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_wal if (i->inode.bi_sectors == i->count) continue; + CLASS(printbuf, buf)(); + lockrestart_do(trans, + bch2_inum_snapshot_to_path(trans, + i->inode.bi_inum, + i->inode.bi_snapshot, NULL, &buf)); + count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->inode.bi_snapshot); if (w->recalculate_sums) i->count = count2; if (i->count != count2) { - bch_err_ratelimited(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu", - w->last_pos.inode, i->inode.bi_snapshot, i->count, count2); + bch_err_ratelimited(c, "fsck counted i_sectors wrong: got %llu should be %llu\n%s", + i->count, count2, buf.buf); i->count = count2; } if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty) && i->inode.bi_sectors != i->count, trans, inode_i_sectors_wrong, - 
"inode %llu:%u has incorrect i_sectors: got %llu, should be %llu", - w->last_pos.inode, i->inode.bi_snapshot, - i->inode.bi_sectors, i->count)) { + "incorrect i_sectors: got %llu, should be %llu\n%s", + i->inode.bi_sectors, i->count, buf.buf)) { i->inode.bi_sectors = i->count; ret = bch2_fsck_write_inode(trans, &i->inode); if (ret) @@ -1686,11 +1680,15 @@ static void extent_ends_exit(struct extent_ends *extent_ends) darray_exit(&extent_ends->e); } -static void extent_ends_init(struct extent_ends *extent_ends) +static struct extent_ends extent_ends_init(void) { - memset(extent_ends, 0, sizeof(*extent_ends)); + return (struct extent_ends) {}; } +DEFINE_CLASS(extent_ends, struct extent_ends, + extent_ends_exit(&_T), + extent_ends_init(), void) + static int extent_ends_at(struct bch_fs *c, struct extent_ends *extent_ends, struct snapshots_seen *seen, @@ -1730,7 +1728,7 @@ static int overlapping_extents_found(struct btree_trans *trans, struct extent_end *extent_end) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); struct btree_iter iter1, iter2 = {}; struct bkey_s_c k1, k2; int ret; @@ -1836,7 +1834,6 @@ fsck_err: err: bch2_trans_iter_exit(trans, &iter2); bch2_trans_iter_exit(trans, &iter1); - printbuf_exit(&buf); return ret; } @@ -1893,11 +1890,10 @@ static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *it bkey_for_each_crc(k.k, ptrs, crc, i) if (crc_is_encoded(crc) && crc.uncompressed_size > encoded_extent_max_sectors) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, k); bch_err(c, "overbig encoded extent, please report this:\n %s", buf.buf); - printbuf_exit(&buf); } return 0; @@ -1911,7 +1907,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter, struct disk_reservation *res) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; ret = bch2_check_key_has_snapshot(trans, iter, k); @@ -2004,7 +2000,6 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter, out: err: fsck_err: - printbuf_exit(&buf); bch_err_fn(c, ret); return ret; } @@ -2015,49 +2010,41 @@ fsck_err: */ int bch2_check_extents(struct bch_fs *c) { - struct inode_walker w = inode_walker_init(); - struct snapshots_seen s; - struct extent_ends extent_ends; struct disk_reservation res = { 0 }; - snapshots_seen_init(&s); - extent_ends_init(&extent_ends); + CLASS(btree_trans, trans)(c); + CLASS(snapshots_seen, s)(); + CLASS(inode_walker, w)(); + CLASS(extent_ends, extent_ends)(); - int ret = bch2_trans_run(c, - for_each_btree_key(trans, iter, BTREE_ID_extents, + int ret = for_each_btree_key(trans, iter, BTREE_ID_extents, POS(BCACHEFS_ROOT_INO, 0), BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({ bch2_disk_reservation_put(c, &res); check_extent(trans, &iter, k, &w, &s, &extent_ends, &res) ?: check_extent_overbig(trans, &iter, k); })) ?: - check_i_sectors_notnested(trans, &w)); + check_i_sectors_notnested(trans, &w); bch2_disk_reservation_put(c, &res); - extent_ends_exit(&extent_ends); - inode_walker_exit(&w); - snapshots_seen_exit(&s); - - bch_err_fn(c, ret); return ret; } int bch2_check_indirect_extents(struct bch_fs *c) { + CLASS(btree_trans, trans)(c); struct disk_reservation res = { 0 }; - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, BTREE_ID_reflink, + int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_reflink, POS_MIN, BTREE_ITER_prefetch, k, &res, NULL, BCH_TRANS_COMMIT_no_enospc, ({ 
bch2_disk_reservation_put(c, &res); check_extent_overbig(trans, &iter, k); - }))); + })); bch2_disk_reservation_put(c, &res); - bch_err_fn(c, ret); return ret; } @@ -2150,7 +2137,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter * u32 parent_snapshot; u32 new_parent_subvol = 0; u64 parent_inum; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; ret = subvol_lookup(trans, parent_subvol, &parent_snapshot, &parent_inum); @@ -2274,7 +2261,6 @@ out: err: fsck_err: bch2_trans_iter_exit(trans, &subvol_iter); - printbuf_exit(&buf); return ret; } @@ -2288,39 +2274,37 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, { struct bch_fs *c = trans->c; struct inode_walker_entry *i; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; ret = bch2_check_key_has_snapshot(trans, iter, k); - if (ret) { - ret = ret < 0 ? ret : 0; - goto out; - } + if (ret) + return ret < 0 ? ret : 0; ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p); if (ret) - goto err; + return ret; if (k.k->type == KEY_TYPE_whiteout) - goto out; + return 0; if (dir->last_pos.inode != k.k->p.inode && dir->have_inodes) { ret = check_subdir_dirents_count(trans, dir); if (ret) - goto err; + return ret; } i = walk_inode(trans, dir, k); ret = PTR_ERR_OR_ZERO(i); - if (ret < 0) - goto err; + if (ret) + return ret; ret = check_key_has_inode(trans, iter, dir, i, k); if (ret) - goto err; + return ret; if (!i || i->whiteout) - goto out; + return 0; if (dir->first_this_inode) *hash_info = bch2_hash_info_init(c, &i->inode); @@ -2331,15 +2315,11 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, ret = bch2_str_hash_check_key(trans, s, &bch2_dirent_hash_desc, hash_info, iter, k, need_second_pass); if (ret < 0) - goto err; - if (ret) { - /* dirent has been deleted */ - ret = 0; - goto out; - } - + return ret; + if (ret) + return 0; /* dirent has been deleted */ if (k.k->type != KEY_TYPE_dirent) - goto out; + return 0; struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); @@ -2364,13 +2344,13 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, d.v->d_type, &name, NULL, target); ret = PTR_ERR_OR_ZERO(new_d); if (ret) - goto out; + return ret; new_d->k.p.inode = d.k->p.inode; new_d->k.p.snapshot = d.k->p.snapshot; struct btree_iter dup_iter = {}; - ret = bch2_hash_delete_at(trans, + return bch2_hash_delete_at(trans, bch2_dirent_hash_desc, hash_info, iter, BTREE_UPDATE_internal_snapshot_node) ?: bch2_str_hash_repair_key(trans, s, @@ -2378,17 +2358,16 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, iter, bkey_i_to_s_c(&new_d->k_i), &dup_iter, bkey_s_c_null, need_second_pass); - goto out; } if (d.v->d_type == DT_SUBVOL) { ret = check_dirent_to_subvol(trans, iter, d); if (ret) - goto err; + return ret; } else { ret = get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum)); if (ret) - goto err; + return ret; if (!target->inodes.nr) { ret = maybe_reconstruct_inum(trans, le64_to_cpu(d.v->d_inum), @@ -2405,13 +2384,13 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, buf.buf))) { ret = bch2_fsck_remove_dirent(trans, d.k->p); if (ret) - goto err; + return ret; } darray_for_each(target->inodes, i) { ret = bch2_check_dirent_target(trans, iter, d, &i->inode, true); if (ret) - goto err; + return ret; } darray_for_each(target->deletes, i) @@ -2434,24 +2413,27 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, 
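/*
 * [Editor's sketch] Why the goto-based unwinding throughout these hunks
 * can become plain returns: a CLASS() variable's destructor fires on
 * *every* scope exit, early returns included. Hypothetical minimal
 * example (sketch_fail() is invented for illustration):
 *
 *	static void sketch(struct bch_fs *c)
 *	{
 *		CLASS(printbuf, buf)();		// printbuf_exit(&buf) armed
 *
 *		if (sketch_fail())
 *			return;			// destructor runs here
 *
 *		bch_info(c, "%s", buf.buf);
 *	}					// ...and here
 */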
BTREE_UPDATE_internal_snapshot_node); bch2_trans_iter_exit(trans, &delete_iter); if (ret) - goto err; + return ret; } } + /* + * Cannot access key values after doing a transaction commit without + * revalidating: + */ + bool have_dir = d.v->d_type == DT_DIR; + ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); if (ret) - goto err; + return ret; for_each_visible_inode(c, s, dir, d.k->p.snapshot, i) { - if (d.v->d_type == DT_DIR) + if (have_dir) i->count++; i->i_size += bkey_bytes(d.k); } -out: -err: fsck_err: - printbuf_exit(&buf); return ret; } @@ -2461,23 +2443,21 @@ fsck_err: */ int bch2_check_dirents(struct bch_fs *c) { - struct inode_walker dir = inode_walker_init(); - struct inode_walker target = inode_walker_init(); - struct snapshots_seen s; struct bch_hash_info hash_info; + CLASS(btree_trans, trans)(c); + CLASS(snapshots_seen, s)(); + CLASS(inode_walker, dir)(); + CLASS(inode_walker, target)(); bool need_second_pass = false, did_second_pass = false; int ret; - - snapshots_seen_init(&s); again: - ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, BTREE_ID_dirents, + ret = for_each_btree_key_commit(trans, iter, BTREE_ID_dirents, POS(BCACHEFS_ROOT_INO, 0), BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s, &need_second_pass)) ?: - check_subdir_count_notnested(trans, &dir)); + check_subdir_count_notnested(trans, &dir); if (!ret && need_second_pass && !did_second_pass) { bch_info(c, "check_dirents requires second pass"); @@ -2490,10 +2470,6 @@ again: ret = -EINVAL; } - snapshots_seen_exit(&s); - inode_walker_exit(&dir); - inode_walker_exit(&target); - bch_err_fn(c, ret); return ret; } @@ -2536,21 +2512,17 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter, */ int bch2_check_xattrs(struct bch_fs *c) { - struct inode_walker inode = inode_walker_init(); struct bch_hash_info hash_info; - int ret = 0; + CLASS(btree_trans, trans)(c); + CLASS(inode_walker, inode)(); - ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, + int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, POS(BCACHEFS_ROOT_INO, 0), BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_xattr(trans, &iter, k, &hash_info, &inode))); - - inode_walker_exit(&inode); - bch_err_fn(c, ret); + check_xattr(trans, &iter, k, &hash_info, &inode)); return ret; } @@ -2615,18 +2587,17 @@ fsck_err: /* Get root directory, create if it doesn't exist: */ int bch2_check_root(struct bch_fs *c) { - int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_root_trans(trans)); - bch_err_fn(c, ret); - return ret; + CLASS(btree_trans, trans)(c); + return commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, + check_root_trans(trans)); } static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) { struct bch_fs *c = trans->c; struct btree_iter parent_iter = {}; - darray_u32 subvol_path = {}; - struct printbuf buf = PRINTBUF; + CLASS(darray_u32, subvol_path)(); + CLASS(printbuf, buf)(); int ret = 0; if (k.k->type != KEY_TYPE_subvolume) @@ -2686,21 +2657,17 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, } fsck_err: err: - printbuf_exit(&buf); - darray_exit(&subvol_path); bch2_trans_iter_exit(trans, &parent_iter); return ret; } int bch2_check_subvolume_structure(struct bch_fs *c) { - int ret = bch2_trans_run(c, - 
for_each_btree_key_commit(trans, iter, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_commit(trans, iter, BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_subvol_path(trans, &iter, k))); - bch_err_fn(c, ret); - return ret; + check_subvol_path(trans, &iter, k)); } static int bch2_bi_depth_renumber_one(struct btree_trans *trans, @@ -2751,8 +2718,8 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) { struct bch_fs *c = trans->c; struct btree_iter inode_iter = {}; - darray_u64 path = {}; - struct printbuf buf = PRINTBUF; + CLASS(darray_u64, path)(); + CLASS(printbuf, buf)(); u32 snapshot = inode_k.k->p.snapshot; bool redo_bi_depth = false; u32 min_bi_depth = U32_MAX; @@ -2858,8 +2825,6 @@ static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) out: fsck_err: bch2_trans_iter_exit(trans, &inode_iter); - darray_exit(&path); - printbuf_exit(&buf); bch_err_fn(c, ret); return ret; } @@ -2870,8 +2835,8 @@ fsck_err: */ int bch2_check_directory_structure(struct bch_fs *c) { - int ret = bch2_trans_run(c, - for_each_btree_key_reverse_commit(trans, iter, BTREE_ID_inodes, POS_MIN, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_reverse_commit(trans, iter, BTREE_ID_inodes, POS_MIN, BTREE_ITER_intent| BTREE_ITER_prefetch| BTREE_ITER_all_snapshots, k, @@ -2883,10 +2848,7 @@ int bch2_check_directory_structure(struct bch_fs *c) continue; check_path_loop(trans, k); - }))); - - bch_err_fn(c, ret); - return ret; + })); } struct nlink_table { @@ -2970,8 +2932,8 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c, struct nlink_table *t, u64 start, u64 *end) { - int ret = bch2_trans_run(c, - for_each_btree_key(trans, iter, BTREE_ID_inodes, + CLASS(btree_trans, trans)(c); + int ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, start), BTREE_ITER_intent| BTREE_ITER_prefetch| @@ -3006,7 +2968,7 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c, break; } 0; - }))); + })); bch_err_fn(c, ret); return ret; @@ -3016,12 +2978,10 @@ noinline_for_stack static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links, u64 range_start, u64 range_end) { - struct snapshots_seen s; + CLASS(btree_trans, trans)(c); + CLASS(snapshots_seen, s)(); - snapshots_seen_init(&s); - - int ret = bch2_trans_run(c, - for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN, + int ret = for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN, BTREE_ITER_intent| BTREE_ITER_prefetch| BTREE_ITER_all_snapshots, k, ({ @@ -3038,9 +2998,7 @@ static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links le64_to_cpu(d.v->d_inum), d.k->p.snapshot); } 0; - }))); - - snapshots_seen_exit(&s); + })); bch_err_fn(c, ret); return ret; @@ -3094,14 +3052,14 @@ static int check_nlinks_update_hardlinks(struct bch_fs *c, struct nlink_table *links, u64 range_start, u64 range_end) { + CLASS(btree_trans, trans)(c); size_t idx = 0; - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, + int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS(0, range_start), BTREE_ITER_intent|BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end))); + check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)); if (ret < 0) { bch_err(c, "error in fsck walking inodes: %s", bch2_err_str(ret)); return ret; @@ -3140,7 +3098,6 @@ int 
bch2_check_nlinks(struct bch_fs *c) } while (next_iter_range_start != U64_MAX); kvfree(links.d); - bch_err_fn(c, ret); return ret; } @@ -3175,15 +3132,13 @@ int bch2_fix_reflink_p(struct bch_fs *c) if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) return 0; - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_commit(trans, iter, BTREE_ID_extents, POS_MIN, BTREE_ITER_intent|BTREE_ITER_prefetch| BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - fix_reflink_p_key(trans, &iter, k))); - bch_err_fn(c, ret); - return ret; + fix_reflink_p_key(trans, &iter, k)); } #ifndef NO_BCACHEFS_CHARDEV diff --git a/libbcachefs/inode.c b/libbcachefs/inode.c index 307fb0c9..4a9725f3 100644 --- a/libbcachefs/inode.c +++ b/libbcachefs/inode.c @@ -417,7 +417,8 @@ int bch2_inode_find_by_inum_trans(struct btree_trans *trans, int bch2_inode_find_by_inum(struct bch_fs *c, subvol_inum inum, struct bch_inode_unpacked *inode) { - return bch2_trans_do(c, bch2_inode_find_by_inum_trans(trans, inum, inode)); + CLASS(btree_trans, trans)(c); + return lockrestart_do(trans, bch2_inode_find_by_inum_trans(trans, inum, inode)); } int bch2_inode_find_snapshot_root(struct btree_trans *trans, u64 inum, @@ -1132,7 +1133,7 @@ err: int bch2_inode_rm(struct bch_fs *c, subvol_inum inum) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct btree_iter iter = {}; struct bkey_s_c k; struct bch_inode_unpacked inode; @@ -1141,7 +1142,7 @@ int bch2_inode_rm(struct bch_fs *c, subvol_inum inum) ret = lockrestart_do(trans, may_delete_deleted_inum(trans, inum, &inode)); if (ret) - goto err2; + return ret; /* * If this was a directory, there shouldn't be any real dirents left - @@ -1156,7 +1157,7 @@ int bch2_inode_rm(struct bch_fs *c, subvol_inum inum) : bch2_inode_delete_keys(trans, inum, BTREE_ID_dirents)) ?: bch2_inode_delete_keys(trans, inum, BTREE_ID_xattrs); if (ret) - goto err2; + return ret; retry: bch2_trans_begin(trans); @@ -1188,12 +1189,9 @@ err: goto retry; if (ret) - goto err2; + return ret; - ret = delete_ancestor_snapshot_inodes(trans, SPOS(0, inum.inum, snapshot)); -err2: - bch2_trans_put(trans); - return ret; + return delete_ancestor_snapshot_inodes(trans, SPOS(0, inum.inum, snapshot)); } int bch2_inode_nlink_inc(struct bch_inode_unpacked *bi) @@ -1413,7 +1411,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, struct bch_fs *c = trans->c; struct btree_iter inode_iter; struct bkey_s_c k; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret; k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_cached); @@ -1506,7 +1504,6 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos, out: fsck_err: bch2_trans_iter_exit(trans, &inode_iter); - printbuf_exit(&buf); return ret; delete: ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, pos, false); @@ -1524,29 +1521,23 @@ static int may_delete_deleted_inum(struct btree_trans *trans, subvol_inum inum, int bch2_delete_dead_inodes(struct bch_fs *c) { - struct btree_trans *trans = bch2_trans_get(c); - int ret; - + CLASS(btree_trans, trans)(c); /* * if we ran check_inodes() unlinked inodes will have already been * cleaned up but the write buffer will be out of sync; therefore we * always need a write buffer flush - */ - ret = bch2_btree_write_buffer_flush_sync(trans); - if (ret) - goto err; - - /* + * * Weird transaction restart handling here
because on successful delete, * bch2_inode_rm_snapshot() will return a nested transaction restart, * but we can't retry because the btree write buffer won't have been * flushed and we'd spin: */ - ret = for_each_btree_key_commit(trans, iter, BTREE_ID_deleted_inodes, POS_MIN, + return bch2_btree_write_buffer_flush_sync(trans) ?: + for_each_btree_key_commit(trans, iter, BTREE_ID_deleted_inodes, POS_MIN, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ struct bch_inode_unpacked inode; - ret = may_delete_deleted_inode(trans, k.k->p, &inode, true); + int ret = may_delete_deleted_inode(trans, k.k->p, &inode, true); if (ret > 0) { bch_verbose_ratelimited(c, "deleting unlinked inode %llu:%u", k.k->p.offset, k.k->p.snapshot); @@ -1567,8 +1558,4 @@ int bch2_delete_dead_inodes(struct bch_fs *c) ret; })); -err: - bch2_trans_put(trans); - bch_err_fn(c, ret); - return ret; } diff --git a/libbcachefs/io_misc.c b/libbcachefs/io_misc.c index 07023667..5d6681c0 100644 --- a/libbcachefs/io_misc.c +++ b/libbcachefs/io_misc.c @@ -114,12 +114,11 @@ err: if (!ret && sectors_allocated) bch2_increment_clock(c, sectors_allocated, WRITE); if (should_print_err(ret)) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); lockrestart_do(trans, bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter->pos.offset << 9)); prt_printf(&buf, "fallocate error: %s", bch2_err_str(ret)); bch_err_ratelimited(c, "%s", buf.buf); - printbuf_exit(&buf); } err_noprint: bch2_open_buckets_put(c, &open_buckets); @@ -222,23 +221,18 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter, int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end, s64 *i_sectors_delta) { - struct btree_trans *trans = bch2_trans_get(c); - struct btree_iter iter; - int ret; + CLASS(btree_trans, trans)(c); + struct btree_iter iter; bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, POS(inum.inum, start), BTREE_ITER_intent); - ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta); + int ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta); bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); - - if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) - ret = 0; - return ret; + return bch2_err_matches(ret, BCH_ERR_transaction_restart) ? 
0 : ret; } /* truncate: */ @@ -319,17 +313,13 @@ int bch2_truncate(struct bch_fs *c, subvol_inum inum, u64 new_i_size, u64 *i_sec * snapshot while they're in progress, then crashing, will result in the * resume only proceeding in one of the snapshots */ - down_read(&c->snapshot_create_lock); - struct btree_trans *trans = bch2_trans_get(c); + guard(rwsem_read)(&c->snapshot_create_lock); + CLASS(btree_trans, trans)(c); int ret = bch2_logged_op_start(trans, &op.k_i); if (ret) - goto out; + return ret; ret = __bch2_resume_logged_op_truncate(trans, &op.k_i, i_sectors_delta); ret = bch2_logged_op_finish(trans, &op.k_i) ?: ret; -out: - bch2_trans_put(trans); - up_read(&c->snapshot_create_lock); - return ret; } @@ -555,16 +545,12 @@ int bch2_fcollapse_finsert(struct bch_fs *c, subvol_inum inum, * snapshot while they're in progress, then crashing, will result in the * resume only proceeding in one of the snapshots */ - down_read(&c->snapshot_create_lock); - struct btree_trans *trans = bch2_trans_get(c); + guard(rwsem_read)(&c->snapshot_create_lock); + CLASS(btree_trans, trans)(c); int ret = bch2_logged_op_start(trans, &op.k_i); if (ret) - goto out; + return ret; ret = __bch2_resume_logged_op_finsert(trans, &op.k_i, i_sectors_delta); ret = bch2_logged_op_finish(trans, &op.k_i) ?: ret; -out: - bch2_trans_put(trans); - up_read(&c->snapshot_create_lock); - return ret; } diff --git a/libbcachefs/io_read.c b/libbcachefs/io_read.c index e854adea..b8ccd8c9 100644 --- a/libbcachefs/io_read.c +++ b/libbcachefs/io_read.c @@ -437,7 +437,8 @@ static int bch2_read_err_msg_trans(struct btree_trans *trans, struct printbuf *o static void bch2_read_err_msg(struct bch_fs *c, struct printbuf *out, struct bch_read_bio *rbio, struct bpos read_pos) { - bch2_trans_run(c, bch2_read_err_msg_trans(trans, out, rbio, read_pos)); + CLASS(btree_trans, trans)(c); + bch2_read_err_msg_trans(trans, out, rbio, read_pos); } enum rbio_context { @@ -638,7 +639,7 @@ static void bch2_rbio_retry(struct work_struct *work) }; struct bch_io_failures failed = { .nr = 0 }; - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct bkey_buf sk; bch2_bkey_buf_init(&sk); @@ -680,7 +681,7 @@ static void bch2_rbio_retry(struct work_struct *work) } if (failed.nr || ret) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); lockrestart_do(trans, @@ -708,12 +709,10 @@ static void bch2_rbio_retry(struct work_struct *work) bch2_io_failures_to_text(&buf, c, &failed); bch2_print_str_ratelimited(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); } bch2_rbio_done(rbio); bch2_bkey_buf_exit(&sk, c); - bch2_trans_put(trans); } static void bch2_rbio_error(struct bch_read_bio *rbio, @@ -801,8 +800,9 @@ out: static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio) { - bch2_trans_commit_do(rbio->c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - __bch2_rbio_narrow_crcs(trans, rbio)); + CLASS(btree_trans, trans)(rbio->c); + commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, + __bch2_rbio_narrow_crcs(trans, rbio)); } static void bch2_read_decompress_err(struct work_struct *work) @@ -810,7 +810,7 @@ static void bch2_read_decompress_err(struct work_struct *work) struct bch_read_bio *rbio = container_of(work, struct bch_read_bio, work); struct bch_fs *c = rbio->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); prt_str(&buf, "decompression error"); @@ -822,7 +822,6 @@ static void bch2_read_decompress_err(struct work_struct *work) 
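/*
 * [Editor's note] guard() and scoped_guard(), used throughout this patch,
 * are the lock-flavoured relatives of CLASS(): assuming the usual
 * <linux/cleanup.h> definitions, guard(mutex)(&m) acquires the mutex and
 * releases it automatically at scope exit, while
 * scoped_guard(spinlock, &l) { ... } holds the lock for just the braced
 * block. guard(printbuf_atomic)(&buf) looks like a bcachefs-local guard
 * that bumps buf.atomic on entry and drops it on exit, replacing the
 * manual ++/-- pairs deleted in the journal hunks below - an assumption
 * on my part, not something stated in the patch.
 */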
bch_err_ratelimited(c, "%s", buf.buf); bch2_rbio_error(rbio, -BCH_ERR_data_read_decompress_err, BLK_STS_IOERR); - printbuf_exit(&buf); } static void bch2_read_decrypt_err(struct work_struct *work) @@ -830,7 +829,7 @@ static void bch2_read_decrypt_err(struct work_struct *work) struct bch_read_bio *rbio = container_of(work, struct bch_read_bio, work); struct bch_fs *c = rbio->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); prt_str(&buf, "decrypt error"); @@ -842,7 +841,6 @@ static void bch2_read_decrypt_err(struct work_struct *work) bch_err_ratelimited(c, "%s", buf.buf); bch2_rbio_error(rbio, -BCH_ERR_data_read_decrypt_err, BLK_STS_IOERR); - printbuf_exit(&buf); } /* Inner part that may run in process context */ @@ -1024,7 +1022,7 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, { struct bch_fs *c = trans->c; struct btree_iter iter; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret; bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, @@ -1061,7 +1059,6 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, bch2_fs_inconsistent(c, "%s", buf.buf); bch2_trans_iter_exit(trans, &iter); - printbuf_exit(&buf); } int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig, @@ -1113,25 +1110,22 @@ retry_pick: trace_and_count(c, io_read_fail_and_poison, &orig->bio); } - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_read_err_msg_trans(trans, &buf, orig, read_pos); prt_printf(&buf, "%s\n ", bch2_err_str(ret)); bch2_bkey_val_to_text(&buf, c, k); - bch_err_ratelimited(c, "%s", buf.buf); - printbuf_exit(&buf); goto err; } if (unlikely(bch2_csum_type_is_encryption(pick.crc.csum_type)) && !c->chacha20_key_set) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_read_err_msg_trans(trans, &buf, orig, read_pos); prt_printf(&buf, "attempting to read encrypted data without encryption key\n "); bch2_bkey_val_to_text(&buf, c, k); bch_err_ratelimited(c, "%s", buf.buf); - printbuf_exit(&buf); ret = bch_err_throw(c, data_read_no_encryption_key); goto err; } @@ -1500,13 +1494,12 @@ err: if (unlikely(ret)) { if (ret != -BCH_ERR_extent_poisoned) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); lockrestart_do(trans, bch2_inum_offset_err_msg_trans(trans, &buf, inum, bvec_iter.bi_sector << 9)); prt_printf(&buf, "data read error: %s", bch2_err_str(ret)); bch_err_ratelimited(c, "%s", buf.buf); - printbuf_exit(&buf); } rbio->bio.bi_status = BLK_STS_IOERR; diff --git a/libbcachefs/io_read.h b/libbcachefs/io_read.h index 8fef4e47..9d63d591 100644 --- a/libbcachefs/io_read.h +++ b/libbcachefs/io_read.h @@ -165,11 +165,11 @@ static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, rbio->subvol = inum.subvol; - bch2_trans_run(c, - __bch2_read(trans, rbio, rbio->bio.bi_iter, inum, NULL, NULL, - BCH_READ_retry_if_stale| - BCH_READ_may_promote| - BCH_READ_user_mapped)); + CLASS(btree_trans, trans)(c); + __bch2_read(trans, rbio, rbio->bio.bi_iter, inum, NULL, NULL, + BCH_READ_retry_if_stale| + BCH_READ_may_promote| + BCH_READ_user_mapped); } static inline struct bch_read_bio *rbio_init_fragment(struct bio *bio, diff --git a/libbcachefs/io_write.c b/libbcachefs/io_write.c index aedbea63..d7620138 100644 --- a/libbcachefs/io_write.c +++ b/libbcachefs/io_write.c @@ -256,7 +256,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, s64 bi_sectors = le64_to_cpu(inode->v.bi_sectors); if 
(unlikely(bi_sectors + i_sectors_delta < 0)) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "inode %llu i_sectors underflow: %lli + %lli < 0", extent_iter->pos.inode, bi_sectors, i_sectors_delta); @@ -264,7 +264,6 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, bool print = bch2_count_fsck_err(c, inode_i_sectors_underflow, &buf); if (print) bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); if (i_sectors_delta < 0) i_sectors_delta = -bi_sectors; @@ -370,7 +369,6 @@ static int bch2_write_index_default(struct bch_write_op *op) struct bkey_buf sk; struct keylist *keys = &op->insert_keys; struct bkey_i *k = bch2_keylist_front(keys); - struct btree_trans *trans = bch2_trans_get(c); struct btree_iter iter; subvol_inum inum = { .subvol = op->subvol, @@ -380,6 +378,7 @@ static int bch2_write_index_default(struct bch_write_op *op) BUG_ON(!inum.subvol); + CLASS(btree_trans, trans)(c); bch2_bkey_buf_init(&sk); do { @@ -416,7 +415,6 @@ static int bch2_write_index_default(struct bch_write_op *op) bch2_cut_front(iter.pos, k); } while (!bch2_keylist_empty(keys)); - bch2_trans_put(trans); bch2_bkey_buf_exit(&sk, c); return ret; @@ -426,7 +424,7 @@ static int bch2_write_index_default(struct bch_write_op *op) void bch2_write_op_error(struct bch_write_op *op, u64 offset, const char *fmt, ...) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); if (op->subvol) { bch2_inum_offset_err_msg(op->c, &buf, @@ -453,7 +451,6 @@ void bch2_write_op_error(struct bch_write_op *op, u64 offset, const char *fmt, . } bch_err_ratelimited(op->c, "%s", buf.buf); - printbuf_exit(&buf); } void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, @@ -465,8 +462,8 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, struct bch_write_bio *n; unsigned ref_rw = type == BCH_DATA_btree ? READ : WRITE; unsigned ref_idx = type == BCH_DATA_btree - ? BCH_DEV_READ_REF_btree_node_write - : BCH_DEV_WRITE_REF_io_write; + ? 
(unsigned) BCH_DEV_READ_REF_btree_node_write + : (unsigned) BCH_DEV_WRITE_REF_io_write; BUG_ON(c->opts.nochanges); @@ -1218,6 +1215,7 @@ static bool bch2_extent_is_writeable(struct bch_write_op *op, static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans, struct btree_iter *iter, + struct bch_write_op *op, struct bkey_i *orig, struct bkey_s_c k, u64 new_i_size) @@ -1227,11 +1225,13 @@ static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans, return 0; } - struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); + struct bkey_i *new = bch2_trans_kmalloc_nomemzero(trans, + bkey_bytes(k.k) + sizeof(struct bch_extent_rebalance)); int ret = PTR_ERR_OR_ZERO(new); if (ret) return ret; + bkey_reassemble(new, k); bch2_cut_front(bkey_start_pos(&orig->k), new); bch2_cut_back(orig->k.p, new); @@ -1239,6 +1239,8 @@ static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans, bkey_for_each_ptr(ptrs, ptr) ptr->unwritten = 0; + bch2_bkey_set_needs_rebalance(op->c, &op->opts, new); + /* * Note that we're not calling bch2_subvol_get_snapshot() in this path - * that was done when we kicked off the write, and here it's important @@ -1263,7 +1265,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op) bkey_start_pos(&orig->k), orig->k.p, BTREE_ITER_intent, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ - bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size); + bch2_nocow_write_convert_one_unwritten(trans, &iter, op, orig, k, op->new_i_size); })); if (ret) break; @@ -1472,7 +1474,7 @@ err_bucket_stale: break; } - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); if (bch2_fs_inconsistent_on(stale < 0, c, "pointer to invalid bucket in nocow path on device %llu\n %s", stale_at->b.inode, @@ -1482,7 +1484,6 @@ err_bucket_stale: /* We can retry this: */ ret = bch_err_throw(c, transaction_restart); } - printbuf_exit(&buf); goto err_get_ioref; } @@ -1526,7 +1527,7 @@ again: * freeing up space on specific disks, which means that * allocations for specific disks may hang arbitrarily long: */ - ret = bch2_trans_run(c, lockrestart_do(trans, + ret = bch2_trans_do(c, bch2_alloc_sectors_start_trans(trans, op->target, op->opts.erasure_code && !(op->flags & BCH_WRITE_cached), @@ -1536,7 +1537,7 @@ again: op->nr_replicas_required, op->watermark, op->flags, - &op->cl, &wp))); + &op->cl, &wp)); if (unlikely(ret)) { if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) break; diff --git a/libbcachefs/journal.c b/libbcachefs/journal.c index c19ec114..3ba1f9fd 100644 --- a/libbcachefs/journal.c +++ b/libbcachefs/journal.c @@ -88,7 +88,7 @@ static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u6 static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j) { lockdep_assert_held(&j->lock); - out->atomic++; + guard(printbuf_atomic)(out); if (!out->nr_tabstops) printbuf_tabstop_push(out, 24); @@ -98,8 +98,6 @@ static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j) seq++) bch2_journal_buf_to_text(out, j, seq); prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? 
"open" : "closed"); - - --out->atomic; } static inline struct journal_buf * @@ -140,9 +138,9 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags) { struct bch_fs *c = container_of(j, struct bch_fs, journal); bool stuck = false; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); - buf.atomic++; + guard(printbuf_atomic)(&buf); if (!(error == -BCH_ERR_journal_full || error == -BCH_ERR_journal_pin_full) || @@ -150,28 +148,24 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags) (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim) return stuck; - spin_lock(&j->lock); + scoped_guard(spinlock, &j->lock) { + if (j->can_discard) + return stuck; - if (j->can_discard) { - spin_unlock(&j->lock); - return stuck; - } + stuck = true; - stuck = true; + /* + * The journal shutdown path will set ->err_seq, but do it here first to + * serialize against concurrent failures and avoid duplicate error + * reports. + */ + if (j->err_seq) + return stuck; - /* - * The journal shutdown path will set ->err_seq, but do it here first to - * serialize against concurrent failures and avoid duplicate error - * reports. - */ - if (j->err_seq) { - spin_unlock(&j->lock); - return stuck; - } - j->err_seq = journal_cur_seq(j); + j->err_seq = journal_cur_seq(j); - __bch2_journal_debug_to_text(&buf, j); - spin_unlock(&j->lock); + __bch2_journal_debug_to_text(&buf, j); + } prt_printf(&buf, bch2_fmt(c, "Journal stuck! Hava a pre-reservation but journal full (error %s)"), bch2_err_str(error)); bch2_print_str(c, KERN_ERR, buf.buf); @@ -179,7 +173,6 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags) printbuf_reset(&buf); bch2_journal_pins_to_text(&buf, j); bch_err(c, "Journal pins:\n%s", buf.buf); - printbuf_exit(&buf); bch2_fatal_error(c); dump_stack(); @@ -269,22 +262,21 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val, bool t buf->data->u64s = cpu_to_le32(old.cur_entry_offset); if (trace_journal_entry_close_enabled() && trace) { - struct printbuf pbuf = PRINTBUF; - pbuf.atomic++; - - prt_str(&pbuf, "entry size: "); - prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data)); - prt_newline(&pbuf); - bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT); - trace_journal_entry_close(c, pbuf.buf); - printbuf_exit(&pbuf); + CLASS(printbuf, err)(); + guard(printbuf_atomic)(&err); + + prt_str(&err, "entry size: "); + prt_human_readable_u64(&err, vstruct_bytes(buf->data)); + prt_newline(&err); + bch2_prt_task_backtrace(&err, current, 1, GFP_NOWAIT); + trace_journal_entry_close(c, err.buf); } sectors = vstruct_blocks_plus(buf->data, c->block_bits, buf->u64s_reserved) << c->block_bits; if (unlikely(sectors > buf->sectors)) { - struct printbuf err = PRINTBUF; - err.atomic++; + CLASS(printbuf, err)(); + guard(printbuf_atomic)(&err); prt_printf(&err, "journal entry overran reserved space: %u > %u\n", sectors, buf->sectors); @@ -296,7 +288,6 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val, bool t bch2_journal_halt_locked(j); bch_err(c, "%s", err.buf); - printbuf_exit(&err); return; } @@ -344,9 +335,8 @@ void bch2_journal_halt_locked(struct journal *j) void bch2_journal_halt(struct journal *j) { - spin_lock(&j->lock); + guard(spinlock)(&j->lock); bch2_journal_halt_locked(j); - spin_unlock(&j->lock); } static bool journal_entry_want_write(struct journal *j) @@ -371,13 +361,8 @@ static bool journal_entry_want_write(struct journal *j) bool bch2_journal_entry_close(struct journal *j) { - bool ret; - - 
spin_lock(&j->lock); - ret = journal_entry_want_write(j); - spin_unlock(&j->lock); - - return ret; + guard(spinlock)(&j->lock); + return journal_entry_want_write(j); } /* @@ -394,7 +379,7 @@ static int journal_entry_open(struct journal *j) lockdep_assert_held(&j->lock); BUG_ON(journal_entry_is_open(j)); - BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb)); + BUG_ON(c->sb.clean); if (j->blocked) return bch_err_throw(c, journal_blocked); @@ -542,7 +527,7 @@ static void journal_write_work(struct work_struct *work) { struct journal *j = container_of(work, struct journal, write_work.work); - spin_lock(&j->lock); + guard(spinlock)(&j->lock); if (__journal_entry_is_open(j->reservations)) { long delta = journal_cur_buf(j)->expires - jiffies; @@ -551,7 +536,6 @@ static void journal_write_work(struct work_struct *work) else __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true); } - spin_unlock(&j->lock); } static void journal_buf_prealloc(struct journal *j) @@ -652,34 +636,32 @@ out: if (ret == -BCH_ERR_journal_max_in_flight && track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true) && trace_journal_entry_full_enabled()) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_printbuf_make_room(&buf, 4096); - spin_lock(&j->lock); - prt_printf(&buf, "seq %llu\n", journal_cur_seq(j)); - bch2_journal_bufs_to_text(&buf, j); - spin_unlock(&j->lock); + scoped_guard(spinlock, &j->lock) { + prt_printf(&buf, "seq %llu\n", journal_cur_seq(j)); + bch2_journal_bufs_to_text(&buf, j); + } trace_journal_entry_full(c, buf.buf); - printbuf_exit(&buf); count_event(c, journal_entry_full); } if (ret == -BCH_ERR_journal_max_open && track_event_change(&c->times[BCH_TIME_blocked_journal_max_open], true) && trace_journal_entry_full_enabled()) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_printbuf_make_room(&buf, 4096); - spin_lock(&j->lock); - prt_printf(&buf, "seq %llu\n", journal_cur_seq(j)); - bch2_journal_bufs_to_text(&buf, j); - spin_unlock(&j->lock); + scoped_guard(spinlock, &j->lock) { + prt_printf(&buf, "seq %llu\n", journal_cur_seq(j)); + bch2_journal_bufs_to_text(&buf, j); + } trace_journal_entry_full(c, buf.buf); - printbuf_exit(&buf); count_event(c, journal_entry_full); } @@ -751,11 +733,10 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res, remaining_wait)) return ret; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_journal_debug_to_text(&buf, j); bch2_print_str(c, KERN_ERR, buf.buf); prt_printf(&buf, bch2_fmt(c, "Journal stuck? 
Waited for 10 seconds, err %s"), bch2_err_str(ret)); - printbuf_exit(&buf); closure_wait_event(&j->async_wait, !bch2_err_matches(ret = __journal_res_get(j, res, flags), BCH_ERR_operation_blocked) || @@ -772,11 +753,13 @@ void bch2_journal_entry_res_resize(struct journal *j, union journal_res_state state; int d = new_u64s - res->u64s; - spin_lock(&j->lock); + guard(spinlock)(&j->lock); + + j->entry_u64s_reserved += d; + res->u64s += d; - j->entry_u64s_reserved += d; if (d <= 0) - goto out; + return; j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d); state = READ_ONCE(j->reservations); @@ -791,9 +774,6 @@ void bch2_journal_entry_res_resize(struct journal *j, } else { journal_cur_buf(j)->u64s_reserved += d; } -out: - spin_unlock(&j->lock); - res->u64s += d; } /* journal flushing: */ @@ -944,7 +924,6 @@ bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end) { struct bch_fs *c = container_of(j, struct bch_fs, journal); u64 unwritten_seq; - bool ret = false; if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush))) return false; @@ -952,9 +931,10 @@ bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end) if (c->journal.flushed_seq_ondisk >= start) return false; - spin_lock(&j->lock); + guard(spinlock)(&j->lock); + if (c->journal.flushed_seq_ondisk >= start) - goto out; + return false; for (unwritten_seq = journal_last_unwritten_seq(j); unwritten_seq < end; @@ -963,15 +943,12 @@ bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end) /* journal flush already in flight, or flush requested */ if (buf->must_flush) - goto out; + return false; buf->noflush = true; } - ret = true; -out: - spin_unlock(&j->lock); - return ret; + return true; } static int __bch2_journal_meta(struct journal *j) @@ -1010,19 +987,18 @@ int bch2_journal_meta(struct journal *j) void bch2_journal_unblock(struct journal *j) { - spin_lock(&j->lock); - if (!--j->blocked && - j->cur_entry_offset_if_blocked < JOURNAL_ENTRY_CLOSED_VAL && - j->reservations.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) { - union journal_res_state old, new; - - old.v = atomic64_read(&j->reservations.counter); - do { - new.v = old.v; - new.cur_entry_offset = j->cur_entry_offset_if_blocked; - } while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v)); - } - spin_unlock(&j->lock); + scoped_guard(spinlock, &j->lock) + if (!--j->blocked && + j->cur_entry_offset_if_blocked < JOURNAL_ENTRY_CLOSED_VAL && + j->reservations.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) { + union journal_res_state old, new; + + old.v = atomic64_read(&j->reservations.counter); + do { + new.v = old.v; + new.cur_entry_offset = j->cur_entry_offset_if_blocked; + } while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v)); + } journal_wake(j); } @@ -1050,9 +1026,8 @@ static void __bch2_journal_block(struct journal *j) void bch2_journal_block(struct journal *j) { - spin_lock(&j->lock); - __bch2_journal_block(j); - spin_unlock(&j->lock); + scoped_guard(spinlock, &j->lock) + __bch2_journal_block(j); journal_quiesce(j); } @@ -1065,7 +1040,7 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou /* We're inside wait_event(), but using mutex_lock(: */ sched_annotate_sleep(); mutex_lock(&j->buf_lock); - spin_lock(&j->lock); + guard(spinlock)(&j->lock); max_seq = min(max_seq, journal_cur_seq(j)); for (u64 seq = journal_last_unwritten_seq(j); @@ -1092,7 +1067,6 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou } } - spin_unlock(&j->lock); if
(IS_ERR_OR_NULL(ret)) mutex_unlock(&j->buf_lock); return ret; @@ -1147,10 +1121,10 @@ static int bch2_set_nr_journal_buckets_iter(struct bch_dev *ca, unsigned nr, if (ret) break; - ret = bch2_trans_run(c, - bch2_trans_mark_metadata_bucket(trans, ca, + CLASS(btree_trans, trans)(c); + ret = bch2_trans_mark_metadata_bucket(trans, ca, ob[nr_got]->bucket, BCH_DATA_journal, - ca->mi.bucket_size, BTREE_TRIGGER_transactional)); + ca->mi.bucket_size, BTREE_TRIGGER_transactional); if (ret) { bch2_open_bucket_put(c, ob[nr_got]); bch_err_msg(c, ret, "marking new journal buckets"); @@ -1224,12 +1198,13 @@ err_unblock: mutex_unlock(&c->sb_lock); } - if (ret) + if (ret) { + CLASS(btree_trans, trans)(c); for (i = 0; i < nr_got; i++) - bch2_trans_run(c, - bch2_trans_mark_metadata_bucket(trans, ca, + bch2_trans_mark_metadata_bucket(trans, ca, bu[i], BCH_DATA_free, 0, - BTREE_TRIGGER_transactional)); + BTREE_TRIGGER_transactional); + } err_free: for (i = 0; i < nr_got; i++) bch2_open_bucket_put(c, ob[i]); @@ -1294,10 +1269,8 @@ static int bch2_set_nr_journal_buckets_loop(struct bch_fs *c, struct bch_dev *ca int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, unsigned nr) { - down_write(&c->state_lock); + guard(rwsem_write)(&c->state_lock); int ret = bch2_set_nr_journal_buckets_loop(c, ca, nr, false); - up_write(&c->state_lock); - bch_err_fn(c, ret); return ret; } @@ -1421,21 +1394,18 @@ int bch2_fs_journal_alloc(struct bch_fs *c) static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx) { - bool ret = false; - u64 seq; + guard(spinlock)(&j->lock); - spin_lock(&j->lock); - for (seq = journal_last_unwritten_seq(j); - seq <= journal_cur_seq(j) && !ret; + for (u64 seq = journal_last_unwritten_seq(j); + seq <= journal_cur_seq(j); seq++) { struct journal_buf *buf = journal_seq_to_buf(j, seq); if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx)) - ret = true; + return true; } - spin_unlock(&j->lock); - return ret; + return false; } void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca) @@ -1554,13 +1524,11 @@ int bch2_fs_journal_start(struct journal *j, u64 last_seq, u64 cur_seq) if (!had_entries) j->last_empty_seq = cur_seq - 1; /* to match j->seq */ - spin_lock(&j->lock); - j->last_flush_write = jiffies; - - j->reservations.idx = journal_cur_seq(j); - - c->last_bucket_seq_cleanup = journal_cur_seq(j); - spin_unlock(&j->lock); + scoped_guard(spinlock, &j->lock) { + j->last_flush_write = jiffies; + j->reservations.idx = journal_cur_seq(j); + c->last_bucket_seq_cleanup = journal_cur_seq(j); + } return 0; } @@ -1571,13 +1539,12 @@ void bch2_journal_set_replay_done(struct journal *j) * journal_space_available must happen before setting JOURNAL_running * JOURNAL_running must happen before JOURNAL_replay_done */ - spin_lock(&j->lock); + guard(spinlock)(&j->lock); bch2_journal_space_available(j); set_bit(JOURNAL_need_flush_write, &j->flags); set_bit(JOURNAL_running, &j->flags); set_bit(JOURNAL_replay_done, &j->flags); - spin_unlock(&j->lock); } /* init/exit: */ @@ -1734,9 +1701,10 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) printbuf_tabstops_reset(out); printbuf_tabstop_push(out, 28); - out->atomic++; + guard(printbuf_atomic)(out); guard(rcu)(); + s = READ_ONCE(j->reservations); prt_printf(out, "flags:\t"); @@ -1826,13 +1794,10 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) } prt_printf(out, "replicas want %u need %u\n", c->opts.metadata_replicas, c->opts.metadata_replicas_required); - - 
--out->atomic; } void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) { - spin_lock(&j->lock); + guard(spinlock)(&j->lock); __bch2_journal_debug_to_text(out, j); - spin_unlock(&j->lock); } diff --git a/libbcachefs/journal.h b/libbcachefs/journal.h index 97790703..b46b9718 100644 --- a/libbcachefs/journal.h +++ b/libbcachefs/journal.h @@ -297,9 +297,8 @@ static inline void bch2_journal_buf_put(struct journal *j, u64 seq) s = journal_state_buf_put(j, idx); if (!journal_state_count(s, idx)) { - spin_lock(&j->lock); + guard(spinlock)(&j->lock); bch2_journal_buf_put_final(j, seq); - spin_unlock(&j->lock); } else if (unlikely(s.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL)) wake_up(&j->wait); } diff --git a/libbcachefs/journal_io.c b/libbcachefs/journal_io.c index d63371e1..2835250a 100644 --- a/libbcachefs/journal_io.c +++ b/libbcachefs/journal_io.c @@ -35,7 +35,8 @@ void bch2_journal_pos_from_member_info_set(struct bch_fs *c) void bch2_journal_pos_from_member_info_resume(struct bch_fs *c) { - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); + for_each_member_device(c, ca) { struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx); @@ -46,16 +47,14 @@ void bch2_journal_pos_from_member_info_resume(struct bch_fs *c) if (offset <= ca->mi.bucket_size) ca->journal.sectors_free = ca->mi.bucket_size - offset; } - mutex_unlock(&c->sb_lock); } static void bch2_journal_ptr_to_text(struct printbuf *out, struct bch_fs *c, struct journal_ptr *p) { - struct bch_dev *ca = bch2_dev_tryget_noerror(c, p->dev); + CLASS(bch2_dev_tryget_noerror, ca)(c, p->dev); prt_printf(out, "%s %u:%u:%u (sector %llu)", ca ? ca->name : "(invalid dev)", p->dev, p->bucket, p->bucket_offset, p->sector); - bch2_dev_put(ca); } void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c, struct journal_replay *j) @@ -157,7 +156,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca, struct journal_replay **_i, *i, *dup; size_t bytes = vstruct_bytes(j); u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = JOURNAL_ENTRY_ADD_OK; if (last_seq && c->opts.journal_rewind) @@ -223,7 +222,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca, ret = darray_push(&dup->ptrs, entry_ptr); if (ret) - goto out; + return ret; bch2_journal_replay_to_text(&buf, c, dup); @@ -240,7 +239,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca, if (entry_ptr.csum_good && !identical) goto replace; - goto out; + return ret; } replace: i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL); @@ -263,9 +262,7 @@ replace: } *_i = i; -out: fsck_err: - printbuf_exit(&buf); return ret; } @@ -312,7 +309,7 @@ static void journal_entry_err_msg(struct printbuf *out, #define journal_entry_err(c, version, jset, entry, _err, msg, ...) 
\ ({ \ - struct printbuf _buf = PRINTBUF; \ + CLASS(printbuf, _buf)(); \ \ journal_entry_err_msg(&_buf, version, jset, entry); \ prt_printf(&_buf, msg, ##__VA_ARGS__); \ @@ -331,7 +328,6 @@ static void journal_entry_err_msg(struct printbuf *out, break; \ } \ \ - printbuf_exit(&_buf); \ true; \ }) @@ -617,7 +613,7 @@ static int journal_entry_data_usage_validate(struct bch_fs *c, struct jset_entry_data_usage *u = container_of(entry, struct jset_entry_data_usage, entry); unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); - struct printbuf err = PRINTBUF; + CLASS(printbuf, err)(); int ret = 0; if (journal_entry_err_on(bytes < sizeof(*u) || @@ -626,7 +622,7 @@ static int journal_entry_data_usage_validate(struct bch_fs *c, journal_entry_data_usage_bad_size, "invalid journal entry usage: bad size")) { journal_entry_null_range(entry, vstruct_next(entry)); - goto out; + return 0; } if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c, &err), @@ -634,11 +630,9 @@ static int journal_entry_data_usage_validate(struct bch_fs *c, journal_entry_data_usage_bad_size, "invalid journal entry usage: %s", err.buf)) { journal_entry_null_range(entry, vstruct_next(entry)); - goto out; + return 0; } -out: fsck_err: - printbuf_exit(&err); return ret; } @@ -1165,17 +1159,16 @@ reread: vstruct_end(j) - (void *) j->encrypted_start); bch2_fs_fatal_err_on(ret, c, "decrypting journal entry: %s", bch2_err_str(ret)); - mutex_lock(&jlist->lock); - ret = journal_entry_add(c, ca, (struct journal_ptr) { - .csum_good = csum_good, - .csum = csum, - .dev = ca->dev_idx, - .bucket = bucket, - .bucket_offset = offset - - bucket_to_sector(ca, ja->buckets[bucket]), - .sector = offset, - }, jlist, j); - mutex_unlock(&jlist->lock); + scoped_guard(mutex, &jlist->lock) + ret = journal_entry_add(c, ca, (struct journal_ptr) { + .csum_good = csum_good, + .csum = csum, + .dev = ca->dev_idx, + .bucket = bucket, + .bucket_offset = offset - + bucket_to_sector(ca, ja->buckets[bucket]), + .sector = offset, + }, jlist, j); switch (ret) { case JOURNAL_ENTRY_ADD_OK: @@ -1235,16 +1228,15 @@ out: closure_return(cl); return; err: - mutex_lock(&jlist->lock); - jlist->ret = ret; - mutex_unlock(&jlist->lock); + scoped_guard(mutex, &jlist->lock) + jlist->ret = ret; goto out; } noinline_for_stack static void bch2_journal_print_checksum_error(struct bch_fs *c, struct journal_replay *j) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); enum bch_csum_type csum_type = JSET_CSUM_TYPE(&j->j); @@ -1271,7 +1263,6 @@ static void bch2_journal_print_checksum_error(struct bch_fs *c, struct journal_r prt_printf(&buf, "\n(had good copy on another device)"); bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); } struct u64_range bch2_journal_entry_missing_range(struct bch_fs *c, u64 start, u64 end) @@ -1299,7 +1290,6 @@ struct u64_range bch2_journal_entry_missing_range(struct bch_fs *c, u64 start, u noinline_for_stack static int bch2_journal_check_for_missing(struct bch_fs *c, u64 start_seq, u64 end_seq) { - struct printbuf buf = PRINTBUF; int ret = 0; struct genradix_iter radix_iter; @@ -1318,7 +1308,7 @@ static int bch2_journal_check_for_missing(struct bch_fs *c, u64 start_seq, u64 e struct u64_range missing; while ((missing = bch2_journal_entry_missing_range(c, seq, le64_to_cpu(i->j.seq))).start) { - printbuf_reset(&buf); + CLASS(printbuf, buf)(); prt_printf(&buf, "journal entries %llu-%llu missing! 
(replaying %llu-%llu)", missing.start, missing.end - 1, start_seq, end_seq); @@ -1342,7 +1332,6 @@ static int bch2_journal_check_for_missing(struct bch_fs *c, u64 start_seq, u64 e seq = le64_to_cpu(i->j.seq) + 1; } fsck_err: - printbuf_exit(&buf); return ret; } @@ -1354,7 +1343,6 @@ int bch2_journal_read(struct bch_fs *c, struct journal_list jlist; struct journal_replay *i, **_i; struct genradix_iter radix_iter; - struct printbuf buf = PRINTBUF; bool degraded = false, last_write_torn = false; u64 seq; int ret = 0; @@ -1443,24 +1431,27 @@ int bch2_journal_read(struct bch_fs *c, return 0; } - printbuf_reset(&buf); - prt_printf(&buf, "journal read done, replaying entries %llu-%llu", - *last_seq, *blacklist_seq - 1); - - /* - * Drop blacklisted entries and entries older than last_seq (or start of - * journal rewind: - */ u64 drop_before = *last_seq; - if (c->opts.journal_rewind) { - drop_before = min(drop_before, c->opts.journal_rewind); - prt_printf(&buf, " (rewinding from %llu)", c->opts.journal_rewind); + { + CLASS(printbuf, buf)(); + prt_printf(&buf, "journal read done, replaying entries %llu-%llu", + *last_seq, *blacklist_seq - 1); + + /* + * Drop blacklisted entries and entries older than last_seq (or start of + * journal rewind: + */ + if (c->opts.journal_rewind) { + drop_before = min(drop_before, c->opts.journal_rewind); + prt_printf(&buf, " (rewinding from %llu)", c->opts.journal_rewind); + } + + *last_seq = drop_before; + if (*start_seq != *blacklist_seq) + prt_printf(&buf, " (unflushed %llu-%llu)", *blacklist_seq, *start_seq - 1); + bch_info(c, "%s", buf.buf); } - *last_seq = drop_before; - if (*start_seq != *blacklist_seq) - prt_printf(&buf, " (unflushed %llu-%llu)", *blacklist_seq, *start_seq - 1); - bch_info(c, "%s", buf.buf); genradix_for_each(&c->journal_entries, radix_iter, _i) { i = *_i; @@ -1483,7 +1474,7 @@ int bch2_journal_read(struct bch_fs *c, ret = bch2_journal_check_for_missing(c, drop_before, *blacklist_seq - 1); if (ret) - goto err; + return ret; genradix_for_each(&c->journal_entries, radix_iter, _i) { union bch_replicas_padded replicas = { @@ -1512,14 +1503,14 @@ int bch2_journal_read(struct bch_fs *c, i->ptrs.data[0].sector, READ); if (ret) - goto err; + return ret; darray_for_each(i->ptrs, ptr) replicas_entry_add_dev(&replicas.e, ptr->dev); bch2_replicas_entry_sort(&replicas.e); - printbuf_reset(&buf); + CLASS(printbuf, buf)(); bch2_replicas_entry_to_text(&buf, &replicas.e); if (!degraded && @@ -1530,12 +1521,10 @@ int bch2_journal_read(struct bch_fs *c, le64_to_cpu(i->j.seq), buf.buf))) { ret = bch2_mark_replicas(c, &replicas.e); if (ret) - goto err; + return ret; } } -err: fsck_err: - printbuf_exit(&buf); return ret; } @@ -1695,10 +1684,10 @@ static void journal_buf_realloc(struct journal *j, struct journal_buf *buf) memcpy(new_buf, buf->data, buf->buf_size); - spin_lock(&j->lock); - swap(buf->data, new_buf); - swap(buf->buf_size, new_size); - spin_unlock(&j->lock); + scoped_guard(spinlock, &j->lock) { + swap(buf->data, new_buf); + swap(buf->buf_size, new_size); + } kvfree(new_buf); } @@ -1725,7 +1714,7 @@ static CLOSURE_CALLBACK(journal_write_done) } if (err && !bch2_journal_error(j)) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); if (err == -BCH_ERR_journal_write_err) @@ -1737,7 +1726,6 @@ static CLOSURE_CALLBACK(journal_write_done) bch2_fs_emergency_read_only2(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); } closure_debug_destroy(cl); @@ -1879,7 +1867,11 @@ static 
CLOSURE_CALLBACK(journal_write_submit) jbio->submit_time = local_clock(); - bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META); + /* + * blk-wbt.c throttles all writes except those that have both + * REQ_SYNC and REQ_IDLE set... + */ + bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_IDLE|REQ_META); bio->bi_iter.bi_sector = ptr->offset; bio->bi_end_io = journal_write_endio; bio->bi_private = ca; @@ -2019,9 +2011,8 @@ static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w) } } - spin_lock(&c->journal.lock); - w->need_flush_to_write_buffer = false; - spin_unlock(&c->journal.lock); + scoped_guard(spinlock, &c->journal.lock) + w->need_flush_to_write_buffer = false; start = end = vstruct_last(jset); @@ -2159,21 +2150,21 @@ CLOSURE_CALLBACK(bch2_journal_write) j->write_start_time = local_clock(); - spin_lock(&j->lock); - if (nr_rw_members > 1) - w->separate_flush = true; + scoped_guard(spinlock, &j->lock) { + if (nr_rw_members > 1) + w->separate_flush = true; - ret = bch2_journal_write_pick_flush(j, w); - spin_unlock(&j->lock); + ret = bch2_journal_write_pick_flush(j, w); + } if (unlikely(ret)) goto err; - mutex_lock(&j->buf_lock); - journal_buf_realloc(j, w); + scoped_guard(mutex, &j->buf_lock) { + journal_buf_realloc(j, w); - ret = bch2_journal_write_prep(j, w); - mutex_unlock(&j->buf_lock); + ret = bch2_journal_write_prep(j, w); + } if (unlikely(ret)) goto err; @@ -2194,22 +2185,22 @@ CLOSURE_CALLBACK(bch2_journal_write) if (unlikely(ret)) goto err; - spin_lock(&j->lock); - /* - * write is allocated, no longer need to account for it in - * bch2_journal_space_available(): - */ - w->sectors = 0; - w->write_allocated = true; - j->entry_bytes_written += vstruct_bytes(w->data); + scoped_guard(spinlock, &j->lock) { + /* + * write is allocated, no longer need to account for it in + * bch2_journal_space_available(): + */ + w->sectors = 0; + w->write_allocated = true; + j->entry_bytes_written += vstruct_bytes(w->data); - /* - * journal entry has been compacted and allocated, recalculate space - * available: - */ - bch2_journal_space_available(j); - bch2_journal_do_writes(j); - spin_unlock(&j->lock); + /* + * journal entry has been compacted and allocated, recalculate space + * available: + */ + bch2_journal_space_available(j); + bch2_journal_do_writes(j); + } w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key)); @@ -2233,7 +2224,7 @@ CLOSURE_CALLBACK(bch2_journal_write) return; err_allocate_write: if (!bch2_journal_error(j)) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_journal_debug_to_text(&buf, j); prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write at seq %llu for %zu sectors: %s"), @@ -2241,7 +2232,6 @@ err_allocate_write: vstruct_sectors(w->data, c->block_bits), bch2_err_str(ret)); bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); } err: bch2_fatal_error(c); diff --git a/libbcachefs/journal_reclaim.c b/libbcachefs/journal_reclaim.c index 0042d43b..be50455c 100644 --- a/libbcachefs/journal_reclaim.c +++ b/libbcachefs/journal_reclaim.c @@ -221,8 +221,8 @@ void bch2_journal_space_available(struct journal *j) if (nr_online < metadata_replicas_required(c)) { if (!(c->sb.features & BIT_ULL(BCH_FEATURE_small_image))) { - struct printbuf buf = PRINTBUF; - buf.atomic++; + CLASS(printbuf, buf)(); + guard(printbuf_atomic)(&buf); prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n" "rw journal devs:", nr_online, metadata_replicas_required(c)); @@ -230,7 +230,6 @@ void 
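The REQ_IDLE hunk is the one behavioral (rather than structural) change in this stretch: as the added comment says, blk-wbt exempts a write from writeback throttling only when REQ_SYNC and REQ_IDLE are both set, so journal writes tagged REQ_SYNC alone could still stall behind background writeback. The check below paraphrases the logic in block/blk-wbt.c; wbt_would_throttle_write() is an illustrative helper, not a kernel API:

#include <linux/blk_types.h>

static inline bool wbt_would_throttle_write(blk_opf_t opf)
{
        /* only REQ_SYNC + REQ_IDLE together are exempt from throttling */
        return (opf & (REQ_SYNC | REQ_IDLE)) != (REQ_SYNC | REQ_IDLE);
}

/* Hence the journal's new flag set: latency-critical, so opt out fully. */
#define JOURNAL_WRITE_OPF       (REQ_OP_WRITE|REQ_SYNC|REQ_IDLE|REQ_META)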
bch2_journal_space_available(struct journal *j) prt_printf(&buf, " %s", ca->name); bch_err(c, "%s", buf.buf); - printbuf_exit(&buf); } ret = bch_err_throw(c, insufficient_journal_devices); goto out; @@ -280,11 +279,8 @@ static bool __should_discard_bucket(struct journal *j, struct journal_device *ja static bool should_discard_bucket(struct journal *j, struct journal_device *ja) { - spin_lock(&j->lock); - bool ret = __should_discard_bucket(j, ja); - spin_unlock(&j->lock); - - return ret; + guard(spinlock)(&j->lock); + return __should_discard_bucket(j, ja); } /* @@ -295,7 +291,7 @@ void bch2_journal_do_discards(struct journal *j) { struct bch_fs *c = container_of(j, struct bch_fs, journal); - mutex_lock(&j->discard_lock); + guard(mutex)(&j->discard_lock); for_each_rw_member(c, ca, BCH_DEV_WRITE_REF_journal_do_discards) { struct journal_device *ja = &ca->journal; @@ -309,15 +305,12 @@ void bch2_journal_do_discards(struct journal *j) ja->buckets[ja->discard_idx]), ca->mi.bucket_size, GFP_NOFS); - spin_lock(&j->lock); - ja->discard_idx = (ja->discard_idx + 1) % ja->nr; - - bch2_journal_space_available(j); - spin_unlock(&j->lock); + scoped_guard(spinlock, &j->lock) { + ja->discard_idx = (ja->discard_idx + 1) % ja->nr; + bch2_journal_space_available(j); + } } } - - mutex_unlock(&j->discard_lock); } /* @@ -358,9 +351,8 @@ bool __bch2_journal_pin_put(struct journal *j, u64 seq) void bch2_journal_pin_put(struct journal *j, u64 seq) { if (__bch2_journal_pin_put(j, seq)) { - spin_lock(&j->lock); + guard(spinlock)(&j->lock); bch2_journal_reclaim_fast(j); - spin_unlock(&j->lock); } } @@ -393,10 +385,9 @@ static inline bool __journal_pin_drop(struct journal *j, void bch2_journal_pin_drop(struct journal *j, struct journal_entry_pin *pin) { - spin_lock(&j->lock); + guard(spinlock)(&j->lock); if (__journal_pin_drop(j, pin)) bch2_journal_reclaim_fast(j); - spin_unlock(&j->lock); } static enum journal_pin_type journal_pin_type(struct journal_entry_pin *pin, @@ -443,7 +434,7 @@ void bch2_journal_pin_copy(struct journal *j, struct journal_entry_pin *src, journal_pin_flush_fn flush_fn) { - spin_lock(&j->lock); + guard(spinlock)(&j->lock); u64 seq = READ_ONCE(src->seq); @@ -454,7 +445,6 @@ void bch2_journal_pin_copy(struct journal *j, * longer to exist, but that means there's no longer anything to * copy and we can bail out here: */ - spin_unlock(&j->lock); return; } @@ -471,31 +461,32 @@ void bch2_journal_pin_copy(struct journal *j, */ if (seq == journal_last_seq(j)) journal_wake(j); - spin_unlock(&j->lock); } void bch2_journal_pin_set(struct journal *j, u64 seq, struct journal_entry_pin *pin, journal_pin_flush_fn flush_fn) { - spin_lock(&j->lock); + bool wake; - BUG_ON(seq < journal_last_seq(j)); + scoped_guard(spinlock, &j->lock) { + BUG_ON(seq < journal_last_seq(j)); - bool reclaim = __journal_pin_drop(j, pin); + bool reclaim = __journal_pin_drop(j, pin); - bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(pin, flush_fn)); + bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(pin, flush_fn)); - if (reclaim) - bch2_journal_reclaim_fast(j); - /* - * If the journal is currently full, we might want to call flush_fn - * immediately: - */ - if (seq == journal_last_seq(j)) - journal_wake(j); + if (reclaim) + bch2_journal_reclaim_fast(j); + /* + * If the journal is currently full, we might want to call flush_fn + * immediately: + */ + wake = seq == journal_last_seq(j); + } - spin_unlock(&j->lock); + if (wake) + journal_wake(j); } /** @@ -580,17 +571,17 @@ static size_t 
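bch2_journal_pin_set() is the one conversion above that is not purely mechanical: journal_wake() used to run with j->lock held, and now the wakeup condition is captured in a local "wake" inside the scoped_guard and acted on after the unlock, so woken waiters do not immediately contend on a lock that is still held. The shape of that pattern, with jrnl and pin_set as illustrative stand-ins for the real types:

#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct jrnl {                           /* illustrative stand-in */
        spinlock_t              lock;
        wait_queue_head_t       wait;
        u64                     front_seq;
};

static void pin_set(struct jrnl *j, u64 seq)
{
        bool wake;

        scoped_guard(spinlock, &j->lock) {
                /* ... update pin lists ... */
                wake = seq == j->front_seq;     /* decide under the lock */
        }

        if (wake)                               /* act after the unlock */
                wake_up(&j->wait);
}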
journal_flush_pins(struct journal *j, j->last_flushed = jiffies; - spin_lock(&j->lock); - pin = journal_get_next_pin(j, seq_to_flush, - allowed_below, - allowed_above, &seq); - if (pin) { - BUG_ON(j->flush_in_progress); - j->flush_in_progress = pin; - j->flush_in_progress_dropped = false; - flush_fn = pin->flush; + scoped_guard(spinlock, &j->lock) { + pin = journal_get_next_pin(j, seq_to_flush, + allowed_below, + allowed_above, &seq); + if (pin) { + BUG_ON(j->flush_in_progress); + j->flush_in_progress = pin; + j->flush_in_progress_dropped = false; + flush_fn = pin->flush; + } } - spin_unlock(&j->lock); if (!pin) break; @@ -603,13 +594,13 @@ static size_t journal_flush_pins(struct journal *j, err = flush_fn(j, pin, seq); - spin_lock(&j->lock); - /* Pin might have been dropped or rearmed: */ - if (likely(!err && !j->flush_in_progress_dropped)) - list_move(&pin->list, &journal_seq_pin(j, seq)->flushed[journal_pin_type(pin, flush_fn)]); - j->flush_in_progress = NULL; - j->flush_in_progress_dropped = false; - spin_unlock(&j->lock); + scoped_guard(spinlock, &j->lock) { + /* Pin might have been dropped or rearmed: */ + if (likely(!err && !j->flush_in_progress_dropped)) + list_move(&pin->list, &journal_seq_pin(j, seq)->flushed[journal_pin_type(pin, flush_fn)]); + j->flush_in_progress = NULL; + j->flush_in_progress_dropped = false; + } wake_up(&j->pin_flush_wait); @@ -770,9 +761,8 @@ static int bch2_journal_reclaim_thread(void *arg) j->reclaim_kicked = false; - mutex_lock(&j->reclaim_lock); - ret = __bch2_journal_reclaim(j, false, kicked); - mutex_unlock(&j->reclaim_lock); + scoped_guard(mutex, &j->reclaim_lock) + ret = __bch2_journal_reclaim(j, false, kicked); now = jiffies; delay = msecs_to_jiffies(c->opts.journal_reclaim_delay); @@ -788,9 +778,8 @@ static int bch2_journal_reclaim_thread(void *arg) if (j->reclaim_kicked) break; - spin_lock(&j->lock); - journal_empty = fifo_empty(&j->pin); - spin_unlock(&j->lock); + scoped_guard(spinlock, &j->lock) + journal_empty = fifo_empty(&j->pin); long timeout = j->next_reclaim - jiffies; @@ -844,10 +833,10 @@ int bch2_journal_reclaim_start(struct journal *j) static bool journal_pins_still_flushing(struct journal *j, u64 seq_to_flush, unsigned types) { + guard(spinlock)(&j->lock); + struct journal_entry_pin_list *pin_list; u64 seq; - - spin_lock(&j->lock); fifo_for_each_entry_ptr(pin_list, &j->pin, seq) { if (seq > seq_to_flush) break; @@ -855,12 +844,9 @@ static bool journal_pins_still_flushing(struct journal *j, u64 seq_to_flush, for (unsigned i = 0; i < JOURNAL_PIN_TYPE_NR; i++) if ((BIT(i) & types) && (!list_empty(&pin_list->unflushed[i]) || - !list_empty(&pin_list->flushed[i]))) { - spin_unlock(&j->lock); + !list_empty(&pin_list->flushed[i]))) return true; - } } - spin_unlock(&j->lock); return false; } @@ -881,32 +867,27 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush, if (ret) return ret; - mutex_lock(&j->reclaim_lock); + guard(mutex)(&j->reclaim_lock); for (int type = JOURNAL_PIN_TYPE_NR - 1; type >= 0; --type) if (journal_flush_pins_or_still_flushing(j, seq_to_flush, BIT(type))) { *did_work = true; - goto unlock; + return ret; } if (seq_to_flush > journal_cur_seq(j)) bch2_journal_entry_close(j); - spin_lock(&j->lock); /* * If journal replay hasn't completed, the unreplayed journal entries * hold refs on their corresponding sequence numbers */ + guard(spinlock)(&j->lock); ret = !test_bit(JOURNAL_replay_done, &j->flags) || journal_last_seq(j) > seq_to_flush || !fifo_used(&j->pin); - - spin_unlock(&j->lock); -unlock: - 
mutex_unlock(&j->reclaim_lock); - return ret; } @@ -931,13 +912,12 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx) u64 iter, seq = 0; int ret = 0; - spin_lock(&j->lock); - fifo_for_each_entry_ptr(p, &j->pin, iter) - if (dev_idx >= 0 - ? bch2_dev_list_has_dev(p->devs, dev_idx) - : p->devs.nr < c->opts.metadata_replicas) - seq = iter; - spin_unlock(&j->lock); + scoped_guard(spinlock, &j->lock) + fifo_for_each_entry_ptr(p, &j->pin, iter) + if (dev_idx >= 0 + ? bch2_dev_list_has_dev(p->devs, dev_idx) + : p->devs.nr < c->opts.metadata_replicas) + seq = iter; bch2_journal_flush_pins(j, seq); @@ -945,7 +925,7 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx) if (ret) return ret; - mutex_lock(&c->replicas_gc_lock); + guard(mutex)(&c->replicas_gc_lock); bch2_replicas_gc_start(c, 1 << BCH_DATA_journal); /* @@ -960,29 +940,25 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx) goto err; seq = 0; - spin_lock(&j->lock); - while (!ret) { - union bch_replicas_padded replicas; + scoped_guard(spinlock, &j->lock) + while (!ret) { + union bch_replicas_padded replicas; - seq = max(seq, journal_last_seq(j)); - if (seq >= j->pin.back) - break; - bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, - journal_seq_pin(j, seq)->devs); - seq++; + seq = max(seq, journal_last_seq(j)); + if (seq >= j->pin.back) + break; + bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, + journal_seq_pin(j, seq)->devs); + seq++; - if (replicas.e.nr_devs) { - spin_unlock(&j->lock); - ret = bch2_mark_replicas(c, &replicas.e); - spin_lock(&j->lock); + if (replicas.e.nr_devs) { + spin_unlock(&j->lock); + ret = bch2_mark_replicas(c, &replicas.e); + spin_lock(&j->lock); + } } - } - spin_unlock(&j->lock); err: - ret = bch2_replicas_gc_end(c, ret); - mutex_unlock(&c->replicas_gc_lock); - - return ret; + return bch2_replicas_gc_end(c, ret); } bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq) @@ -990,20 +966,16 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 struct journal_entry_pin_list *pin_list; struct journal_entry_pin *pin; - spin_lock(&j->lock); - if (!test_bit(JOURNAL_running, &j->flags)) { - spin_unlock(&j->lock); + guard(spinlock)(&j->lock); + guard(printbuf_atomic)(out); + + if (!test_bit(JOURNAL_running, &j->flags)) return true; - } *seq = max(*seq, j->pin.front); - if (*seq >= j->pin.back) { - spin_unlock(&j->lock); + if (*seq >= j->pin.back) return true; - } - - out->atomic++; pin_list = journal_seq_pin(j, *seq); @@ -1022,9 +994,6 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 printbuf_indent_sub(out, 2); - --out->atomic; - spin_unlock(&j->lock); - return false; } diff --git a/libbcachefs/journal_seq_blacklist.c b/libbcachefs/journal_seq_blacklist.c index 6361809b..399db5b7 100644 --- a/libbcachefs/journal_seq_blacklist.c +++ b/libbcachefs/journal_seq_blacklist.c @@ -49,7 +49,7 @@ int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end) unsigned i = 0, nr; int ret = 0; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist); nr = blacklist_nr_entries(bl); @@ -77,10 +77,8 @@ int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end) bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist, sb_blacklist_u64s(nr + 1)); - if (!bl) { - ret = bch_err_throw(c, ENOSPC_sb_journal_seq_blacklist); - goto out; - } + if (!bl) + return bch_err_throw(c, 
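bch2_journal_seq_blacklist_add() above shows what the guards buy in control flow: the out:/unlock: labels disappear because every return now drops c->sb_lock itself. The before/after shape in miniature, with struct fs, resize_field() and write_super() as illustrative stand-ins:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct fs { struct mutex sb_lock; };                    /* illustrative */
static bool resize_field(struct fs *c) { return true; } /* illustrative */
static void write_super(struct fs *c)  { }              /* illustrative */

/* Before: every failure path must funnel through the unlock label. */
static int op_before(struct fs *c)
{
        int ret = 0;

        mutex_lock(&c->sb_lock);
        if (!resize_field(c)) {
                ret = -ENOSPC;
                goto out;
        }
        write_super(c);
out:
        mutex_unlock(&c->sb_lock);
        return ret;
}

/* After: the guard unlocks on every return and the label disappears. */
static int op_after(struct fs *c)
{
        guard(mutex)(&c->sb_lock);

        if (!resize_field(c))
                return -ENOSPC;

        write_super(c);
        return 0;
}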
ENOSPC_sb_journal_seq_blacklist); array_insert_item(bl->start, nr, i, ((struct journal_seq_blacklist_entry) { .start = cpu_to_le64(start), @@ -89,8 +87,6 @@ int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end) c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3); ret = bch2_write_super(c); -out: - mutex_unlock(&c->sb_lock); return ret ?: bch2_blacklist_table_initialize(c); } diff --git a/libbcachefs/logged_ops.c b/libbcachefs/logged_ops.c index 75f27ec2..0367ea37 100644 --- a/libbcachefs/logged_ops.c +++ b/libbcachefs/logged_ops.c @@ -35,7 +35,7 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter, { struct bch_fs *c = trans->c; u32 restart_count = trans->restart_count; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; fsck_err_on(test_bit(BCH_FS_clean_recovery, &c->flags), @@ -56,21 +56,18 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter, bch2_bkey_buf_exit(&sk, c); fsck_err: - printbuf_exit(&buf); return ret ?: trans_was_restarted(trans, restart_count); } int bch2_resume_logged_ops(struct bch_fs *c) { - int ret = bch2_trans_run(c, - for_each_btree_key_max(trans, iter, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_max(trans, iter, BTREE_ID_logged_ops, POS(LOGGED_OPS_INUM_logged_ops, 0), POS(LOGGED_OPS_INUM_logged_ops, U64_MAX), BTREE_ITER_prefetch, k, - resume_logged_op(trans, &iter, k))); - bch_err_fn(c, ret); - return ret; + resume_logged_op(trans, &iter, k)); } static int __bch2_logged_op_start(struct btree_trans *trans, struct bkey_i *k) @@ -107,12 +104,11 @@ int bch2_logged_op_finish(struct btree_trans *trans, struct bkey_i *k) */ if (ret) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k)); bch2_fs_fatal_error(c, "deleting logged operation %s: %s", buf.buf, bch2_err_str(ret)); - printbuf_exit(&buf); } return ret; diff --git a/libbcachefs/lru.c b/libbcachefs/lru.c index 57b5b326..ee14656c 100644 --- a/libbcachefs/lru.c +++ b/libbcachefs/lru.c @@ -86,7 +86,7 @@ int bch2_lru_check_set(struct btree_trans *trans, struct bkey_buf *last_flushed) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); struct btree_iter lru_iter; struct bkey_s_c lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru, @@ -112,7 +112,6 @@ int bch2_lru_check_set(struct btree_trans *trans, err: fsck_err: bch2_trans_iter_exit(trans, &lru_iter); - printbuf_exit(&buf); return ret; } @@ -166,8 +165,8 @@ static int bch2_check_lru_key(struct btree_trans *trans, struct bkey_buf *last_flushed) { struct bch_fs *c = trans->c; - struct printbuf buf1 = PRINTBUF; - struct printbuf buf2 = PRINTBUF; + CLASS(printbuf, buf1)(); + CLASS(printbuf, buf2)(); struct bbpos bp = lru_pos_to_bp(lru_k); @@ -198,8 +197,6 @@ static int bch2_check_lru_key(struct btree_trans *trans, err: fsck_err: bch2_trans_iter_exit(trans, &iter); - printbuf_exit(&buf2); - printbuf_exit(&buf1); return ret; } @@ -210,14 +207,13 @@ int bch2_check_lrus(struct bch_fs *c) bch2_bkey_buf_init(&last_flushed); bkey_init(&last_flushed.k->k); - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, + CLASS(btree_trans, trans)(c); + int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_lru, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_check_lru_key(trans, &iter, k, &last_flushed))); + bch2_check_lru_key(trans, &iter, k, &last_flushed)); 
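Transactions get the same treatment as locks and printbufs: bch2_trans_get()/bch2_trans_put() pairs, and the bch2_trans_run() wrapper, become CLASS(btree_trans, trans)(c), releasing the transaction when trans leaves scope; bch2_resume_logged_ops() and bch2_check_lrus() above are the template. A usage sketch, assuming the class is declared along the same lines as the printbuf one (walk_inodes and process_key are illustrative):

static int walk_inodes(struct bch_fs *c)
{
        CLASS(btree_trans, trans)(c);   /* ~ trans = bch2_trans_get(c) */

        return for_each_btree_key(trans, iter,
                        BTREE_ID_inodes, POS_MIN,
                        BTREE_ITER_prefetch, k,
                        process_key(trans, k)); /* illustrative callback */
}                                       /* ~ bch2_trans_put(trans) on return */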
bch2_bkey_buf_exit(&last_flushed, c); - bch_err_fn(c, ret); return ret; } diff --git a/libbcachefs/migrate.c b/libbcachefs/migrate.c index f296cce9..bd1e54e0 100644 --- a/libbcachefs/migrate.c +++ b/libbcachefs/migrate.c @@ -119,34 +119,29 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, struct progress_indicator_state *progress, unsigned dev_idx, unsigned flags) { - struct btree_trans *trans = bch2_trans_get(c); - enum btree_id id; - int ret = 0; + CLASS(btree_trans, trans)(c); - for (id = 0; id < BTREE_ID_NR; id++) { + for (unsigned id = 0; id < BTREE_ID_NR; id++) { if (!btree_type_has_ptrs(id)) continue; - ret = for_each_btree_key_commit(trans, iter, id, POS_MIN, + int ret = for_each_btree_key_commit(trans, iter, id, POS_MIN, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ bch2_progress_update_iter(trans, progress, &iter, "dropping user data"); bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags); })); if (ret) - break; + return ret; } - bch2_trans_put(trans); - - return ret; + return 0; } static int bch2_dev_metadata_drop(struct bch_fs *c, struct progress_indicator_state *progress, unsigned dev_idx, unsigned flags) { - struct btree_trans *trans; struct btree_iter iter; struct closure cl; struct btree *b; @@ -158,7 +153,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, if (flags & BCH_FORCE_IF_METADATA_LOST) return bch_err_throw(c, remove_with_metadata_missing_unimplemented); - trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); bch2_bkey_buf_init(&k); closure_init_stack(&cl); @@ -199,7 +194,6 @@ next: ret = 0; err: bch2_bkey_buf_exit(&k, c); - bch2_trans_put(trans); BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart)); @@ -240,7 +234,7 @@ out: int bch2_dev_data_drop_by_backpointers(struct bch_fs *c, unsigned dev_idx, unsigned flags) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct bkey_buf last_flushed; bch2_bkey_buf_init(&last_flushed); @@ -260,7 +254,6 @@ int bch2_dev_data_drop_by_backpointers(struct bch_fs *c, unsigned dev_idx, unsig })); bch2_bkey_buf_exit(&last_flushed, trans->c); - bch2_trans_put(trans); bch_err_fn(c, ret); return ret; } diff --git a/libbcachefs/move.c b/libbcachefs/move.c index 0739287a..3f44bb54 100644 --- a/libbcachefs/move.c +++ b/libbcachefs/move.c @@ -54,22 +54,20 @@ trace_io_move2(struct bch_fs *c, struct bkey_s_c k, struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, k); prt_newline(&buf); bch2_data_update_opts_to_text(&buf, c, io_opts, data_opts); trace_io_move(c, buf.buf); - printbuf_exit(&buf); } static noinline void trace_io_move_read2(struct bch_fs *c, struct bkey_s_c k) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, k); trace_io_move_read(c, buf.buf); - printbuf_exit(&buf); } static noinline void @@ -78,7 +76,7 @@ trace_io_move_pred2(struct bch_fs *c, struct bkey_s_c k, struct data_update_opts *data_opts, move_pred_fn pred, void *_arg, bool p) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_printf(&buf, "%ps: %u", pred, p); @@ -92,7 +90,6 @@ trace_io_move_pred2(struct bch_fs *c, struct bkey_s_c k, prt_newline(&buf); bch2_data_update_opts_to_text(&buf, c, io_opts, data_opts); trace_io_move_pred(c, buf.buf); - printbuf_exit(&buf); } static noinline void @@ -128,10 +125,9 @@ static void move_free(struct moving_io *io) if (io->b) atomic_dec(&io->b->count); - 
mutex_lock(&ctxt->lock); - list_del(&io->io_list); + scoped_guard(mutex, &ctxt->lock) + list_del(&io->io_list); wake_up(&ctxt->wait); - mutex_unlock(&ctxt->lock); if (!io->write.data_opts.scrub) { bch2_data_update_exit(&io->write); @@ -150,11 +146,9 @@ static void move_write_done(struct bch_write_op *op) if (op->error) { if (trace_io_move_write_fail_enabled()) { - struct printbuf buf = PRINTBUF; - + CLASS(printbuf, buf)(); bch2_write_op_to_text(&buf, op); trace_io_move_write_fail(c, buf.buf); - printbuf_exit(&buf); } this_cpu_inc(c->counters[BCH_COUNTER_io_move_write_fail]); @@ -203,11 +197,9 @@ static void move_write(struct moving_io *io) } if (trace_io_move_write_enabled()) { - struct printbuf buf = PRINTBUF; - + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(io->write.k.k)); trace_io_move_write(c, buf.buf); - printbuf_exit(&buf); } closure_get(&io->write.ctxt->cl); @@ -276,9 +268,8 @@ void bch2_moving_ctxt_exit(struct moving_context *ctxt) EBUG_ON(atomic_read(&ctxt->read_sectors)); EBUG_ON(atomic_read(&ctxt->read_ios)); - mutex_lock(&c->moving_context_lock); - list_del(&ctxt->list); - mutex_unlock(&c->moving_context_lock); + scoped_guard(mutex, &c->moving_context_lock) + list_del(&ctxt->list); /* * Generally, releasing a transaction within a transaction restart means @@ -314,9 +305,8 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt, INIT_LIST_HEAD(&ctxt->ios); init_waitqueue_head(&ctxt->wait); - mutex_lock(&c->moving_context_lock); - list_add(&ctxt->list, &c->moving_context_list); - mutex_unlock(&c->moving_context_lock); + scoped_guard(mutex, &c->moving_context_lock) + list_add(&ctxt->list, &c->moving_context_list); } void bch2_move_stats_exit(struct bch_move_stats *stats, struct bch_fs *c) @@ -412,13 +402,13 @@ int bch2_move_extent(struct moving_context *ctxt, if (trace_io_move_read_enabled()) trace_io_move_read2(c, k); - mutex_lock(&ctxt->lock); - atomic_add(io->read_sectors, &ctxt->read_sectors); - atomic_inc(&ctxt->read_ios); + scoped_guard(mutex, &ctxt->lock) { + atomic_add(io->read_sectors, &ctxt->read_sectors); + atomic_inc(&ctxt->read_ios); - list_add_tail(&io->read_list, &ctxt->reads); - list_add_tail(&io->io_list, &ctxt->ios); - mutex_unlock(&ctxt->lock); + list_add_tail(&io->read_list, &ctxt->reads); + list_add_tail(&io->io_list, &ctxt->ios); + } /* * dropped by move_read_endio() - guards against use after free of @@ -443,13 +433,11 @@ err: count_event(c, io_move_start_fail); if (trace_io_move_start_fail_enabled()) { - struct printbuf buf = PRINTBUF; - + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, k); prt_str(&buf, ": "); prt_str(&buf, bch2_err_str(ret)); trace_io_move_start_fail(c, buf.buf); - printbuf_exit(&buf); } if (bch2_err_matches(ret, BCH_ERR_data_update_done)) @@ -874,7 +862,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, u64 check_mismatch_done = bucket_start; int ret = 0; - struct bch_dev *ca = bch2_dev_tryget(c, dev); + CLASS(bch2_dev_tryget, ca)(c, dev); if (!ca) return 0; @@ -1013,7 +1001,6 @@ err: bch2_trans_iter_exit(trans, &bp_iter); bch2_bkey_buf_exit(&sk, c); bch2_bkey_buf_exit(&last_flushed, c); - bch2_dev_put(ca); return ret; } @@ -1030,9 +1017,9 @@ int bch2_move_data_phys(struct bch_fs *c, { struct moving_context ctxt; - bch2_trans_run(c, bch2_btree_write_buffer_flush_sync(trans)); - bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc); + bch2_btree_write_buffer_flush_sync(ctxt.trans); + if (ctxt.stats) { ctxt.stats->phys = true; ctxt.stats->data_type = (int) 
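Device references follow suit: bch2_dev_tryget()/bch2_dev_put() pairs become CLASS(bch2_dev_tryget, ca)(c, dev), as in __bch2_move_data_phys() above, and the NULL check stays explicit because the constructor can fail. A sketch of the call-site shape (probe_dev is illustrative):

static int probe_dev(struct bch_fs *c, unsigned dev)
{
        CLASS(bch2_dev_tryget, ca)(c, dev);     /* ~ bch2_dev_tryget(c, dev) */
        if (!ca)
                return 0;               /* nothing to put */

        /* ... use ca; the ref is dropped when ca leaves scope ... */
        return 0;
}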
DATA_PROGRESS_DATA_TYPE_phys; @@ -1267,12 +1254,11 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats) BBPOS_MAX, rewrite_old_nodes_pred, c, stats); if (!ret) { - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done); c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done); c->disk_sb.sb->version_min = c->disk_sb.sb->version; bch2_write_super(c); - mutex_unlock(&c->sb_lock); } bch_err_fn(c, ret); @@ -1342,18 +1328,18 @@ static bool scrub_pred(struct bch_fs *c, void *_arg, int bch2_data_job(struct bch_fs *c, struct bch_move_stats *stats, - struct bch_ioctl_data op) + struct bch_ioctl_data *op) { - struct bbpos start = BBPOS(op.start_btree, op.start_pos); - struct bbpos end = BBPOS(op.end_btree, op.end_pos); + struct bbpos start = BBPOS(op->start_btree, op->start_pos); + struct bbpos end = BBPOS(op->end_btree, op->end_pos); int ret = 0; - if (op.op >= BCH_DATA_OP_NR) + if (op->op >= BCH_DATA_OP_NR) return -EINVAL; - bch2_move_stats_init(stats, bch2_data_ops_strs[op.op]); + bch2_move_stats_init(stats, bch2_data_ops_strs[op->op]); - switch (op.op) { + switch (op->op) { case BCH_DATA_OP_scrub: /* * prevent tests from spuriously failing, make sure we see all @@ -1361,13 +1347,13 @@ int bch2_data_job(struct bch_fs *c, */ bch2_btree_interior_updates_flush(c); - ret = bch2_move_data_phys(c, op.scrub.dev, 0, U64_MAX, - op.scrub.data_types, + ret = bch2_move_data_phys(c, op->scrub.dev, 0, U64_MAX, + op->scrub.data_types, NULL, stats, writepoint_hashed((unsigned long) current), false, - scrub_pred, &op) ?: ret; + scrub_pred, op) ?: ret; break; case BCH_DATA_OP_rereplicate: @@ -1384,18 +1370,18 @@ int bch2_data_job(struct bch_fs *c, ret = bch2_replicas_gc2(c) ?: ret; break; case BCH_DATA_OP_migrate: - if (op.migrate.dev >= c->sb.nr_devices) + if (op->migrate.dev >= c->sb.nr_devices) return -EINVAL; stats->data_type = BCH_DATA_journal; - ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev); - ret = bch2_move_data_phys(c, op.migrate.dev, 0, U64_MAX, + ret = bch2_journal_flush_device_pins(&c->journal, op->migrate.dev); + ret = bch2_move_data_phys(c, op->migrate.dev, 0, U64_MAX, ~0, NULL, stats, writepoint_hashed((unsigned long) current), true, - migrate_pred, &op) ?: ret; + migrate_pred, op) ?: ret; bch2_btree_interior_updates_flush(c); ret = bch2_replicas_gc2(c) ?: ret; break; @@ -1467,11 +1453,11 @@ static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, str printbuf_indent_add(out, 2); - mutex_lock(&ctxt->lock); - struct moving_io *io; - list_for_each_entry(io, &ctxt->ios, io_list) - bch2_data_update_inflight_to_text(out, &io->write); - mutex_unlock(&ctxt->lock); + scoped_guard(mutex, &ctxt->lock) { + struct moving_io *io; + list_for_each_entry(io, &ctxt->ios, io_list) + bch2_data_update_inflight_to_text(out, &io->write); + } printbuf_indent_sub(out, 4); } @@ -1480,10 +1466,9 @@ void bch2_fs_moving_ctxts_to_text(struct printbuf *out, struct bch_fs *c) { struct moving_context *ctxt; - mutex_lock(&c->moving_context_lock); - list_for_each_entry(ctxt, &c->moving_context_list, list) - bch2_moving_ctxt_to_text(out, c, ctxt); - mutex_unlock(&c->moving_context_lock); + scoped_guard(mutex, &c->moving_context_lock) + list_for_each_entry(ctxt, &c->moving_context_list, list) + bch2_moving_ctxt_to_text(out, c, ctxt); } void bch2_fs_move_init(struct bch_fs *c) diff --git a/libbcachefs/move.h b/libbcachefs/move.h index 86b80499..fe92ca6d 
100644 --- a/libbcachefs/move.h +++ b/libbcachefs/move.h @@ -152,7 +152,7 @@ int bch2_evacuate_bucket(struct moving_context *, struct data_update_opts); int bch2_data_job(struct bch_fs *, struct bch_move_stats *, - struct bch_ioctl_data); + struct bch_ioctl_data *); void bch2_move_stats_to_text(struct printbuf *, struct bch_move_stats *); void bch2_move_stats_exit(struct bch_move_stats *, struct bch_fs *); diff --git a/libbcachefs/movinggc.c b/libbcachefs/movinggc.c index 5e6de91a..9192b1fc 100644 --- a/libbcachefs/movinggc.c +++ b/libbcachefs/movinggc.c @@ -71,7 +71,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans, if (ret) return ret; - struct bch_dev *ca = bch2_dev_bucket_tryget(c, k.k->p); + CLASS(bch2_dev_bucket_tryget, ca)(c, k.k->p); if (!ca) goto out; @@ -90,7 +90,6 @@ static int bch2_bucket_is_movable(struct btree_trans *trans, ret = lru_idx && lru_idx <= time; out: - bch2_dev_put(ca); bch2_trans_iter_exit(trans, &iter); return ret; } @@ -320,8 +319,8 @@ void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c) bch2_printbuf_make_room(out, 4096); struct task_struct *t; - out->atomic++; scoped_guard(rcu) { + guard(printbuf_atomic)(out); prt_printf(out, "Currently calculated wait:\n"); for_each_rw_member_rcu(c, ca) { prt_printf(out, " %s:\t", ca->name); @@ -333,7 +332,6 @@ void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c) if (t) get_task_struct(t); } - --out->atomic; if (t) { bch2_prt_task_backtrace(out, t, 0, GFP_KERNEL); diff --git a/libbcachefs/namei.c b/libbcachefs/namei.c index 3e2b41ba..8fa10888 100644 --- a/libbcachefs/namei.c +++ b/libbcachefs/namei.c @@ -99,7 +99,9 @@ int bch2_create_trans(struct btree_trans *trans, * If we're not root, we have to own the subvolume being * snapshotted: */ - if (uid && new_inode->bi_uid != uid) { + if (uid && + !capable(CAP_FOWNER) && + new_inode->bi_uid != uid) { ret = -EPERM; goto err; } @@ -727,7 +729,7 @@ static int bch2_check_dirent_inode_dirent(struct btree_trans *trans, bool in_fsck) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); struct btree_iter bp_iter = {}; int ret = 0; @@ -835,7 +837,6 @@ out: err: fsck_err: bch2_trans_iter_exit(trans, &bp_iter); - printbuf_exit(&buf); bch_err_fn(c, ret); return ret; } @@ -847,7 +848,7 @@ int __bch2_check_dirent_target(struct btree_trans *trans, bool in_fsck) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; ret = bch2_check_dirent_inode_dirent(trans, d, target, in_fsck); @@ -882,7 +883,6 @@ int __bch2_check_dirent_target(struct btree_trans *trans, } err: fsck_err: - printbuf_exit(&buf); bch_err_fn(c, ret); return ret; } @@ -940,7 +940,7 @@ int bch2_check_inode_has_case_insensitive(struct btree_trans *trans, snapshot_id_list *snapshot_overwrites, bool *do_update) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bool repairing_parents = false; int ret = 0; @@ -967,7 +967,7 @@ int bch2_check_inode_has_case_insensitive(struct btree_trans *trans, ret = bch2_inum_snapshot_to_path(trans, inode->bi_inum, inode->bi_snapshot, snapshot_overwrites, &buf); if (ret) - goto err; + return ret; if (fsck_err(trans, inode_has_case_insensitive_not_set, "%s", buf.buf)) { inode->bi_flags |= BCH_INODE_has_case_insensitive; @@ -986,14 +986,14 @@ int bch2_check_inode_has_case_insensitive(struct btree_trans *trans, if (dir.bi_parent_subvol) { ret = bch2_subvolume_get_snapshot(trans, dir.bi_parent_subvol, &snapshot); if (ret) - goto err; + return ret; 
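The move.h hunk records the one API change in this file: bch2_data_job() now takes struct bch_ioctl_data by pointer rather than by value, so the fairly large ioctl argument is no longer copied at each call, and the scrub/migrate predicates receive the caller's copy rather than a stack temporary. A simplified, hypothetical caller, assuming the usual copy_from_user() ioctl pattern (the real bcachefs ioctl path does more than this):

static long data_job_ioctl(struct bch_fs *c,
                           struct bch_ioctl_data __user *uarg)
{
        struct bch_ioctl_data arg;
        struct bch_move_stats stats;

        if (copy_from_user(&arg, uarg, sizeof(arg)))
                return -EFAULT;

        return bch2_data_job(c, &stats, &arg);  /* pointer, no struct copy */
}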
snapshot_overwrites = NULL; } ret = bch2_inode_find_by_inum_snapshot(trans, dir.bi_dir, snapshot, &dir, 0); if (ret) - goto err; + return ret; if (!(dir.bi_flags & BCH_INODE_has_case_insensitive)) { prt_printf(&buf, "parent of casefolded dir with has_case_insensitive not set\n"); @@ -1001,13 +1001,13 @@ int bch2_check_inode_has_case_insensitive(struct btree_trans *trans, ret = bch2_inum_snapshot_to_path(trans, dir.bi_inum, dir.bi_snapshot, snapshot_overwrites, &buf); if (ret) - goto err; + return ret; if (fsck_err(trans, inode_parent_has_case_insensitive_not_set, "%s", buf.buf)) { dir.bi_flags |= BCH_INODE_has_case_insensitive; ret = __bch2_fsck_write_inode(trans, &dir); if (ret) - goto err; + return ret; } } @@ -1019,9 +1019,7 @@ int bch2_check_inode_has_case_insensitive(struct btree_trans *trans, break; } out: -err: fsck_err: - printbuf_exit(&buf); if (ret) return ret; diff --git a/libbcachefs/nocow_locking.c b/libbcachefs/nocow_locking.c index 962218fa..58cfd540 100644 --- a/libbcachefs/nocow_locking.c +++ b/libbcachefs/nocow_locking.c @@ -47,7 +47,7 @@ bool __bch2_bucket_nocow_trylock(struct nocow_lock_bucket *l, int v, lock_val = flags ? 1 : -1; unsigned i; - spin_lock(&l->lock); + guard(spinlock)(&l->lock); for (i = 0; i < ARRAY_SIZE(l->b); i++) if (l->b[i] == dev_bucket) @@ -58,21 +58,19 @@ bool __bch2_bucket_nocow_trylock(struct nocow_lock_bucket *l, l->b[i] = dev_bucket; goto take_lock; } -fail: - spin_unlock(&l->lock); + return false; got_entry: v = atomic_read(&l->l[i]); if (lock_val > 0 ? v < 0 : v > 0) - goto fail; + return false; take_lock: v = atomic_read(&l->l[i]); /* Overflow? */ if (v && sign(v + lock_val) != sign(v)) - goto fail; + return false; atomic_add(lock_val, &l->l[i]); - spin_unlock(&l->lock); return true; } diff --git a/libbcachefs/opts.c b/libbcachefs/opts.c index b1cf8890..921f9049 100644 --- a/libbcachefs/opts.c +++ b/libbcachefs/opts.c @@ -584,7 +584,7 @@ void bch2_opt_hook_post_set(struct bch_fs *c, struct bch_dev *ca, u64 inum, break; case Opt_discard: if (!ca) { - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); for_each_member_device(c, ca) { struct bch_member *m = bch2_members_v2_get_mut(ca->disk_sb.sb, ca->dev_idx); @@ -592,7 +592,6 @@ void bch2_opt_hook_post_set(struct bch_fs *c, struct bch_dev *ca, u64 inum, } bch2_write_super(c); - mutex_unlock(&c->sb_lock); } break; case Opt_version_upgrade: @@ -613,7 +612,6 @@ int bch2_parse_one_mount_opt(struct bch_fs *c, struct bch_opts *opts, struct printbuf *parse_later, const char *name, const char *val) { - struct printbuf err = PRINTBUF; u64 v; int ret, id; @@ -638,46 +636,36 @@ int bch2_parse_one_mount_opt(struct bch_fs *c, struct bch_opts *opts, val = bch2_opt_val_synonym_lookup(name, val); if (!(bch2_opt_table[id].flags & OPT_MOUNT)) - goto bad_opt; + return -BCH_ERR_option_name; if (id == Opt_acl && !IS_ENABLED(CONFIG_BCACHEFS_POSIX_ACL)) - goto bad_opt; + return -BCH_ERR_option_name; if ((id == Opt_usrquota || id == Opt_grpquota) && !IS_ENABLED(CONFIG_BCACHEFS_QUOTA)) - goto bad_opt; + return -BCH_ERR_option_name; + CLASS(printbuf, err)(); ret = bch2_opt_parse(c, &bch2_opt_table[id], val, &v, &err); if (ret == -BCH_ERR_option_needs_open_fs) { - ret = 0; - if (parse_later) { prt_printf(parse_later, "%s=%s,", name, val); if (parse_later->allocation_failure) - ret = -ENOMEM; + return -ENOMEM; } - goto out; + return 0; } if (ret < 0) - goto bad_val; + return -BCH_ERR_option_value; if (opts) bch2_opt_set_by_id(opts, id, v); - ret = 0; -out: - printbuf_exit(&err); - return ret; -bad_opt: - ret = 
-BCH_ERR_option_name; - goto out; -bad_val: - ret = -BCH_ERR_option_value; - goto out; + return 0; } int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts, @@ -805,11 +793,10 @@ bool __bch2_opt_set_sb(struct bch_sb *sb, int dev_idx, bool bch2_opt_set_sb(struct bch_fs *c, struct bch_dev *ca, const struct bch_option *opt, u64 v) { - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); bool changed = __bch2_opt_set_sb(c->disk_sb.sb, ca ? ca->dev_idx : -1, opt, v); if (changed) bch2_write_super(c); - mutex_unlock(&c->sb_lock); return changed; } diff --git a/libbcachefs/printbuf.h b/libbcachefs/printbuf.h index 8f4e28d4..907e5c97 100644 --- a/libbcachefs/printbuf.h +++ b/libbcachefs/printbuf.h @@ -295,4 +295,8 @@ static inline void printbuf_atomic_dec(struct printbuf *buf) buf->atomic--; } +DEFINE_GUARD(printbuf_atomic, struct printbuf *, + printbuf_atomic_inc(_T), + printbuf_atomic_dec(_T)); + #endif /* _BCACHEFS_PRINTBUF_H */ diff --git a/libbcachefs/progress.c b/libbcachefs/progress.c index d0989856..42353067 100644 --- a/libbcachefs/progress.c +++ b/libbcachefs/progress.c @@ -46,7 +46,7 @@ void bch2_progress_update_iter(struct btree_trans *trans, s->last_node = b; if (progress_update_p(s)) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); unsigned percent = s->nodes_total ? div64_u64(s->nodes_seen * 100, s->nodes_total) : 0; @@ -56,6 +56,5 @@ void bch2_progress_update_iter(struct btree_trans *trans, bch2_bbpos_to_text(&buf, BBPOS(iter->btree_id, iter->pos)); bch_info(c, "%s", buf.buf); - printbuf_exit(&buf); } } diff --git a/libbcachefs/quota.c b/libbcachefs/quota.c index f241efb1..5f1eff59 100644 --- a/libbcachefs/quota.c +++ b/libbcachefs/quota.c @@ -394,12 +394,10 @@ static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k, dq = bkey_s_c_to_quota(k); q = &c->quotas[k.k->p.inode]; - mutex_lock(&q->lock); + guard(mutex)(&q->lock); mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL); - if (!mq) { - mutex_unlock(&q->lock); + if (!mq) return -ENOMEM; - } for (i = 0; i < Q_COUNTERS; i++) { mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit); @@ -414,8 +412,6 @@ static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k, mq->c[Q_INO].timer = qdq->d_ino_timer; if (qdq && qdq->d_fieldmask & QC_INO_WARNS) mq->c[Q_INO].warns = qdq->d_ino_warns; - - mutex_unlock(&q->lock); } return 0; @@ -522,24 +518,21 @@ advance: int bch2_fs_quota_read(struct bch_fs *c) { + scoped_guard(mutex, &c->sb_lock) { + struct bch_sb_field_quota *sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb); + if (!sb_quota) + return bch_err_throw(c, ENOSPC_sb_quota); - mutex_lock(&c->sb_lock); - struct bch_sb_field_quota *sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb); - if (!sb_quota) { - mutex_unlock(&c->sb_lock); - return bch_err_throw(c, ENOSPC_sb_quota); + bch2_sb_quota_read(c); } - bch2_sb_quota_read(c); - mutex_unlock(&c->sb_lock); - - int ret = bch2_trans_run(c, - for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN, + CLASS(btree_trans, trans)(c); + int ret = for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN, BTREE_ITER_prefetch, k, __bch2_quota_set(c, k, NULL)) ?: for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, - bch2_fs_quota_read_inode(trans, &iter, k))); + bch2_fs_quota_read_inode(trans, &iter, k)); bch_err_fn(c, ret); return ret; } @@ -550,7 +543,6 @@ static int bch2_quota_enable(struct super_block *sb, unsigned uflags) { struct bch_fs *c = sb->s_fs_info; struct bch_sb_field_quota *sb_quota; - 
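The printbuf.h hunk above is the infrastructure behind the out->atomic++ / --out->atomic conversions elsewhere in this series: DEFINE_GUARD(), from linux/cleanup.h, takes the guarded type plus entry and exit expressions over _T and generates the class invoked as guard(printbuf_atomic)(out). What that gives a call site (foo_to_text and struct foo are illustrative):

#include <linux/cleanup.h>

DEFINE_GUARD(printbuf_atomic, struct printbuf *,
             printbuf_atomic_inc(_T),   /* on entry: buf->atomic++ */
             printbuf_atomic_dec(_T));  /* on scope exit: buf->atomic-- */

struct foo;                             /* illustrative */

static void foo_to_text(struct printbuf *out, struct foo *f)
{
        guard(printbuf_atomic)(out);    /* no allocating while atomic is set */

        prt_printf(out, "...");
}                                       /* decremented on every exit path */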
int ret = 0; if (sb->s_flags & SB_RDONLY) return -EROFS; @@ -569,11 +561,12 @@ static int bch2_quota_enable(struct super_block *sb, unsigned uflags) if (uflags & FS_QUOTA_PDQ_ENFD && !c->opts.prjquota) return -EINVAL; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb); if (!sb_quota) { - ret = bch_err_throw(c, ENOSPC_sb_quota); - goto unlock; + int ret = bch_err_throw(c, ENOSPC_sb_quota); + bch_err_fn(c, ret); + return ret; } if (uflags & FS_QUOTA_UDQ_ENFD) @@ -586,10 +579,7 @@ static int bch2_quota_enable(struct super_block *sb, unsigned uflags) SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true); bch2_write_super(c); -unlock: - mutex_unlock(&c->sb_lock); - - return bch2_err_class(ret); + return 0; } static int bch2_quota_disable(struct super_block *sb, unsigned uflags) @@ -599,7 +589,7 @@ static int bch2_quota_disable(struct super_block *sb, unsigned uflags) if (sb->s_flags & SB_RDONLY) return -EROFS; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); if (uflags & FS_QUOTA_UDQ_ENFD) SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false); @@ -610,8 +600,6 @@ static int bch2_quota_disable(struct super_block *sb, unsigned uflags) SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false); bch2_write_super(c); - mutex_unlock(&c->sb_lock); - return 0; } @@ -700,14 +688,12 @@ static int bch2_quota_set_info(struct super_block *sb, int type, { struct bch_fs *c = sb->s_fs_info; struct bch_sb_field_quota *sb_quota; - int ret = 0; if (0) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); qc_info_to_text(&buf, info); pr_info("setting:\n%s", buf.buf); - printbuf_exit(&buf); } if (sb->s_flags & SB_RDONLY) @@ -723,11 +709,12 @@ static int bch2_quota_set_info(struct super_block *sb, int type, ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS)) return -EINVAL; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb); if (!sb_quota) { - ret = bch_err_throw(c, ENOSPC_sb_quota); - goto unlock; + int ret = bch_err_throw(c, ENOSPC_sb_quota); + bch_err_fn(c, ret); + return bch2_err_class(ret); } if (info->i_fieldmask & QC_SPC_TIMER) @@ -749,10 +736,7 @@ static int bch2_quota_set_info(struct super_block *sb, int type, bch2_sb_quota_read(c); bch2_write_super(c); -unlock: - mutex_unlock(&c->sb_lock); - - return bch2_err_class(ret); + return 0; } /* Get/set individual quotas: */ @@ -778,15 +762,13 @@ static int bch2_get_quota(struct super_block *sb, struct kqid kqid, struct bch_fs *c = sb->s_fs_info; struct bch_memquota_type *q = &c->quotas[kqid.type]; qid_t qid = from_kqid(&init_user_ns, kqid); - struct bch_memquota *mq; memset(qdq, 0, sizeof(*qdq)); - mutex_lock(&q->lock); - mq = genradix_ptr(&q->table, qid); + guard(mutex)(&q->lock); + struct bch_memquota *mq = genradix_ptr(&q->table, qid); if (mq) __bch2_quota_get(qdq, mq); - mutex_unlock(&q->lock); return 0; } @@ -799,21 +781,17 @@ static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid, qid_t qid = from_kqid(&init_user_ns, *kqid); struct genradix_iter iter; struct bch_memquota *mq; - int ret = 0; - mutex_lock(&q->lock); + guard(mutex)(&q->lock); genradix_for_each_from(&q->table, iter, mq, qid) if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) { __bch2_quota_get(qdq, mq); *kqid = make_kqid(current_user_ns(), kqid->type, iter.pos); - goto found; + return 0; } - ret = -ENOENT; -found: - mutex_unlock(&q->lock); - return bch2_err_class(ret); + return -ENOENT; } static int bch2_set_quota_trans(struct btree_trans *trans, @@ -821,12 +799,10 @@ static int 
bch2_set_quota_trans(struct btree_trans *trans, struct qc_dqblk *qdq) { struct btree_iter iter; - struct bkey_s_c k; - int ret; - - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p, - BTREE_ITER_slots|BTREE_ITER_intent); - ret = bkey_err(k); + struct bkey_s_c k = + bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p, + BTREE_ITER_slots|BTREE_ITER_intent); + int ret = bkey_err(k); if (unlikely(ret)) return ret; @@ -852,24 +828,22 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid, struct qc_dqblk *qdq) { struct bch_fs *c = sb->s_fs_info; - struct bkey_i_quota new_quota; - int ret; if (0) { - struct printbuf buf = PRINTBUF; - + CLASS(printbuf, buf)(); qc_dqblk_to_text(&buf, qdq); pr_info("setting:\n%s", buf.buf); - printbuf_exit(&buf); } if (sb->s_flags & SB_RDONLY) return -EROFS; + struct bkey_i_quota new_quota; bkey_quota_init(&new_quota.k_i); new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid)); - ret = bch2_trans_commit_do(c, NULL, NULL, 0, + CLASS(btree_trans, trans)(c); + int ret = commit_do(trans, NULL, NULL, 0, bch2_set_quota_trans(trans, &new_quota, qdq)) ?: __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq); diff --git a/libbcachefs/rebalance.c b/libbcachefs/rebalance.c index 73b463c9..32fa7cf9 100644 --- a/libbcachefs/rebalance.c +++ b/libbcachefs/rebalance.c @@ -235,24 +235,19 @@ static const char * const bch2_rebalance_state_strs[] = { int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum) { struct btree_iter iter; - struct bkey_s_c k; - struct bkey_i_cookie *cookie; - u64 v; - int ret; - bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work, SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX), BTREE_ITER_intent); - k = bch2_btree_iter_peek_slot(trans, &iter); - ret = bkey_err(k); + struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter); + int ret = bkey_err(k); if (ret) goto err; - v = k.k->type == KEY_TYPE_cookie + u64 v = k.k->type == KEY_TYPE_cookie ? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie) : 0; - cookie = bch2_trans_kmalloc(trans, sizeof(*cookie)); + struct bkey_i_cookie *cookie = bch2_trans_kmalloc(trans, sizeof(*cookie)); ret = PTR_ERR_OR_ZERO(cookie); if (ret) goto err; @@ -269,8 +264,8 @@ err: int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum) { - int ret = bch2_trans_commit_do(c, NULL, NULL, - BCH_TRANS_COMMIT_no_enospc, + CLASS(btree_trans, trans)(c); + int ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, bch2_set_rebalance_needs_scan_trans(trans, inum)); bch2_rebalance_wakeup(c); return ret; @@ -284,19 +279,15 @@ int bch2_set_fs_needs_rebalance(struct bch_fs *c) static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie) { struct btree_iter iter; - struct bkey_s_c k; - u64 v; - int ret; - bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work, SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX), BTREE_ITER_intent); - k = bch2_btree_iter_peek_slot(trans, &iter); - ret = bkey_err(k); + struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter); + int ret = bkey_err(k); if (ret) goto err; - v = k.k->type == KEY_TYPE_cookie + u64 v = k.k->type == KEY_TYPE_cookie ? 
le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie) : 0; @@ -373,7 +364,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans, } if (trace_rebalance_extent_enabled()) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_bkey_val_to_text(&buf, c, k); prt_newline(&buf); @@ -399,7 +390,6 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans, } trace_rebalance_extent(c, buf.buf); - printbuf_exit(&buf); } return k; @@ -713,17 +703,15 @@ void bch2_rebalance_stop(struct bch_fs *c) int bch2_rebalance_start(struct bch_fs *c) { - struct task_struct *p; - int ret; - if (c->rebalance.thread) return 0; if (c->opts.nochanges) return 0; - p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name); - ret = PTR_ERR_OR_ZERO(p); + struct task_struct *p = + kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name); + int ret = PTR_ERR_OR_ZERO(p); bch_err_msg(c, ret, "creating rebalance thread"); if (ret) return ret; @@ -779,7 +767,7 @@ static int check_rebalance_work_one(struct btree_trans *trans, { struct bch_fs *c = trans->c; struct bkey_s_c extent_k, rebalance_k; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = bkey_err(extent_k = bch2_btree_iter_peek(trans, extent_iter)) ?: bkey_err(rebalance_k = bch2_btree_iter_peek(trans, rebalance_iter)); @@ -833,7 +821,7 @@ static int check_rebalance_work_one(struct btree_trans *trans, ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work, extent_k.k->p, false); if (ret) - goto err; + return ret; } if (fsck_err_on(should_have_rebalance && !have_rebalance, @@ -842,22 +830,20 @@ static int check_rebalance_work_one(struct btree_trans *trans, ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work, extent_k.k->p, true); if (ret) - goto err; + return ret; } if (cmp <= 0) bch2_btree_iter_advance(trans, extent_iter); if (cmp >= 0) bch2_btree_iter_advance(trans, rebalance_iter); -err: fsck_err: - printbuf_exit(&buf); return ret; } int bch2_check_rebalance_work(struct bch_fs *c) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct btree_iter rebalance_iter, extent_iter; int ret = 0; @@ -884,6 +870,5 @@ int bch2_check_rebalance_work(struct bch_fs *c) bch2_bkey_buf_exit(&last_flushed, c); bch2_trans_iter_exit(trans, &extent_iter); bch2_trans_iter_exit(trans, &rebalance_iter); - bch2_trans_put(trans); return ret < 0 ? 
ret : 0; } diff --git a/libbcachefs/recovery.c b/libbcachefs/recovery.c index f82e9fb8..a8eea478 100644 --- a/libbcachefs/recovery.c +++ b/libbcachefs/recovery.c @@ -40,7 +40,7 @@ int bch2_btree_lost_data(struct bch_fs *c, u64 b = BIT_ULL(btree); int ret = 0; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); if (!(c->sb.btrees_lost_data & b)) { @@ -109,8 +109,6 @@ int bch2_btree_lost_data(struct bch_fs *c, } out: bch2_write_super(c); - mutex_unlock(&c->sb_lock); - return ret; } @@ -123,7 +121,7 @@ static void kill_btree(struct bch_fs *c, enum btree_id btree) /* for -o reconstruct_alloc: */ void bch2_reconstruct_alloc(struct bch_fs *c) { - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); __set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_allocations, ext->recovery_passes_required); @@ -167,7 +165,6 @@ void bch2_reconstruct_alloc(struct bch_fs *c) c->disk_sb.sb->features[0] &= ~cpu_to_le64(BIT_ULL(BCH_FEATURE_no_alloc_info)); bch2_write_super(c); - mutex_unlock(&c->sb_lock); for (unsigned i = 0; i < btree_id_nr_alive(c); i++) if (btree_id_is_alloc(i)) @@ -339,14 +336,15 @@ static int journal_sort_seq_cmp(const void *_l, const void *_r) return cmp_int(l->journal_seq - 1, r->journal_seq - 1); } +DEFINE_DARRAY_NAMED(darray_journal_keys, struct journal_key *) + int bch2_journal_replay(struct bch_fs *c) { struct journal_keys *keys = &c->journal_keys; - DARRAY(struct journal_key *) keys_sorted = { 0 }; + CLASS(darray_journal_keys, keys_sorted)(); struct journal *j = &c->journal; u64 start_seq = c->journal_replay_seq_start; u64 end_seq = c->journal_replay_seq_start; - struct btree_trans *trans = NULL; bool immediate_flush = false; int ret = 0; @@ -354,13 +352,13 @@ int bch2_journal_replay(struct bch_fs *c) ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)", keys->nr, start_seq, end_seq); if (ret) - goto err; + return ret; } BUG_ON(!atomic_read(&keys->ref)); move_gap(keys, keys->nr); - trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); /* * Replay accounting keys first: we can't allow the write buffer to @@ -380,7 +378,7 @@ int bch2_journal_replay(struct bch_fs *c) BCH_WATERMARK_reclaim, bch2_journal_replay_accounting_key(trans, k)); if (bch2_fs_fatal_err_on(ret, c, "error replaying accounting; %s", bch2_err_str(ret))) - goto err; + return ret; k->overwritten = true; } @@ -414,7 +412,7 @@ int bch2_journal_replay(struct bch_fs *c) if (ret) { ret = darray_push(&keys_sorted, k); if (ret) - goto err; + return ret; } } @@ -445,22 +443,16 @@ int bch2_journal_replay(struct bch_fs *c) : 0), bch2_journal_replay_key(trans, k)); if (ret) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_btree_id_level_to_text(&buf, k->btree_id, k->level); bch_err_msg(c, ret, "while replaying key at %s:", buf.buf); - printbuf_exit(&buf); - goto err; + return ret; } BUG_ON(k->btree_id != BTREE_ID_accounting && !k->overwritten); } - /* - * We need to put our btree_trans before calling flush_all_pins(), since - * that will use a btree_trans internally - */ - bch2_trans_put(trans); - trans = NULL; + bch2_trans_unlock_long(trans); if (!c->opts.retain_recovery_info && c->recovery.pass_done >= BCH_RECOVERY_PASS_journal_replay) @@ -479,12 +471,7 @@ int bch2_journal_replay(struct bch_fs *c) if (keys->nr) bch2_journal_log_msg(c, "journal replay finished"); -err: - if (trans) - bch2_trans_put(trans); - darray_exit(&keys_sorted); - 
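bch2_journal_replay() above shows a third cleanup-class flavor: the anonymous DARRAY(struct journal_key *) cannot carry a cleanup class, so the type is first named with DEFINE_DARRAY_NAMED() and then declared with CLASS(), which is what lets the err: label and the manual darray_exit()/bch2_trans_put() unwinding go away. The idiom in miniature, assuming DEFINE_DARRAY_NAMED also generates the matching CLASS, as its use here implies (darray_u64 and collect are illustrative):

DEFINE_DARRAY_NAMED(darray_u64, u64)

static int collect(void)
{
        CLASS(darray_u64, v)();         /* darray_exit(&v) tied to scope */

        int ret = darray_push(&v, 42);
        if (ret)
                return ret;             /* freed here */

        /* ... use v.data[0 .. v.nr) ... */
        return 0;                       /* ...and here */
}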
bch_err_fn(c, ret); - return ret; + return 0; } /* journal replay early: */ @@ -596,7 +583,7 @@ static int journal_replay_early(struct bch_fs *c, static int read_btree_roots(struct bch_fs *c) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; for (unsigned i = 0; i < btree_id_nr_alive(c); i++) { @@ -632,7 +619,6 @@ static int read_btree_roots(struct bch_fs *c) } } fsck_err: - printbuf_exit(&buf); return ret; } @@ -666,7 +652,7 @@ static bool check_version_upgrade(struct bch_fs *c) } if (new_version > old_version) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); if (old_version < bcachefs_metadata_required_upgrade_below) prt_str(&buf, "Version upgrade required:\n"); @@ -699,14 +685,12 @@ static bool check_version_upgrade(struct bch_fs *c) } bch_notice(c, "%s", buf.buf); - printbuf_exit(&buf); - ret = true; } if (new_version > c->sb.version_incompat_allowed && c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "Now allowing incompatible features up to "); bch2_version_to_text(&buf, new_version); @@ -715,8 +699,6 @@ static bool check_version_upgrade(struct bch_fs *c) prt_newline(&buf); bch_notice(c, "%s", buf.buf); - printbuf_exit(&buf); - ret = true; } @@ -796,15 +778,14 @@ int bch2_fs_recovery(struct bch_fs *c) u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); if (sb_passes) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "superblock requires following recovery passes to be run:\n "); prt_bitflags(&buf, bch2_recovery_passes, sb_passes); bch_info(c, "%s", buf.buf); - printbuf_exit(&buf); } if (bch2_check_version_downgrade(c)) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "Version downgrade required:"); @@ -820,7 +801,6 @@ int bch2_fs_recovery(struct bch_fs *c) } bch_info(c, "%s", buf.buf); - printbuf_exit(&buf); write_sb = true; } @@ -937,11 +917,10 @@ use_clean: if (ret) goto err; - ret = bch2_fs_resize_on_mount(c); - if (ret) { - up_write(&c->state_lock); + scoped_guard(rwsem_write, &c->state_lock) + ret = bch2_fs_resize_on_mount(c); + if (ret) goto err; - } if (c->sb.features & BIT_ULL(BCH_FEATURE_small_image)) { bch_info(c, "filesystem is an unresized image file, mounting ro"); @@ -1119,10 +1098,9 @@ use_clean: bch2_move_stats_init(&stats, "recovery"); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_version_to_text(&buf, c->sb.version_min); bch_info(c, "scanning for old btree nodes: min_version %s", buf.buf); - printbuf_exit(&buf); ret = bch2_fs_read_write_early(c) ?: bch2_scan_old_btree_nodes(c, &stats); @@ -1150,14 +1128,13 @@ final_out: err: fsck_err: { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "error in recovery: %s\n", bch2_err_str(ret)); bch2_fs_emergency_read_only2(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); } goto final_out; } @@ -1167,33 +1144,30 @@ int bch2_fs_initialize(struct bch_fs *c) struct bch_inode_unpacked root_inode, lostfound_inode; struct bkey_inode_buf packed_inode; struct qstr lostfound = QSTR("lost+found"); - struct bch_member *m; int ret; bch_notice(c, "initializing new filesystem"); set_bit(BCH_FS_new_fs, &c->flags); - mutex_lock(&c->sb_lock); - c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done); - c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done); - - 
bch2_check_version_downgrade(c); + scoped_guard(mutex, &c->sb_lock) { + c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done); + c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done); - if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) { - bch2_sb_upgrade(c, bcachefs_metadata_version_current, false); - SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current); - bch2_write_super(c); - } + bch2_check_version_downgrade(c); - for_each_member_device(c, ca) { - m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); - SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, false); - } + if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) { + bch2_sb_upgrade(c, bcachefs_metadata_version_current, false); + SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current); + bch2_write_super(c); + } - bch2_sb_members_to_cpu(c); + for_each_member_device(c, ca) { + struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); + SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, false); + } - bch2_write_super(c); - mutex_unlock(&c->sb_lock); + bch2_write_super(c); + } set_bit(BCH_FS_btree_running, &c->flags); @@ -1293,12 +1267,11 @@ int bch2_fs_initialize(struct bch_fs *c) if (ret) goto err; - mutex_lock(&c->sb_lock); - SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true); - SET_BCH_SB_CLEAN(c->disk_sb.sb, false); - - bch2_write_super(c); - mutex_unlock(&c->sb_lock); + scoped_guard(mutex, &c->sb_lock) { + SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true); + SET_BCH_SB_CLEAN(c->disk_sb.sb, false); + bch2_write_super(c); + } c->recovery.curr_pass = BCH_RECOVERY_PASS_NR; return 0; diff --git a/libbcachefs/recovery_passes.c b/libbcachefs/recovery_passes.c index 6a039e01..f9d1c492 100644 --- a/libbcachefs/recovery_passes.c +++ b/libbcachefs/recovery_passes.c @@ -237,19 +237,21 @@ static int bch2_lookup_root_inode(struct bch_fs *c) subvol_inum inum = BCACHEFS_ROOT_SUBVOL_INUM; struct bch_inode_unpacked inode_u; struct bch_subvolume subvol; + CLASS(btree_trans, trans)(c); - return bch2_trans_do(c, + return lockrestart_do(trans, bch2_subvolume_get(trans, inum.subvol, true, &subvol) ?: bch2_inode_find_by_inum_trans(trans, inum, &inode_u)); } struct recovery_pass_fn { int (*fn)(struct bch_fs *); + const char *name; unsigned when; }; static struct recovery_pass_fn recovery_pass_fns[] = { -#define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when }, +#define x(_fn, _id, _when) { .fn = bch2_##_fn, .name = #_fn, .when = _when }, BCH_RECOVERY_PASSES() #undef x }; @@ -346,13 +348,11 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c, lockdep_assert_held(&c->sb_lock); bch2_printbuf_make_room(out, 1024); - out->atomic++; - - unsigned long lockflags; - spin_lock_irqsave(&r->lock, lockflags); + guard(printbuf_atomic)(out); + guard(spinlock_irq)(&r->lock); if (!recovery_pass_needs_set(c, pass, &flags)) - goto out; + return 0; bool in_recovery = test_bit(BCH_FS_in_recovery, &c->flags); bool rewind = in_recovery && @@ -369,8 +369,7 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c, (!in_recovery || r->curr_pass >= BCH_RECOVERY_PASS_set_may_go_rw)) { prt_printf(out, "need recovery pass %s (%u), but already rw\n", bch2_recovery_passes[pass], pass); - ret = bch_err_throw(c, cannot_rewind_recovery); - goto out; + return bch_err_throw(c, cannot_rewind_recovery); } if (ratelimit) @@ -400,9 +399,7 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c, if (p->when & PASS_ONLINE) 
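Two more guard flavors show up above: __bch2_run_explicit_recovery_pass trades spin_lock_irqsave()/spin_unlock_irqrestore() for guard(spinlock_irq), and bch2_fs_initialize wraps its superblock edits in scoped_guard(mutex, ...), which confines the lock to the braces. For example (stats_lock and stats_bump are illustrative):

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);
static u64 stats_count;

static void stats_bump(void)
{
        /* lock held only inside the braces; irqs disabled for
         * the duration (an irqsave guard variant also exists
         * for callers whose irq state is unknown) */
        scoped_guard(spinlock_irq, &stats_lock)
                stats_count++;
}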
bch2_run_async_recovery_passes(c); } -out: - spin_unlock_irqrestore(&r->lock, lockflags); - --out->atomic; + return ret; } @@ -458,16 +455,14 @@ int bch2_run_print_explicit_recovery_pass(struct bch_fs *c, enum bch_recovery_pa if (!recovery_pass_needs_set(c, pass, &flags)) return 0; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); int ret = __bch2_run_explicit_recovery_pass(c, &buf, pass, RUN_RECOVERY_PASS_nopersistent); - mutex_unlock(&c->sb_lock); bch2_print_str(c, KERN_NOTICE, buf.buf); - printbuf_exit(&buf); return ret; } @@ -486,6 +481,7 @@ static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass) r->passes_to_run &= ~BIT_ULL(pass); if (ret) { + bch_err(c, "%s(): error %s", p->name, bch2_err_str(ret)); r->passes_failing |= BIT_ULL(pass); return ret; } diff --git a/libbcachefs/reflink.c b/libbcachefs/reflink.c index 8d8e045b..60abd89d 100644 --- a/libbcachefs/reflink.c +++ b/libbcachefs/reflink.c @@ -183,7 +183,7 @@ static int bch2_indirect_extent_missing_error(struct btree_trans *trans, u64 live_end = REFLINK_P_IDX(p.v) + p.k->size; u64 refd_start = live_start - le32_to_cpu(p.v->front_pad); u64 refd_end = live_end + le32_to_cpu(p.v->back_pad); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 0; BUG_ON(missing_start < refd_start); @@ -195,7 +195,7 @@ static int bch2_indirect_extent_missing_error(struct btree_trans *trans, prt_printf(&buf, "pointer to missing indirect extent in "); ret = bch2_inum_snap_offset_err_msg_trans(trans, &buf, missing_pos); if (ret) - goto err; + return ret; prt_printf(&buf, "-%llu\n", (missing_pos.offset + (missing_end - missing_start)) << 9); bch2_bkey_val_to_text(&buf, c, p.s_c); @@ -207,7 +207,7 @@ static int bch2_indirect_extent_missing_error(struct btree_trans *trans, struct bkey_i_reflink_p *new = bch2_bkey_make_mut_noupdate_typed(trans, p.s_c, reflink_p); ret = PTR_ERR_OR_ZERO(new); if (ret) - goto err; + return ret; /* * Is the missing range not actually needed? @@ -238,15 +238,13 @@ static int bch2_indirect_extent_missing_error(struct btree_trans *trans, ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, &new->k_i, BTREE_TRIGGER_norun); if (ret) - goto err; + return ret; if (should_commit) ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: bch_err_throw(c, transaction_restart_nested); } -err: fsck_err: - printbuf_exit(&buf); return ret; } @@ -301,7 +299,7 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans, enum btree_iter_update_trigger_flags flags) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); s64 offset_into_extent = *idx - REFLINK_P_IDX(p.v); struct btree_iter iter; @@ -360,7 +358,6 @@ next: err: fsck_err: bch2_trans_iter_exit(trans, &iter); - printbuf_exit(&buf); return ret; } @@ -374,7 +371,7 @@ static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans, int add = !(flags & BTREE_TRIGGER_overwrite) ? 
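recovery_pass_fns gains a .name field by stringizing the function token in the x-macro, which is what lets bch2_run_recovery_pass log which pass failed. The same trick in miniature (the pass list here is invented):

#define MY_PASSES()     \
        x(check_foo)    \
        x(check_bar)

static int check_foo(void) { return 0; }
static int check_bar(void) { return 0; }

struct my_pass {
        int             (*fn)(void);
        const char      *name;
};

/* #_fn stringizes the token: .name = "check_foo", ... */
static const struct my_pass my_passes[] = {
#define x(_fn) { .fn = _fn, .name = #_fn },
        MY_PASSES()
#undef x
};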
1 : -1; u64 next_idx = REFLINK_P_IDX(p.v) + p.k->size + le32_to_cpu(p.v->back_pad); s64 ret = 0; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); if (r_idx >= c->reflink_gc_nr) goto not_found; @@ -394,12 +391,10 @@ not_found: if (flags & BTREE_TRIGGER_check_repair) { ret = bch2_indirect_extent_missing_error(trans, p, *idx, next_idx, false); if (ret) - goto err; + return ret; } *idx = next_idx; -err: - printbuf_exit(&buf); return ret; } @@ -498,20 +493,15 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, bool reflink_p_may_update_opts_field) { struct bch_fs *c = trans->c; - struct btree_iter reflink_iter = {}; - struct bkey_s_c k; - struct bkey_i *r_v; - struct bkey_i_reflink_p *r_p; - __le64 *refcount; - int ret; if (orig->k.type == KEY_TYPE_inline_data) bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data); + struct btree_iter reflink_iter; bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX, BTREE_ITER_intent); - k = bch2_btree_iter_peek_prev(trans, &reflink_iter); - ret = bkey_err(k); + struct bkey_s_c k = bch2_btree_iter_peek_prev(trans, &reflink_iter); + int ret = bkey_err(k); if (ret) goto err; @@ -523,7 +513,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, if (bkey_ge(reflink_iter.pos, POS(0, REFLINK_P_IDX_MAX - orig->k.size))) return -ENOSPC; - r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k)); + struct bkey_i *r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k)); ret = PTR_ERR_OR_ZERO(r_v); if (ret) goto err; @@ -536,7 +526,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k)); - refcount = bkey_refcount(bkey_i_to_s(r_v)); + __le64 *refcount = bkey_refcount(bkey_i_to_s(r_v)); *refcount = 0; memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k)); @@ -549,7 +539,8 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, * so we know it will be big enough: */ orig->k.type = KEY_TYPE_reflink_p; - r_p = bkey_i_to_reflink_p(orig); + + struct bkey_i_reflink_p *r_p = bkey_i_to_reflink_p(orig); set_bkey_val_bytes(&r_p->k, sizeof(r_p->v)); /* FORTIFY_SOURCE is broken here, and doesn't provide unsafe_memset() */ @@ -598,7 +589,6 @@ s64 bch2_remap_range(struct bch_fs *c, u64 new_i_size, s64 *i_sectors_delta, bool may_change_src_io_path_opts) { - struct btree_trans *trans; struct btree_iter dst_iter, src_iter; struct bkey_s_c src_k; struct bkey_buf new_dst, new_src; @@ -623,7 +613,7 @@ s64 bch2_remap_range(struct bch_fs *c, bch2_bkey_buf_init(&new_dst); bch2_bkey_buf_init(&new_src); - trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); ret = bch2_inum_opts_get(trans, src_inum, &opts); if (ret) @@ -761,7 +751,6 @@ s64 bch2_remap_range(struct bch_fs *c, bch2_trans_iter_exit(trans, &inode_iter); } while (bch2_err_matches(ret2, BCH_ERR_transaction_restart)); err: - bch2_trans_put(trans); bch2_bkey_buf_exit(&new_src, c); bch2_bkey_buf_exit(&new_dst, c); @@ -779,7 +768,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans, { struct bch_fs *c = trans->c; const __le64 *refcount = bkey_refcount_c(k); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); struct reflink_gc *r; int ret = 0; @@ -807,7 +796,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans, struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); ret = PTR_ERR_OR_ZERO(new); if (ret) - goto out; + return ret; if (!r->refcount) new->k.type = KEY_TYPE_deleted; @@ -815,32 +804,30 @@ static int 
bch2_gc_write_reflink_key(struct btree_trans *trans, *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount); ret = bch2_trans_update(trans, iter, new, 0); } -out: fsck_err: - printbuf_exit(&buf); return ret; } int bch2_gc_reflink_done(struct bch_fs *c) { + CLASS(btree_trans, trans)(c); size_t idx = 0; - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, + int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_reflink, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_gc_write_reflink_key(trans, &iter, k, &idx))); + bch2_gc_write_reflink_key(trans, &iter, k, &idx)); c->reflink_gc_nr = 0; return ret; } int bch2_gc_reflink_start(struct bch_fs *c) { + CLASS(btree_trans, trans)(c); c->reflink_gc_nr = 0; - int ret = bch2_trans_run(c, - for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN, + int ret = for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN, BTREE_ITER_prefetch, k, ({ const __le64 *refcount = bkey_refcount_c(k); @@ -858,7 +845,7 @@ int bch2_gc_reflink_start(struct bch_fs *c) r->size = k.k->size; r->refcount = 0; 0; - }))); + })); bch_err_fn(c, ret); return ret; diff --git a/libbcachefs/replicas.c b/libbcachefs/replicas.c index 8383bd7f..0784283c 100644 --- a/libbcachefs/replicas.c +++ b/libbcachefs/replicas.c @@ -286,11 +286,8 @@ bool bch2_replicas_marked_locked(struct bch_fs *c, bool bch2_replicas_marked(struct bch_fs *c, struct bch_replicas_entry_v1 *search) { - percpu_down_read(&c->mark_lock); - bool ret = bch2_replicas_marked_locked(c, search); - percpu_up_read(&c->mark_lock); - - return ret; + guard(percpu_read)(&c->mark_lock); + return bch2_replicas_marked_locked(c, search); } noinline @@ -305,14 +302,14 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c, memset(&new_r, 0, sizeof(new_r)); memset(&new_gc, 0, sizeof(new_gc)); - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); if (c->replicas_gc.entries && !__replicas_has_entry(&c->replicas_gc, new_entry)) { new_gc = cpu_replicas_add_entry(c, &c->replicas_gc, new_entry); if (!new_gc.entries) { ret = bch_err_throw(c, ENOMEM_cpu_replicas); - goto err; + goto out; } } @@ -320,12 +317,12 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c, new_r = cpu_replicas_add_entry(c, &c->replicas, new_entry); if (!new_r.entries) { ret = bch_err_throw(c, ENOMEM_cpu_replicas); - goto err; + goto out; } ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r); if (ret) - goto err; + goto out; } if (!new_r.entries && @@ -338,22 +335,18 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c, bch2_write_super(c); /* don't update in memory replicas until changes are persistent */ - percpu_down_write(&c->mark_lock); - if (new_r.entries) - swap(c->replicas, new_r); - if (new_gc.entries) - swap(new_gc, c->replicas_gc); - percpu_up_write(&c->mark_lock); + scoped_guard(percpu_write, &c->mark_lock) { + if (new_r.entries) + swap(c->replicas, new_r); + if (new_gc.entries) + swap(new_gc, c->replicas_gc); + } out: - mutex_unlock(&c->sb_lock); - kfree(new_r.entries); kfree(new_gc.entries); - return ret; -err: bch_err_msg(c, ret, "adding replicas entry"); - goto out; + return ret; } int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry_v1 *r) @@ -371,24 +364,20 @@ int bch2_replicas_gc_end(struct bch_fs *c, int ret) { lockdep_assert_held(&c->replicas_gc_lock); - mutex_lock(&c->sb_lock); - percpu_down_write(&c->mark_lock); - - ret = ret ?: - bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc); - if (!ret) - swap(c->replicas, c->replicas_gc); - - 
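bch2_replicas_marked now takes c->mark_lock, a percpu_rw_semaphore, via guard(percpu_read). Any lock primitive can opt into the guard machinery through DEFINE_GUARD; whether these particular guards come from core headers or from bcachefs' own, the definitions would plausibly look like:

#include <linux/cleanup.h>
#include <linux/percpu-rwsem.h>

DEFINE_GUARD(percpu_read, struct percpu_rw_semaphore *,
             percpu_down_read(_T), percpu_up_read(_T))
DEFINE_GUARD(percpu_write, struct percpu_rw_semaphore *,
             percpu_down_write(_T), percpu_up_write(_T))

After that, guard(percpu_read)(&c->mark_lock) reads exactly like the mutex case.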
kfree(c->replicas_gc.entries); - c->replicas_gc.entries = NULL; + guard(mutex)(&c->sb_lock); + scoped_guard(percpu_write, &c->mark_lock) { + ret = ret ?: + bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc); + if (!ret) + swap(c->replicas, c->replicas_gc); - percpu_up_write(&c->mark_lock); + kfree(c->replicas_gc.entries); + c->replicas_gc.entries = NULL; + } if (!ret) bch2_write_super(c); - mutex_unlock(&c->sb_lock); - return ret; } @@ -399,7 +388,7 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask) lockdep_assert_held(&c->replicas_gc_lock); - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); BUG_ON(c->replicas_gc.entries); c->replicas_gc.nr = 0; @@ -420,7 +409,6 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask) c->replicas_gc.entry_size, GFP_KERNEL); if (!c->replicas_gc.entries) { - mutex_unlock(&c->sb_lock); bch_err(c, "error allocating c->replicas_gc"); return bch_err_throw(c, ENOMEM_replicas_gc); } @@ -432,8 +420,6 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask) e, c->replicas_gc.entry_size); bch2_cpu_replicas_sort(&c->replicas_gc); - mutex_unlock(&c->sb_lock); - return 0; } @@ -461,55 +447,48 @@ retry: return bch_err_throw(c, ENOMEM_replicas_gc); } - mutex_lock(&c->sb_lock); - percpu_down_write(&c->mark_lock); - - if (nr != c->replicas.nr || - new.entry_size != c->replicas.entry_size) { - percpu_up_write(&c->mark_lock); - mutex_unlock(&c->sb_lock); - kfree(new.entries); - goto retry; - } - - for (unsigned i = 0; i < c->replicas.nr; i++) { - struct bch_replicas_entry_v1 *e = - cpu_replicas_entry(&c->replicas, i); + guard(mutex)(&c->sb_lock); + scoped_guard(percpu_write, &c->mark_lock) { + if (nr != c->replicas.nr || + new.entry_size != c->replicas.entry_size) { + kfree(new.entries); + goto retry; + } - struct disk_accounting_pos k = { - .type = BCH_DISK_ACCOUNTING_replicas, - }; + for (unsigned i = 0; i < c->replicas.nr; i++) { + struct bch_replicas_entry_v1 *e = + cpu_replicas_entry(&c->replicas, i); - unsafe_memcpy(&k.replicas, e, replicas_entry_bytes(e), - "embedded variable length struct"); + struct disk_accounting_pos k = { + .type = BCH_DISK_ACCOUNTING_replicas, + }; - struct bpos p = disk_accounting_pos_to_bpos(&k); + unsafe_memcpy(&k.replicas, e, replicas_entry_bytes(e), + "embedded variable length struct"); - struct bch_accounting_mem *acc = &c->accounting; - bool kill = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), - accounting_pos_cmp, &p) >= acc->k.nr; + struct bpos p = disk_accounting_pos_to_bpos(&k); - if (e->data_type == BCH_DATA_journal || !kill) - memcpy(cpu_replicas_entry(&new, new.nr++), - e, new.entry_size); - } + struct bch_accounting_mem *acc = &c->accounting; + bool kill = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), + accounting_pos_cmp, &p) >= acc->k.nr; - bch2_cpu_replicas_sort(&new); + if (e->data_type == BCH_DATA_journal || !kill) + memcpy(cpu_replicas_entry(&new, new.nr++), + e, new.entry_size); + } - ret = bch2_cpu_replicas_to_sb_replicas(c, &new); + bch2_cpu_replicas_sort(&new); - if (!ret) - swap(c->replicas, new); + ret = bch2_cpu_replicas_to_sb_replicas(c, &new); - kfree(new.entries); + if (!ret) + swap(c->replicas, new); - percpu_up_write(&c->mark_lock); + kfree(new.entries); + } if (!ret) bch2_write_super(c); - - mutex_unlock(&c->sb_lock); - return ret; } @@ -597,9 +576,8 @@ int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c) bch2_cpu_replicas_sort(&new_r); - percpu_down_write(&c->mark_lock); + guard(percpu_write)(&c->mark_lock); swap(c->replicas, 
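bch2_replicas_gc_end stacks the two locks: guard(mutex) on sb_lock for the whole function, scoped_guard(percpu_write) on mark_lock for just the swap. Cleanup handlers run in reverse declaration order, so the inner lock always drops first, preserving the old unlock ordering. A compact illustration (outer/inner are placeholders):

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(outer);
static DEFINE_MUTEX(inner);

static void nested(void)
{
        guard(mutex)(&outer);           /* taken first, dropped last */

        scoped_guard(mutex, &inner) {
                /* both locks held */
        }                               /* inner dropped here */

        /* only outer still held */
}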
new_r); - percpu_up_write(&c->mark_lock); kfree(new_r.entries); @@ -809,9 +787,8 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs, unsigned flags, bool print) { struct bch_replicas_entry_v1 *e; - bool ret = true; - percpu_down_read(&c->mark_lock); + guard(percpu_read)(&c->mark_lock); for_each_cpu_replicas_entry(&c->replicas, e) { unsigned nr_online = 0, nr_failed = 0, dflags = 0; bool metadata = e->data_type < BCH_DATA_user; @@ -847,21 +824,18 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs, if (dflags & ~flags) { if (print) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_replicas_entry_to_text(&buf, e); bch_err(c, "insufficient devices online (%u) for replicas entry %s", nr_online, buf.buf); - printbuf_exit(&buf); } - ret = false; - break; + return false; } } - percpu_up_read(&c->mark_lock); - return ret; + return true; } unsigned bch2_sb_dev_has_data(struct bch_sb *sb, unsigned dev) @@ -904,11 +878,8 @@ unsigned bch2_sb_dev_has_data(struct bch_sb *sb, unsigned dev) unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca) { - mutex_lock(&c->sb_lock); - unsigned ret = bch2_sb_dev_has_data(c->disk_sb.sb, ca->dev_idx); - mutex_unlock(&c->sb_lock); - - return ret; + guard(mutex)(&c->sb_lock); + return bch2_sb_dev_has_data(c->disk_sb.sb, ca->dev_idx); } void bch2_fs_replicas_exit(struct bch_fs *c) diff --git a/libbcachefs/sb-clean.c b/libbcachefs/sb-clean.c index 59c8770e..a5916984 100644 --- a/libbcachefs/sb-clean.c +++ b/libbcachefs/sb-clean.c @@ -89,8 +89,8 @@ int bch2_verify_superblock_clean(struct bch_fs *c, { unsigned i; struct bch_sb_field_clean *clean = *cleanp; - struct printbuf buf1 = PRINTBUF; - struct printbuf buf2 = PRINTBUF; + CLASS(printbuf, buf1)(); + CLASS(printbuf, buf2)(); int ret = 0; if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c, @@ -140,8 +140,6 @@ int bch2_verify_superblock_clean(struct bch_fs *c, l2, buf2.buf); } fsck_err: - printbuf_exit(&buf2); - printbuf_exit(&buf1); return ret; } @@ -150,7 +148,7 @@ struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c) struct bch_sb_field_clean *clean, *sb_clean; int ret; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); sb_clean = bch2_sb_field_get(c->disk_sb.sb, clean); if (fsck_err_on(!sb_clean, c, @@ -158,29 +156,22 @@ struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c) "superblock marked clean but clean section not present")) { SET_BCH_SB_CLEAN(c->disk_sb.sb, false); c->sb.clean = false; - mutex_unlock(&c->sb_lock); return ERR_PTR(-BCH_ERR_invalid_sb_clean); } clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field), GFP_KERNEL); - if (!clean) { - mutex_unlock(&c->sb_lock); + if (!clean) return ERR_PTR(-BCH_ERR_ENOMEM_read_superblock_clean); - } ret = bch2_sb_clean_validate_late(c, clean, READ); if (ret) { kfree(clean); - mutex_unlock(&c->sb_lock); return ERR_PTR(ret); } - mutex_unlock(&c->sb_lock); - return clean; fsck_err: - mutex_unlock(&c->sb_lock); return ERR_PTR(ret); } @@ -265,21 +256,16 @@ const struct bch_sb_field_ops bch_sb_field_ops_clean = { int bch2_fs_mark_dirty(struct bch_fs *c) { - int ret; - /* * Unconditionally write superblock, to verify it hasn't changed before * we go rw: */ - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); SET_BCH_SB_CLEAN(c->disk_sb.sb, false); c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS); - ret = bch2_write_super(c); - mutex_unlock(&c->sb_lock); - - return ret; + return bch2_write_super(c); } void bch2_fs_mark_clean(struct bch_fs 
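With the guard in place, bch2_have_enough_devs can return true or false straight out of its loop instead of threading a ret variable to a single unlock site. The shape of that simplification (table_has and table_lock are illustrative):

#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(table_lock);
static int table[16];

static bool table_has(int v)
{
        guard(mutex)(&table_lock);

        for (unsigned int i = 0; i < ARRAY_SIZE(table); i++)
                if (table[i] == v)
                        return true;    /* unlocks on the way out */

        return false;
}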
*c) @@ -289,9 +275,9 @@ void bch2_fs_mark_clean(struct bch_fs *c) unsigned u64s; int ret; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); if (BCH_SB_CLEAN(c->disk_sb.sb)) - goto out; + return; SET_BCH_SB_CLEAN(c->disk_sb.sb, true); @@ -305,7 +291,7 @@ void bch2_fs_mark_clean(struct bch_fs *c) sb_clean = bch2_sb_field_resize(&c->disk_sb, clean, u64s); if (!sb_clean) { bch_err(c, "error resizing superblock while setting filesystem clean"); - goto out; + return; } sb_clean->flags = 0; @@ -329,12 +315,10 @@ void bch2_fs_mark_clean(struct bch_fs *c) ret = bch2_sb_clean_validate_late(c, sb_clean, WRITE); if (ret) { bch_err(c, "error writing marking filesystem clean: validate error"); - goto out; + return; } bch2_journal_pos_from_member_info_set(c); bch2_write_super(c); -out: - mutex_unlock(&c->sb_lock); } diff --git a/libbcachefs/sb-downgrade.c b/libbcachefs/sb-downgrade.c index 1506d05e..de56a1ee 100644 --- a/libbcachefs/sb-downgrade.c +++ b/libbcachefs/sb-downgrade.c @@ -191,7 +191,7 @@ int bch2_sb_set_upgrade_extra(struct bch_fs *c) bool write_sb = false; int ret = 0; - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); if (old_version < bcachefs_metadata_version_bucket_stripe_sectors && @@ -205,7 +205,6 @@ int bch2_sb_set_upgrade_extra(struct bch_fs *c) if (write_sb) bch2_write_super(c); - mutex_unlock(&c->sb_lock); return ret < 0 ? ret : 0; } @@ -372,7 +371,7 @@ int bch2_sb_downgrade_update(struct bch_fs *c) if (!test_bit(BCH_FS_btree_running, &c->flags)) return 0; - darray_char table = {}; + CLASS(darray_char, table)(); int ret = 0; for (const struct upgrade_downgrade_entry *src = downgrade_table; @@ -389,7 +388,7 @@ int bch2_sb_downgrade_update(struct bch_fs *c) ret = darray_make_room(&table, bytes); if (ret) - goto out; + return ret; dst = (void *) &darray_top(table); dst->version = cpu_to_le16(src->version); @@ -401,7 +400,7 @@ int bch2_sb_downgrade_update(struct bch_fs *c) ret = downgrade_table_extra(c, &table); if (ret) - goto out; + return ret; if (!dst->recovery_passes[0] && !dst->recovery_passes[1] && @@ -416,18 +415,14 @@ int bch2_sb_downgrade_update(struct bch_fs *c) unsigned sb_u64s = DIV_ROUND_UP(sizeof(*d) + table.nr, sizeof(u64)); if (d && le32_to_cpu(d->field.u64s) > sb_u64s) - goto out; + return 0; d = bch2_sb_field_resize(&c->disk_sb, downgrade, sb_u64s); - if (!d) { - ret = bch_err_throw(c, ENOSPC_sb_downgrade); - goto out; - } + if (!d) + return bch_err_throw(c, ENOSPC_sb_downgrade); memcpy(d->entries, table.data, table.nr); memset_u64s_tail(d->entries, 0, table.nr); -out: - darray_exit(&table); return ret; } diff --git a/libbcachefs/sb-errors.c b/libbcachefs/sb-errors.c index 48853efd..41a259ea 100644 --- a/libbcachefs/sb-errors.c +++ b/libbcachefs/sb-errors.c @@ -110,75 +110,66 @@ void bch2_sb_error_count(struct bch_fs *c, enum bch_sb_error_id err) }; unsigned i; - mutex_lock(&c->fsck_error_counts_lock); + guard(mutex)(&c->fsck_error_counts_lock); + for (i = 0; i < e->nr; i++) { if (err == e->data[i].id) { e->data[i].nr++; e->data[i].last_error_time = n.last_error_time; - goto out; + return; } if (err < e->data[i].id) break; } if (darray_make_room(e, 1)) - goto out; + return; darray_insert_item(e, i, n); -out: - mutex_unlock(&c->fsck_error_counts_lock); } void bch2_sb_errors_from_cpu(struct bch_fs *c) { - bch_sb_errors_cpu *src = &c->fsck_error_counts; - struct bch_sb_field_errors *dst; - unsigned i; - - mutex_lock(&c->fsck_error_counts_lock); - - dst = bch2_sb_field_resize(&c->disk_sb, 
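bch2_sb_downgrade_update (above) swaps a manually darray_exit()ed darray_char for CLASS(darray_char, table)(), freeing the backing allocation on every return. Plain heap pointers get the same treatment via the __free() attribute, which <linux/cleanup.h> and <linux/slab.h> provide directly:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int use_scratch(size_t n)
{
        /* <linux/slab.h> defines the kfree cleanup class; the
         * buffer is freed on every return path automatically */
        u8 *buf __free(kfree) = kzalloc(n, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        buf[0] = 1;
        return 0;
}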
errors, - bch2_sb_field_errors_u64s(src->nr)); + guard(mutex)(&c->fsck_error_counts_lock); + bch_sb_errors_cpu *src = &c->fsck_error_counts; + struct bch_sb_field_errors *dst = + bch2_sb_field_resize(&c->disk_sb, errors, + bch2_sb_field_errors_u64s(src->nr)); if (!dst) - goto err; + return; - for (i = 0; i < src->nr; i++) { + for (unsigned i = 0; i < src->nr; i++) { SET_BCH_SB_ERROR_ENTRY_ID(&dst->entries[i], src->data[i].id); SET_BCH_SB_ERROR_ENTRY_NR(&dst->entries[i], src->data[i].nr); dst->entries[i].last_error_time = cpu_to_le64(src->data[i].last_error_time); } - -err: - mutex_unlock(&c->fsck_error_counts_lock); } static int bch2_sb_errors_to_cpu(struct bch_fs *c) { + guard(mutex)(&c->fsck_error_counts_lock); + struct bch_sb_field_errors *src = bch2_sb_field_get(c->disk_sb.sb, errors); bch_sb_errors_cpu *dst = &c->fsck_error_counts; - unsigned i, nr = bch2_sb_field_errors_nr_entries(src); - int ret; + unsigned nr = bch2_sb_field_errors_nr_entries(src); if (!nr) return 0; - mutex_lock(&c->fsck_error_counts_lock); - ret = darray_make_room(dst, nr); + int ret = darray_make_room(dst, nr); if (ret) - goto err; + return ret; dst->nr = nr; - for (i = 0; i < nr; i++) { + for (unsigned i = 0; i < nr; i++) { dst->data[i].id = BCH_SB_ERROR_ENTRY_ID(&src->entries[i]); dst->data[i].nr = BCH_SB_ERROR_ENTRY_NR(&src->entries[i]); dst->data[i].last_error_time = le64_to_cpu(src->entries[i].last_error_time); } -err: - mutex_unlock(&c->fsck_error_counts_lock); - return ret; + return 0; } void bch2_fs_sb_errors_exit(struct bch_fs *c) diff --git a/libbcachefs/sb-members.c b/libbcachefs/sb-members.c index 340d4fb7..0573c7b0 100644 --- a/libbcachefs/sb-members.c +++ b/libbcachefs/sb-members.c @@ -12,7 +12,7 @@ int bch2_dev_missing_bkey(struct bch_fs *c, struct bkey_s_c k, unsigned dev) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); bool removed = test_bit(dev, c->devs_removed.d); @@ -31,7 +31,6 @@ int bch2_dev_missing_bkey(struct bch_fs *c, struct bkey_s_c k, unsigned dev) if (print) bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); return ret; } @@ -442,9 +441,8 @@ void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca) struct bch_fs *c = ca->fs; struct bch_member m; - mutex_lock(&ca->fs->sb_lock); - m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx); - mutex_unlock(&ca->fs->sb_lock); + scoped_guard(mutex, &ca->fs->sb_lock) + m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx); printbuf_tabstop_push(out, 12); @@ -471,16 +469,15 @@ void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca) void bch2_dev_errors_reset(struct bch_dev *ca) { struct bch_fs *c = ca->fs; - struct bch_member *m; - mutex_lock(&c->sb_lock); - m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); + guard(mutex)(&c->sb_lock); + + struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); for (unsigned i = 0; i < ARRAY_SIZE(m->errors_at_reset); i++) m->errors_at_reset[i] = cpu_to_le64(atomic64_read(&ca->errors[i])); m->errors_reset_time = cpu_to_le64(ktime_get_real_seconds()); bch2_write_super(c); - mutex_unlock(&c->sb_lock); } /* @@ -612,7 +609,7 @@ have_slot: void bch2_sb_members_clean_deleted(struct bch_fs *c) { - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); bool write_sb = false; for (unsigned i = 0; i < c->sb.nr_devices; i++) { @@ -626,5 +623,4 @@ void bch2_sb_members_clean_deleted(struct bch_fs *c) if (write_sb) bch2_write_super(c); - mutex_unlock(&c->sb_lock); } diff --git a/libbcachefs/sb-members.h 
b/libbcachefs/sb-members.h index 0d363a1c..35d4ab9b 100644 --- a/libbcachefs/sb-members.h +++ b/libbcachefs/sb-members.h @@ -133,7 +133,7 @@ static inline void __bch2_dev_put(struct bch_dev *ca) static inline void bch2_dev_put(struct bch_dev *ca) { - if (ca) + if (!IS_ERR_OR_NULL(ca)) __bch2_dev_put(ca); } diff --git a/libbcachefs/six.c b/libbcachefs/six.c index 538c324f..08083d6c 100644 --- a/libbcachefs/six.c +++ b/libbcachefs/six.c @@ -152,16 +152,16 @@ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type, * here. */ if (type == SIX_LOCK_read && lock->readers) { - preempt_disable(); - this_cpu_inc(*lock->readers); /* signal that we own lock */ + scoped_guard(preempt) { + this_cpu_inc(*lock->readers); /* signal that we own lock */ - smp_mb(); + smp_mb(); - old = atomic_read(&lock->state); - ret = !(old & l[type].lock_fail); + old = atomic_read(&lock->state); + ret = !(old & l[type].lock_fail); - this_cpu_sub(*lock->readers, !ret); - preempt_enable(); + this_cpu_sub(*lock->readers, !ret); + } if (!ret) { smp_mb(); @@ -360,7 +360,7 @@ static inline bool six_optimistic_spin(struct six_lock *lock, if (atomic_read(&lock->state) & SIX_LOCK_NOSPIN) return false; - preempt_disable(); + guard(preempt)(); end_time = sched_clock() + 10 * NSEC_PER_USEC; while (!need_resched() && six_owner_running(lock)) { @@ -369,10 +369,8 @@ static inline bool six_optimistic_spin(struct six_lock *lock, * wait->lock_acquired: pairs with the smp_store_release in * __six_lock_wakeup */ - if (smp_load_acquire(&wait->lock_acquired)) { - preempt_enable(); + if (smp_load_acquire(&wait->lock_acquired)) return true; - } if (!(++loop & 0xf) && (time_after64(sched_clock(), end_time))) { six_set_bitmask(lock, SIX_LOCK_NOSPIN); @@ -388,7 +386,6 @@ static inline bool six_optimistic_spin(struct six_lock *lock, cpu_relax(); } - preempt_enable(); return false; } diff --git a/libbcachefs/snapshot.c b/libbcachefs/snapshot.c index 4c43d2a2..7a801513 100644 --- a/libbcachefs/snapshot.c +++ b/libbcachefs/snapshot.c @@ -284,12 +284,10 @@ fsck_err: static int bch2_snapshot_table_make_room(struct bch_fs *c, u32 id) { - mutex_lock(&c->snapshot_table_lock); - int ret = snapshot_t_mut(c, id) + guard(mutex)(&c->snapshot_table_lock); + return snapshot_t_mut(c, id) ? 
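The one-line sb-members.h change makes bch2_dev_put() tolerate ERR_PTR values as well as NULL, so lookup paths that return ERR_PTR(-ENOENT) and the like can call put unconditionally. The pattern (struct dev and dev_put are placeholders):

#include <linux/err.h>

struct dev;
void dev_put(struct dev *d);

/* tolerate both NULL and ERR_PTR-encoded lookup failures */
static inline void dev_put_safe(struct dev *d)
{
        if (!IS_ERR_OR_NULL(d))
                dev_put(d);
}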
0 : bch_err_throw(c, ENOMEM_mark_snapshot); - mutex_unlock(&c->snapshot_table_lock); - return ret; } static int __bch2_mark_snapshot(struct btree_trans *trans, @@ -300,15 +298,12 @@ static int __bch2_mark_snapshot(struct btree_trans *trans, struct bch_fs *c = trans->c; struct snapshot_t *t; u32 id = new.k->p.offset; - int ret = 0; - mutex_lock(&c->snapshot_table_lock); + guard(mutex)(&c->snapshot_table_lock); t = snapshot_t_mut(c, id); - if (!t) { - ret = bch_err_throw(c, ENOMEM_mark_snapshot); - goto err; - } + if (!t) + return bch_err_throw(c, ENOMEM_mark_snapshot); if (new.k->type == KEY_TYPE_snapshot) { struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new); @@ -348,9 +343,8 @@ static int __bch2_mark_snapshot(struct btree_trans *trans, } else { memset(t, 0, sizeof(*t)); } -err: - mutex_unlock(&c->snapshot_table_lock); - return ret; + + return 0; } int bch2_mark_snapshot(struct btree_trans *trans, @@ -481,7 +475,7 @@ static int check_snapshot_tree(struct btree_trans *trans, struct bkey_s_c_snapshot_tree st; struct bch_snapshot s; struct bch_subvolume subvol; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); struct btree_iter snapshot_iter = {}; u32 root_id; int ret; @@ -567,7 +561,6 @@ out: err: fsck_err: bch2_trans_iter_exit(trans, &snapshot_iter); - printbuf_exit(&buf); return ret; } @@ -580,14 +573,12 @@ fsck_err: */ int bch2_check_snapshot_trees(struct bch_fs *c) { - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_commit(trans, iter, BTREE_ID_snapshot_trees, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_snapshot_tree(trans, &iter, k))); - bch_err_fn(c, ret); - return ret; + check_snapshot_tree(trans, &iter, k)); } /* @@ -706,7 +697,7 @@ static int check_snapshot(struct btree_trans *trans, struct bkey_i_snapshot *u; u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset); u32 real_depth; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); u32 i, id; int ret = 0; @@ -839,7 +830,6 @@ static int check_snapshot(struct btree_trans *trans, ret = 0; err: fsck_err: - printbuf_exit(&buf); return ret; } @@ -849,14 +839,12 @@ int bch2_check_snapshots(struct bch_fs *c) * We iterate backwards as checking/fixing the depth field requires that * the parent's depth already be correct: */ - int ret = bch2_trans_run(c, - for_each_btree_key_reverse_commit(trans, iter, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_reverse_commit(trans, iter, BTREE_ID_snapshots, POS_MAX, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_snapshot(trans, &iter, k))); - bch_err_fn(c, ret); - return ret; + check_snapshot(trans, &iter, k)); } static int check_snapshot_exists(struct btree_trans *trans, u32 id) @@ -980,8 +968,8 @@ static int get_snapshot_trees(struct bch_fs *c, struct snapshot_tree_reconstruct int bch2_reconstruct_snapshots(struct bch_fs *c) { - struct btree_trans *trans = bch2_trans_get(c); - struct printbuf buf = PRINTBUF; + CLASS(btree_trans, trans)(c); + CLASS(printbuf, buf)(); struct snapshot_tree_reconstruct r = {}; int ret = 0; @@ -1023,10 +1011,7 @@ int bch2_reconstruct_snapshots(struct bch_fs *c) } fsck_err: err: - bch2_trans_put(trans); snapshot_tree_reconstruct_exit(&r); - printbuf_exit(&buf); - bch_err_fn(c, ret); return ret; } @@ -1035,7 +1020,7 @@ int __bch2_check_key_has_snapshot(struct btree_trans *trans, struct bkey_s_c k) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); int ret = 
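Several fsck entry points here (bch2_check_snapshot_trees, bch2_check_snapshots) drop the bch2_trans_run() wrapper and instead declare the transaction as a CLASS, then return the iteration expression directly. That works because a return value is computed before scope cleanups fire, sketched here with a toy transaction type (toy_trans is invented):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct toy_trans { int nr; };

static struct toy_trans *toy_trans_get(void)
{
        return kzalloc(sizeof(struct toy_trans), GFP_KERNEL);
}

static void toy_trans_put(struct toy_trans *t)
{
        kfree(t);
}

DEFINE_CLASS(toy_trans, struct toy_trans *,
             toy_trans_put(_T), toy_trans_get(), void)

static int count_things(void)
{
        CLASS(toy_trans, t)();

        /* the return expression is evaluated before the cleanup
         * handler runs, so returning a value computed from t is
         * safe even though toy_trans_put(t) fires on the way out */
        return t ? t->nr : -ENOMEM;
}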
0; enum snapshot_id_state state = bch2_snapshot_id_state(c, k.k->p.snapshot); @@ -1083,7 +1068,6 @@ int __bch2_check_key_has_snapshot(struct btree_trans *trans, } } fsck_err: - printbuf_exit(&buf); return ret; } @@ -1693,7 +1677,7 @@ static int check_should_delete_snapshot(struct btree_trans *trans, struct bkey_s if (BCH_SNAPSHOT_DELETED(s.v)) return 0; - mutex_lock(&d->progress_lock); + guard(mutex)(&d->progress_lock); for (unsigned i = 0; i < 2; i++) { u32 child = le32_to_cpu(s.v->children[i]); @@ -1720,7 +1704,6 @@ static int check_should_delete_snapshot(struct btree_trans *trans, struct bkey_s darray_push(&d->delete_interior, n); } } - mutex_unlock(&d->progress_lock); return ret; } @@ -1825,10 +1808,12 @@ int __bch2_delete_dead_snapshots(struct bch_fs *c) if (!mutex_trylock(&d->lock)) return 0; - if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags)) - goto out_unlock; + if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags)) { + mutex_unlock(&d->lock); + return 0; + } - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); /* * For every snapshot node: If we have no live children and it's not @@ -1848,11 +1833,10 @@ int __bch2_delete_dead_snapshots(struct bch_fs *c) goto err; { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_snapshot_delete_nodes_to_text(&buf, d); ret = commit_do(trans, NULL, NULL, 0, bch2_trans_log_msg(trans, &buf)); - printbuf_exit(&buf); if (ret) goto err; } @@ -1895,19 +1879,16 @@ int __bch2_delete_dead_snapshots(struct bch_fs *c) goto err; } err: - mutex_lock(&d->progress_lock); - darray_exit(&d->deleting_from_trees); - darray_exit(&d->delete_interior); - darray_exit(&d->delete_leaves); - d->running = false; - mutex_unlock(&d->progress_lock); - bch2_trans_put(trans); + scoped_guard(mutex, &d->progress_lock) { + darray_exit(&d->deleting_from_trees); + darray_exit(&d->delete_interior); + darray_exit(&d->delete_leaves); + d->running = false; + } bch2_recovery_pass_set_no_ratelimit(c, BCH_RECOVERY_PASS_check_snapshots); -out_unlock: + mutex_unlock(&d->lock); - if (!bch2_err_matches(ret, EROFS)) - bch_err_fn(c, ret); return ret; } @@ -1952,11 +1933,10 @@ void bch2_snapshot_delete_status_to_text(struct printbuf *out, struct bch_fs *c) return; } - mutex_lock(&d->progress_lock); - bch2_snapshot_delete_nodes_to_text(out, d); - - bch2_bbpos_to_text(out, d->pos); - mutex_unlock(&d->progress_lock); + scoped_guard(mutex, &d->progress_lock) { + bch2_snapshot_delete_nodes_to_text(out, d); + bch2_bbpos_to_text(out, d->pos); + } } int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans, @@ -2010,11 +1990,11 @@ int bch2_snapshots_read(struct bch_fs *c) * Initializing the is_ancestor bitmaps requires ancestors to already be * initialized - so mark in reverse: */ - int ret = bch2_trans_run(c, - for_each_btree_key_reverse(trans, iter, BTREE_ID_snapshots, + CLASS(btree_trans, trans)(c); + int ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_snapshots, POS_MAX, 0, k, __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?: - bch2_check_snapshot_needs_deletion(trans, k))); + bch2_check_snapshot_needs_deletion(trans, k)); bch_err_fn(c, ret); /* diff --git a/libbcachefs/snapshot_types.h b/libbcachefs/snapshot_types.h index 0ab698f1..a826c9c8 100644 --- a/libbcachefs/snapshot_types.h +++ b/libbcachefs/snapshot_types.h @@ -6,7 +6,7 @@ #include "darray.h" #include "subvolume_types.h" -typedef DARRAY(u32) snapshot_id_list; +DEFINE_DARRAY_NAMED(snapshot_id_list, u32); #define 
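snapshot_id_list stops being an anonymous DARRAY(u32) typedef and becomes DEFINE_DARRAY_NAMED(snapshot_id_list, u32), matching darray_journal_keys earlier in this diff; presumably the named form also generates the cleanup class that makes CLASS(darray_journal_keys, ...) possible. A hypothetical reading of what such a macro provides, not the bcachefs definition:

#include <linux/cleanup.h>
#include <linux/slab.h>

/* hypothetical stand-in for DEFINE_DARRAY_NAMED: a named
 * growable-array struct plus an auto-cleanup class for it */
#define MY_DARRAY_NAMED(_name, _type)                                   \
        typedef struct { _type *data; size_t nr, size; } _name;        \
        DEFINE_CLASS(_name, _name, kfree(_T.data), (_name) {}, void)

MY_DARRAY_NAMED(id_list, u32);

static void demo(void)
{
        CLASS(id_list, ids)();          /* ids.data freed at scope exit */
        (void) ids;
}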
IS_ANCESTOR_BITMAP 128 diff --git a/libbcachefs/str_hash.c b/libbcachefs/str_hash.c index d39fd426..dfe4b6ae 100644 --- a/libbcachefs/str_hash.c +++ b/libbcachefs/str_hash.c @@ -125,7 +125,7 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans, struct bch_fs *c = trans->c; struct btree_iter iter; struct bkey_s_c k; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bool need_commit = false; int ret = 0; @@ -183,7 +183,7 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans, goto err; if (!need_commit) { - struct printbuf buf = PRINTBUF; + printbuf_reset(&buf); bch2_log_msg_start(c, &buf); prt_printf(&buf, "inode %llu hash info mismatch with root, but mismatch not found\n", @@ -198,7 +198,6 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans, prt_printf(&buf, " %llx %llx", hash_info->siphash_key.k0, hash_info->siphash_key.k1); #endif bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); ret = bch_err_throw(c, fsck_repair_unimplemented); goto err; } @@ -207,7 +206,6 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans, bch_err_throw(c, transaction_restart_nested); err: fsck_err: - printbuf_exit(&buf); bch2_trans_iter_exit(trans, &iter); return ret; } @@ -244,7 +242,7 @@ int bch2_str_hash_repair_key(struct btree_trans *trans, bool *updated_before_k_pos) { struct bch_fs *c = trans->c; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bool free_snapshots_seen = false; int ret = 0; @@ -346,7 +344,7 @@ int __bch2_str_hash_check_key(struct btree_trans *trans, { struct bch_fs *c = trans->c; struct btree_iter iter = {}; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); struct bkey_s_c k; int ret = 0; @@ -375,9 +373,7 @@ int __bch2_str_hash_check_key(struct btree_trans *trans, goto bad_hash; } bch2_trans_iter_exit(trans, &iter); -out: fsck_err: - printbuf_exit(&buf); return ret; bad_hash: bch2_trans_iter_exit(trans, &iter); @@ -386,7 +382,7 @@ bad_hash: */ ret = check_inode_hash_info_matches_root(trans, hash_k.k->p.inode, hash_info); if (ret) - goto out; + return ret; if (fsck_err(trans, hash_table_key_wrong_offset, "hash table key at wrong offset: should be at %llu\n%s", @@ -396,5 +392,5 @@ bad_hash: k_iter, hash_k, &iter, bkey_s_c_null, updated_before_k_pos); - goto out; + return ret; } diff --git a/libbcachefs/subvolume.c b/libbcachefs/subvolume.c index 353df662..2d2d6b22 100644 --- a/libbcachefs/subvolume.c +++ b/libbcachefs/subvolume.c @@ -17,7 +17,7 @@ static int bch2_subvolume_delete(struct btree_trans *, u32); static int bch2_subvolume_missing(struct bch_fs *c, u32 subvolid) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); bch2_log_msg_start(c, &buf); prt_printf(&buf, "missing subvolume %u", subvolid); @@ -27,7 +27,6 @@ static int bch2_subvolume_missing(struct bch_fs *c, u32 subvolid) BCH_RECOVERY_PASS_check_inodes, 0); if (print) bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); return ret; } @@ -47,18 +46,18 @@ static int check_subvol(struct btree_trans *trans, struct bkey_s_c k) { struct bch_fs *c = trans->c; - struct bkey_s_c_subvolume subvol; struct btree_iter subvol_children_iter = {}; + struct bch_subvolume subvol; struct bch_snapshot snapshot; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); unsigned snapid; int ret = 0; if (k.k->type != KEY_TYPE_subvolume) return 0; - subvol = bkey_s_c_to_subvolume(k); - snapid = le32_to_cpu(subvol.v->snapshot); + bkey_val_copy(&subvol, bkey_s_c_to_subvolume(k)); + snapid = le32_to_cpu(subvol.snapshot); ret = bch2_snapshot_lookup(trans, snapid, 
&snapshot); if (bch2_err_matches(ret, ENOENT)) @@ -67,19 +66,19 @@ static int check_subvol(struct btree_trans *trans, if (ret) return ret; - if (BCH_SUBVOLUME_UNLINKED(subvol.v)) { + if (BCH_SUBVOLUME_UNLINKED(&subvol)) { ret = bch2_subvolume_delete(trans, iter->pos.offset); bch_err_msg(c, ret, "deleting subvolume %llu", iter->pos.offset); return ret ?: bch_err_throw(c, transaction_restart_nested); } - if (fsck_err_on(subvol.k->p.offset == BCACHEFS_ROOT_SUBVOL && - subvol.v->fs_path_parent, + if (fsck_err_on(k.k->p.offset == BCACHEFS_ROOT_SUBVOL && + subvol.fs_path_parent, trans, subvol_root_fs_path_parent_nonzero, "root subvolume has nonzero fs_path_parent\n%s", (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { struct bkey_i_subvolume *n = - bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume); + bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume); ret = PTR_ERR_OR_ZERO(n); if (ret) goto err; @@ -87,7 +86,7 @@ static int check_subvol(struct btree_trans *trans, n->v.fs_path_parent = 0; } - if (subvol.v->fs_path_parent) { + if (subvol.fs_path_parent) { struct bpos pos = subvolume_children_pos(k); struct bkey_s_c subvol_children_k = @@ -111,16 +110,16 @@ static int check_subvol(struct btree_trans *trans, struct bch_inode_unpacked inode; ret = bch2_inode_find_by_inum_nowarn_trans(trans, - (subvol_inum) { k.k->p.offset, le64_to_cpu(subvol.v->inode) }, + (subvol_inum) { k.k->p.offset, le64_to_cpu(subvol.inode) }, &inode); if (!ret) { - if (fsck_err_on(inode.bi_subvol != subvol.k->p.offset, + if (fsck_err_on(inode.bi_subvol != k.k->p.offset, trans, subvol_root_wrong_bi_subvol, "subvol root %llu:%u has wrong bi_subvol field: got %u, should be %llu", inode.bi_inum, inode.bi_snapshot, - inode.bi_subvol, subvol.k->p.offset)) { - inode.bi_subvol = subvol.k->p.offset; - inode.bi_snapshot = le32_to_cpu(subvol.v->snapshot); + inode.bi_subvol, k.k->p.offset)) { + inode.bi_subvol = k.k->p.offset; + inode.bi_snapshot = le32_to_cpu(subvol.snapshot); ret = __bch2_fsck_write_inode(trans, &inode); if (ret) goto err; @@ -128,8 +127,8 @@ static int check_subvol(struct btree_trans *trans, } else if (bch2_err_matches(ret, ENOENT)) { if (fsck_err(trans, subvol_to_missing_root, "subvolume %llu points to missing subvolume root %llu:%u", - k.k->p.offset, le64_to_cpu(subvol.v->inode), - le32_to_cpu(subvol.v->snapshot))) { + k.k->p.offset, le64_to_cpu(subvol.inode), + le32_to_cpu(subvol.snapshot))) { /* * Recreate - any contents that are still disconnected * will then get reattached under lost+found @@ -137,10 +136,10 @@ static int check_subvol(struct btree_trans *trans, bch2_inode_init_early(c, &inode); bch2_inode_init_late(c, &inode, bch2_current_time(c), 0, 0, S_IFDIR|0700, 0, NULL); - inode.bi_inum = le64_to_cpu(subvol.v->inode); - inode.bi_snapshot = le32_to_cpu(subvol.v->snapshot); + inode.bi_inum = le64_to_cpu(subvol.inode); + inode.bi_snapshot = le32_to_cpu(subvol.snapshot); inode.bi_subvol = k.k->p.offset; - inode.bi_parent_subvol = le32_to_cpu(subvol.v->fs_path_parent); + inode.bi_parent_subvol = le32_to_cpu(subvol.fs_path_parent); ret = __bch2_fsck_write_inode(trans, &inode); if (ret) goto err; @@ -149,8 +148,8 @@ static int check_subvol(struct btree_trans *trans, goto err; } - if (!BCH_SUBVOLUME_SNAP(subvol.v)) { - u32 snapshot_root = bch2_snapshot_root(c, le32_to_cpu(subvol.v->snapshot)); + if (!BCH_SUBVOLUME_SNAP(&subvol)) { + u32 snapshot_root = bch2_snapshot_root(c, le32_to_cpu(subvol.snapshot)); u32 snapshot_tree = bch2_snapshot_tree(c, snapshot_root); struct bch_snapshot_tree st; @@ 
-162,12 +161,12 @@ static int check_subvol(struct btree_trans *trans, if (ret) goto err; - if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset, + if (fsck_err_on(le32_to_cpu(st.master_subvol) != k.k->p.offset, trans, subvol_not_master_and_not_snapshot, "subvolume %llu is not set as snapshot but is not master subvolume", k.k->p.offset)) { struct bkey_i_subvolume *s = - bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume); + bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume); ret = PTR_ERR_OR_ZERO(s); if (ret) goto err; @@ -178,19 +177,16 @@ static int check_subvol(struct btree_trans *trans, err: fsck_err: bch2_trans_iter_exit(trans, &subvol_children_iter); - printbuf_exit(&buf); return ret; } int bch2_check_subvols(struct bch_fs *c) { - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_commit(trans, iter, BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_subvol(trans, &iter, k))); - bch_err_fn(c, ret); - return ret; + check_subvol(trans, &iter, k)); } static int check_subvol_child(struct btree_trans *trans, @@ -219,13 +215,11 @@ fsck_err: int bch2_check_subvol_children(struct bch_fs *c) { - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_commit(trans, iter, BTREE_ID_subvolume_children, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - check_subvol_child(trans, &iter, k))); - bch_err_fn(c, ret); - return 0; + check_subvol_child(trans, &iter, k)); } /* Subvolumes: */ @@ -348,7 +342,8 @@ int bch2_subvol_is_ro_trans(struct btree_trans *trans, u32 subvol) int bch2_subvol_is_ro(struct bch_fs *c, u32 subvol) { - return bch2_trans_do(c, bch2_subvol_is_ro_trans(trans, subvol)); + CLASS(btree_trans, trans)(c); + return lockrestart_do(trans, bch2_subvol_is_ro_trans(trans, subvol)); } int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot, @@ -514,18 +509,22 @@ static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *wor int ret = 0; while (!ret) { - mutex_lock(&c->snapshots_unlinked_lock); - snapshot_id_list s = c->snapshots_unlinked; - darray_init(&c->snapshots_unlinked); - mutex_unlock(&c->snapshots_unlinked_lock); + snapshot_id_list s; + + scoped_guard(mutex, &c->snapshots_unlinked_lock) { + s = c->snapshots_unlinked; + darray_init(&c->snapshots_unlinked); + } if (!s.nr) break; bch2_evict_subvolume_inodes(c, &s); + CLASS(btree_trans, trans)(c); + darray_for_each(s, id) { - ret = bch2_trans_run(c, bch2_subvolume_delete(trans, *id)); + ret = bch2_subvolume_delete(trans, *id); bch_err_msg(c, ret, "deleting subvolume %u", *id); if (ret) break; @@ -549,10 +548,9 @@ static int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans struct bch_fs *c = trans->c; int ret = 0; - mutex_lock(&c->snapshots_unlinked_lock); - if (!snapshot_list_has_id(&c->snapshots_unlinked, h->subvol)) - ret = snapshot_list_add(c, &c->snapshots_unlinked, h->subvol); - mutex_unlock(&c->snapshots_unlinked_lock); + scoped_guard(mutex, &c->snapshots_unlinked_lock) + if (!snapshot_list_has_id(&c->snapshots_unlinked, h->subvol)) + ret = snapshot_list_add(c, &c->snapshots_unlinked, h->subvol); if (ret) return ret; @@ -677,7 +675,6 @@ int bch2_initialize_subvolumes(struct bch_fs *c) struct bkey_i_snapshot_tree root_tree; struct bkey_i_snapshot root_snapshot; struct bkey_i_subvolume root_volume; - int ret; 
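check_subvol now copies the subvolume value onto the stack with bkey_val_copy() instead of keeping bkey_s_c_subvolume, a pointer into the iterator's key, live across the rest of the function; that looks defensive, since subsequent btree operations within the transaction can invalidate such pointers. The general shape (the struct, lookup, and update are placeholders):

#include <linux/errno.h>

struct subvol_val { int snapshot; };

/* returns a pointer into cached data; later btree/transaction
 * work may invalidate it */
const struct subvol_val *lookup(int id);
int update(int id);

static int check(int id)
{
        const struct subvol_val *p = lookup(id);
        if (!p)
                return -ENOENT;

        struct subvol_val copy = *p;    /* stable stack copy */

        int ret = update(id);           /* may invalidate p, not copy */
        return ret ?: copy.snapshot;
}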
bkey_snapshot_tree_init(&root_tree.k_i); root_tree.k.p.offset = 1; @@ -698,11 +695,9 @@ int bch2_initialize_subvolumes(struct bch_fs *c) root_volume.v.snapshot = cpu_to_le32(U32_MAX); root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO); - ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0, 0) ?: + return bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0, 0) ?: bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0, 0) ?: bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0, 0); - bch_err_fn(c, ret); - return ret; } static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans) @@ -739,10 +734,9 @@ err: /* set bi_subvol on root inode */ int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c) { - int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - __bch2_fs_upgrade_for_subvolumes(trans)); - bch_err_fn(c, ret); - return ret; + CLASS(btree_trans, trans)(c); + return commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, + __bch2_fs_upgrade_for_subvolumes(trans)); } void bch2_fs_subvolumes_init_early(struct bch_fs *c) diff --git a/libbcachefs/super-io.c b/libbcachefs/super-io.c index 85e460d1..40fa87ce 100644 --- a/libbcachefs/super-io.c +++ b/libbcachefs/super-io.c @@ -68,23 +68,21 @@ enum bcachefs_metadata_version bch2_latest_compatible_version(enum bcachefs_meta int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version version) { - int ret = ((c->sb.features & BIT_ULL(BCH_FEATURE_incompat_version_field)) && - version <= c->sb.version_incompat_allowed) - ? 0 - : -BCH_ERR_may_not_use_incompat_feature; + guard(mutex)(&c->sb_lock); - mutex_lock(&c->sb_lock); - if (!ret) { + if (((c->sb.features & BIT_ULL(BCH_FEATURE_incompat_version_field)) && + version <= c->sb.version_incompat_allowed)) { SET_BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb, max(BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb), version)); bch2_write_super(c); + return 0; } else { darray_for_each(c->incompat_versions_requested, i) if (version == *i) - goto out; + return -BCH_ERR_may_not_use_incompat_feature; darray_push(&c->incompat_versions_requested, version); - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "requested incompat feature "); bch2_version_to_text(&buf, version); prt_str(&buf, " currently not enabled, allowed up to "); @@ -92,13 +90,8 @@ int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version v prt_printf(&buf, "\n set version_upgrade=incompat to enable"); bch_notice(c, "%s", buf.buf); - printbuf_exit(&buf); + return -BCH_ERR_may_not_use_incompat_feature; } - -out: - mutex_unlock(&c->sb_lock); - - return ret; } const char * const bch2_sb_fields[] = { @@ -203,12 +196,11 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s) u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits; if (new_bytes > max_bytes) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_bdevname(&buf, sb->bdev); prt_printf(&buf, ": superblock too big: want %zu but have %llu", new_bytes, max_bytes); pr_err("%s", buf.buf); - printbuf_exit(&buf); return -BCH_ERR_ENOSPC_sb; } } @@ -783,8 +775,8 @@ static int __bch2_read_super(const char *path, struct bch_opts *opts, { u64 offset = opt_get(*opts, sb); struct bch_sb_layout layout; - struct printbuf err = PRINTBUF; - struct printbuf err2 = PRINTBUF; + CLASS(printbuf, err)(); + CLASS(printbuf, err2)(); __le64 *i; int ret; #ifndef __KERNEL__ @@ -859,7 +851,6 @@ retry: else bch2_print_opts(opts, KERN_ERR "%s", err2.buf); - 
printbuf_exit(&err2); printbuf_reset(&err); /* @@ -925,15 +916,14 @@ got_super: path, err.buf); goto err_no_print; } -out: - printbuf_exit(&err); - return ret; + + return 0; err: bch2_print_opts(opts, KERN_ERR "bcachefs (%s): error reading superblock: %s\n", path, err.buf); err_no_print: bch2_free_super(sb); - goto out; + return ret; } int bch2_read_super(const char *path, struct bch_opts *opts, @@ -1001,7 +991,12 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx) sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb), null_nonce(), sb); - bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META); + /* + * blk-wbt.c throttles all writes except those that have both REQ_SYNC + * and REQ_IDLE set... + */ + + bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_IDLE|REQ_META); bio->bi_iter.bi_sector = le64_to_cpu(sb->offset); bio->bi_end_io = write_super_endio; bio->bi_private = ca; @@ -1019,7 +1014,7 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx) int bch2_write_super(struct bch_fs *c) { struct closure *cl = &c->sb_write; - struct printbuf err = PRINTBUF; + CLASS(printbuf, err)(); unsigned sb = 0, nr_wrote; struct bch_devs_mask sb_written; bool wrote, can_mount_without_written, can_mount_with_written; @@ -1101,14 +1096,13 @@ int bch2_write_super(struct bch_fs *c) goto out; if (le16_to_cpu(c->disk_sb.sb->version) > bcachefs_metadata_version_current) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_printf(&buf, "attempting to write superblock that wasn't version downgraded ("); bch2_version_to_text(&buf, le16_to_cpu(c->disk_sb.sb->version)); prt_str(&buf, " > "); bch2_version_to_text(&buf, bcachefs_metadata_version_current); prt_str(&buf, ")"); bch2_fs_fatal_error(c, ": %s", buf.buf); - printbuf_exit(&buf); ret = bch_err_throw(c, sb_not_downgraded); goto out; } @@ -1129,7 +1123,7 @@ int bch2_write_super(struct bch_fs *c) continue; if (le64_to_cpu(ca->sb_read_scratch->seq) < ca->disk_sb.seq) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_char(&buf, ' '); prt_bdevname(&buf, ca->disk_sb.bdev); prt_printf(&buf, @@ -1144,12 +1138,10 @@ int bch2_write_super(struct bch_fs *c) } else { bch_err(c, "%s", buf.buf); } - - printbuf_exit(&buf); } if (le64_to_cpu(ca->sb_read_scratch->seq) > ca->disk_sb.seq) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_char(&buf, ' '); prt_bdevname(&buf, ca->disk_sb.bdev); prt_printf(&buf, @@ -1157,7 +1149,6 @@ int bch2_write_super(struct bch_fs *c) le64_to_cpu(ca->sb_read_scratch->seq), ca->disk_sb.seq); bch2_fs_fatal_error(c, "%s", buf.buf); - printbuf_exit(&buf); ret = bch_err_throw(c, erofs_sb_err); } } @@ -1219,19 +1210,17 @@ out: darray_for_each(online_devices, ca) enumerated_ref_put(&(*ca)->io_ref[READ], BCH_DEV_READ_REF_write_super); darray_exit(&online_devices); - printbuf_exit(&err); return ret; } void __bch2_check_set_feature(struct bch_fs *c, unsigned feat) { - mutex_lock(&c->sb_lock); - if (!(c->sb.features & (1ULL << feat))) { - c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << feat); + guard(mutex)(&c->sb_lock); + if (!(c->sb.features & BIT_ULL(feat))) { + c->disk_sb.sb->features[0] |= cpu_to_le64(BIT_ULL(feat)); bch2_write_super(c); } - mutex_unlock(&c->sb_lock); } /* Downgrade if superblock is at a higher version than currently supported: */ @@ -1279,11 +1268,12 @@ void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version, bool incompat) void bch2_sb_upgrade_incompat(struct bch_fs *c) { - mutex_lock(&c->sb_lock); + 
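write_one_super() adds REQ_IDLE to the superblock write: per the comment in the hunk, blk-wbt throttles all writes except those carrying both REQ_SYNC and REQ_IDLE, and a throttled superblock write could otherwise stall behind background writeback. The flag combination, spelled out (SB_WRITE_OPF is an illustrative name):

#include <linux/blk_types.h>

/* sync + idle is the combination blk-wbt exempts from
 * writeback throttling; meta marks it as metadata I/O */
static const blk_opf_t SB_WRITE_OPF =
        REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_META;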
guard(mutex)(&c->sb_lock); + if (c->sb.version == c->sb.version_incompat_allowed) - goto unlock; + return; - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "Now allowing incompatible features up to "); bch2_version_to_text(&buf, c->sb.version); @@ -1292,14 +1282,11 @@ void bch2_sb_upgrade_incompat(struct bch_fs *c) prt_newline(&buf); bch_notice(c, "%s", buf.buf); - printbuf_exit(&buf); c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL); SET_BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb, max(BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb), c->sb.version)); bch2_write_super(c); -unlock: - mutex_unlock(&c->sb_lock); } static int bch2_sb_ext_validate(struct bch_sb *sb, struct bch_sb_field *f, @@ -1365,7 +1352,7 @@ static int bch2_sb_field_validate(struct bch_sb *sb, struct bch_sb_field *f, enum bch_validate_flags flags, struct printbuf *err) { unsigned type = le32_to_cpu(f->type); - struct printbuf field_err = PRINTBUF; + CLASS(printbuf, field_err)(); const struct bch_sb_field_ops *ops = bch2_sb_field_type_ops(type); int ret; @@ -1377,7 +1364,6 @@ static int bch2_sb_field_validate(struct bch_sb *sb, struct bch_sb_field *f, bch2_sb_field_to_text(err, sb, f); } - printbuf_exit(&field_err); return ret; } diff --git a/libbcachefs/super.c b/libbcachefs/super.c index 0107a031..4e038f65 100644 --- a/libbcachefs/super.c +++ b/libbcachefs/super.c @@ -267,14 +267,11 @@ static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid) struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid) { - struct bch_fs *c; + guard(mutex)(&bch_fs_list_lock); - mutex_lock(&bch_fs_list_lock); - c = __bch2_uuid_to_fs(uuid); + struct bch_fs *c = __bch2_uuid_to_fs(uuid); if (c) closure_get(&c->cl); - mutex_unlock(&bch_fs_list_lock); - return c; } @@ -418,9 +415,8 @@ void bch2_fs_read_only(struct bch_fs *c) bch2_fs_mark_clean(c); } else { /* Make sure error counts/counters are persisted */ - mutex_lock(&c->sb_lock); + guard(mutex)(&c->sb_lock); bch2_write_super(c); - mutex_unlock(&c->sb_lock); bch_verbose(c, "done going read-only, filesystem not clean"); } @@ -431,9 +427,8 @@ static void bch2_fs_read_only_work(struct work_struct *work) struct bch_fs *c = container_of(work, struct bch_fs, read_only_work); - down_write(&c->state_lock); + guard(rwsem_write)(&c->state_lock); bch2_fs_read_only(c); - up_write(&c->state_lock); } static void bch2_fs_read_only_async(struct bch_fs *c) @@ -513,11 +508,11 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early) ret = bch2_fs_init_rw(c); if (ret) - goto err; + return ret; ret = bch2_sb_members_v2_init(c); if (ret) - goto err; + return ret; clear_bit(BCH_FS_clean_shutdown, &c->flags); @@ -536,15 +531,20 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early) * overwriting whatever was there previously, and there must always be * at least one non-flush write in the journal or recovery will fail: */ - spin_lock(&c->journal.lock); - set_bit(JOURNAL_need_flush_write, &c->journal.flags); - set_bit(JOURNAL_running, &c->journal.flags); - bch2_journal_space_available(&c->journal); - spin_unlock(&c->journal.lock); + scoped_guard(spinlock, &c->journal.lock) { + set_bit(JOURNAL_need_flush_write, &c->journal.flags); + set_bit(JOURNAL_running, &c->journal.flags); + bch2_journal_space_available(&c->journal); + } ret = bch2_fs_mark_dirty(c); if (ret) - goto err; + return ret; + + /* + * Don't jump to our error path, and call bch2_fs_read_only(), unless we + * successfully marked the filesystem dirty + */ ret = bch2_journal_reclaim_start(&c->journal); if (ret) @@ -597,11 
+597,8 @@ int bch2_fs_read_write(struct bch_fs *c) int bch2_fs_read_write_early(struct bch_fs *c) { - down_write(&c->state_lock); - int ret = __bch2_fs_read_write(c, true); - up_write(&c->state_lock); - - return ret; + guard(rwsem_write)(&c->state_lock); + return __bch2_fs_read_write(c, true); } /* Filesystem startup/shutdown: */ @@ -699,9 +696,8 @@ void __bch2_fs_stop(struct bch_fs *c) set_bit(BCH_FS_stopping, &c->flags); - down_write(&c->state_lock); - bch2_fs_read_only(c); - up_write(&c->state_lock); + scoped_guard(rwsem_write, &c->state_lock) + bch2_fs_read_only(c); for (unsigned i = 0; i < c->sb.nr_devices; i++) { struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true); @@ -737,9 +733,8 @@ void __bch2_fs_stop(struct bch_fs *c) void bch2_fs_free(struct bch_fs *c) { - mutex_lock(&bch_fs_list_lock); - list_del(&c->list); - mutex_unlock(&bch_fs_list_lock); + scoped_guard(mutex, &bch_fs_list_lock) + list_del(&c->list); closure_sync(&c->cl); closure_debug_destroy(&c->cl); @@ -801,21 +796,19 @@ static int bch2_fs_online(struct bch_fs *c) return ret; } - down_write(&c->state_lock); + guard(rwsem_write)(&c->state_lock); for_each_member_device(c, ca) { ret = bch2_dev_sysfs_online(c, ca); if (ret) { bch_err(c, "error creating sysfs objects"); bch2_dev_put(ca); - goto err; + return ret; } } BUG_ON(!list_empty(&c->list)); list_add(&c->list, &bch_fs_list); -err: - up_write(&c->state_lock); return ret; } @@ -852,8 +845,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts, bch_sb_handles *sbs) { struct bch_fs *c; - struct printbuf name = PRINTBUF; unsigned i, iter_size; + CLASS(printbuf, name)(); int ret = 0; c = kvmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO); @@ -940,9 +933,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts, if (ret) goto err; - mutex_lock(&c->sb_lock); - ret = bch2_sb_to_fs(c, sb); - mutex_unlock(&c->sb_lock); + scoped_guard(mutex, &c->sb_lock) + ret = bch2_sb_to_fs(c, sb); if (ret) goto err; @@ -994,7 +986,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts, goto err; strscpy(c->name, name.buf, sizeof(c->name)); - printbuf_exit(&name); iter_size = sizeof(struct sort_iter) + (btree_blocks(c) + 1) * 2 * @@ -1086,9 +1077,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts, &c->clock_journal_res, (sizeof(struct jset_entry_clock) / sizeof(u64)) * 2); - mutex_lock(&bch_fs_list_lock); - ret = bch2_fs_online(c); - mutex_unlock(&bch_fs_list_lock); + scoped_guard(mutex, &bch_fs_list_lock) + ret = bch2_fs_online(c); if (ret) goto err; @@ -1161,8 +1151,8 @@ static bool bch2_fs_may_start(struct bch_fs *c) case BCH_DEGRADED_yes: flags |= BCH_FORCE_IF_DEGRADED; break; - default: - mutex_lock(&c->sb_lock); + default: { + guard(mutex)(&c->sb_lock); for (unsigned i = 0; i < c->disk_sb.sb->nr_devices; i++) { if (!bch2_member_exists(c->disk_sb.sb, i)) continue; @@ -1171,13 +1161,11 @@ static bool bch2_fs_may_start(struct bch_fs *c) if (!bch2_dev_is_online(ca) && (ca->mi.state == BCH_MEMBER_STATE_rw || - ca->mi.state == BCH_MEMBER_STATE_ro)) { - mutex_unlock(&c->sb_lock); + ca->mi.state == BCH_MEMBER_STATE_ro)) return false; - } } - mutex_unlock(&c->sb_lock); break; + } } return bch2_have_enough_devs(c, c->online_devs, flags, true); @@ -1188,6 +1176,8 @@ int bch2_fs_start(struct bch_fs *c) time64_t now = ktime_get_real_seconds(); int ret = 0; + BUG_ON(test_bit(BCH_FS_started, &c->flags)); + print_mount_opts(c); if (c->cf_encoding) @@ -1199,43 +1189,29 @@ int 
bch2_fs_start(struct bch_fs *c) if (!bch2_fs_may_start(c)) return bch_err_throw(c, insufficient_devices_to_start); - down_write(&c->state_lock); - mutex_lock(&c->sb_lock); + scoped_guard(rwsem_write, &c->state_lock) { + guard(mutex)(&c->sb_lock); + if (!bch2_sb_field_get_minsize(&c->disk_sb, ext, + sizeof(struct bch_sb_field_ext) / sizeof(u64))) { + ret = bch_err_throw(c, ENOSPC_sb); + goto err; + } - BUG_ON(test_bit(BCH_FS_started, &c->flags)); + ret = bch2_sb_members_v2_init(c); + if (ret) + goto err; - if (!bch2_sb_field_get_minsize(&c->disk_sb, ext, - sizeof(struct bch_sb_field_ext) / sizeof(u64))) { - mutex_unlock(&c->sb_lock); - up_write(&c->state_lock); - ret = bch_err_throw(c, ENOSPC_sb); - goto err; - } + scoped_guard(rcu) + for_each_online_member_rcu(c, ca) { + bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = + cpu_to_le64(now); + if (ca->mi.state == BCH_MEMBER_STATE_rw) + bch2_dev_allocator_add(c, ca); + } - ret = bch2_sb_members_v2_init(c); - if (ret) { - mutex_unlock(&c->sb_lock); - up_write(&c->state_lock); - goto err; + bch2_recalc_capacity(c); } - scoped_guard(rcu) - for_each_online_member_rcu(c, ca) - bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = - cpu_to_le64(now); - - /* - * Don't write superblock yet: recovery might have to downgrade - */ - mutex_unlock(&c->sb_lock); - - scoped_guard(rcu) - for_each_online_member_rcu(c, ca) - if (ca->mi.state == BCH_MEMBER_STATE_rw) - bch2_dev_allocator_add(c, ca); - bch2_recalc_capacity(c); - up_write(&c->state_lock); - ret = BCH_SB_INITIALIZED(c->disk_sb.sb) ? bch2_fs_recovery(c) : bch2_fs_initialize(c); @@ -1256,13 +1232,12 @@ int bch2_fs_start(struct bch_fs *c) set_bit(BCH_FS_started, &c->flags); wake_up(&c->ro_ref_wait); - down_write(&c->state_lock); - if (c->opts.read_only) - bch2_fs_read_only(c); - else if (!test_bit(BCH_FS_rw, &c->flags)) - ret = bch2_fs_read_write(c); - up_write(&c->state_lock); - + scoped_guard(rwsem_write, &c->state_lock) { + if (c->opts.read_only) + bch2_fs_read_only(c); + else if (!test_bit(BCH_FS_rw, &c->flags)) + ret = bch2_fs_read_write(c); + } err: if (ret) bch_err_msg(c, ret, "starting filesystem"); @@ -1307,7 +1282,7 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs, if (fs->sb->seq == sb->sb->seq && fs->sb->write_time != sb->sb->write_time) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "Split brain detected between "); prt_bdevname(&buf, sb->bdev); @@ -1332,7 +1307,6 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs, prt_printf(&buf, "Not using older sb"); pr_err("%s", buf.buf); - printbuf_exit(&buf); if (!opts->no_splitbrain_check) return -BCH_ERR_device_splitbrain; @@ -1343,7 +1317,7 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs, u64 seq_from_member = le64_to_cpu(sb->sb->seq); if (seq_from_fs && seq_from_fs < seq_from_member) { - struct printbuf buf = PRINTBUF; + CLASS(printbuf, buf)(); prt_str(&buf, "Split brain detected between "); prt_bdevname(&buf, sb->bdev); @@ -1365,7 +1339,6 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs, } pr_err("%s", buf.buf); - printbuf_exit(&buf); if (!opts->no_splitbrain_check) return -BCH_ERR_device_splitbrain; @@ -1580,18 +1553,16 @@ static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx) struct bch_dev *ca = NULL; if (bch2_fs_init_fault("dev_alloc")) - goto err; + return bch_err_throw(c, ENOMEM_dev_alloc); ca = __bch2_dev_alloc(c, &member); if (!ca) - goto err; + return bch_err_throw(c, ENOMEM_dev_alloc); ca->fs = c; bch2_dev_attach(c, ca, dev_idx); return 0; -err: - 
return bch_err_throw(c, ENOMEM_dev_alloc); } static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb) @@ -1606,7 +1577,10 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb) if (get_capacity(sb->bdev->bd_disk) < ca->mi.bucket_size * ca->mi.nbuckets) { - bch_err(ca, "cannot online: device too small"); + bch_err(ca, "cannot online: device too small (capacity %llu filesystem size %llu nbuckets %llu)", + get_capacity(sb->bdev->bd_disk), + ca->mi.bucket_size * ca->mi.nbuckets, + ca->mi.nbuckets); return bch_err_throw(ca->fs, device_size_too_small); } @@ -1617,10 +1591,9 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb) if (ret) return ret; - struct printbuf name = PRINTBUF; + CLASS(printbuf, name)(); prt_bdevname(&name, sb->bdev); strscpy(ca->name, name.buf, sizeof(ca->name)); - printbuf_exit(&name); /* Commit: */ ca->disk_sb = *sb; @@ -1752,7 +1725,6 @@ static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca) int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, enum bch_member_state new_state, int flags) { - struct bch_member *m; int ret = 0; if (ca->mi.state == new_state) @@ -1766,11 +1738,11 @@ int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, bch_notice(ca, "%s", bch2_member_states[new_state]); - mutex_lock(&c->sb_lock); - m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); - SET_BCH_MEMBER_STATE(m, new_state); - bch2_write_super(c); - mutex_unlock(&c->sb_lock); + scoped_guard(mutex, &c->sb_lock) { + struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); + SET_BCH_MEMBER_STATE(m, new_state); + bch2_write_super(c); + } if (new_state == BCH_MEMBER_STATE_rw) __bch2_dev_read_write(c, ca); @@ -1783,26 +1755,20 @@ int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, enum bch_member_state new_state, int flags) { - int ret; - - down_write(&c->state_lock); - ret = __bch2_dev_set_state(c, ca, new_state, flags); - up_write(&c->state_lock); - - return ret; + guard(rwsem_write)(&c->state_lock); + return __bch2_dev_set_state(c, ca, new_state, flags); } /* Device add/removal: */ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) { - struct bch_member *m; unsigned dev_idx = ca->dev_idx, data; bool fast_device_removal = !bch2_request_incompat_feature(c, bcachefs_metadata_version_fast_device_removal); int ret; - down_write(&c->state_lock); + guard(rwsem_write)(&c->state_lock); /* * We consume a reference to ca->ref, regardless of whether we succeed @@ -1869,20 +1835,17 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) data = bch2_dev_has_data(c, ca); if (data) { - struct printbuf data_has = PRINTBUF; - + CLASS(printbuf, data_has)(); prt_bitflags(&data_has, __bch2_data_types, data); bch_err(ca, "Remove failed, still has data (%s)", data_has.buf); - printbuf_exit(&data_has); ret = -EBUSY; goto err; } __bch2_dev_offline(c, ca); - mutex_lock(&c->sb_lock); - rcu_assign_pointer(c->devs[ca->dev_idx], NULL); - mutex_unlock(&c->sb_lock); + scoped_guard(mutex, &c->sb_lock) + rcu_assign_pointer(c->devs[ca->dev_idx], NULL); #ifndef CONFIG_BCACHEFS_DEBUG percpu_ref_kill(&ca->ref); @@ -1898,25 +1861,23 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) * Free this device's slot in the bch_member array - all pointers to * this device must be gone: */ - mutex_lock(&c->sb_lock); - m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx); + 
scoped_guard(mutex, &c->sb_lock) { + struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx); - if (fast_device_removal) - m->uuid = BCH_SB_MEMBER_DELETED_UUID; - else - memset(&m->uuid, 0, sizeof(m->uuid)); + if (fast_device_removal) + m->uuid = BCH_SB_MEMBER_DELETED_UUID; + else + memset(&m->uuid, 0, sizeof(m->uuid)); - bch2_write_super(c); + bch2_write_super(c); + } - mutex_unlock(&c->sb_lock); - up_write(&c->state_lock); return 0; err: if (test_bit(BCH_FS_rw, &c->flags) && ca->mi.state == BCH_MEMBER_STATE_rw && !enumerated_ref_is_zero(&ca->io_ref[READ])) __bch2_dev_read_write(c, ca); - up_write(&c->state_lock); return ret; } @@ -1926,8 +1887,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path) struct bch_opts opts = bch2_opts_empty(); struct bch_sb_handle sb = {}; struct bch_dev *ca = NULL; - struct printbuf errbuf = PRINTBUF; - struct printbuf label = PRINTBUF; + CLASS(printbuf, label)(); int ret = 0; ret = bch2_read_super(path, &opts, &sb); @@ -1946,12 +1906,12 @@ int bch2_dev_add(struct bch_fs *c, const char *path) } if (list_empty(&c->list)) { - mutex_lock(&bch_fs_list_lock); - if (__bch2_uuid_to_fs(c->sb.uuid)) - ret = bch_err_throw(c, filesystem_uuid_already_open); - else - list_add(&c->list, &bch_fs_list); - mutex_unlock(&bch_fs_list_lock); + scoped_guard(mutex, &bch_fs_list_lock) { + if (__bch2_uuid_to_fs(c->sb.uuid)) + ret = bch_err_throw(c, filesystem_uuid_already_open); + else + list_add(&c->list, &bch_fs_list); + } if (ret) { bch_err(c, "filesystem UUID already open"); @@ -1973,105 +1933,95 @@ int bch2_dev_add(struct bch_fs *c, const char *path) if (ret) goto err; - down_write(&c->state_lock); - mutex_lock(&c->sb_lock); - SET_BCH_SB_MULTI_DEVICE(c->disk_sb.sb, true); - - ret = bch2_sb_from_fs(c, ca); - bch_err_msg(c, ret, "setting up new superblock"); - if (ret) - goto err_unlock; + scoped_guard(rwsem_write, &c->state_lock) { + scoped_guard(mutex, &c->sb_lock) { + SET_BCH_SB_MULTI_DEVICE(c->disk_sb.sb, true); - if (dynamic_fault("bcachefs:add:no_slot")) - goto err_unlock; + ret = bch2_sb_from_fs(c, ca); + bch_err_msg(c, ret, "setting up new superblock"); + if (ret) + goto err; - ret = bch2_sb_member_alloc(c); - if (ret < 0) { - bch_err_msg(c, ret, "setting up new superblock"); - goto err_unlock; - } - unsigned dev_idx = ret; - ret = 0; + if (dynamic_fault("bcachefs:add:no_slot")) + goto err; - /* success: */ + ret = bch2_sb_member_alloc(c); + if (ret < 0) { + bch_err_msg(c, ret, "setting up new superblock"); + goto err; + } + unsigned dev_idx = ret; + ret = 0; - dev_mi.last_mount = cpu_to_le64(ktime_get_real_seconds()); - *bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx) = dev_mi; + /* success: */ - ca->disk_sb.sb->dev_idx = dev_idx; - bch2_dev_attach(c, ca, dev_idx); + dev_mi.last_mount = cpu_to_le64(ktime_get_real_seconds()); + *bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx) = dev_mi; - set_bit(ca->dev_idx, c->online_devs.d); + ca->disk_sb.sb->dev_idx = dev_idx; + bch2_dev_attach(c, ca, dev_idx); - if (BCH_MEMBER_GROUP(&dev_mi)) { - ret = __bch2_dev_group_set(c, ca, label.buf); - bch_err_msg(c, ret, "creating new label"); - if (ret) { - mutex_unlock(&c->sb_lock); - goto err_late; - } - } + set_bit(ca->dev_idx, c->online_devs.d); - bch2_write_super(c); - mutex_unlock(&c->sb_lock); + if (BCH_MEMBER_GROUP(&dev_mi)) { + ret = __bch2_dev_group_set(c, ca, label.buf); + bch_err_msg(c, ret, "creating new label"); + if (ret) + goto err_late; + } - ret = bch2_dev_usage_init(ca, false); - if (ret) - goto err_late; + bch2_write_super(c); + } - if 
(test_bit(BCH_FS_started, &c->flags)) { - ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional); - bch_err_msg(ca, ret, "marking new superblock"); + ret = bch2_dev_usage_init(ca, false); if (ret) goto err_late; - ret = bch2_fs_freespace_init(c); - bch_err_msg(ca, ret, "initializing free space"); - if (ret) - goto err_late; + if (test_bit(BCH_FS_started, &c->flags)) { + ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional); + bch_err_msg(ca, ret, "marking new superblock"); + if (ret) + goto err_late; - if (ca->mi.state == BCH_MEMBER_STATE_rw) - __bch2_dev_read_write(c, ca); + ret = bch2_fs_freespace_init(c); + bch_err_msg(ca, ret, "initializing free space"); + if (ret) + goto err_late; - ret = bch2_dev_journal_alloc(ca, false); - bch_err_msg(c, ret, "allocating journal"); - if (ret) - goto err_late; - } + if (ca->mi.state == BCH_MEMBER_STATE_rw) + __bch2_dev_read_write(c, ca); - /* - * We just changed the superblock UUID, invalidate cache and send a - * uevent to update /dev/disk/by-uuid - */ - invalidate_bdev(ca->disk_sb.bdev); + ret = bch2_dev_journal_alloc(ca, false); + bch_err_msg(c, ret, "allocating journal"); + if (ret) + goto err_late; + } - char uuid_str[37]; - snprintf(uuid_str, sizeof(uuid_str), "UUID=%pUb", &c->sb.uuid); + /* + * We just changed the superblock UUID, invalidate cache and send a + * uevent to update /dev/disk/by-uuid + */ + invalidate_bdev(ca->disk_sb.bdev); - char *envp[] = { - "CHANGE=uuid", - uuid_str, - NULL, - }; - kobject_uevent_env(&ca->disk_sb.bdev->bd_device.kobj, KOBJ_CHANGE, envp); + char uuid_str[37]; + snprintf(uuid_str, sizeof(uuid_str), "UUID=%pUb", &c->sb.uuid); - up_write(&c->state_lock); + char *envp[] = { + "CHANGE=uuid", + uuid_str, + NULL, + }; + kobject_uevent_env(&ca->disk_sb.bdev->bd_device.kobj, KOBJ_CHANGE, envp); + } out: - printbuf_exit(&label); - printbuf_exit(&errbuf); bch_err_fn(c, ret); return ret; - -err_unlock: - mutex_unlock(&c->sb_lock); - up_write(&c->state_lock); err: if (ca) bch2_dev_free(ca); bch2_free_super(&sb); goto out; err_late: - up_write(&c->state_lock); ca = NULL; goto err; } @@ -2085,13 +2035,11 @@ int bch2_dev_online(struct bch_fs *c, const char *path) unsigned dev_idx; int ret; - down_write(&c->state_lock); + guard(rwsem_write)(&c->state_lock); ret = bch2_read_super(path, &opts, &sb); - if (ret) { - up_write(&c->state_lock); + if (ret) return ret; - } dev_idx = sb.sb->dev_idx; @@ -2128,39 +2076,33 @@ int bch2_dev_online(struct bch_fs *c, const char *path) goto err; } - mutex_lock(&c->sb_lock); - bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = - cpu_to_le64(ktime_get_real_seconds()); - bch2_write_super(c); - mutex_unlock(&c->sb_lock); + scoped_guard(mutex, &c->sb_lock) { + bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = + cpu_to_le64(ktime_get_real_seconds()); + bch2_write_super(c); + } - up_write(&c->state_lock); return 0; err: - up_write(&c->state_lock); bch2_free_super(&sb); return ret; } int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags) { - down_write(&c->state_lock); + guard(rwsem_write)(&c->state_lock); if (!bch2_dev_is_online(ca)) { bch_err(ca, "Already offline"); - up_write(&c->state_lock); return 0; } if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) { bch_err(ca, "Cannot offline required disk"); - up_write(&c->state_lock); return bch_err_throw(c, device_state_not_allowed); } __bch2_dev_offline(c, ca); - - up_write(&c->state_lock); return 0; } @@ -2178,60 +2120,54 @@ static int __bch2_dev_resize_alloc(struct 
bch_dev *ca, u64 old_nbuckets, u64 new int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) { - struct bch_member *m; u64 old_nbuckets; int ret = 0; - down_write(&c->state_lock); + guard(rwsem_write)(&c->state_lock); old_nbuckets = ca->mi.nbuckets; if (nbuckets < ca->mi.nbuckets) { bch_err(ca, "Cannot shrink yet"); - ret = -EINVAL; - goto err; + return -EINVAL; } if (nbuckets > BCH_MEMBER_NBUCKETS_MAX) { bch_err(ca, "New device size too big (%llu greater than max %u)", nbuckets, BCH_MEMBER_NBUCKETS_MAX); - ret = bch_err_throw(c, device_size_too_big); - goto err; + return bch_err_throw(c, device_size_too_big); } if (bch2_dev_is_online(ca) && get_capacity(ca->disk_sb.bdev->bd_disk) < ca->mi.bucket_size * nbuckets) { bch_err(ca, "New size larger than device"); - ret = bch_err_throw(c, device_size_too_small); - goto err; + return bch_err_throw(c, device_size_too_small); } ret = bch2_dev_buckets_resize(c, ca, nbuckets); bch_err_msg(ca, ret, "resizing buckets"); if (ret) - goto err; + return ret; ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional); if (ret) - goto err; + return ret; - mutex_lock(&c->sb_lock); - m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); - m->nbuckets = cpu_to_le64(nbuckets); + scoped_guard(mutex, &c->sb_lock) { + struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); + m->nbuckets = cpu_to_le64(nbuckets); - bch2_write_super(c); - mutex_unlock(&c->sb_lock); + bch2_write_super(c); + } if (ca->mi.freespace_initialized) { ret = __bch2_dev_resize_alloc(ca, old_nbuckets, nbuckets); if (ret) - goto err; + return ret; } bch2_recalc_capacity(c); -err: - up_write(&c->state_lock); - return ret; + return 0; } int bch2_fs_resize_on_mount(struct bch_fs *c) @@ -2249,26 +2185,24 @@ int bch2_fs_resize_on_mount(struct bch_fs *c) if (ret) { enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_fs_resize_on_mount); - up_write(&c->state_lock); return ret; } - mutex_lock(&c->sb_lock); - struct bch_member *m = - bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); - m->nbuckets = cpu_to_le64(new_nbuckets); - SET_BCH_MEMBER_RESIZE_ON_MOUNT(m, false); + scoped_guard(mutex, &c->sb_lock) { + struct bch_member *m = + bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); + m->nbuckets = cpu_to_le64(new_nbuckets); + SET_BCH_MEMBER_RESIZE_ON_MOUNT(m, false); - c->disk_sb.sb->features[0] &= ~cpu_to_le64(BIT_ULL(BCH_FEATURE_small_image)); - bch2_write_super(c); - mutex_unlock(&c->sb_lock); + c->disk_sb.sb->features[0] &= ~cpu_to_le64(BIT_ULL(BCH_FEATURE_small_image)); + bch2_write_super(c); + } if (ca->mi.freespace_initialized) { ret = __bch2_dev_resize_alloc(ca, old_nbuckets, new_nbuckets); if (ret) { enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_fs_resize_on_mount); - up_write(&c->state_lock); return ret; } } @@ -2307,6 +2241,10 @@ static struct bch_fs *bdev_get_fs(struct block_device *bdev) return c; } +DEFINE_CLASS(bdev_get_fs, struct bch_fs *, + bch2_ro_ref_put(_T), bdev_get_fs(bdev), + struct block_device *bdev); + /* returns with ref on ca->ref */ static struct bch_dev *bdev_to_bch_dev(struct bch_fs *c, struct block_device *bdev) { @@ -2318,7 +2256,7 @@ static struct bch_dev *bdev_to_bch_dev(struct bch_fs *c, struct block_device *bd static void bch2_fs_bdev_mark_dead(struct block_device *bdev, bool surprise) { - struct bch_fs *c = bdev_get_fs(bdev); + CLASS(bdev_get_fs, c)(bdev); if (!c) return; @@ -2332,48 +2270,45 @@ static void bch2_fs_bdev_mark_dead(struct block_device *bdev, bool surprise) down_read(&sb->s_umount); } - 
down_write(&c->state_lock); + guard(rwsem_write)(&c->state_lock); + struct bch_dev *ca = bdev_to_bch_dev(c, bdev); - if (!ca) - goto unlock; + if (ca) { + bool dev = bch2_dev_state_allowed(c, ca, + BCH_MEMBER_STATE_failed, + BCH_FORCE_IF_DEGRADED); + + if (!dev && sb) { + if (!surprise) + sync_filesystem(sb); + shrink_dcache_sb(sb); + evict_inodes(sb); + } - bool dev = bch2_dev_state_allowed(c, ca, - BCH_MEMBER_STATE_failed, - BCH_FORCE_IF_DEGRADED); + CLASS(printbuf, buf)(); + __bch2_log_msg_start(ca->name, &buf); - if (!dev && sb) { - if (!surprise) - sync_filesystem(sb); - shrink_dcache_sb(sb); - evict_inodes(sb); - } + prt_printf(&buf, "offline from block layer"); - struct printbuf buf = PRINTBUF; - __bch2_log_msg_start(ca->name, &buf); + if (dev) { + __bch2_dev_offline(c, ca); + } else { + bch2_journal_flush(&c->journal); + bch2_fs_emergency_read_only2(c, &buf); + } - prt_printf(&buf, "offline from block layer"); + bch2_print_str(c, KERN_ERR, buf.buf); - if (dev) { - __bch2_dev_offline(c, ca); - } else { - bch2_journal_flush(&c->journal); - bch2_fs_emergency_read_only2(c, &buf); + bch2_dev_put(ca); } - bch2_print_str(c, KERN_ERR, buf.buf); - printbuf_exit(&buf); - - bch2_dev_put(ca); -unlock: if (sb) up_read(&sb->s_umount); - up_write(&c->state_lock); - bch2_ro_ref_put(c); } static void bch2_fs_bdev_sync(struct block_device *bdev) { - struct bch_fs *c = bdev_get_fs(bdev); + CLASS(bdev_get_fs, c)(bdev); if (!c) return; @@ -2384,12 +2319,9 @@ static void bch2_fs_bdev_sync(struct block_device *bdev) * unmounted - we only take this to avoid a warning in * sync_filesystem: */ - down_read(&sb->s_umount); + guard(rwsem_read)(&sb->s_umount); sync_filesystem(sb); - up_read(&sb->s_umount); } - - bch2_ro_ref_put(c); } const struct blk_holder_ops bch2_sb_handle_bdev_ops = { @@ -2411,7 +2343,6 @@ struct bch_fs *bch2_fs_open(darray_const_str *devices, bch_sb_handles sbs = {}; struct bch_fs *c = NULL; struct bch_sb_handle *best = NULL; - struct printbuf errbuf = PRINTBUF; int ret = 0; if (!try_module_get(THIS_MODULE)) @@ -2466,15 +2397,12 @@ struct bch_fs *bch2_fs_open(darray_const_str *devices, if (ret) goto err; - down_write(&c->state_lock); - darray_for_each(sbs, sb) { - ret = bch2_dev_attach_bdev(c, sb); - if (ret) { - up_write(&c->state_lock); - goto err; + scoped_guard(rwsem_write, &c->state_lock) + darray_for_each(sbs, sb) { + ret = bch2_dev_attach_bdev(c, sb); + if (ret) + goto err; } - } - up_write(&c->state_lock); if (!c->opts.nostart) { ret = bch2_fs_start(c); @@ -2485,7 +2413,6 @@ out: darray_for_each(sbs, sb) bch2_free_super(sb); darray_exit(&sbs); - printbuf_exit(&errbuf); module_put(THIS_MODULE); return c; err_print: diff --git a/libbcachefs/sysfs.c b/libbcachefs/sysfs.c index 67ae6773..158f526e 100644 --- a/libbcachefs/sysfs.c +++ b/libbcachefs/sysfs.c @@ -62,7 +62,7 @@ static ssize_t fn ## _to_text(struct printbuf *, \ static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\ char *buf) \ { \ - struct printbuf out = PRINTBUF; \ + CLASS(printbuf, out)(); \ ssize_t ret = fn ## _to_text(&out, kobj, attr); \ \ if (out.pos && out.buf[out.pos - 1] != '\n') \ @@ -75,7 +75,6 @@ static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\ ret = min_t(size_t, out.pos, PAGE_SIZE - 1); \ memcpy(buf, out.buf, ret); \ } \ - printbuf_exit(&out); \ return bch2_err_class(ret); \ } \ \ @@ -235,14 +234,13 @@ static size_t bch2_btree_cache_size(struct bch_fs *c) size_t ret = 0; struct btree *b; - mutex_lock(&bc->lock); + guard(mutex)(&bc->lock); list_for_each_entry(b, 
&bc->live[0].list, list) ret += btree_buf_bytes(b); list_for_each_entry(b, &bc->live[1].list, list) ret += btree_buf_bytes(b); list_for_each_entry(b, &bc->freeable, list) ret += btree_buf_bytes(b); - mutex_unlock(&bc->lock); return ret; } @@ -565,9 +563,8 @@ STORE(bch2_fs) closure_wake_up(&c->freelist_wait); if (attr == &sysfs_trigger_recalc_capacity) { - down_read(&c->state_lock); + guard(rwsem_read)(&c->state_lock); bch2_recalc_capacity(c); - up_read(&c->state_lock); } if (attr == &sysfs_trigger_delete_dead_snapshots) diff --git a/libbcachefs/tests.c b/libbcachefs/tests.c index 782a05fe..ea27df30 100644 --- a/libbcachefs/tests.c +++ b/libbcachefs/tests.c @@ -31,7 +31,7 @@ static void delete_test_keys(struct bch_fs *c) static int test_delete(struct bch_fs *c, u64 nr) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct btree_iter iter; struct bkey_i_cookie k; int ret; @@ -66,13 +66,12 @@ static int test_delete(struct bch_fs *c, u64 nr) goto err; err: bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); return ret; } static int test_delete_written(struct bch_fs *c, u64 nr) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct btree_iter iter; struct bkey_i_cookie k; int ret; @@ -101,7 +100,6 @@ static int test_delete_written(struct bch_fs *c, u64 nr) goto err; err: bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); return ret; } @@ -130,13 +128,14 @@ static int test_iterate(struct bch_fs *c, u64 nr) pr_info("iterating forwards"); i = 0; - ret = bch2_trans_run(c, - for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, - SPOS(0, 0, U32_MAX), POS(0, U64_MAX), - 0, k, ({ + CLASS(btree_trans, trans)(c); + + ret = for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, + SPOS(0, 0, U32_MAX), POS(0, U64_MAX), + 0, k, ({ BUG_ON(k.k->p.offset != i++); 0; - }))); + })); bch_err_msg(c, ret, "error iterating forwards"); if (ret) return ret; @@ -145,12 +144,11 @@ static int test_iterate(struct bch_fs *c, u64 nr) pr_info("iterating backwards"); - ret = bch2_trans_run(c, - for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs, + ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs, SPOS(0, U64_MAX, U32_MAX), 0, k, ({ BUG_ON(k.k->p.offset != --i); 0; - }))); + })); bch_err_msg(c, ret, "error iterating backwards"); if (ret) return ret; @@ -185,14 +183,15 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr) pr_info("iterating forwards"); i = 0; - ret = bch2_trans_run(c, - for_each_btree_key_max(trans, iter, BTREE_ID_extents, - SPOS(0, 0, U32_MAX), POS(0, U64_MAX), - 0, k, ({ + CLASS(btree_trans, trans)(c); + + ret = for_each_btree_key_max(trans, iter, BTREE_ID_extents, + SPOS(0, 0, U32_MAX), POS(0, U64_MAX), + 0, k, ({ BUG_ON(bkey_start_offset(k.k) != i); i = k.k->p.offset; 0; - }))); + })); bch_err_msg(c, ret, "error iterating forwards"); if (ret) return ret; @@ -201,13 +200,12 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr) pr_info("iterating backwards"); - ret = bch2_trans_run(c, - for_each_btree_key_reverse(trans, iter, BTREE_ID_extents, + ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_extents, SPOS(0, U64_MAX, U32_MAX), 0, k, ({ BUG_ON(k.k->p.offset != i); i = bkey_start_offset(k.k); 0; - }))); + })); bch_err_msg(c, ret, "error iterating backwards"); if (ret) return ret; @@ -241,14 +239,15 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr) pr_info("iterating forwards"); i = 0; - ret = bch2_trans_run(c, - for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, - SPOS(0, 0, 
U32_MAX), POS(0, U64_MAX), - 0, k, ({ + CLASS(btree_trans, trans)(c); + + ret = for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, + SPOS(0, 0, U32_MAX), POS(0, U64_MAX), + 0, k, ({ BUG_ON(k.k->p.offset != i); i += 2; 0; - }))); + })); bch_err_msg(c, ret, "error iterating forwards"); if (ret) return ret; @@ -258,10 +257,9 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr) pr_info("iterating forwards by slots"); i = 0; - ret = bch2_trans_run(c, - for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, - SPOS(0, 0, U32_MAX), POS(0, U64_MAX), - BTREE_ITER_slots, k, ({ + ret = for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, + SPOS(0, 0, U32_MAX), POS(0, U64_MAX), + BTREE_ITER_slots, k, ({ if (i >= nr * 2) break; @@ -270,7 +268,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr) i++; 0; - }))); + })); bch_err_msg(c, ret, "error iterating forwards by slots"); return ret; } @@ -301,15 +299,16 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr) pr_info("iterating forwards"); i = 0; - ret = bch2_trans_run(c, - for_each_btree_key_max(trans, iter, BTREE_ID_extents, - SPOS(0, 0, U32_MAX), POS(0, U64_MAX), - 0, k, ({ + CLASS(btree_trans, trans)(c); + + ret = for_each_btree_key_max(trans, iter, BTREE_ID_extents, + SPOS(0, 0, U32_MAX), POS(0, U64_MAX), + 0, k, ({ BUG_ON(bkey_start_offset(k.k) != i + 8); BUG_ON(k.k->size != 8); i += 16; 0; - }))); + })); bch_err_msg(c, ret, "error iterating forwards"); if (ret) return ret; @@ -319,10 +318,9 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr) pr_info("iterating forwards by slots"); i = 0; - ret = bch2_trans_run(c, - for_each_btree_key_max(trans, iter, BTREE_ID_extents, - SPOS(0, 0, U32_MAX), POS(0, U64_MAX), - BTREE_ITER_slots, k, ({ + ret = for_each_btree_key_max(trans, iter, BTREE_ID_extents, + SPOS(0, 0, U32_MAX), POS(0, U64_MAX), + BTREE_ITER_slots, k, ({ if (i == nr) break; BUG_ON(bkey_deleted(k.k) != !(i % 16)); @@ -331,7 +329,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr) BUG_ON(k.k->size != 8); i = k.k->p.offset; 0; - }))); + })); bch_err_msg(c, ret, "error iterating forwards by slots"); return ret; } @@ -344,7 +342,7 @@ static int test_peek_end(struct bch_fs *c, u64 nr) { delete_test_keys(c); - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct btree_iter iter; struct bkey_s_c k; @@ -358,7 +356,6 @@ static int test_peek_end(struct bch_fs *c, u64 nr) BUG_ON(k.k); bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); return 0; } @@ -366,7 +363,7 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr) { delete_test_keys(c); - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct btree_iter iter; struct bkey_s_c k; @@ -380,7 +377,6 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr) BUG_ON(k.k); bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); return 0; } @@ -392,15 +388,13 @@ static int insert_test_extent(struct bch_fs *c, u64 start, u64 end) { struct bkey_i_cookie k; - int ret; - bkey_cookie_init(&k.k_i); k.k_i.k.p.offset = end; k.k_i.k.p.snapshot = U32_MAX; k.k_i.k.size = end - start; k.k_i.k.bversion.lo = test_version++; - ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0, 0); + int ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0, 0); bch_err_fn(c, ret); return ret; } @@ -446,15 +440,14 @@ static int test_extent_overwrite_all(struct bch_fs *c, u64 nr) static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start, u32 len, u32 snapid) 
{ struct bkey_i_cookie k; - int ret; - bkey_cookie_init(&k.k_i); k.k_i.k.p.inode = inum; k.k_i.k.p.offset = start + len; k.k_i.k.p.snapshot = snapid; k.k_i.k.size = len; - ret = bch2_trans_commit_do(c, NULL, NULL, 0, + CLASS(btree_trans, trans)(c); + int ret = commit_do(trans, NULL, NULL, 0, bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i, BTREE_UPDATE_internal_snapshot_node)); bch_err_fn(c, ret); @@ -477,7 +470,6 @@ static int test_extent_create_overlapping(struct bch_fs *c, u64 inum) /* Test skipping over keys in unrelated snapshots: */ static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi) { - struct btree_trans *trans; struct btree_iter iter; struct bkey_s_c k; struct bkey_i_cookie cookie; @@ -489,7 +481,7 @@ static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi) if (ret) return ret; - trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, SPOS(0, 0, snapid_lo), 0); lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX)))); @@ -497,28 +489,28 @@ static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi) BUG_ON(k.k->p.snapshot != U32_MAX); bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); return ret; } static int test_snapshots(struct bch_fs *c, u64 nr) { struct bkey_i_cookie cookie; - u32 snapids[2]; - u32 snapid_subvols[2] = { 1, 1 }; - int ret; - bkey_cookie_init(&cookie.k_i); cookie.k.p.snapshot = U32_MAX; - ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0); + + int ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0, 0); if (ret) return ret; - ret = bch2_trans_commit_do(c, NULL, NULL, 0, - bch2_snapshot_node_create(trans, U32_MAX, - snapids, - snapid_subvols, - 2)); + u32 snapids[2]; + u32 snapid_subvols[2] = { 1, 1 }; + + CLASS(btree_trans, trans)(c); + ret = commit_do(trans, NULL, NULL, 0, + bch2_snapshot_node_create(trans, U32_MAX, + snapids, + snapid_subvols, + 2)); if (ret) return ret; @@ -542,42 +534,37 @@ static u64 test_rand(void) static int rand_insert(struct bch_fs *c, u64 nr) { - struct btree_trans *trans = bch2_trans_get(c); - struct bkey_i_cookie k; - int ret = 0; - u64 i; + CLASS(btree_trans, trans)(c); - for (i = 0; i < nr; i++) { + for (u64 i = 0; i < nr; i++) { + struct bkey_i_cookie k; bkey_cookie_init(&k.k_i); k.k.p.offset = test_rand(); k.k.p.snapshot = U32_MAX; - ret = commit_do(trans, NULL, NULL, 0, + int ret = commit_do(trans, NULL, NULL, 0, bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k.k_i, 0)); if (ret) - break; + return ret; } - bch2_trans_put(trans); - return ret; + return 0; } static int rand_insert_multi(struct bch_fs *c, u64 nr) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct bkey_i_cookie k[8]; - int ret = 0; unsigned j; - u64 i; - for (i = 0; i < nr; i += ARRAY_SIZE(k)) { + for (u64 i = 0; i < nr; i += ARRAY_SIZE(k)) { for (j = 0; j < ARRAY_SIZE(k); j++) { bkey_cookie_init(&k[j].k_i); k[j].k.p.offset = test_rand(); k[j].k.p.snapshot = U32_MAX; } - ret = commit_do(trans, NULL, NULL, 0, + int ret = commit_do(trans, NULL, NULL, 0, bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[0].k_i, 0) ?: bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[1].k_i, 0) ?: bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[2].k_i, 0) ?: @@ -587,25 +574,23 @@ static int rand_insert_multi(struct bch_fs *c, u64 nr) bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?: bch2_btree_insert_trans(trans, 
BTREE_ID_xattrs, &k[7].k_i, 0)); if (ret) - break; + return ret; } - bch2_trans_put(trans); - return ret; + return 0; } static int rand_lookup(struct bch_fs *c, u64 nr) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct btree_iter iter; struct bkey_s_c k; int ret = 0; - u64 i; bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0); - for (i = 0; i < nr; i++) { + for (u64 i = 0; i < nr; i++) { bch2_btree_iter_set_pos(trans, &iter, SPOS(0, test_rand(), U32_MAX)); lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(trans, &iter))); @@ -615,7 +600,6 @@ static int rand_lookup(struct bch_fs *c, u64 nr) } bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); return ret; } @@ -646,17 +630,16 @@ static int rand_mixed_trans(struct btree_trans *trans, static int rand_mixed(struct bch_fs *c, u64 nr) { - struct btree_trans *trans = bch2_trans_get(c); + CLASS(btree_trans, trans)(c); struct btree_iter iter; struct bkey_i_cookie cookie; int ret = 0; - u64 i, rand; bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0); - for (i = 0; i < nr; i++) { - rand = test_rand(); + for (u64 i = 0; i < nr; i++) { + u64 rand = test_rand(); ret = commit_do(trans, NULL, NULL, 0, rand_mixed_trans(trans, &iter, &cookie, i, rand)); if (ret) @@ -664,7 +647,6 @@ static int rand_mixed(struct bch_fs *c, u64 nr) } bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); return ret; } @@ -692,31 +674,27 @@ err: static int rand_delete(struct bch_fs *c, u64 nr) { - struct btree_trans *trans = bch2_trans_get(c); - int ret = 0; - u64 i; + CLASS(btree_trans, trans)(c); - for (i = 0; i < nr; i++) { + for (u64 i = 0; i < nr; i++) { struct bpos pos = SPOS(0, test_rand(), U32_MAX); - ret = commit_do(trans, NULL, NULL, 0, + int ret = commit_do(trans, NULL, NULL, 0, __do_delete(trans, pos)); if (ret) - break; + return ret; } - bch2_trans_put(trans); - return ret; + return 0; } static int seq_insert(struct bch_fs *c, u64 nr) { struct bkey_i_cookie insert; - bkey_cookie_init(&insert.k_i); - return bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), BTREE_ITER_slots|BTREE_ITER_intent, k, NULL, NULL, 0, ({ @@ -724,22 +702,22 @@ static int seq_insert(struct bch_fs *c, u64 nr) break; insert.k.p = iter.pos; bch2_trans_update(trans, &iter, &insert.k_i, 0); - }))); + })); } static int seq_lookup(struct bch_fs *c, u64 nr) { - return bch2_trans_run(c, - for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 0, k, - 0)); + 0); } static int seq_overwrite(struct bch_fs *c, u64 nr) { - return bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, + CLASS(btree_trans, trans)(c); + return for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), BTREE_ITER_intent, k, NULL, NULL, 0, ({ @@ -747,7 +725,7 @@ static int seq_overwrite(struct bch_fs *c, u64 nr) bkey_reassemble(&u.k_i, k); bch2_trans_update(trans, &iter, &u.k_i, 0); - }))); + })); } static int seq_delete(struct bch_fs *c, u64 nr) @@ -808,8 +786,8 @@ int bch2_btree_perf_test(struct bch_fs *c, const char *testname, { struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads }; char name_buf[20]; - struct printbuf nr_buf = PRINTBUF; - struct printbuf per_sec_buf = PRINTBUF; + CLASS(printbuf, 
nr_buf)(); + CLASS(printbuf, per_sec_buf)(); unsigned i; u64 time; @@ -883,8 +861,6 @@ int bch2_btree_perf_test(struct bch_fs *c, const char *testname, div_u64(time, NSEC_PER_SEC), div_u64(time * nr_threads, nr), per_sec_buf.buf); - printbuf_exit(&per_sec_buf); - printbuf_exit(&nr_buf); return j.ret; } diff --git a/libbcachefs/thread_with_file.c b/libbcachefs/thread_with_file.c index 314a24d1..c2eae0ab 100644 --- a/libbcachefs/thread_with_file.c +++ b/libbcachefs/thread_with_file.c @@ -60,8 +60,7 @@ int bch2_run_thread_with_file(struct thread_with_file *thr, err: if (fd >= 0) put_unused_fd(fd); - if (thr->task) - kthread_stop(thr->task); + kthread_stop(thr->task); return ret; } @@ -185,23 +184,23 @@ static ssize_t thread_with_stdio_write(struct file *file, const char __user *ubu break; } - spin_lock(&buf->lock); - size_t makeroom = b; - if (!buf->waiting_for_line || memchr(buf->buf.data, '\n', buf->buf.nr)) - makeroom = min_t(ssize_t, makeroom, - max_t(ssize_t, STDIO_REDIRECT_BUFSIZE - buf->buf.nr, - 0)); - darray_make_room_gfp(&buf->buf, makeroom, GFP_NOWAIT); - - b = min(len, darray_room(buf->buf)); - - if (b && !copy_from_user_nofault(&darray_top(buf->buf), ubuf, b)) { - buf->buf.nr += b; - ubuf += b; - len -= b; - copied += b; + scoped_guard(spinlock, &buf->lock) { + size_t makeroom = b; + if (!buf->waiting_for_line || memchr(buf->buf.data, '\n', buf->buf.nr)) + makeroom = min_t(ssize_t, makeroom, + max_t(ssize_t, STDIO_REDIRECT_BUFSIZE - buf->buf.nr, + 0)); + darray_make_room_gfp(&buf->buf, makeroom, GFP_NOWAIT); + + b = min(len, darray_room(buf->buf)); + + if (b && !copy_from_user_nofault(&darray_top(buf->buf), ubuf, b)) { + buf->buf.nr += b; + ubuf += b; + len -= b; + copied += b; + } } - spin_unlock(&buf->lock); if (b) { wake_up(&buf->wait); @@ -349,14 +348,15 @@ int bch2_stdio_redirect_read(struct stdio_redirect *stdio, char *ubuf, size_t le if (stdio->done) return -1; - spin_lock(&buf->lock); - int ret = min(len, buf->buf.nr); - buf->buf.nr -= ret; - memcpy(ubuf, buf->buf.data, ret); - memmove(buf->buf.data, - buf->buf.data + ret, - buf->buf.nr); - spin_unlock(&buf->lock); + int ret; + scoped_guard(spinlock, &buf->lock) { + ret = min(len, buf->buf.nr); + buf->buf.nr -= ret; + memcpy(ubuf, buf->buf.data, ret); + memmove(buf->buf.data, + buf->buf.data + ret, + buf->buf.nr); + } wake_up(&buf->wait); return ret; diff --git a/libbcachefs/time_stats.c b/libbcachefs/time_stats.c index 2c34fe4b..7b5fa448 100644 --- a/libbcachefs/time_stats.c +++ b/libbcachefs/time_stats.c @@ -138,10 +138,8 @@ void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end) GFP_ATOMIC); spin_unlock_irqrestore(&stats->lock, flags); } else { - struct time_stat_buffer *b; - - preempt_disable(); - b = this_cpu_ptr(stats->buffer); + guard(preempt)(); + struct time_stat_buffer *b = this_cpu_ptr(stats->buffer); BUG_ON(b->nr >= ARRAY_SIZE(b->entries)); b->entries[b->nr++] = (struct time_stat_buffer_entry) { @@ -151,7 +149,6 @@ void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end) if (unlikely(b->nr == ARRAY_SIZE(b->entries))) time_stats_clear_buffer(stats, b); - preempt_enable(); } } diff --git a/libbcachefs/util.c b/libbcachefs/util.c index 7a4436fd..2ded7f3c 100644 --- a/libbcachefs/util.c +++ b/libbcachefs/util.c @@ -321,11 +321,10 @@ void bch2_prt_backtrace(struct printbuf *out, bch_stacktrace *stack) int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr, gfp_t gfp) { - bch_stacktrace stack = { 0 }; + CLASS(bch_stacktrace, 
stack)(); int ret = bch2_save_backtrace(&stack, task, skipnr + 1, gfp); bch2_prt_backtrace(out, &stack); - darray_exit(&stack); return ret; } @@ -982,9 +981,8 @@ u64 *bch2_acc_percpu_u64s(u64 __percpu *p, unsigned nr) int cpu; /* access to pcpu vars has to be blocked by other locking */ - preempt_disable(); - ret = this_cpu_ptr(p); - preempt_enable(); + scoped_guard(preempt) + ret = this_cpu_ptr(p); for_each_possible_cpu(cpu) { u64 *i = per_cpu_ptr(p, cpu); diff --git a/libbcachefs/util.h b/libbcachefs/util.h index 6488f098..768528c2 100644 --- a/libbcachefs/util.h +++ b/libbcachefs/util.h @@ -216,7 +216,8 @@ void bch2_prt_u64_base2(struct printbuf *, u64); void bch2_print_string_as_lines(const char *, const char *); -typedef DARRAY(unsigned long) bch_stacktrace; +DEFINE_DARRAY_NAMED(bch_stacktrace, unsigned long); + int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t); void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *); int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned, gfp_t); diff --git a/libbcachefs/xattr.c b/libbcachefs/xattr.c index 627f1537..903e20cd 100644 --- a/libbcachefs/xattr.c +++ b/libbcachefs/xattr.c @@ -313,8 +313,8 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) struct xattr_buf buf = { .buf = buffer, .len = buffer_size }; u64 offset = 0, inum = inode->ei_inode.bi_inum; - int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_xattrs, + CLASS(btree_trans, trans)(c); + int ret = for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_xattrs, POS(inum, offset), POS(inum, U64_MAX), inode->ei_inum.subvol, 0, k, ({ @@ -322,7 +322,7 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) continue; bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v, &buf); - }))) ?: + })) ?: bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, false) ?: bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, true); @@ -335,9 +335,10 @@ static int bch2_xattr_get_handler(const struct xattr_handler *handler, { struct bch_inode_info *inode = to_bch_ei(vinode); struct bch_fs *c = inode->v.i_sb->s_fs_info; - int ret = bch2_trans_do(c, - bch2_xattr_get_trans(trans, inode, name, buffer, size, handler->flags)); + CLASS(btree_trans, trans)(c); + int ret = lockrestart_do(trans, + bch2_xattr_get_trans(trans, inode, name, buffer, size, handler->flags)); if (ret < 0 && bch2_err_matches(ret, ENOENT)) ret = -ENODATA; @@ -356,12 +357,12 @@ static int bch2_xattr_set_handler(const struct xattr_handler *handler, struct bch_inode_unpacked inode_u; int ret; - ret = bch2_trans_run(c, - commit_do(trans, NULL, NULL, 0, + CLASS(btree_trans, trans)(c); + ret = commit_do(trans, NULL, NULL, 0, bch2_xattr_set(trans, inode_inum(inode), &inode_u, &hash, name, value, size, handler->flags, flags)) ?: - (bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME), 0)); + (bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME), 0); return bch2_err_class(ret); } @@ -418,7 +419,6 @@ static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler, bch2_inode_opts_to_opts(&inode->ei_inode); const struct bch_option *opt; int id, inode_opt_id; - struct printbuf out = PRINTBUF; int ret; u64 v; @@ -439,6 +439,7 @@ static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler, !(inode->ei_inode.bi_fields_set & (1 << inode_opt_id))) return -ENODATA; + CLASS(printbuf, out)(); v = bch2_opt_get_by_id(&opts, id); bch2_opt_to_text(&out, c, 
c->disk_sb.sb, opt, v, 0); @@ -453,7 +454,6 @@ static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler, memcpy(buffer, out.buf, out.pos); } - printbuf_exit(&out); return ret; } @@ -532,11 +532,11 @@ static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler, kfree(buf); if (ret < 0) - goto err_class_exit; + goto err; ret = bch2_opt_hook_pre_set(c, NULL, opt_id, v); if (ret < 0) - goto err_class_exit; + goto err; s.v = v + 1; s.defined = true; @@ -548,7 +548,7 @@ static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler, * rename() also has to deal with keeping inherited options up * to date - see bch2_reinherit_attrs() */ - spin_lock(&dentry->d_lock); + guard(spinlock)(&dentry->d_lock); if (!IS_ROOT(dentry)) { struct bch_inode_info *dir = to_bch_ei(d_inode(dentry->d_parent)); @@ -557,26 +557,24 @@ static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler, } else { s.v = 0; } - spin_unlock(&dentry->d_lock); s.defined = false; } - mutex_lock(&inode->ei_update_lock); - if (inode_opt_id == Inode_opt_project) { - /* - * inode fields accessible via the xattr interface are stored - * with a +1 bias, so that 0 means unset: - */ - ret = bch2_set_projid(c, inode, s.v ? s.v - 1 : 0); - if (ret) - goto err; - } + scoped_guard(mutex, &inode->ei_update_lock) { + if (inode_opt_id == Inode_opt_project) { + /* + * inode fields accessible via the xattr interface are stored + * with a +1 bias, so that 0 means unset: + */ + ret = bch2_set_projid(c, inode, s.v ? s.v - 1 : 0); + if (ret) + goto err; + } - ret = bch2_write_inode(c, inode, inode_opt_set_fn, &s, 0); + ret = bch2_write_inode(c, inode, inode_opt_set_fn, &s, 0); + } err: - mutex_unlock(&inode->ei_update_lock); -err_class_exit: return bch2_err_class(ret); }
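
A note on the recurring transformation in the hunks above: nearly every change in super.c replaces a manual lock/unlock pair (mutex_lock()/mutex_unlock(), down_write()/up_write(), spin_lock()/spin_unlock()) with guard() or scoped_guard() from the kernel's <linux/cleanup.h>, which tie the unlock to a local variable's scope so that every return path releases the lock and the goto-unlock labels can be deleted. Below is a minimal user-space sketch of that mechanism built on __attribute__((cleanup)) and pthreads; guard_mutex, scoped_guard_mutex and mutex_unlocker are names invented for the illustration, not the kernel's macros.

#include <pthread.h>
#include <stdio.h>

/* Called when the annotated variable goes out of scope, on any exit path. */
static void mutex_unlocker(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* guard(): lock now, unlock automatically at the end of the enclosing scope. */
#define guard_mutex(m)							\
	pthread_mutex_t *_guard __attribute__((cleanup(mutex_unlocker))) = \
		(pthread_mutex_lock(m), (m))

/* scoped_guard(): same idea, but the critical section is an explicit block. */
#define scoped_guard_mutex(m)						\
	for (pthread_mutex_t *_g __attribute__((cleanup(mutex_unlocker))) = \
		(pthread_mutex_lock(m), (m)), *_once = (m);		\
	     _once; _once = NULL)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int value;

static int set_value(int v)
{
	guard_mutex(&lock);

	if (v < 0)
		return -1;	/* early return: the unlock still runs */

	value = v;
	return 0;
}

int main(void)
{
	scoped_guard_mutex(&lock)
		value = 1;	/* unlocked as soon as this block ends */

	printf("%d %d value=%d\n", set_value(7), set_value(-1), value);
	return 0;
}

This is why hunks like bch2_fs_read_write_early() collapse from lock/call/unlock/return into two lines: once the unlock is scope-bound, returning the callee's result directly is safe.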
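The printbuf, btree_trans and bdev_get_fs conversions use the companion CLASS()/DEFINE_CLASS() macros: a "class" pairs a constructor expression with a destructor expression, and CLASS(name, var)(args) declares a local whose destructor fires at end of scope, which is why the printbuf_exit(), bch2_trans_put() and bch2_ro_ref_put() calls disappear from every exit path above. The following standalone sketch approximates the macro shape (modelled on <linux/cleanup.h>, not a verbatim copy), with a toy heap-buffer class standing in for printbuf:

#include <stdio.h>
#include <stdlib.h>

/* Approximation of DEFINE_CLASS(name, type, exit-expr, init-expr, args...). */
#define DEFINE_CLASS(name, type, dtor, init, init_args...)		\
typedef type class_##name##_t;						\
static inline void class_##name##_destructor(type *p)			\
{ type _T = *p; dtor; }							\
static inline type class_##name##_constructor(init_args)		\
{ return init; }

/* CLASS(name, var)(args): construct var now, destruct at end of scope. */
#define CLASS(name, var)						\
	class_##name##_t var						\
	__attribute__((cleanup(class_##name##_destructor))) =		\
		class_##name##_constructor

/* Toy class: a zeroed heap buffer that frees itself. */
DEFINE_CLASS(buffer, char *, free(_T), calloc(1, n), size_t n)

int main(void)
{
	CLASS(buffer, buf)(64);		/* cf. CLASS(printbuf, buf)() */

	snprintf(buf, 64, "destructor runs on every return path");
	puts(buf);
	return 0;			/* free(buf) happens here */
}

The DEFINE_CLASS(bdev_get_fs, ...) added in super.c follows this calling convention: the init expression is the existing bdev_get_fs(bdev) lookup, the exit expression is bch2_ro_ref_put(_T), so both bch2_fs_bdev_mark_dead() and bch2_fs_bdev_sync() can drop their manual ref-put on every path. Likewise, in tests.c and xattr.c the same machinery replaces the bch2_trans_get()/bch2_trans_put() pairing: CLASS(btree_trans, trans)(c) constructs the transaction in place, which is also why the bch2_trans_run()/bch2_trans_do() wrappers, whose whole job was the paired get/put, unwrap into direct for_each_btree_key_*(), commit_do() and lockrestart_do() calls.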
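Similarly, util.h switches bch_stacktrace from a bare typedef DARRAY(unsigned long) to DEFINE_DARRAY_NAMED(bch_stacktrace, unsigned long). A plain typedef gives CLASS() nothing to hook, so the named form presumably also emits the constructor/destructor pair that lets bch2_prt_task_backtrace() replace its explicit darray_exit(&stack) with CLASS(bch_stacktrace, stack)(). A self-contained sketch of that assumed shape; the stand-in DARRAY, DEFINE_DARRAY_NAMED and CLASS_DARRAY below are simplified illustrations, not bcachefs's actual darray.h:

#include <stdio.h>
#include <stdlib.h>

#define DARRAY(type)	struct { type *data; size_t nr, size; }

/* Name the type *and* give it a scope-bound destructor: */
#define DEFINE_DARRAY_NAMED(name, type)					\
typedef DARRAY(type) name;						\
static inline void name##_destructor(name *d)				\
{ free(d->data); }

#define CLASS_DARRAY(name, var)						\
	name var __attribute__((cleanup(name##_destructor))) = { 0 }

DEFINE_DARRAY_NAMED(stacktrace, unsigned long)

int main(void)
{
	CLASS_DARRAY(stacktrace, stack); /* cf. CLASS(bch_stacktrace, stack)() */

	stack.data = malloc(4 * sizeof(*stack.data));
	stack.nr = 0;
	stack.data[stack.nr++] = 0xdeadbeefUL;

	printf("%zu frame(s), first %#lx\n", stack.nr, stack.data[0]);
	return 0;	/* free(stack.data) runs here */
}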