-rw-r--r--  fs/bcachefs/data_update.c | 20
-rw-r--r--  fs/bcachefs/ec.c          | 36
-rw-r--r--  fs/bcachefs/rebalance.c   |  7
-rw-r--r--  fs/bcachefs/super.c       |  5
-rw-r--r--  include/linux/workqueue.h | 12
-rw-r--r--  kernel/workqueue.c        | 14
6 files changed, 35 insertions(+), 59 deletions(-)
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index a314d70c6b8e..b7e0e31407bf 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -460,17 +460,11 @@ restart_drop_extra_replicas:
 		this_cpu_add(c->counters[BCH_COUNTER_io_move_finish], new->k.size);
 		if (trace_io_move_finish_enabled())
 			trace_io_move_finish2(m, &new->k_i, insert);
+		goto next;
 err:
-		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			ret = 0;
-		if (ret)
+		if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			break;
-next:
-		while (bkey_ge(iter.pos, bch2_keylist_front(&op->insert_keys)->k.p)) {
-			bch2_keylist_pop_front(&op->insert_keys);
-			if (bch2_keylist_empty(&op->insert_keys))
-				goto out;
-		}
+		continue;
 
 nowork:
 		if (m->stats) {
@@ -479,11 +473,15 @@ nowork:
 			atomic64_add(k.k->p.offset - iter.pos.offset,
 				     &m->stats->sectors_raced);
 		}
-
 		count_event(c, io_move_fail);
 
 		bch2_btree_iter_advance(&iter);
-		goto next;
+next:
+		while (bkey_ge(iter.pos, bch2_keylist_front(&op->insert_keys)->k.p)) {
+			bch2_keylist_pop_front(&op->insert_keys);
+			if (bch2_keylist_empty(&op->insert_keys))
+				goto out;
+		}
 	}
 out:
 	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 00e16801417a..c2840cb674b2 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -35,8 +35,6 @@
 #include <linux/raid/pq.h>
 #include <linux/raid/xor.h>
 
-static bool bch2_stripe_is_open(struct bch_fs *, u64);
-
 static void raid5_recov(unsigned disks, unsigned failed_idx,
 			size_t size, void **data)
 {
@@ -388,20 +386,11 @@ int bch2_trigger_stripe(struct btree_trans *trans,
 		       new_s->nr_redundant != old_s->nr_redundant));
 
 	if (flags & BTREE_TRIGGER_transactional) {
-		u64 old_lru_pos = stripe_lru_pos(old_s);
-		u64 new_lru_pos = stripe_lru_pos(new_s);
-
-		if (new_lru_pos == STRIPE_LRU_POS_EMPTY &&
-		    !bch2_stripe_is_open(c, idx)) {
-			_new.k->type = KEY_TYPE_deleted;
-			set_bkey_val_u64s(_new.k, 0);
-			new_s = NULL;
-			new_lru_pos = 0;
-		}
-
 		int ret = bch2_lru_change(trans,
-				BCH_LRU_STRIPE_FRAGMENTATION, idx,
-				old_lru_pos, new_lru_pos);
+					  BCH_LRU_STRIPE_FRAGMENTATION,
+					  idx,
+					  stripe_lru_pos(old_s),
+					  stripe_lru_pos(new_s));
 		if (ret)
 			return ret;
 	}
@@ -965,7 +954,7 @@ static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
 	 */
 	if (k.k->type == KEY_TYPE_stripe &&
 	    !bch2_stripe_is_open(trans->c, idx) &&
-	    stripe_lru_pos(bkey_s_c_to_stripe(k).v) == STRIPE_LRU_POS_EMPTY)
+	    stripe_lru_pos(bkey_s_c_to_stripe(k).v) == 1)
 		return bch2_btree_delete_at(trans, &iter, 0);
 
 	return 0;
@@ -1789,19 +1778,8 @@ static int __get_existing_stripe(struct btree_trans *trans,
 		return 0;
 
 	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
-
-	if (stripe_lru_pos(s.v) == STRIPE_LRU_POS_EMPTY) {
-		/*
-		 * We can't guarantee that the trigger will always delete
-		 * stripes - the stripe might still be open when the last data
-		 * in it was deleted
-		 */
-		return !bch2_stripe_is_open(c, idx)
-			? bch2_btree_delete_at(trans, &iter, 0) ?:
-			  bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?:
-			  bch_err_throw(c, transaction_restart_commit)
-			: 0;
-	}
+	if (stripe_lru_pos(s.v) <= 1)
+		return 0;
 
 	if (s.v->disk_label == head->disk_label &&
 	    s.v->algorithm == head->algo &&
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index 17ca56b0e2ac..e1db63d75a99 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -444,8 +444,9 @@ static int do_rebalance_extent(struct moving_context *ctxt,
 
 	bch2_bkey_buf_init(&sk);
 
-	ret = bkey_err(k = next_rebalance_extent(trans, work_pos,
-						 extent_iter, &io_opts, &data_opts));
+	ret = lockrestart_do(trans,
+		bkey_err(k = next_rebalance_extent(trans, work_pos,
+						   extent_iter, &io_opts, &data_opts)));
 	if (ret || !k.k)
 		goto out;
@@ -587,7 +588,7 @@ static int do_rebalance(struct moving_context *ctxt)
 		ret = k->k.type == KEY_TYPE_cookie
 			? do_rebalance_scan(ctxt, k->k.p.inode,
 					    le64_to_cpu(bkey_i_to_cookie(k)->v.cookie))
-			: lockrestart_do(trans, do_rebalance_extent(ctxt, k->k.p, &extent_iter));
+			: do_rebalance_extent(ctxt, k->k.p, &extent_iter);
 		if (ret)
 			break;
 	}
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index ef15e614f4f3..09e7f8ae9922 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -2542,11 +2542,6 @@ struct bch_fs *bch2_fs_open(darray_const_str *devices,
 		BUG_ON(darray_push(&sbs, sb));
 	}
 
-	if (opts->nochanges && !opts->read_only) {
-		ret = bch_err_throw(c, erofs_nochanges);
-		goto err_print;
-	}
-
 	darray_for_each(sbs, sb)
 		if (!best || sb_cmp(sb->sb, best->sb) > 0)
 			best = sb;
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 6e30f275da77..e907c9bb840c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -6,6 +6,7 @@
 #ifndef _LINUX_WORKQUEUE_H
 #define _LINUX_WORKQUEUE_H
 
+#include <linux/alloc_tag.h>
 #include <linux/timer.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
@@ -505,7 +506,8 @@ void workqueue_softirq_dead(unsigned int cpu);
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
 __printf(1, 4) struct workqueue_struct *
-alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
+alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...);
+#define alloc_workqueue(...) alloc_hooks(alloc_workqueue_noprof(__VA_ARGS__))
 
 #ifdef CONFIG_LOCKDEP
 /**
@@ -544,8 +546,8 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
 #define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
-	alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),	\
-				    1, lockdep_map, ##args)
+	alloc_hooks(alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),\
+				    1, lockdep_map, ##args))
 #endif
 
 /**
@@ -577,7 +579,9 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
-struct workqueue_attrs *alloc_workqueue_attrs(void);
+struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
+#define alloc_workqueue_attrs(...) alloc_hooks(alloc_workqueue_attrs_noprof(__VA_ARGS__))
+
 void free_workqueue_attrs(struct workqueue_attrs *attrs);
 int apply_workqueue_attrs(struct workqueue_struct *wq,
 			  const struct workqueue_attrs *attrs);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9f9148075828..992cb0467c21 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4629,7 +4629,7 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
 *
 * Return: The allocated new workqueue_attr on success. %NULL on failure.
 */
-struct workqueue_attrs *alloc_workqueue_attrs(void)
+struct workqueue_attrs *alloc_workqueue_attrs_noprof(void)
 {
 	struct workqueue_attrs *attrs;
 
@@ -5682,12 +5682,12 @@ static struct workqueue_struct *__alloc_workqueue(const char *fmt,
 	else
 		wq_size = sizeof(*wq);
 
-	wq = kzalloc(wq_size, GFP_KERNEL);
+	wq = kzalloc_noprof(wq_size, GFP_KERNEL);
 	if (!wq)
 		return NULL;
 
 	if (flags & WQ_UNBOUND) {
-		wq->unbound_attrs = alloc_workqueue_attrs();
+		wq->unbound_attrs = alloc_workqueue_attrs_noprof();
 		if (!wq->unbound_attrs)
 			goto err_free_wq;
 	}
@@ -5777,9 +5777,9 @@ err_destroy:
 }
 
 __printf(1, 4)
-struct workqueue_struct *alloc_workqueue(const char *fmt,
-					 unsigned int flags,
-					 int max_active, ...)
+struct workqueue_struct *alloc_workqueue_noprof(const char *fmt,
+						unsigned int flags,
+						int max_active, ...)
 {
 	struct workqueue_struct *wq;
 	va_list args;
@@ -5794,7 +5794,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 
 	return wq;
 }
-EXPORT_SYMBOL_GPL(alloc_workqueue);
+EXPORT_SYMBOL_GPL(alloc_workqueue_noprof);
 
 #ifdef CONFIG_LOCKDEP
 __printf(1, 5)
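
A note on the fs/bcachefs/rebalance.c hunks: lockrestart_do() is the bcachefs idiom for re-running a btree-transaction expression until it stops failing with a transaction-restart error. The patch moves the retry from do_rebalance(), where it wrapped the whole do_rebalance_extent() call, into do_rebalance_extent() itself, where it wraps only the next_rebalance_extent() lookup, narrowing the retry scope. A simplified sketch of the semantics follows; this is not the actual macro body (the real one also tracks and verifies restart counts):

	#define lockrestart_do(_trans, _do)					\
	({									\
		int _ret;							\
		do {								\
			/* restart the transaction, then retry the expr */	\
			bch2_trans_begin(_trans);				\
			_ret = (_do);						\
		} while (bch2_err_matches(_ret, BCH_ERR_transaction_restart));	\
		_ret;								\
	})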
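A note on the workqueue hunks: renaming the out-of-line allocators to alloc_workqueue_noprof()/alloc_workqueue_attrs_noprof() and re-exposing the old names as alloc_hooks() macro wrappers is the standard memory-allocation-profiling pattern from <linux/alloc_tag.h>. The macro plants an allocation tag at each call site, and the internal kzalloc_noprof()/alloc_workqueue_attrs_noprof() calls inherit that tag instead of charging everything to kernel/workqueue.c, so per-caller totals appear in /proc/allocinfo when CONFIG_MEM_ALLOC_PROFILING is enabled. Callers are source-compatible; an illustrative caller (hypothetical "example" names, not part of the patch):

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int __init example_init(void)
	{
		/*
		 * Unchanged source: alloc_workqueue() now expands to
		 * alloc_hooks(alloc_workqueue_noprof("example_wq", WQ_UNBOUND, 0)),
		 * attributing the workqueue allocation to this file and line.
		 */
		example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
		return example_wq ? 0 : -ENOMEM;
	}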