Diffstat (limited to 'fs')
-rw-r--r--   fs/bcachefs/alloc_foreground.c   | 189
-rw-r--r--   fs/bcachefs/backpointers.c       |   3
-rw-r--r--   fs/bcachefs/btree_io.c           |   1
-rw-r--r--   fs/bcachefs/btree_node_scan.c    |   6
-rw-r--r--   fs/bcachefs/btree_trans_commit.c |   2
-rw-r--r--   fs/bcachefs/btree_update.c       |  27
-rw-r--r--   fs/bcachefs/btree_update.h       |  21
-rw-r--r--   fs/bcachefs/data_update.c        |  13
-rw-r--r--   fs/bcachefs/fs.c                 |   4
-rw-r--r--   fs/bcachefs/io_write.c           |   1
-rw-r--r--   fs/bcachefs/recovery_passes.c    |   2
-rw-r--r--   fs/bcachefs/sb-members.c         |   1
-rw-r--r--   fs/bcachefs/super.c              |  10
-rw-r--r--   fs/bcachefs/trace.h              |   5
14 files changed, 180 insertions(+), 105 deletions(-)
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 5b9e64163d37..23a9fbb36f49 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -706,19 +706,13 @@ static int add_new_bucket(struct bch_fs *c,
 inline int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
                                        struct alloc_request *req,
                                        struct dev_stripe_state *stripe,
-                                       struct closure *_cl)
+                                       struct closure *cl)
 {
         struct bch_fs *c = trans->c;
-        struct closure *cl = NULL;
         int ret = 0;
 
         BUG_ON(req->nr_effective >= req->nr_replicas);
 
-        /*
-         * Try nonblocking first, so that if one device is full we'll try from
-         * other devices:
-         */
-retry_blocking:
         bch2_dev_alloc_list(c, stripe, &req->devs_may_alloc, &req->devs_sorted);
 
         darray_for_each(req->devs_sorted, i) {
@@ -751,14 +745,6 @@ retry_blocking:
 
         if (ret == 1)
                 return 0;
-
-        if (ret &&
-            !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
-            cl != _cl) {
-                cl = _cl;
-                goto retry_blocking;
-        }
-
         if (ret)
                 return ret;
         return bch_err_throw(c, insufficient_devices);
@@ -779,6 +765,12 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
         struct bch_fs *c = trans->c;
         int ret = 0;
 
+        if (req->nr_replicas < 2)
+                return 0;
+
+        if (ec_open_bucket(c, &req->ptrs))
+                return 0;
+
         struct ec_stripe_head *h =
                 bch2_ec_stripe_head_get(trans, req, 0, cl);
         if (IS_ERR(h))
@@ -895,6 +887,79 @@ unlock:
         return ret;
 }
 
+static int __open_bucket_add_buckets(struct btree_trans *trans,
+                                     struct alloc_request *req,
+                                     struct closure *_cl)
+{
+        struct bch_fs *c = trans->c;
+        struct open_bucket *ob;
+        struct closure *cl = NULL;
+        unsigned i;
+        int ret;
+
+        req->devs_may_alloc = target_rw_devs(c, req->wp->data_type, req->target);
+
+        /* Don't allocate from devices we already have pointers to: */
+        darray_for_each(*req->devs_have, i)
+                __clear_bit(*i, req->devs_may_alloc.d);
+
+        open_bucket_for_each(c, &req->ptrs, ob, i)
+                __clear_bit(ob->dev, req->devs_may_alloc.d);
+
+        ret = bucket_alloc_set_writepoint(c, req);
+        if (ret)
+                return ret;
+
+        ret = bucket_alloc_set_partial(c, req);
+        if (ret)
+                return ret;
+
+        if (req->ec) {
+                ret = bucket_alloc_from_stripe(trans, req, _cl);
+        } else {
+retry_blocking:
+                /*
+                 * Try nonblocking first, so that if one device is full we'll try from
+                 * other devices:
+                 */
+                ret = bch2_bucket_alloc_set_trans(trans, req, &req->wp->stripe, cl);
+                if (ret &&
+                    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
+                    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
+                    !cl && _cl) {
+                        cl = _cl;
+                        goto retry_blocking;
+                }
+        }
+
+        return ret;
+}
+
+static int open_bucket_add_buckets(struct btree_trans *trans,
+                                   struct alloc_request *req,
+                                   struct closure *cl)
+{
+        int ret;
+
+        if (req->ec && !ec_open_bucket(trans->c, &req->ptrs)) {
+                ret = __open_bucket_add_buckets(trans, req, cl);
+                if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+                    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
+                    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
+                    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+                        return ret;
+                if (req->nr_effective >= req->nr_replicas)
+                        return 0;
+        }
+
+        bool ec = false;
+        swap(ec, req->ec);
+        ret = __open_bucket_add_buckets(trans, req, cl);
+        swap(ec, req->ec);
+
+        return ret < 0 ? ret : 0;
+}
+
 /**
  * should_drop_bucket - check if this is open_bucket should go away
  * @ob: open_bucket to predicate on
@@ -1174,7 +1239,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
                              unsigned nr_replicas_required,
                              enum bch_watermark watermark,
                              enum bch_write_flags flags,
-                             struct closure *_cl,
+                             struct closure *cl,
                              struct write_point **wp_ret)
 {
         struct bch_fs *c = trans->c;
@@ -1190,18 +1255,15 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
         if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
                 erasure_code = false;
 
-        if (nr_replicas < 2)
-                erasure_code = false;
-
         req->nr_replicas = nr_replicas;
         req->target = target;
+        req->ec = erasure_code;
         req->watermark = watermark;
         req->flags = flags;
         req->devs_have = devs_have;
 
         BUG_ON(!nr_replicas || !nr_replicas_required);
 retry:
-        req->ec = erasure_code;
         req->ptrs.nr = 0;
         req->nr_effective = 0;
         req->have_cache = false;
@@ -1211,77 +1273,54 @@ retry:
 
         req->data_type = req->wp->data_type;
 
-        /* metadata may not allocate on cache devices: */
-        if (req->data_type != BCH_DATA_user)
-                req->have_cache = true;
-
-        /* If we're going to fall back to the whole fs, try nonblocking first */
-        struct closure *cl = req->target && !(flags & BCH_WRITE_only_specified_devs)
-                ? _cl
-                : NULL;
-
         ret = bch2_trans_relock(trans);
         if (ret)
                 goto err;
 
-        while (1) {
-                req->devs_may_alloc = target_rw_devs(c, req->wp->data_type, req->target);
-
-                /* Don't allocate from devices we already have pointers to: */
-                darray_for_each(*req->devs_have, i)
-                        __clear_bit(*i, req->devs_may_alloc.d);
-
-                open_bucket_for_each(c, &req->ptrs, ob, i)
-                        __clear_bit(ob->dev, req->devs_may_alloc.d);
-
-                ret = bucket_alloc_set_writepoint(c, req) ?:
-                        bucket_alloc_set_partial(c, req) ?:
-                        (req->ec
-                         ? bucket_alloc_from_stripe(trans, req, _cl)
-                         : bch2_bucket_alloc_set_trans(trans, req, &req->wp->stripe, cl));
+        /* metadata may not allocate on cache devices: */
+        if (req->data_type != BCH_DATA_user)
+                req->have_cache = true;
 
-                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-                        goto err;
+        if (target && !(flags & BCH_WRITE_only_specified_devs)) {
+                ret = open_bucket_add_buckets(trans, req, NULL);
+                if (!ret ||
+                    bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                        goto alloc_done;
 
                 /* Don't retry from all devices if we're out of open buckets: */
-                if (ret == -BCH_ERR_open_buckets_empty)
-                        goto retry_blocking;
-
-                if (ret == -BCH_ERR_freelist_empty) {
-                        if (req->target && !(flags & BCH_WRITE_only_specified_devs))
-                                goto retry_all;
-                        goto retry_blocking;
-                }
-
-                if (ret == -BCH_ERR_insufficient_devices && req->target)
-                        goto retry_all;
-
-                if (req->nr_effective < req->nr_replicas && req->ec) {
-                        req->ec = false;
-                        continue;
-                }
-
-                if (ret == -BCH_ERR_insufficient_devices) {
-                        if (req->nr_effective < nr_replicas_required)
-                                goto err;
-                        ret = 0;
+                if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
+                        int ret2 = open_bucket_add_buckets(trans, req, cl);
+                        if (!ret2 ||
+                            bch2_err_matches(ret2, BCH_ERR_transaction_restart) ||
+                            bch2_err_matches(ret2, BCH_ERR_open_buckets_empty)) {
+                                ret = ret2;
+                                goto alloc_done;
+                        }
                 }
 
-                BUG_ON(ret < 0);
-                break;
-retry_blocking:
-                if (cl == _cl)
-                        goto err;
-                cl = _cl;
-                continue;
-retry_all:
                 /*
                  * Only try to allocate cache (durability = 0 devices) from the
                  * specified target:
                  */
                 req->have_cache = true;
                 req->target = 0;
+
+                ret = open_bucket_add_buckets(trans, req, cl);
+        } else {
+                ret = open_bucket_add_buckets(trans, req, cl);
         }
+alloc_done:
+        BUG_ON(!ret && req->nr_effective < req->nr_replicas);
+
+        if (erasure_code && !ec_open_bucket(c, &req->ptrs))
+                pr_debug("failed to get ec bucket: ret %u", ret);
+
+        if (ret == -BCH_ERR_insufficient_devices &&
+            req->nr_effective >= nr_replicas_required)
+                ret = 0;
+
+        if (ret)
+                goto err;
 
         if (req->nr_effective > req->nr_replicas)
                 deallocate_extra_replicas(c, req);
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 77d93beb3c8f..bc277f42cf5f 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -144,7 +144,8 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
         if (!will_check && __bch2_inconsistent_error(c, &buf))
                 ret = bch_err_throw(c, erofs_unfixed_errors);
 
-        bch_err(c, "%s", buf.buf);
+        if (buf.buf)
+                bch_err(c, "%s", buf.buf);
         printbuf_exit(&buf);
         return ret;
 }
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index b30799e494eb..8924dae15d41 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -24,6 +24,7 @@
 #include "super-io.h"
 #include "trace.h"
 
+#include <linux/moduleparam.h>
 #include <linux/sched/mm.h>
 
 #ifdef CONFIG_BCACHEFS_DEBUG
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index 365808b4b7c0..42c9eb2c786e 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -226,15 +226,17 @@ static int read_btree_nodes_worker(void *p)
         struct bch_fs *c = container_of(w->f, struct bch_fs, found_btree_nodes);
         struct bch_dev *ca = w->ca;
         unsigned long last_print = jiffies;
+        struct btree *b = NULL;
+        struct bio *bio = NULL;
 
-        struct btree *b = __bch2_btree_node_mem_alloc(c);
+        b = __bch2_btree_node_mem_alloc(c);
         if (!b) {
                 bch_err(c, "read_btree_nodes_worker: error allocating buf");
                 w->f->ret = -ENOMEM;
                 goto err;
         }
 
-        struct bio *bio = bio_alloc(NULL, buf_pages(b->data, c->opts.btree_node_size), 0, GFP_KERNEL);
+        bio = bio_alloc(NULL, buf_pages(b->data, c->opts.btree_node_size), 0, GFP_KERNEL);
         if (!bio) {
                 bch_err(c, "read_btree_nodes_worker: error allocating bio");
                 w->f->ret = -ENOMEM;
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 7fcf248a9a76..a7e9d8916848 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -1008,7 +1008,7 @@ do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
         return 0;
 }
 
-int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
+int __bch2_trans_commit(struct btree_trans *trans, enum bch_trans_commit_flags flags)
 {
         struct btree_insert_entry *errored_at = NULL;
         struct bch_fs *c = trans->c;
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index 5d9e02370aff..7983c4940b3b 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -661,21 +661,22 @@ int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
  * @k:                  key to insert
  * @disk_res:           must be non-NULL whenever inserting or potentially
  *                      splitting data extents
- * @flags:              transaction commit flags
+ * @commit_flags:       transaction commit flags
  * @iter_flags:         btree iter update trigger flags
  *
  * Returns:             0 on success, error code on failure
  */
 int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k,
-                      struct disk_reservation *disk_res, int flags,
+                      struct disk_reservation *disk_res,
+                      enum bch_trans_commit_flags commit_flags,
                       enum btree_iter_update_trigger_flags iter_flags)
 {
-        return bch2_trans_commit_do(c, disk_res, NULL, flags,
+        return bch2_trans_commit_do(c, disk_res, NULL, commit_flags,
                                     bch2_btree_insert_trans(trans, id, k, iter_flags));
 }
 
-int bch2_btree_delete_at(struct btree_trans *trans,
-                         struct btree_iter *iter, unsigned update_flags)
+int bch2_btree_delete_at(struct btree_trans *trans, struct btree_iter *iter,
+                         enum btree_iter_update_trigger_flags flags)
 {
         struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
         int ret = PTR_ERR_OR_ZERO(k);
@@ -684,12 +685,12 @@ int bch2_btree_delete_at(struct btree_trans *trans,
                 return ret;
 
         bkey_init(&k->k);
         k->k.p = iter->pos;
-        return bch2_trans_update(trans, iter, k, update_flags);
+        return bch2_trans_update(trans, iter, k, flags);
 }
 
 int bch2_btree_delete(struct btree_trans *trans,
                       enum btree_id btree, struct bpos pos,
-                      unsigned update_flags)
+                      enum btree_iter_update_trigger_flags flags)
 {
         struct btree_iter iter;
         int ret;
@@ -698,7 +699,7 @@ int bch2_btree_delete(struct btree_trans *trans,
                              BTREE_ITER_cached|
                              BTREE_ITER_intent);
         ret = bch2_btree_iter_traverse(trans, &iter) ?:
-                bch2_btree_delete_at(trans, &iter, update_flags);
+                bch2_btree_delete_at(trans, &iter, flags);
         bch2_trans_iter_exit(trans, &iter);
 
         return ret;
@@ -706,7 +707,7 @@ int bch2_btree_delete(struct btree_trans *trans,
 
 int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
                                   struct bpos start, struct bpos end,
-                                  unsigned update_flags,
+                                  enum btree_iter_update_trigger_flags flags,
                                   u64 *journal_seq)
 {
         u32 restart_count = trans->restart_count;
@@ -714,7 +715,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
         struct bkey_s_c k;
         int ret = 0;
 
-        bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent);
+        bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent|flags);
         while ((k = bch2_btree_iter_peek_max(trans, &iter, end)).k) {
                 struct disk_reservation disk_res =
                         bch2_disk_reservation_init(trans->c, 0);
@@ -747,7 +748,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
                         bpos_min(end, k.k->p).offset -
                         iter.pos.offset);
 
-                ret = bch2_trans_update(trans, &iter, &delete, update_flags) ?:
+                ret = bch2_trans_update(trans, &iter, &delete, flags) ?:
                         bch2_trans_commit(trans, &disk_res, journal_seq,
                                           BCH_TRANS_COMMIT_no_enospc);
                 bch2_disk_reservation_put(trans->c, &disk_res);
@@ -777,12 +778,12 @@ err:
  */
 int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
                             struct bpos start, struct bpos end,
-                            unsigned update_flags,
+                            enum btree_iter_update_trigger_flags flags,
                             u64 *journal_seq)
 {
         int ret = bch2_trans_run(c,
                         bch2_btree_delete_range_trans(trans, id, start, end,
-                                                      update_flags, journal_seq));
+                                                      flags, journal_seq));
         if (ret == -BCH_ERR_transaction_restart_nested)
                 ret = 0;
         return ret;
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index 2c6f9b44d888..222a9f8ffbd5 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -47,22 +47,27 @@ enum bch_trans_commit_flags {
 
 void bch2_trans_commit_flags_to_text(struct printbuf *, enum bch_trans_commit_flags);
 
-int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
-int bch2_btree_delete(struct btree_trans *, enum btree_id, struct bpos, unsigned);
+int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *,
+                         enum btree_iter_update_trigger_flags);
+int bch2_btree_delete(struct btree_trans *, enum btree_id, struct bpos,
+                      enum btree_iter_update_trigger_flags);
 
 int bch2_btree_insert_nonextent(struct btree_trans *, enum btree_id,
                                 struct bkey_i *, enum btree_iter_update_trigger_flags);
 
 int bch2_btree_insert_trans(struct btree_trans *, enum btree_id, struct bkey_i *,
                             enum btree_iter_update_trigger_flags);
-int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *, struct
-                      disk_reservation *, int flags, enum
-                      btree_iter_update_trigger_flags iter_flags);
+int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
+                      struct disk_reservation *,
+                      enum bch_trans_commit_flags,
+                      enum btree_iter_update_trigger_flags);
 
 int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id,
-                                  struct bpos, struct bpos, unsigned, u64 *);
+                                  struct bpos, struct bpos,
+                                  enum btree_iter_update_trigger_flags, u64 *);
 int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
-                            struct bpos, struct bpos, unsigned, u64 *);
+                            struct bpos, struct bpos,
+                            enum btree_iter_update_trigger_flags, u64 *);
 
 int bch2_btree_bit_mod_iter(struct btree_trans *, struct btree_iter *, bool);
 int bch2_btree_bit_mod(struct btree_trans *, enum btree_id, struct bpos, bool);
@@ -226,7 +231,7 @@ static inline int __must_check bch2_trans_update_buffered(struct btree_trans *tr
 
 void bch2_trans_commit_hook(struct btree_trans *,
                             struct btree_trans_commit_hook *);
-int __bch2_trans_commit(struct btree_trans *, unsigned);
+int __bch2_trans_commit(struct btree_trans *, enum bch_trans_commit_flags);
 
 int bch2_trans_log_str(struct btree_trans *, const char *);
 int bch2_trans_log_msg(struct btree_trans *, struct printbuf *);
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index e848e210a9bf..3968f3be7f3b 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -783,6 +783,9 @@ static int can_write_extent(struct bch_fs *c, struct data_update *m)
         darray_for_each(m->op.devs_have, i)
                 __clear_bit(*i, devs.d);
 
+        CLASS(printbuf, buf)();
+        buf.atomic++;
+
         guard(rcu)();
 
         unsigned nr_replicas = 0, i;
@@ -794,7 +797,11 @@ static int can_write_extent(struct bch_fs *c, struct data_update *m)
                 struct bch_dev_usage usage;
                 bch2_dev_usage_read_fast(ca, &usage);
 
-                if (!dev_buckets_free(ca, usage, m->op.watermark))
+                u64 nr_free = dev_buckets_free(ca, usage, m->op.watermark);
+
+                prt_printf(&buf, "%s=%llu ", ca->name, nr_free);
+
+                if (!nr_free)
                         continue;
 
                 nr_replicas += ca->mi.durability;
@@ -802,8 +809,10 @@ static int can_write_extent(struct bch_fs *c, struct data_update *m)
                         break;
         }
 
-        if (!nr_replicas)
+        if (!nr_replicas) {
+                trace_data_update_done_no_rw_devs(c, buf.buf);
                 return bch_err_throw(c, data_update_done_no_rw_devs);
+        }
         if (nr_replicas < m->op.nr_replicas)
                 return bch_err_throw(c, insufficient_devices);
         return 0;
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 3b0783f117ae..7fd2551372d7 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -1010,6 +1010,10 @@ err_tx_restart:
                 goto err;
         }
 
+        BUG_ON(src_inode->ei_inum.inum != src_inode_u.bi_inum);
+        BUG_ON(dst_inode &&
+               dst_inode->ei_inum.inum != dst_inode_u.bi_inum);
+
         BUG_ON(src_inode->v.i_ino != src_inode_u.bi_inum);
         BUG_ON(dst_inode &&
                dst_inode->v.i_ino != dst_inode_u.bi_inum);
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 88b1eec8eff3..fa077341d2ef 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -32,6 +32,7 @@
 #include "trace.h"
 
 #include <linux/blkdev.h>
+#include <linux/moduleparam.h>
 #include <linux/prefetch.h>
 #include <linux/random.h>
 #include <linux/sched/mm.h>
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
index c09ed2dd4639..6a039e011064 100644
--- a/fs/bcachefs/recovery_passes.c
+++ b/fs/bcachefs/recovery_passes.c
@@ -360,7 +360,7 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c,
                 !(r->passes_complete & BIT_ULL(pass));
         bool ratelimit = flags & RUN_RECOVERY_PASS_ratelimit;
 
-        if (!(in_recovery && (flags & RUN_RECOVERY_PASS_nopersistent))) {
+        if (!(flags & RUN_RECOVERY_PASS_nopersistent)) {
                 struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
                 __set_bit_le64(bch2_recovery_pass_to_stable(pass), ext->recovery_passes_required);
         }
diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c
index f2abe92ca130..340d4fb7f9b6 100644
--- a/fs/bcachefs/sb-members.c
+++ b/fs/bcachefs/sb-members.c
@@ -20,6 +20,7 @@ int bch2_dev_missing_bkey(struct bch_fs *c, struct bkey_s_c k, unsigned dev)
         prt_printf(&buf, "pointer to %s device %u in key\n",
                    removed ? "removed" : "nonexistent", dev);
         bch2_bkey_val_to_text(&buf, c, k);
+        prt_newline(&buf);
 
         bool print = removed
                 ? bch2_count_fsck_err(c, ptr_to_removed_device, &buf)
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 6980cd5b0ca8..a3438b0dc0a9 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -1974,11 +1974,15 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
         ca->disk_sb.sb->dev_idx = dev_idx;
         bch2_dev_attach(c, ca, dev_idx);
 
+        set_bit(ca->dev_idx, c->online_devs.d);
+
         if (BCH_MEMBER_GROUP(&dev_mi)) {
                 ret = __bch2_dev_group_set(c, ca, label.buf);
                 bch_err_msg(c, ret, "creating new label");
-                if (ret)
-                        goto err_unlock;
+                if (ret) {
+                        mutex_unlock(&c->sb_lock);
+                        goto err_late;
+                }
         }
 
         bch2_write_super(c);
@@ -2526,6 +2530,8 @@ static int bch2_param_get_static_key_t(char *buffer, const struct kernel_param *
         return sprintf(buffer, "%c\n", static_key_enabled(key) ? 'N' : 'Y');
 }
 
+/* this is unused in userspace - silence the warning */
+__maybe_unused
 static const struct kernel_param_ops bch2_param_ops_static_key_t = {
         .flags = KERNEL_PARAM_OPS_FL_NOARG,
         .set = bch2_param_set_static_key_t,
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index b5dae1145afa..9324ef32903d 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -1330,6 +1330,11 @@ DEFINE_EVENT(fs_str, data_update,
         TP_ARGS(c, str)
 );
 
+DEFINE_EVENT(fs_str, data_update_done_no_rw_devs,
+        TP_PROTO(struct bch_fs *c, const char *str),
+        TP_ARGS(c, str)
+);
+
 DEFINE_EVENT(fs_str, io_move_pred,
         TP_PROTO(struct bch_fs *c, const char *str),
         TP_ARGS(c, str)