diff options
-rw-r--r--  .bcachefs_revision       |   2
-rw-r--r--  Makefile                 |   1
-rw-r--r--  libbcachefs/Makefile     |   5
-rw-r--r--  libbcachefs/async_objs.c | 145
-rw-r--r--  libbcachefs/fast_list.c  | 171
5 files changed, 319 insertions, 5 deletions
diff --git a/.bcachefs_revision b/.bcachefs_revision index 21d54840..8c2da63f 100644 --- a/.bcachefs_revision +++ b/.bcachefs_revision @@ -1 +1 @@ -ff14f7886ed9adc133777d5cdba1e3101856f29a +68d390aa7f67b4ffb92497e0774560fc9ee5d188 @@ -228,7 +228,6 @@ update-bcachefs-sources: test -d libbcachefs || mkdir libbcachefs cp $(LINUX_DIR)/fs/bcachefs/*.[ch] libbcachefs/ cp $(LINUX_DIR)/fs/bcachefs/Makefile libbcachefs/ - rm libbcachefs/fast_list.c libbcachefs/async_objs.c git add libbcachefs/*.[ch] git add libbcachefs/Makefile git rm -f libbcachefs/mean_and_variance_test.c diff --git a/libbcachefs/Makefile b/libbcachefs/Makefile index bb2a80fb..a4258615 100644 --- a/libbcachefs/Makefile +++ b/libbcachefs/Makefile @@ -5,6 +5,7 @@ bcachefs-y := \ acl.o \ alloc_background.o \ alloc_foreground.o \ + async_objs.o \ backpointers.o \ bkey.o \ bkey_methods.o \ @@ -41,6 +42,7 @@ bcachefs-y := \ extents.o \ extent_update.o \ eytzinger.o \ + fast_list.o \ fs.o \ fs-ioctl.o \ fs-io.o \ @@ -98,9 +100,6 @@ bcachefs-y := \ varint.o \ xattr.o -bcachefs-$(CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS) += fast_list.o -bcachefs-$(CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS) += async_objs.o - obj-$(CONFIG_MEAN_AND_VARIANCE_UNIT_TEST) += mean_and_variance_test.o # Silence "note: xyz changed in GCC X.X" messages diff --git a/libbcachefs/async_objs.c b/libbcachefs/async_objs.c new file mode 100644 index 00000000..bd935782 --- /dev/null +++ b/libbcachefs/async_objs.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS + +/* + * Async obj debugging: keep asynchronous objects on (very fast) lists, make + * them visibile in debugfs: + */ + +#include "bcachefs.h" +#include "async_objs.h" +#include "btree_io.h" +#include "debug.h" +#include "io_read.h" +#include "io_write.h" + +#include <linux/debugfs.h> + +static void promote_obj_to_text(struct printbuf *out, + struct bch_fs *c, + void *obj) +{ + bch2_promote_op_to_text(out, c, obj); +} + +static void rbio_obj_to_text(struct 
printbuf *out, + struct bch_fs *c, + void *obj) +{ + bch2_read_bio_to_text(out, c, obj); +} + +static void write_op_obj_to_text(struct printbuf *out, + struct bch_fs *c, + void *obj) +{ + bch2_write_op_to_text(out, obj); +} + +static void btree_read_bio_obj_to_text(struct printbuf *out, + struct bch_fs *c, + void *obj) +{ + struct btree_read_bio *rbio = obj; + bch2_btree_read_bio_to_text(out, rbio); +} + +static void btree_write_bio_obj_to_text(struct printbuf *out, + struct bch_fs *c, + void *obj) +{ + struct btree_write_bio *wbio = obj; + bch2_bio_to_text(out, &wbio->wbio.bio); +} + +static int bch2_async_obj_list_open(struct inode *inode, struct file *file) +{ + struct async_obj_list *list = inode->i_private; + struct dump_iter *i; + + i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL); + if (!i) + return -ENOMEM; + + file->private_data = i; + i->from = POS_MIN; + i->iter = 0; + i->c = container_of(list, struct bch_fs, async_objs[list->idx]); + i->list = list; + i->buf = PRINTBUF; + return 0; +} + +static ssize_t bch2_async_obj_list_read(struct file *file, char __user *buf, + size_t size, loff_t *ppos) +{ + struct dump_iter *i = file->private_data; + struct async_obj_list *list = i->list; + ssize_t ret = 0; + + i->ubuf = buf; + i->size = size; + i->ret = 0; + + struct genradix_iter iter; + void *obj; + fast_list_for_each_from(&list->list, iter, obj, i->iter) { + ret = bch2_debugfs_flush_buf(i); + if (ret) + return ret; + + if (!i->size) + break; + + list->obj_to_text(&i->buf, i->c, obj); + i->iter = iter.pos; + } + + if (i->buf.allocation_failure) + ret = -ENOMEM; + + if (!ret) + ret = bch2_debugfs_flush_buf(i); + + return ret ?: i->ret; +} + +static const struct file_operations async_obj_ops = { + .owner = THIS_MODULE, + .open = bch2_async_obj_list_open, + .release = bch2_dump_release, + .read = bch2_async_obj_list_read, +}; + +void bch2_fs_async_obj_debugfs_init(struct bch_fs *c) +{ + c->async_obj_dir = debugfs_create_dir("async_objs", c->fs_debug_dir); + 
+#define x(n) debugfs_create_file(#n, 0400, c->async_obj_dir, \ + &c->async_objs[BCH_ASYNC_OBJ_LIST_##n], &async_obj_ops); + BCH_ASYNC_OBJ_LISTS() +#undef x +} + +void bch2_fs_async_obj_exit(struct bch_fs *c) +{ + for (unsigned i = 0; i < ARRAY_SIZE(c->async_objs); i++) + fast_list_exit(&c->async_objs[i].list); +} + +int bch2_fs_async_obj_init(struct bch_fs *c) +{ + for (unsigned i = 0; i < ARRAY_SIZE(c->async_objs); i++) { + if (fast_list_init(&c->async_objs[i].list)) + return -BCH_ERR_ENOMEM_async_obj_init; + c->async_objs[i].idx = i; + } + +#define x(n) c->async_objs[BCH_ASYNC_OBJ_LIST_##n].obj_to_text = n##_obj_to_text; + BCH_ASYNC_OBJ_LISTS() +#undef x + + return 0; +} + +#endif /* CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS */ diff --git a/libbcachefs/fast_list.c b/libbcachefs/fast_list.c new file mode 100644 index 00000000..de2947cd --- /dev/null +++ b/libbcachefs/fast_list.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS + +/* + * Fast, unordered lists + * + * Supports add, remove, and iterate + * + * Underneath, they're a radix tree and an IDA, with a percpu buffer for slot + * allocation and freeing. + * + * This means that adding, removing, and iterating over items is lockless, + * except when refilling/emptying the percpu slot buffers. 
+ */ + +#include "fast_list.h" + +struct fast_list_pcpu { + u32 nr; + u32 entries[31]; +}; + +static int fast_list_alloc_idx(struct fast_list *l, gfp_t gfp) +{ + int idx = ida_alloc_range(&l->slots_allocated, 1, INT_MAX, gfp); + if (unlikely(idx < 0)) + return 0; + + if (unlikely(!genradix_ptr_alloc_inlined(&l->items, idx, gfp))) { + ida_free(&l->slots_allocated, idx); + return 0; + } + + return idx; +} + +/** + * fast_list_get_idx - get a slot in a fast_list + * @l: list to get slot in + * + * This allocates a slot in the radix tree without storing to it, so that we can + * take the potential memory allocation failure early and do the list add later + * when we can't take an allocation failure. + * + * Returns: positive integer on success, -ENOMEM on failure + */ +int fast_list_get_idx(struct fast_list *l) +{ + unsigned long flags; + int idx; +retry: + local_irq_save(flags); + struct fast_list_pcpu *lp = this_cpu_ptr(l->buffer); + + if (unlikely(!lp->nr)) { + u32 entries[16], nr = 0; + + local_irq_restore(flags); + while (nr < ARRAY_SIZE(entries) && + (idx = fast_list_alloc_idx(l, GFP_KERNEL))) + entries[nr++] = idx; + local_irq_save(flags); + + lp = this_cpu_ptr(l->buffer); + + while (nr && lp->nr < ARRAY_SIZE(lp->entries)) + lp->entries[lp->nr++] = entries[--nr]; + + if (unlikely(nr)) { + local_irq_restore(flags); + while (nr) + ida_free(&l->slots_allocated, entries[--nr]); + goto retry; + } + + if (unlikely(!lp->nr)) { + local_irq_restore(flags); + return -ENOMEM; + } + } + + idx = lp->entries[--lp->nr]; + local_irq_restore(flags); + + return idx; +} + +/** + * fast_list_add - add an item to a fast_list + * @l: list + * @item: item to add + * + * Allocates a slot in the radix tree and stores to it and then returns the + * slot index, which must be passed to fast_list_remove(). 
+ * + * Returns: positive integer on success, -ENOMEM on failure + */ +int fast_list_add(struct fast_list *l, void *item) +{ + int idx = fast_list_get_idx(l); + if (idx < 0) + return idx; + + *genradix_ptr_inlined(&l->items, idx) = item; + return idx; +} + +/** + * fast_list_remove - remove an item from a fast_list + * @l: list + * @idx: item's slot index + * + * Zeroes out the slot in the radix tree and frees the slot for future + * fast_list_add() operations. + */ +void fast_list_remove(struct fast_list *l, unsigned idx) +{ + u32 entries[16], nr = 0; + + if (!idx) + return; + + *genradix_ptr_inlined(&l->items, idx) = NULL; + + scoped_guard(irqsave) { + struct fast_list_pcpu *lp = this_cpu_ptr(l->buffer); + + if (unlikely(lp->nr == ARRAY_SIZE(lp->entries))) + while (nr < ARRAY_SIZE(entries)) + entries[nr++] = lp->entries[--lp->nr]; + + lp->entries[lp->nr++] = idx; + } + + if (unlikely(nr)) + while (nr) + ida_free(&l->slots_allocated, entries[--nr]); +} + +void fast_list_exit(struct fast_list *l) +{ + if (l->buffer) { + int cpu; + for_each_possible_cpu(cpu) { + struct fast_list_pcpu *lp = per_cpu_ptr(l->buffer, cpu); + + while (lp->nr) + ida_free(&l->slots_allocated, lp->entries[--lp->nr]); + } + + free_percpu(l->buffer); + } + + WARN(ida_find_first(&l->slots_allocated) >= 0, + "fast_list still has objects on exit\n"); + + ida_destroy(&l->slots_allocated); + genradix_free(&l->items); +} + +int fast_list_init(struct fast_list *l) +{ + genradix_init(&l->items); + ida_init(&l->slots_allocated); + l->buffer = alloc_percpu(*l->buffer); + if (!l->buffer) + return -ENOMEM; + return 0; +} + +#endif /* CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS */ |