author | Kent Overstreet <kent.overstreet@gmail.com> | 2017-03-19 15:56:34 -0800
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2017-03-19 17:31:47 -0800
commit | 5ec39af8eaba49aee7bafa44c661da39e2f40dc3 (patch)
tree | 1fb1a981602cbf22c7d2b2dba1168c715d7cecb5
parent | bb1941de5378a7b8122d3575dcbc7d0aeb6326f0 (diff)
Rename from bcache-tools to bcachefs-tools
-rw-r--r-- | .bcache_revision | 1
-rw-r--r-- | .bcachefs_revision | 1
-rw-r--r-- | .gitignore | 5
-rw-r--r-- | Makefile | 73
-rw-r--r-- | README | 14
-rw-r--r-- | bcache-register | 4
-rw-r--r-- | bcachefs-userspace-shim.c (renamed from bcache-userspace-shim.c) | 29
-rw-r--r-- | bcachefs.8 (renamed from bcache.8) | 10
-rw-r--r-- | bcachefs.c (renamed from bcache.c) | 8
-rw-r--r-- | cmd_assemble.c | 2
-rw-r--r-- | cmd_debug.c | 38
-rw-r--r-- | cmd_device.c | 32
-rw-r--r-- | cmd_format.c | 24
-rw-r--r-- | cmd_fsck.c | 12
-rw-r--r-- | cmd_key.c | 2
-rw-r--r-- | cmd_migrate.c | 87
-rw-r--r-- | cmd_run.c | 2
-rw-r--r-- | crypto.c | 16
-rw-r--r-- | debian/.gitignore | 2
-rw-r--r-- | debian/bcache-tools.preinst | 25
-rw-r--r-- | debian/bcachefs-tools.dirs (renamed from debian/bcache-tools.dirs) | 0
-rw-r--r-- | debian/changelog | 8
-rw-r--r-- | debian/control | 20
-rw-r--r-- | debian/watch | 5
-rwxr-xr-x | fsck.bcache | 3
-rwxr-xr-x | fsck.bcachefs | 3
-rw-r--r-- | include/linux/blkdev.h | 6
-rw-r--r-- | include/linux/closure.h (renamed from libbcache/closure.h) | 16
-rw-r--r-- | include/trace/events/bcachefs.h (renamed from include/trace/events/bcache.h) | 672
-rw-r--r-- | libbcache/blockdev.c | 819
-rw-r--r-- | libbcache/blockdev.h | 134
-rw-r--r-- | libbcache/blockdev_types.h | 123
-rw-r--r-- | libbcache/chardev.h | 30
-rw-r--r-- | libbcache/clock.h | 23
-rw-r--r-- | libbcache/compress.h | 15
-rw-r--r-- | libbcache/debug.h | 65
-rw-r--r-- | libbcache/dirent.h | 36
-rw-r--r-- | libbcache/error.c | 140
-rw-r--r-- | libbcache/fs-gc.h | 7
-rw-r--r-- | libbcache/fs-io.h | 96
-rw-r--r-- | libbcache/io.h | 90
-rw-r--r-- | libbcache/keybuf.c | 195
-rw-r--r-- | libbcache/keybuf.h | 16
-rw-r--r-- | libbcache/keybuf_types.h | 33
-rw-r--r-- | libbcache/migrate.h | 8
-rw-r--r-- | libbcache/notify.c | 105
-rw-r--r-- | libbcache/notify.h | 34
-rw-r--r-- | libbcache/request.c | 809
-rw-r--r-- | libbcache/request.h | 16
-rw-r--r-- | libbcache/stats.c | 219
-rw-r--r-- | libbcache/stats.h | 68
-rw-r--r-- | libbcache/stats_types.h | 56
-rw-r--r-- | libbcache/super.h | 136
-rw-r--r-- | libbcache/tier.h | 8
-rw-r--r-- | libbcache/writeback.c | 657
-rw-r--r-- | libbcache/writeback.h | 122
-rw-r--r-- | libbcache/xattr.h | 20
-rw-r--r-- | libbcachefs.c (renamed from libbcache.c) | 22
-rw-r--r-- | libbcachefs.h (renamed from libbcache.h) | 7
-rw-r--r-- | libbcachefs/acl.c (renamed from libbcache/acl.c) | 24
-rw-r--r-- | libbcachefs/acl.h (renamed from libbcache/acl.h) | 8
-rw-r--r-- | libbcachefs/alloc.c (renamed from libbcache/alloc.c) | 316
-rw-r--r-- | libbcachefs/alloc.h (renamed from libbcache/alloc.h) | 28
-rw-r--r-- | libbcachefs/alloc_types.h (renamed from libbcache/alloc_types.h) | 0
-rw-r--r-- | libbcachefs/bcachefs.h (renamed from libbcache/bcache.h) | 83
-rw-r--r-- | libbcachefs/bcachefs_format.h (renamed from include/linux/bcache.h) | 113
-rw-r--r-- | libbcachefs/bcachefs_ioctl.h (renamed from include/linux/bcache-ioctl.h) | 2
-rw-r--r-- | libbcachefs/bkey.c (renamed from libbcache/bkey.c) | 129
-rw-r--r-- | libbcachefs/bkey.h (renamed from libbcache/bkey.h) | 93
-rw-r--r-- | libbcachefs/bkey_methods.c (renamed from libbcache/bkey_methods.c) | 50
-rw-r--r-- | libbcachefs/bkey_methods.h (renamed from libbcache/bkey_methods.h) | 22
-rw-r--r-- | libbcachefs/bset.c (renamed from libbcache/bset.c) | 261
-rw-r--r-- | libbcachefs/bset.h (renamed from libbcache/bset.h) | 138
-rw-r--r-- | libbcachefs/btree_cache.c (renamed from libbcache/btree_cache.c) | 118
-rw-r--r-- | libbcachefs/btree_cache.h (renamed from libbcache/btree_cache.h) | 28
-rw-r--r-- | libbcachefs/btree_gc.c (renamed from libbcache/btree_gc.c) | 215
-rw-r--r-- | libbcachefs/btree_gc.h (renamed from libbcache/btree_gc.h) | 18
-rw-r--r-- | libbcachefs/btree_io.c (renamed from libbcache/btree_io.c) | 227
-rw-r--r-- | libbcachefs/btree_io.h (renamed from libbcache/btree_io.h) | 30
-rw-r--r-- | libbcachefs/btree_iter.c (renamed from libbcache/btree_iter.c) | 203
-rw-r--r-- | libbcachefs/btree_iter.h (renamed from libbcache/btree_iter.h) | 96
-rw-r--r-- | libbcachefs/btree_locking.h (renamed from libbcache/btree_locking.h) | 13
-rw-r--r-- | libbcachefs/btree_types.h (renamed from libbcache/btree_types.h) | 14
-rw-r--r-- | libbcachefs/btree_update.c (renamed from libbcache/btree_update.c) | 599
-rw-r--r-- | libbcachefs/btree_update.h (renamed from libbcache/btree_update.h) | 55
-rw-r--r-- | libbcachefs/buckets.c (renamed from libbcache/buckets.c) | 114
-rw-r--r-- | libbcachefs/buckets.h (renamed from libbcache/buckets.h) | 46
-rw-r--r-- | libbcachefs/buckets_types.h (renamed from libbcache/buckets_types.h) | 0
-rw-r--r-- | libbcachefs/chardev.c (renamed from libbcache/chardev.c) | 126
-rw-r--r-- | libbcachefs/chardev.h | 30
-rw-r--r-- | libbcachefs/checksum.c (renamed from libbcache/checksum.c) | 109
-rw-r--r-- | libbcachefs/checksum.h (renamed from libbcache/checksum.h) | 46
-rw-r--r-- | libbcachefs/clock.c (renamed from libbcache/clock.c) | 24
-rw-r--r-- | libbcachefs/clock.h | 23
-rw-r--r-- | libbcachefs/clock_types.h (renamed from libbcache/clock_types.h) | 0
-rw-r--r-- | libbcachefs/compress.c (renamed from libbcache/compress.c) | 36
-rw-r--r-- | libbcachefs/compress.h | 15
-rw-r--r-- | libbcachefs/debug.c (renamed from libbcache/debug.c) | 152
-rw-r--r-- | libbcachefs/debug.h | 62
-rw-r--r-- | libbcachefs/dirent.c (renamed from libbcache/dirent.c) | 136
-rw-r--r-- | libbcachefs/dirent.h | 36
-rw-r--r-- | libbcachefs/error.c | 51
-rw-r--r-- | libbcachefs/error.h (renamed from libbcache/error.h) | 71
-rw-r--r-- | libbcachefs/extents.c (renamed from libbcache/extents.c) | 521
-rw-r--r-- | libbcachefs/extents.h (renamed from libbcache/extents.h) | 58
-rw-r--r-- | libbcachefs/eytzinger.h (renamed from libbcache/eytzinger.h) | 0
-rw-r--r-- | libbcachefs/fifo.h (renamed from libbcache/fifo.h) | 0
-rw-r--r-- | libbcachefs/fs-gc.c (renamed from libbcache/fs-gc.c) | 120
-rw-r--r-- | libbcachefs/fs-gc.h | 7
-rw-r--r-- | libbcachefs/fs-io.c (renamed from libbcache/fs-io.c) | 704
-rw-r--r-- | libbcachefs/fs-io.h | 96
-rw-r--r-- | libbcachefs/fs.c (renamed from libbcache/fs.c) | 528
-rw-r--r-- | libbcachefs/fs.h (renamed from libbcache/fs.h) | 16
-rw-r--r-- | libbcachefs/inode.c (renamed from libbcache/inode.c) | 90
-rw-r--r-- | libbcachefs/inode.h (renamed from libbcache/inode.h) | 22
-rw-r--r-- | libbcachefs/io.c (renamed from libbcache/io.c) | 374
-rw-r--r-- | libbcachefs/io.h | 87
-rw-r--r-- | libbcachefs/io_types.h (renamed from libbcache/io_types.h) | 2
-rw-r--r-- | libbcachefs/journal.c (renamed from libbcache/journal.c) | 300
-rw-r--r-- | libbcachefs/journal.h (renamed from libbcache/journal.h) | 92
-rw-r--r-- | libbcachefs/journal_types.h (renamed from libbcache/journal_types.h) | 0
-rw-r--r-- | libbcachefs/keylist.c (renamed from libbcache/keylist.c) | 10
-rw-r--r-- | libbcachefs/keylist.h (renamed from libbcache/keylist.h) | 22
-rw-r--r-- | libbcachefs/keylist_types.h (renamed from libbcache/keylist_types.h) | 0
-rw-r--r-- | libbcachefs/migrate.c (renamed from libbcache/migrate.c) | 92
-rw-r--r-- | libbcachefs/migrate.h | 8
-rw-r--r-- | libbcachefs/move.c (renamed from libbcache/move.c) | 94
-rw-r--r-- | libbcachefs/move.h (renamed from libbcache/move.h) | 12
-rw-r--r-- | libbcachefs/move_types.h (renamed from libbcache/move_types.h) | 0
-rw-r--r-- | libbcachefs/movinggc.c (renamed from libbcache/movinggc.c) | 68
-rw-r--r-- | libbcachefs/movinggc.h (renamed from libbcache/movinggc.h) | 6
-rw-r--r-- | libbcachefs/opts.c (renamed from libbcache/opts.c) | 64
-rw-r--r-- | libbcachefs/opts.h (renamed from libbcache/opts.h) | 44
-rw-r--r-- | libbcachefs/siphash.c (renamed from libbcache/siphash.c) | 0
-rw-r--r-- | libbcachefs/siphash.h (renamed from libbcache/siphash.h) | 0
-rw-r--r-- | libbcachefs/six.c (renamed from libbcache/six.c) | 0
-rw-r--r-- | libbcachefs/six.h (renamed from libbcache/six.h) | 0
-rw-r--r-- | libbcachefs/str_hash.h (renamed from libbcache/str_hash.h) | 105
-rw-r--r-- | libbcachefs/super-io.c (renamed from libbcache/super-io.c) | 135
-rw-r--r-- | libbcachefs/super-io.h (renamed from libbcache/super-io.h) | 74
-rw-r--r-- | libbcachefs/super.c (renamed from libbcache/super.c) | 771
-rw-r--r-- | libbcachefs/super.h | 130
-rw-r--r-- | libbcachefs/super_types.h (renamed from libbcache/super_types.h) | 0
-rw-r--r-- | libbcachefs/sysfs.c (renamed from libbcache/sysfs.c) | 555
-rw-r--r-- | libbcachefs/sysfs.h (renamed from libbcache/sysfs.h) | 2
-rw-r--r-- | libbcachefs/tier.c (renamed from libbcache/tier.c) | 62
-rw-r--r-- | libbcachefs/tier.h | 8
-rw-r--r-- | libbcachefs/trace.c (renamed from libbcache/trace.c) | 6
-rw-r--r-- | libbcachefs/util.c (renamed from libbcache/util.c) | 52
-rw-r--r-- | libbcachefs/util.h (renamed from libbcache/util.h) | 85
-rw-r--r-- | libbcachefs/vstructs.h (renamed from libbcache/vstructs.h) | 0
-rw-r--r-- | libbcachefs/xattr.c (renamed from libbcache/xattr.c) | 86
-rw-r--r-- | libbcachefs/xattr.h | 20
-rw-r--r-- | linux/closure.c (renamed from libbcache/closure.c) | 11
-rwxr-xr-x | mkfs.bcache | 3
-rwxr-xr-x | mkfs.bcachefs | 3
-rw-r--r-- | tools-util.c | 12
157 files changed, 4905 insertions, 9841 deletions
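The user-visible effect of this patch is that every `bcache` subcommand becomes a `bcachefs` subcommand, and the `fsck.bcache`/`mkfs.bcache` wrappers are replaced by `fsck.bcachefs`/`mkfs.bcachefs`. A minimal sketch of the renamed workflow follows; the device paths are illustrative placeholders taken from the usage text in this patch, not a tested invocation.

```sh
# Format two devices as a tiered bcachefs filesystem
# (example devices as shown in the cmd_format.c usage text):
bcachefs format --tier 0 /dev/sdb --tier 1 /dev/sdc

# Check the filesystem for errors; the fsck.bcachefs script added here is
# just a thin wrapper that execs "bcachefs fsck":
bcachefs fsck /dev/sdb /dev/sdc
fsck.bcachefs /dev/sdb /dev/sdc

# List all subcommands (per the updated README):
bcachefs --help
```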
diff --git a/.bcache_revision b/.bcache_revision deleted file mode 100644 index 72b9b175..00000000 --- a/.bcache_revision +++ /dev/null @@ -1 +0,0 @@ -BCACHE_REVISION=84b6390084721a37c0f7a261240093ad659f9a65 diff --git a/.bcachefs_revision b/.bcachefs_revision new file mode 100644 index 00000000..e3d90e2f --- /dev/null +++ b/.bcachefs_revision @@ -0,0 +1 @@ +4fef29d8dad00d8e6192016631c5108e5e537c3c @@ -1,10 +1,7 @@ -bcache -bcache-userspace -probe-bcache +bcachefs .* *.o *.d *.a tags cscope* -bcache-tools @@ -5,7 +5,7 @@ CFLAGS+=-std=gnu99 -O2 -g -MMD -Wall \ -Wno-unused-but-set-variable \ -Wno-pointer-sign \ -fno-strict-aliasing \ - -I. -Iinclude -Ilibbcache \ + -I. -Iinclude -Ilibbcachefs \ -D_FILE_OFFSET_BITS=64 \ -D_GNU_SOURCE \ -D_LGPL_SOURCE \ @@ -38,49 +38,44 @@ else endif .PHONY: all -all: bcache +all: bcachefs -CCANSRCS=$(wildcard ccan/*/*.c) -CCANOBJS=$(patsubst %.c,%.o,$(CCANSRCS)) +SRCS=bcachefs.c \ + bcachefs-userspace-shim.c \ + cmd_assemble.c \ + cmd_debug.c \ + cmd_device.c \ + cmd_fs.c \ + cmd_fsck.c \ + cmd_format.c \ + cmd_key.c \ + cmd_migrate.c \ + cmd_run.c \ + crypto.c \ + libbcachefs.c \ + qcow2.c \ + tools-util.c \ + $(wildcard linux/*.c linux/*/*.c) \ + $(wildcard ccan/*/*.c) -# Linux kernel shim: -LINUX_SRCS=$(wildcard linux/*.c linux/*/*.c) -LINUX_OBJS=$(LINUX_SRCS:.c=.o) - -OBJS=bcache.o \ - bcache-userspace-shim.o \ - cmd_assemble.o \ - cmd_debug.o \ - cmd_device.o \ - cmd_fs.o \ - cmd_fsck.o \ - cmd_format.o \ - cmd_key.o \ - cmd_migrate.o \ - cmd_run.o \ - crypto.o \ - libbcache.o \ - qcow2.o \ - tools-util.o \ - $(LINUX_OBJS) \ - $(CCANOBJS) - -DEPS=$(OBJS:.o=.d) +DEPS=$(SRCS:.c=.d) -include $(DEPS) -bcache: $(OBJS) +OBJS=$(SRCS:.c=.o) +bcachefs: $(OBJS) .PHONY: install -install: bcache +install: bcachefs mkdir -p $(DESTDIR)$(ROOT_SBINDIR) mkdir -p $(DESTDIR)$(PREFIX)/share/man/man8/ - $(INSTALL) -m0755 bcache $(DESTDIR)$(ROOT_SBINDIR) - $(INSTALL) -m0755 mkfs.bcache $(DESTDIR)$(ROOT_SBINDIR) - $(INSTALL) -m0644 bcache.8 $(DESTDIR)$(PREFIX)/share/man/man8/ + $(INSTALL) -m0755 bcachefs $(DESTDIR)$(ROOT_SBINDIR) + $(INSTALL) -m0755 fsck.bcachefs $(DESTDIR)$(ROOT_SBINDIR) + $(INSTALL) -m0755 mkfs.bcachefs $(DESTDIR)$(ROOT_SBINDIR) + $(INSTALL) -m0644 bcachefs.8 $(DESTDIR)$(PREFIX)/share/man/man8/ .PHONY: clean clean: - $(RM) bcache $(OBJS) $(DEPS) + $(RM) bcachefs $(OBJS) $(DEPS) .PHONY: deb deb: all @@ -91,10 +86,8 @@ deb: all --diff-ignore \ --tar-ignore -.PHONE: update-bcache-sources -update-bcache-sources: - echo BCACHE_REVISION=`cd $(LINUX_DIR); git rev-parse HEAD` > .bcache_revision - cp $(LINUX_DIR)/drivers/md/bcache/*.[ch] libbcache/ - cp $(LINUX_DIR)/include/trace/events/bcache.h include/trace/events/ - cp $(LINUX_DIR)/include/uapi/linux/bcache.h include/linux/ - cp $(LINUX_DIR)/include/uapi/linux/bcache-ioctl.h include/linux/ +.PHONE: update-bcachefs-sources +update-bcachefs-sources: + echo `cd $(LINUX_DIR); git rev-parse HEAD` > .bcachefs_revision + cp $(LINUX_DIR)/fs/bcachefs/*.[ch] libbcachefs/ + cp $(LINUX_DIR)/include/trace/events/bcachefs.h include/trace/events/ @@ -1,12 +1,12 @@ -Userspace tools for bcache-dev/bcachefs +Userspace tools for bcachefs -This builds the bcache tool, which has a number of subcommands for formatting +This builds the bcachefs tool, which has a number of subcommands for formatting and managing bcachefs filesystems: -bcache format -bcache unlock -bcache assemble -bcache incremental +bcachefs format +bcachefs unlock +bcachefs assemble +bcachefs incremental etc. -Run bcache --help for full list of commands. 
+Run bcachefs --help for full list of commands. diff --git a/bcache-register b/bcache-register deleted file mode 100644 index 9b592bcd..00000000 --- a/bcache-register +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -/sbin/modprobe -qba bcache -test -f /sys/fs/bcache/register_quiet && echo "$1" > /sys/fs/bcache/register_quiet - diff --git a/bcache-userspace-shim.c b/bcachefs-userspace-shim.c index 021664f7..9fd62cdb 100644 --- a/bcache-userspace-shim.c +++ b/bcachefs-userspace-shim.c @@ -2,10 +2,10 @@ #include <errno.h> #include <linux/types.h> -#include "libbcache.h" -#include "tools-util.h" +#define bch2_fmt(_c, fmt) fmt "\n" -#define bch_fmt(_c, fmt) fmt "\n" +#include "libbcachefs.h" +#include "tools-util.h" enum fsck_err_opts fsck_err_opt; @@ -59,7 +59,6 @@ enum fsck_err_opts fsck_err_opt; #include "alloc.c" #include "bkey.c" #include "bkey_methods.c" -//#include "blockdev.c" #include "bset.c" #include "btree_cache.c" #include "btree_gc.c" @@ -70,7 +69,6 @@ enum fsck_err_opts fsck_err_opt; //#include "chardev.c" #include "checksum.c" #include "clock.c" -#include "closure.c" #include "compress.c" #include "debug.c" #include "dirent.c" @@ -82,37 +80,32 @@ enum fsck_err_opts fsck_err_opt; #include "inode.c" #include "io.c" #include "journal.c" -#include "keybuf.c" #include "keylist.c" #include "migrate.c" #include "move.c" #include "movinggc.c" -//#include "notify.c" #include "opts.c" -//#include "request.c" #include "siphash.c" #include "six.c" -//#include "stats.c" #include "super.c" #include "super-io.c" //#include "sysfs.c" #include "tier.c" #include "trace.c" #include "util.c" -//#include "writeback.c" #include "xattr.c" #define SHIM_KTYPE(type) \ struct kobj_type type ## _ktype = { .release = type ## _release, } -static void bch_fs_internal_release(struct kobject *k) {} +static void bch2_fs_internal_release(struct kobject *k) {} -static void bch_fs_opts_dir_release(struct kobject *k) {} +static void bch2_fs_opts_dir_release(struct kobject *k) {} -static void bch_fs_time_stats_release(struct kobject *k) {} +static void bch2_fs_time_stats_release(struct kobject *k) {} -SHIM_KTYPE(bch_dev); -SHIM_KTYPE(bch_fs); -SHIM_KTYPE(bch_fs_internal); -SHIM_KTYPE(bch_fs_time_stats); -SHIM_KTYPE(bch_fs_opts_dir); +SHIM_KTYPE(bch2_dev); +SHIM_KTYPE(bch2_fs); +SHIM_KTYPE(bch2_fs_internal); +SHIM_KTYPE(bch2_fs_time_stats); +SHIM_KTYPE(bch2_fs_opts_dir); @@ -1,9 +1,9 @@ -.TH bcache 8 +.TH bcachefs 8 .SH NAME -bcache \- manage bcache filesystems/devices +bcachefs \- manage bcachefs filesystems/devices .SH SYNOPSIS -.B bcache +.B bcachefs [\fIoptions\fR] .B COMMAND [\fIoptions\fR] @@ -12,7 +12,7 @@ bcache \- manage bcache filesystems/devices .TP .BR format .RS -Format one or a list of devices with bcache data structures. You need to do this before you create a volume. +Format one or a list of devices with bcachefs data structures. You need to do this before you create a volume. .RE .BR register @@ -22,7 +22,7 @@ Register a list of devices. .BR list-cachesets .RS -List cachesets on this system. This just searches /sys/fs/bcache. +List cachesets on this system. This just searches /sys/fs/bcachefs. 
.RE .BR query-devs @@ -25,15 +25,15 @@ static void usage(void) { - puts("bcache - tool for managing bcache volumes/filesystems\n" - "usage: bcache <command> [<args>]\n" + puts("bcachefs - tool for managing bcachefs filesystems\n" + "usage: bcachefs <command> [<args>]\n" "\n" "Superblock commands:\n" " format Format a new filesystem\n" " show-super Dump superblock information to stdout\n" "\n" "Repair:\n" - " bcache fsck Check an existing filesystem for errors\n" + " fsck Check an existing filesystem for errors\n" "\n" "Startup/shutdown, assembly of multi device filesystems:\n" " unlock Unlock an encrypted filesystem prior to running/mounting\n" @@ -57,7 +57,7 @@ static void usage(void) "Migrate:\n" " migrate Migrate an existing filesystem to bcachefs, in place\n" " migrate-superblock\n" - " Add default superblock, after bcache migrate\n" + " Add default superblock, after bcachefs migrate\n" "\n" "Debug:\n" "These commands work on offline, unmounted filesystems\n" diff --git a/cmd_assemble.c b/cmd_assemble.c index 1fa33e4c..5f981d71 100644 --- a/cmd_assemble.c +++ b/cmd_assemble.c @@ -7,8 +7,8 @@ #include <string.h> #include <sys/ioctl.h> +#include "bcachefs_ioctl.h" #include "cmds.h" -#include "linux/bcache-ioctl.h" int cmd_assemble(int argc, char *argv[]) { diff --git a/cmd_debug.c b/cmd_debug.c index 66499b8b..a8c534ba 100644 --- a/cmd_debug.c +++ b/cmd_debug.c @@ -4,11 +4,11 @@ #include <sys/types.h> #include "cmds.h" -#include "libbcache.h" +#include "libbcachefs.h" #include "qcow2.h" #include "tools-util.h" -#include "bcache.h" +#include "bcachefs.h" #include "alloc.h" #include "btree_cache.h" #include "btree_iter.h" @@ -18,8 +18,8 @@ static void dump_usage(void) { - puts("bcache dump - dump filesystem metadata\n" - "Usage: bcache dump [OPTION]... <devices>\n" + puts("bcachefs dump - dump filesystem metadata\n" + "Usage: bcachefs dump [OPTION]... 
<devices>\n" "\n" "Options:\n" " -o output Output qcow2 image(s)\n" @@ -75,7 +75,7 @@ static void dump_one_device(struct bch_fs *c, struct bch_dev *ca, int fd) ptr->offset << 9, b->written << 9); } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); } qcow2_write_image(ca->disk_sb.bdev->bd_fd, fd, &data, @@ -84,7 +84,7 @@ static void dump_one_device(struct bch_fs *c, struct bch_dev *ca, int fd) int cmd_dump(int argc, char *argv[]) { - struct bch_opts opts = bch_opts_empty(); + struct bch_opts opts = bch2_opts_empty(); struct bch_fs *c = NULL; const char *err; char *out = NULL; @@ -116,7 +116,7 @@ int cmd_dump(int argc, char *argv[]) if (!out) die("Please supply output filename"); - err = bch_fs_open(argv + optind, argc - optind, opts, &c); + err = bch2_fs_open(argv + optind, argc - optind, opts, &c); if (err) die("error opening %s: %s", argv[optind], err); @@ -149,7 +149,7 @@ int cmd_dump(int argc, char *argv[]) up_read(&c->gc_lock); - bch_fs_stop(c); + bch2_fs_stop(c); return 0; } @@ -164,11 +164,11 @@ static void list_keys(struct bch_fs *c, enum btree_id btree_id, if (bkey_cmp(k.k->p, end) > 0) break; - bch_bkey_val_to_text(c, bkey_type(0, btree_id), - buf, sizeof(buf), k); + bch2_bkey_val_to_text(c, bkey_type(0, btree_id), + buf, sizeof(buf), k); puts(buf); } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); } static void list_btree_formats(struct bch_fs *c, enum btree_id btree_id, @@ -182,10 +182,10 @@ static void list_btree_formats(struct bch_fs *c, enum btree_id btree_id, if (bkey_cmp(b->key.k.p, end) > 0) break; - bch_print_btree_node(c, b, buf, sizeof(buf)); + bch2_print_btree_node(c, b, buf, sizeof(buf)); puts(buf); } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); } static struct bpos parse_pos(char *buf) @@ -205,8 +205,8 @@ static struct bpos parse_pos(char *buf) static void list_keys_usage(void) { - puts("bcache list_keys - list filesystem metadata to stdout\n" - "Usage: bcache list_keys [OPTION]... <devices>\n" + puts("bcachefs list_keys - list filesystem metadata to stdout\n" + "Usage: bcachefs list_keys [OPTION]... 
<devices>\n" "\n" "Options:\n" " -b (extents|inodes|dirents|xattrs) Btree to list from\n" @@ -225,7 +225,7 @@ static const char * const list_modes[] = { int cmd_list(int argc, char *argv[]) { - struct bch_opts opts = bch_opts_empty(); + struct bch_opts opts = bch2_opts_empty(); struct bch_fs *c = NULL; enum btree_id btree_id = BTREE_ID_EXTENTS; struct bpos start = POS_MIN, end = POS_MAX; @@ -241,7 +241,7 @@ int cmd_list(int argc, char *argv[]) switch (opt) { case 'b': btree_id = read_string_list_or_die(optarg, - bch_btree_ids, "btree id"); + bch2_btree_ids, "btree id"); break; case 's': start = parse_pos(optarg); @@ -261,7 +261,7 @@ int cmd_list(int argc, char *argv[]) if (optind >= argc) die("Please supply device(s) to check"); - err = bch_fs_open(argv + optind, argc - optind, opts, &c); + err = bch2_fs_open(argv + optind, argc - optind, opts, &c); if (err) die("error opening %s: %s", argv[optind], err); @@ -276,6 +276,6 @@ int cmd_list(int argc, char *argv[]) die("Invalid mode"); } - bch_fs_stop(c); + bch2_fs_stop(c); return 0; } diff --git a/cmd_device.c b/cmd_device.c index d898733b..f71ab238 100644 --- a/cmd_device.c +++ b/cmd_device.c @@ -12,9 +12,9 @@ #include <sys/types.h> #include <unistd.h> +#include "bcachefs_ioctl.h" #include "cmds.h" -#include "libbcache.h" -#include "linux/bcache-ioctl.h" +#include "libbcachefs.h" #include "opts.h" #include "tools-util.h" @@ -174,8 +174,8 @@ static void disk_ioctl(const char *fs, const char *dev, int cmd, int flags) static void device_add_usage(void) { - puts("bcache device add - add a device to an existing filesystem\n" - "Usage: bcache device add [OPTION]... filesystem device\n" + puts("bcachefs device add - add a device to an existing filesystem\n" + "Usage: bcachefs device add [OPTION]... filesystem device\n" "\n" "Options:\n" " --fs_size=size Size of filesystem on device\n" @@ -207,7 +207,7 @@ int cmd_device_add(int argc, char *argv[]) longopts, NULL)) != -1) switch (opt) { case 'S': - if (bch_strtoull_h(optarg, &dev_opts.size)) + if (bch2_strtoull_h(optarg, &dev_opts.size)) die("invalid filesystem size"); dev_opts.size >>= 9; @@ -258,8 +258,8 @@ int cmd_device_add(int argc, char *argv[]) static void device_remove_usage(void) { - puts("bcache device_remove - remove a device from a filesystem\n" - "Usage: bcache device remove filesystem device\n" + puts("bcachefs device_remove - remove a device from a filesystem\n" + "Usage: bcachefs device remove filesystem device\n" "\n" "Options:\n" " -f, --force Force removal, even if some data\n" @@ -303,8 +303,8 @@ int cmd_device_remove(int argc, char *argv[]) static void device_online_usage(void) { - puts("bcache device online - readd a device to a running filesystem\n" - "Usage: bcache device online [OPTION]... filesystem device\n" + puts("bcachefs device online - readd a device to a running filesystem\n" + "Usage: bcachefs device online [OPTION]... filesystem device\n" "\n" "Options:\n" " -h, --help Display this help and exit\n" @@ -332,8 +332,8 @@ int cmd_device_online(int argc, char *argv[]) static void device_offline_usage(void) { - puts("bcache device offline - take a device offline, without removing it\n" - "Usage: bcache device offline [OPTION]... filesystem device\n" + puts("bcachefs device offline - take a device offline, without removing it\n" + "Usage: bcachefs device offline [OPTION]... 
filesystem device\n" "\n" "Options:\n" " -f, --force Force, if data redundancy will be degraded\n" @@ -371,8 +371,8 @@ int cmd_device_offline(int argc, char *argv[]) static void device_evacuate_usage(void) { - puts("bcache device evacuate - move data off of a given device\n" - "Usage: bcache device evacuate [OPTION]... filesystem device\n" + puts("bcachefs device evacuate - move data off of a given device\n" + "Usage: bcachefs device evacuate [OPTION]... filesystem device\n" "\n" "Options:\n" " -h, --help Display this help and exit\n" @@ -400,8 +400,8 @@ int cmd_device_evacuate(int argc, char *argv[]) static void device_set_state_usage(void) { - puts("bcache device set-state\n" - "Usage: bcache device set-state filesystem device new-state\n" + puts("bcachefs device set-state\n" + "Usage: bcachefs device set-state filesystem device new-state\n" "\n" "Options:\n" " -f, --force Force, if data redundancy will be degraded\n" @@ -437,7 +437,7 @@ int cmd_device_set_state(int argc, char *argv[]) .flags = flags, .dev = (__u64) argv[optind + 1], .new_state = read_string_list_or_die(argv[optind + 2], - bch_dev_state, "device state"), + bch2_dev_state, "device state"), }; xioctl(fs.ioctl_fd, BCH_IOCTL_DISK_SET_STATE, &i); diff --git a/cmd_format.c b/cmd_format.c index db4b10ee..fbf8bddd 100644 --- a/cmd_format.c +++ b/cmd_format.c @@ -22,14 +22,14 @@ #include "ccan/darray/darray.h" #include "cmds.h" -#include "libbcache.h" +#include "libbcachefs.h" #include "crypto.h" #include "opts.h" #include "util.h" #define OPTS \ -t("bcache format - create a new bcache filesystem on one or more devices") \ -t("Usage: bcache format [OPTION]... <devices>") \ +t("bcachefs format - create a new bcachefs filesystem on one or more devices") \ +t("Usage: bcachefs format [OPTION]... <devices>") \ t("") \ x('b', block_size, "size", NULL) \ x(0, btree_node_size, "size", "Default 256k") \ @@ -52,7 +52,7 @@ x(0, bucket_size, "size", "Bucket size") \ x('t', tier, "#", "Higher tier indicates slower devices")\ x(0, discard, NULL, NULL) \ t("Device specific options must come before corresponding devices, e.g.") \ -t(" bcache format --tier 0 /dev/sdb --tier 1 /dev/sdc") \ +t(" bcachefs format --tier 0 /dev/sdb --tier 1 /dev/sdc") \ t("") \ x('q', quiet, NULL, "Only print errors") \ x('h', help, NULL, "Display this help and exit") @@ -65,8 +65,8 @@ static void usage(void) #undef x #undef t - puts("bcache format - create a new bcache filesystem on one or more devices\n" - "Usage: bcache format [OPTION]... <devices>\n" + puts("bcachefs format - create a new bcachefs filesystem on one or more devices\n" + "Usage: bcachefs format [OPTION]... 
<devices>\n" "\n" "Options:\n" " -b, --block=size\n" @@ -95,7 +95,7 @@ static void usage(void) " -h, --help Display this help and exit\n" "\n" "Device specific options must come before corresponding devices, e.g.\n" - " bcache format --tier 0 /dev/sdb --tier 1 /dev/sdc\n" + " bcachefs format --tier 0 /dev/sdb --tier 1 /dev/sdc\n" "\n" "Report bugs to <linux-bcache@vger.kernel.org>"); } @@ -150,17 +150,17 @@ int cmd_format(int argc, char *argv[]) case O_metadata_checksum_type: opts.meta_csum_type = read_string_list_or_die(optarg, - bch_csum_types, "checksum type"); + bch2_csum_types, "checksum type"); break; case O_data_checksum_type: opts.data_csum_type = read_string_list_or_die(optarg, - bch_csum_types, "checksum type"); + bch2_csum_types, "checksum type"); break; case O_compression_type: opts.compression_type = read_string_list_or_die(optarg, - bch_compression_types, + bch2_compression_types, "compression type"); break; case O_data_replicas: @@ -183,7 +183,7 @@ int cmd_format(int argc, char *argv[]) case 'e': opts.on_error_action = read_string_list_or_die(optarg, - bch_error_actions, "error action"); + bch2_error_actions, "error action"); break; case O_max_journal_entry_size: opts.max_journal_entry_size = @@ -203,7 +203,7 @@ int cmd_format(int argc, char *argv[]) force = true; break; case O_fs_size: - if (bch_strtoull_h(optarg, &dev_opts.size)) + if (bch2_strtoull_h(optarg, &dev_opts.size)) die("invalid filesystem size"); dev_opts.size >>= 9; @@ -1,13 +1,13 @@ #include "cmds.h" -#include "libbcache.h" +#include "libbcachefs.h" #include "super.h" #include "tools-util.h" static void usage(void) { - puts("bcache fsck - filesystem check and repair\n" - "Usage: bcache fsck [OPTION]... <devices>\n" + puts("bcachefs fsck - filesystem check and repair\n" + "Usage: bcachefs fsck [OPTION]... 
<devices>\n" "\n" "Options:\n" " -p Automatic repair (no questions\n" @@ -21,7 +21,7 @@ static void usage(void) int cmd_fsck(int argc, char *argv[]) { - struct bch_opts opts = bch_opts_empty(); + struct bch_opts opts = bch2_opts_empty(); struct bch_fs *c = NULL; const char *err; int opt; @@ -52,10 +52,10 @@ int cmd_fsck(int argc, char *argv[]) if (optind >= argc) die("Please supply device(s) to check"); - err = bch_fs_open(argv + optind, argc - optind, opts, &c); + err = bch2_fs_open(argv + optind, argc - optind, opts, &c); if (err) die("error opening %s: %s", argv[optind], err); - bch_fs_stop(c); + bch2_fs_stop(c); return 0; } @@ -5,7 +5,7 @@ #include "cmds.h" #include "checksum.h" #include "crypto.h" -#include "libbcache.h" +#include "libbcachefs.h" int cmd_unlock(int argc, char *argv[]) { diff --git a/cmd_migrate.c b/cmd_migrate.c index 4924a1ec..8c8cbaf2 100644 --- a/cmd_migrate.c +++ b/cmd_migrate.c @@ -19,8 +19,7 @@ #include "cmds.h" #include "crypto.h" -#include "libbcache.h" -#include "linux/bcache.h" +#include "libbcachefs.h" #include <linux/dcache.h> #include <linux/generic-radix-tree.h> @@ -117,8 +116,8 @@ static void update_inode(struct bch_fs *c, struct bkey_inode_buf packed; int ret; - bch_inode_pack(&packed, inode); - ret = bch_btree_update(c, BTREE_ID_INODES, &packed.inode.k_i, NULL); + bch2_inode_pack(&packed, inode); + ret = bch2_btree_update(c, BTREE_ID_INODES, &packed.inode.k_i, NULL); if (ret) die("error creating file: %s", strerror(-ret)); } @@ -127,12 +126,12 @@ static void create_dirent(struct bch_fs *c, struct bch_inode_unpacked *parent, const char *name, u64 inum, mode_t mode) { - struct bch_hash_info parent_hash_info = bch_hash_info_init(parent); + struct bch_hash_info parent_hash_info = bch2_hash_info_init(c, parent); struct qstr qname = { { { .len = strlen(name), } }, .name = name }; - int ret = bch_dirent_create(c, parent->inum, &parent_hash_info, - mode_to_type(mode), &qname, - inum, NULL, BCH_HASH_SET_MUST_CREATE); + int ret = bch2_dirent_create(c, parent->inum, &parent_hash_info, + mode_to_type(mode), &qname, + inum, NULL, BCH_HASH_SET_MUST_CREATE); if (ret) die("error creating file: %s", strerror(-ret)); @@ -145,7 +144,7 @@ static void create_link(struct bch_fs *c, const char *name, u64 inum, mode_t mode) { struct bch_inode_unpacked inode; - int ret = bch_inode_find_by_inum(c, inum, &inode); + int ret = bch2_inode_find_by_inum(c, inum, &inode); if (ret) die("error looking up hardlink: %s", strerror(-ret)); @@ -165,11 +164,11 @@ static struct bch_inode_unpacked create_file(struct bch_fs *c, struct bkey_inode_buf packed; int ret; - bch_inode_init(c, &new_inode, uid, gid, mode, rdev); - bch_inode_pack(&packed, &new_inode); + bch2_inode_init(c, &new_inode, uid, gid, mode, rdev); + bch2_inode_pack(&packed, &new_inode); - ret = bch_inode_create(c, &packed.inode.k_i, BLOCKDEV_INODE_MAX, 0, - &c->unused_inode_hint); + ret = bch2_inode_create(c, &packed.inode.k_i, BLOCKDEV_INODE_MAX, 0, + &c->unused_inode_hint); if (ret) die("error creating file: %s", strerror(-ret)); @@ -187,7 +186,7 @@ static struct bch_inode_unpacked create_file(struct bch_fs *c, static const struct xattr_handler *xattr_resolve_name(const char **name) { - const struct xattr_handler **handlers = bch_xattr_handlers; + const struct xattr_handler **handlers = bch2_xattr_handlers; const struct xattr_handler *handler; for_each_xattr_handler(handlers, handler) { @@ -210,15 +209,15 @@ static const struct xattr_handler *xattr_resolve_name(const char **name) static void copy_times(struct bch_fs *c, struct 
bch_inode_unpacked *dst, struct stat *src) { - dst->i_atime = timespec_to_bch_time(c, src->st_atim); - dst->i_mtime = timespec_to_bch_time(c, src->st_mtim); - dst->i_ctime = timespec_to_bch_time(c, src->st_ctim); + dst->i_atime = timespec_to_bch2_time(c, src->st_atim); + dst->i_mtime = timespec_to_bch2_time(c, src->st_mtim); + dst->i_ctime = timespec_to_bch2_time(c, src->st_ctim); } static void copy_xattrs(struct bch_fs *c, struct bch_inode_unpacked *dst, char *src) { - struct bch_hash_info hash_info = bch_hash_info_init(dst); + struct bch_hash_info hash_info = bch2_hash_info_init(c, dst); char attrs[XATTR_LIST_MAX]; ssize_t attrs_size = llistxattr(src, attrs, sizeof(attrs)); @@ -238,8 +237,8 @@ static void copy_xattrs(struct bch_fs *c, struct bch_inode_unpacked *dst, const struct xattr_handler *h = xattr_resolve_name(&attr); - int ret = __bch_xattr_set(c, dst->inum, &hash_info, attr, - val, val_size, 0, h->flags, NULL); + int ret = __bch2_xattr_set(c, dst->inum, &hash_info, attr, + val, val_size, 0, h->flags, NULL); if (ret < 0) die("error creating xattr: %s", strerror(-ret)); } @@ -264,15 +263,15 @@ static void write_data(struct bch_fs *c, bio.bio.bi_max_vecs = 1; bio.bio.bi_io_vec = &bv; bio.bio.bi_iter.bi_size = len; - bch_bio_map(&bio.bio, buf); + bch2_bio_map(&bio.bio, buf); - int ret = bch_disk_reservation_get(c, &res, len >> 9, 0); + int ret = bch2_disk_reservation_get(c, &res, len >> 9, 0); if (ret) die("error reserving space in new filesystem: %s", strerror(-ret)); - bch_write_op_init(&op, c, &bio, res, c->write_points, - POS(dst_inode->inum, dst_offset >> 9), NULL, 0); - closure_call(&op.cl, bch_write, NULL, &cl); + bch2_write_op_init(&op, c, &bio, res, c->write_points, + POS(dst_inode->inum, dst_offset >> 9), NULL, 0); + closure_call(&op.cl, bch2_write, NULL, &cl); closure_sync(&cl); dst_inode->i_sectors += len >> 9; @@ -330,18 +329,18 @@ static void link_data(struct bch_fs *c, struct bch_inode_unpacked *dst, .gen = ca->buckets[b].mark.gen, }); - ret = bch_disk_reservation_get(c, &res, sectors, - BCH_DISK_RESERVATION_NOFAIL); + ret = bch2_disk_reservation_get(c, &res, sectors, + BCH_DISK_RESERVATION_NOFAIL); if (ret) die("error reserving space in new filesystem: %s", strerror(-ret)); - ret = bch_btree_insert(c, BTREE_ID_EXTENTS, &e->k_i, - &res, NULL, NULL, 0); + ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &e->k_i, + &res, NULL, NULL, 0); if (ret) die("btree insert error %s", strerror(-ret)); - bch_disk_reservation_put(c, &res); + bch2_disk_reservation_put(c, &res); dst->i_sectors += sectors; logical += sectors; @@ -566,7 +565,7 @@ static void copy_fs(struct bch_fs *c, int src_fd, const char *src_path, syncfs(src_fd); struct bch_inode_unpacked root_inode; - int ret = bch_inode_find_by_inum(c, BCACHE_ROOT_INO, &root_inode); + int ret = bch2_inode_find_by_inum(c, BCACHE_ROOT_INO, &root_inode); if (ret) die("error looking up root directory: %s", strerror(-ret)); @@ -612,8 +611,8 @@ static void find_superblock_space(ranges extents, struct dev_opts *dev) static void migrate_usage(void) { - puts("bcache migrate - migrate an existing filesystem to bcachefs\n" - "Usage: bcache migrate [OPTION]...\n" + puts("bcachefs migrate - migrate an existing filesystem to bcachefs\n" + "Usage: bcachefs migrate [OPTION]...\n" "\n" "Options:\n" " -f fs Root of filesystem to migrate(s)\n" @@ -719,21 +718,21 @@ int cmd_migrate(int argc, char *argv[]) printf("Creating new filesystem on %s in space reserved at %s\n" "To mount, run\n" - " mount -t bcache -o sb=%llu %s dir\n" + " mount -t bcachefs -o 
sb=%llu %s dir\n" "\n" "After verifying that the new filesystem is correct, to create a\n" "superblock at the default offset and finish the migration run\n" - " bcache migrate_superblock -d %s -o %llu\n" + " bcachefs migrate_superblock -d %s -o %llu\n" "\n" "The new filesystem will have a file at /old_migrated_filestem\n" "referencing all disk space that might be used by the existing\n" "filesystem. That file can be deleted once the old filesystem is\n" "no longer needed (and should be deleted prior to running\n" - "bcache migrate_superblock)\n", + "bcachefs migrate_superblock)\n", dev.path, file_path, sb_offset, dev.path, dev.path, sb_offset); - struct bch_opts opts = bch_opts_empty(); + struct bch_opts opts = bch2_opts_empty(); struct bch_fs *c = NULL; char *path[1] = { dev.path }; const char *err; @@ -742,38 +741,38 @@ int cmd_migrate(int argc, char *argv[]) opts.nostart = true; opts.noexcl = true; - err = bch_fs_open(path, 1, opts, &c); + err = bch2_fs_open(path, 1, opts, &c); if (err) die("Error opening new filesystem: %s", err); mark_unreserved_space(c, extents); - err = bch_fs_start(c); + err = bch2_fs_start(c); if (err) die("Error starting new filesystem: %s", err); copy_fs(c, fs_fd, fs_path, bcachefs_inum, &extents); - bch_fs_stop(c); + bch2_fs_stop(c); printf("Migrate complete, running fsck:\n"); opts.nostart = false; opts.nochanges = true; fsck_err_opt = FSCK_ERR_NO; - err = bch_fs_open(path, 1, opts, &c); + err = bch2_fs_open(path, 1, opts, &c); if (err) die("Error opening new filesystem: %s", err); - bch_fs_stop(c); + bch2_fs_stop(c); printf("fsck complete\n"); return 0; } static void migrate_superblock_usage(void) { - puts("bcache migrate_superblock - create default superblock after migrating\n" - "Usage: bcache migrate_superblock [OPTION]...\n" + puts("bcachefs migrate_superblock - create default superblock after migrating\n" + "Usage: bcachefs migrate_superblock [OPTION]...\n" "\n" "Options:\n" " -d device Device to create superblock for\n" @@ -11,8 +11,8 @@ #include <uuid/uuid.h> +#include "bcachefs_ioctl.h" #include "cmds.h" -#include "linux/bcache-ioctl.h" int cmd_run(int argc, char *argv[]) { @@ -79,29 +79,29 @@ void derive_passphrase(struct bch_sb_field_crypt *crypt, void add_bcache_key(struct bch_sb *sb, const char *passphrase) { - struct bch_sb_field_crypt *crypt = bch_sb_get_crypt(sb); + struct bch_sb_field_crypt *crypt = bch2_sb_get_crypt(sb); if (!crypt) die("filesystem is not encrypted"); struct bch_encrypted_key sb_key = crypt->key; - if (!bch_key_is_encrypted(&sb_key)) + if (!bch2_key_is_encrypted(&sb_key)) die("filesystem does not have encryption key"); struct bch_key passphrase_key; derive_passphrase(crypt, &passphrase_key, passphrase); /* Check if the user supplied the correct passphrase: */ - if (bch_chacha_encrypt_key(&passphrase_key, __bch_sb_key_nonce(sb), + if (bch2_chacha_encrypt_key(&passphrase_key, __bch2_sb_key_nonce(sb), &sb_key, sizeof(sb_key))) die("error encrypting key"); - if (bch_key_is_encrypted(&sb_key)) + if (bch2_key_is_encrypted(&sb_key)) die("incorrect passphrase"); char uuid[40]; uuid_unparse_lower(sb->user_uuid.b, uuid); - char *description = mprintf("bcache:%s", uuid); + char *description = mprintf("bcachefs:%s", uuid); if (add_key("logon", description, &passphrase_key, sizeof(passphrase_key), @@ -134,13 +134,13 @@ void bch_sb_crypt_init(struct bch_sb *sb, derive_passphrase(crypt, &passphrase_key, passphrase); - assert(!bch_key_is_encrypted(&crypt->key)); + assert(!bch2_key_is_encrypted(&crypt->key)); - if 
(bch_chacha_encrypt_key(&passphrase_key, __bch_sb_key_nonce(sb), + if (bch2_chacha_encrypt_key(&passphrase_key, __bch2_sb_key_nonce(sb), &crypt->key, sizeof(crypt->key))) die("error encrypting key"); - assert(bch_key_is_encrypted(&crypt->key)); + assert(bch2_key_is_encrypted(&crypt->key)); memzero_explicit(&passphrase_key, sizeof(passphrase_key)); } diff --git a/debian/.gitignore b/debian/.gitignore index 957abf01..04bfc77c 100644 --- a/debian/.gitignore +++ b/debian/.gitignore @@ -1,3 +1,3 @@ -bcache-tools* +bcachefs-tools* debhelper-build-stamp files diff --git a/debian/bcache-tools.preinst b/debian/bcache-tools.preinst deleted file mode 100644 index d2e70952..00000000 --- a/debian/bcache-tools.preinst +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh - -oldscript=/etc/initramfs-tools/hooks/bcache -dpkg-maintscript-helper rm_conffile $oldscript 1.0.1-1~ -- "$@" -case "$1" in - install|upgrade) - if [ -f $oldscript ]; then - for hash in ca5a1c3f716e3ec69057f657cb79cee2f47c7ef6619983d86e647ac1f9f1f099 \ - 74c5338e21c926d3cbbc1b44c5525667dc964fe91189ffa6b11352974ef56950 - do - if echo "$hash $oldscript" | - sha256sum --check --status -; then - # Old conffile was not modified, let's just remove it - rm -f $oldscript - fi - done - if [ -f $oldscript ]; then - # Otherwise, disable and rename it - chmod -x $oldscript - mv $oldscript $oldscript.dpkg-remove - fi - fi - ;; -esac -#DEBHELPER# diff --git a/debian/bcache-tools.dirs b/debian/bcachefs-tools.dirs index ea98e984..ea98e984 100644 --- a/debian/bcache-tools.dirs +++ b/debian/bcachefs-tools.dirs diff --git a/debian/changelog b/debian/changelog index 78965101..26d64694 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,16 +1,16 @@ -bcache-tools (1.0.8-2~bpo8+1) jessie-backports; urgency=medium +bcachefs-tools (1.0.8-2~bpo8+1) jessie-backports; urgency=medium * Rebuild for jessie-backports. -- Mathieu Parent <sathieu@debian.org> Mon, 21 Sep 2015 21:18:39 +0200 -bcache-tools (1.0.8-2) unstable; urgency=medium +bcachefs-tools (1.0.8-2) unstable; urgency=medium * Only run update-initramfs if installed. Fix dracut. (Closes: #788442) -- David Mohr <david@mcbf.net> Thu, 11 Jun 2015 10:23:48 -0600 -bcache-tools (1.0.8-1) unstable; urgency=medium +bcachefs-tools (1.0.8-1) unstable; urgency=medium [ James Page ] * d/control: Add Vcs fields. 
@@ -28,7 +28,7 @@ bcache-tools (1.0.8-1) unstable; urgency=medium -- David Mohr <david@mcbf.net> Tue, 26 May 2015 20:57:58 -0600 -bcache-tools (1.0.7-1) unstable; urgency=medium +bcachefs-tools (1.0.7-1) unstable; urgency=medium [ David Mohr ] * Based on work by Gabriel de Perthuis <g2p.code+debian@gmail.com> diff --git a/debian/control b/debian/control index 20cfd118..8c003fcc 100644 --- a/debian/control +++ b/debian/control @@ -1,25 +1,17 @@ -Source: bcache-tools -Maintainer: David Mohr <david@mcbf.net> -Uploaders: Robie Basak <robie@justgohome.co.uk> +Source: bcachefs-tools +Maintainer: Kent Overstreet <kent.overstreet@gmail.com> Section: utils Priority: optional Standards-Version: 3.9.5 Build-Depends: debhelper (>= 9), pkg-config, libblkid-dev, uuid-dev, libscrypt-dev, libsodium-dev, libkeyutils-dev, liburcu-dev, zlib1g-dev, libattr1-dev -Vcs-Browser: http://anonscm.debian.org/gitweb/?p=collab-maint/bcache-tools.git -Vcs-Git: git://anonscm.debian.org/collab-maint/bcache-tools.git Homepage: http://bcache.evilpiepirate.org/ -Package: bcache-tools +Package: bcachefs-tools Architecture: linux-any Depends: ${shlibs:Depends}, ${misc:Depends} Recommends: initramfs-tools | linux-initramfs-tool -Description: bcache userspace tools - Bcache allows the use of SSDs to cache other block devices. - . - Documentation for the run-time interface is included in the kernel tree; in - Documentation/bcache.txt. - . - This package includes udev rules, initramfs support, and the utilities to - create a new bcache as well as inspect existing bcache partitions. +Description: bcachefs userspace tools + Userspace tools for bcachefs, a modern copy on write, checksumming, multi + device filesystem. diff --git a/debian/watch b/debian/watch index e5d55111..f9ca3c6d 100644 --- a/debian/watch +++ b/debian/watch @@ -1,8 +1,3 @@ version=3 http://evilpiepirate.org/git/bcache-tools.git/refs/ /git/bcache-tools.git/tag/\?id=v(\d[\d.]*) - -# tag/\?id=(v?\d[\d.]*) -#opts="filenamemangle=s/(?:.*)?v?(\d[\d\.]*)\.tar\.gz/bcache-tools-$1.tar.gz/" \ -# (?:.*/)?v?(\d[\d\.]*)\.tar\.gz -# http://evilpiepirate.org/git/bcache-tools.git/tag/?id=v1.0.8 diff --git a/fsck.bcache b/fsck.bcache deleted file mode 100755 index 17abea93..00000000 --- a/fsck.bcache +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -exec bcache fsck "$@" diff --git a/fsck.bcachefs b/fsck.bcachefs new file mode 100755 index 00000000..e1d2a44c --- /dev/null +++ b/fsck.bcachefs @@ -0,0 +1,3 @@ +#!/bin/sh + +exec bcachefs fsck "$@" diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 37a04a32..1c793b51 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -85,6 +85,12 @@ struct block_device { void generic_make_request(struct bio *); int submit_bio_wait(struct bio *); + +static inline void submit_bio(struct bio *bio) +{ + generic_make_request(bio); +} + int blkdev_issue_discard(struct block_device *, sector_t, sector_t, gfp_t, unsigned long); diff --git a/libbcache/closure.h b/include/linux/closure.h index b55254b6..33280d30 100644 --- a/libbcache/closure.h +++ b/include/linux/closure.h @@ -152,7 +152,7 @@ struct closure { atomic_t remaining; -#ifdef CONFIG_BCACHE_CLOSURES_DEBUG +#ifdef CONFIG_DEBUG_CLOSURES #define CLOSURE_MAGIC_DEAD 0xc054dead #define CLOSURE_MAGIC_ALIVE 0xc054a11e @@ -181,15 +181,13 @@ static inline void closure_sync(struct closure *cl) __closure_sync(cl); } -#ifdef CONFIG_BCACHE_CLOSURES_DEBUG +#ifdef CONFIG_DEBUG_CLOSURES -void closure_debug_init(void); void closure_debug_create(struct closure *cl); void 
closure_debug_destroy(struct closure *cl); #else -static inline void closure_debug_init(void) {} static inline void closure_debug_create(struct closure *cl) {} static inline void closure_debug_destroy(struct closure *cl) {} @@ -197,21 +195,21 @@ static inline void closure_debug_destroy(struct closure *cl) {} static inline void closure_set_ip(struct closure *cl) { -#ifdef CONFIG_BCACHE_CLOSURES_DEBUG +#ifdef CONFIG_DEBUG_CLOSURES cl->ip = _THIS_IP_; #endif } static inline void closure_set_ret_ip(struct closure *cl) { -#ifdef CONFIG_BCACHE_CLOSURES_DEBUG +#ifdef CONFIG_DEBUG_CLOSURES cl->ip = _RET_IP_; #endif } static inline void closure_set_waiting(struct closure *cl, unsigned long f) { -#ifdef CONFIG_BCACHE_CLOSURES_DEBUG +#ifdef CONFIG_DEBUG_CLOSURES cl->waiting_on = f; #endif } @@ -247,7 +245,7 @@ static inline void closure_queue(struct closure *cl) */ static inline void closure_get(struct closure *cl) { -#ifdef CONFIG_BCACHE_CLOSURES_DEBUG +#ifdef CONFIG_DEBUG_CLOSURES BUG_ON((atomic_inc_return(&cl->remaining) & CLOSURE_REMAINING_MASK) <= 1); #else @@ -338,7 +336,7 @@ do { \ */ #define continue_at_nobarrier(_cl, _fn, _wq) \ do { \ - closure_set_ip(cl); \ + closure_set_ip(_cl); \ if (_wq) { \ INIT_WORK(&(_cl)->work, (void *) _fn); \ queue_work((_wq), &(_cl)->work); \ diff --git a/include/trace/events/bcache.h b/include/trace/events/bcachefs.h index b39fdde7..7dea9d63 100644 --- a/include/trace/events/bcache.h +++ b/include/trace/events/bcachefs.h @@ -1,52 +1,11 @@ #undef TRACE_SYSTEM -#define TRACE_SYSTEM bcache +#define TRACE_SYSTEM bcachefs #if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_BCACHE_H #include <linux/tracepoint.h> -struct bcache_device; -struct bio; -struct bkey; -struct btree; -struct bch_dev; -struct bch_fs; -struct keylist; -struct moving_queue; - -DECLARE_EVENT_CLASS(bcache_request, - TP_PROTO(struct bcache_device *d, struct bio *bio), - TP_ARGS(d, bio), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(unsigned int, orig_major ) - __field(unsigned int, orig_minor ) - __field(sector_t, sector ) - __field(sector_t, orig_sector ) - __field(unsigned int, nr_sector ) - __array(char, rwbs, 6 ) - ), - - TP_fast_assign( - __entry->dev = bio->bi_bdev->bd_dev; - __entry->orig_major = d->disk->major; - __entry->orig_minor = d->disk->first_minor; - __entry->sector = bio->bi_iter.bi_sector; - __entry->orig_sector = bio->bi_iter.bi_sector - 16; - __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, - bio->bi_iter.bi_size); - ), - - TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->rwbs, (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->orig_major, __entry->orig_minor, - (unsigned long long)__entry->orig_sector) -); - DECLARE_EVENT_CLASS(bpos, TP_PROTO(struct bpos p), TP_ARGS(p), @@ -84,73 +43,47 @@ DECLARE_EVENT_CLASS(bkey, __entry->offset, __entry->size) ); -/* request.c */ - -DEFINE_EVENT(bcache_request, bcache_request_start, - TP_PROTO(struct bcache_device *d, struct bio *bio), - TP_ARGS(d, bio) -); - -DEFINE_EVENT(bcache_request, bcache_request_end, - TP_PROTO(struct bcache_device *d, struct bio *bio), - TP_ARGS(d, bio) -); - -DECLARE_EVENT_CLASS(bcache_bio, - TP_PROTO(struct bio *bio), - TP_ARGS(bio), +DECLARE_EVENT_CLASS(bch_dev, + TP_PROTO(struct bch_dev *ca), + TP_ARGS(ca), TP_STRUCT__entry( - __field(dev_t, dev ) - __field(sector_t, sector ) - __field(unsigned int, nr_sector ) - __array(char, rwbs, 6 ) + 
__array(char, uuid, 16 ) + __field(unsigned, tier ) ), TP_fast_assign( - __entry->dev = bio->bi_bdev->bd_dev; - __entry->sector = bio->bi_iter.bi_sector; - __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, - bio->bi_iter.bi_size); + memcpy(__entry->uuid, ca->uuid.b, 16); + __entry->tier = ca->mi.tier; ), - TP_printk("%d,%d %s %llu + %u", - MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, - (unsigned long long)__entry->sector, __entry->nr_sector) + TP_printk("%pU tier %u", __entry->uuid, __entry->tier) ); -DEFINE_EVENT(bcache_bio, bcache_bypass_sequential, - TP_PROTO(struct bio *bio), - TP_ARGS(bio) -); +DECLARE_EVENT_CLASS(bch_fs, + TP_PROTO(struct bch_fs *c), + TP_ARGS(c), -DEFINE_EVENT(bcache_bio, bcache_bypass_congested, - TP_PROTO(struct bio *bio), - TP_ARGS(bio) -); + TP_STRUCT__entry( + __array(char, uuid, 16 ) + ), -DEFINE_EVENT(bcache_bio, bcache_promote, - TP_PROTO(struct bio *bio), - TP_ARGS(bio) -); + TP_fast_assign( + memcpy(__entry->uuid, c->sb.user_uuid.b, 16); + ), -DEFINE_EVENT(bkey, bcache_promote_collision, - TP_PROTO(const struct bkey *k), - TP_ARGS(k) + TP_printk("%pU", __entry->uuid) ); -TRACE_EVENT(bcache_read, - TP_PROTO(struct bio *bio, bool hit, bool bypass), - TP_ARGS(bio, hit, bypass), +DECLARE_EVENT_CLASS(bio, + TP_PROTO(struct bio *bio), + TP_ARGS(bio), TP_STRUCT__entry( __field(dev_t, dev ) __field(sector_t, sector ) __field(unsigned int, nr_sector ) __array(char, rwbs, 6 ) - __field(bool, cache_hit ) - __field(bool, bypass ) ), TP_fast_assign( @@ -159,49 +92,53 @@ TRACE_EVENT(bcache_read, __entry->nr_sector = bio->bi_iter.bi_size >> 9; blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_size); - __entry->cache_hit = hit; - __entry->bypass = bypass; ), - TP_printk("%d,%d %s %llu + %u hit %u bypass %u", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->rwbs, (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->cache_hit, __entry->bypass) + TP_printk("%d,%d %s %llu + %u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, __entry->nr_sector) ); -TRACE_EVENT(bcache_write, - TP_PROTO(struct bch_fs *c, u64 inode, struct bio *bio, - bool writeback, bool bypass), - TP_ARGS(c, inode, bio, writeback, bypass), +DECLARE_EVENT_CLASS(page_alloc_fail, + TP_PROTO(struct bch_fs *c, u64 size), + TP_ARGS(c, size), TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(u64, inode ) - __field(sector_t, sector ) - __field(unsigned int, nr_sector ) - __array(char, rwbs, 6 ) - __field(bool, writeback ) - __field(bool, bypass ) + __array(char, uuid, 16 ) + __field(u64, size ) ), TP_fast_assign( memcpy(__entry->uuid, c->sb.user_uuid.b, 16); - __entry->inode = inode; - __entry->sector = bio->bi_iter.bi_sector; - __entry->nr_sector = bio->bi_iter.bi_size >> 9; - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, - bio->bi_iter.bi_size); - __entry->writeback = writeback; - __entry->bypass = bypass; + __entry->size = size; ), - TP_printk("%pU inode %llu %s %llu + %u hit %u bypass %u", - __entry->uuid, __entry->inode, - __entry->rwbs, (unsigned long long)__entry->sector, - __entry->nr_sector, __entry->writeback, __entry->bypass) + TP_printk("%pU size %llu", __entry->uuid, __entry->size) +); + +/* io.c: */ + +DEFINE_EVENT(bio, read_split, + TP_PROTO(struct bio *bio), + TP_ARGS(bio) ); -TRACE_EVENT(bcache_write_throttle, +DEFINE_EVENT(bio, read_bounce, + TP_PROTO(struct bio *bio), + TP_ARGS(bio) +); + +DEFINE_EVENT(bio, read_retry, + 
TP_PROTO(struct bio *bio), + TP_ARGS(bio) +); + +DEFINE_EVENT(bio, promote, + TP_PROTO(struct bio *bio), + TP_ARGS(bio) +); + +TRACE_EVENT(write_throttle, TP_PROTO(struct bch_fs *c, u64 inode, struct bio *bio, u64 delay), TP_ARGS(c, inode, bio, delay), @@ -230,172 +167,24 @@ TRACE_EVENT(bcache_write_throttle, __entry->nr_sector, __entry->delay) ); -DEFINE_EVENT(bcache_bio, bcache_read_retry, - TP_PROTO(struct bio *bio), - TP_ARGS(bio) -); - -DECLARE_EVENT_CLASS(page_alloc_fail, - TP_PROTO(struct bch_fs *c, u64 size), - TP_ARGS(c, size), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(u64, size ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); - __entry->size = size; - ), - - TP_printk("%pU size %llu", __entry->uuid, __entry->size) -); - /* Journal */ -DECLARE_EVENT_CLASS(cache_set, - TP_PROTO(struct bch_fs *c), - TP_ARGS(c), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); - ), - - TP_printk("%pU", __entry->uuid) -); - -DEFINE_EVENT(bkey, bcache_journal_replay_key, - TP_PROTO(const struct bkey *k), - TP_ARGS(k) -); - -TRACE_EVENT(bcache_journal_next_bucket, - TP_PROTO(struct bch_dev *ca, unsigned cur_idx, unsigned last_idx), - TP_ARGS(ca, cur_idx, last_idx), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(unsigned, cur_idx ) - __field(unsigned, last_idx ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, ca->uuid.b, 16); - __entry->cur_idx = cur_idx; - __entry->last_idx = last_idx; - ), - - TP_printk("%pU cur %u last %u", __entry->uuid, - __entry->cur_idx, __entry->last_idx) -); - -TRACE_EVENT(bcache_journal_write_oldest, - TP_PROTO(struct bch_fs *c, u64 seq), - TP_ARGS(c, seq), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(u64, seq ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); - __entry->seq = seq; - ), - - TP_printk("%pU seq %llu", __entry->uuid, __entry->seq) -); - -TRACE_EVENT(bcache_journal_write_oldest_done, - TP_PROTO(struct bch_fs *c, u64 seq, unsigned written), - TP_ARGS(c, seq, written), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(u64, seq ) - __field(unsigned, written ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); - __entry->seq = seq; - __entry->written = written; - ), - - TP_printk("%pU seq %llu written %u", __entry->uuid, __entry->seq, - __entry->written) -); - -DEFINE_EVENT(cache_set, bcache_journal_full, +DEFINE_EVENT(bch_fs, journal_full, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(cache_set, bcache_journal_entry_full, +DEFINE_EVENT(bch_fs, journal_entry_full, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(bcache_bio, bcache_journal_write, +DEFINE_EVENT(bio, journal_write, TP_PROTO(struct bio *bio), TP_ARGS(bio) ); -/* Device state changes */ - -DEFINE_EVENT(cache_set, fs_read_only, - TP_PROTO(struct bch_fs *c), - TP_ARGS(c) -); - -DEFINE_EVENT(cache_set, fs_read_only_done, - TP_PROTO(struct bch_fs *c), - TP_ARGS(c) -); - -DECLARE_EVENT_CLASS(cache, - TP_PROTO(struct bch_dev *ca), - TP_ARGS(ca), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(unsigned, tier ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, ca->uuid.b, 16); - __entry->tier = ca->mi.tier; - ), - - TP_printk("%pU tier %u", __entry->uuid, __entry->tier) -); - -DEFINE_EVENT(cache, bcache_cache_read_only, - TP_PROTO(struct bch_dev *ca), - TP_ARGS(ca) -); - -DEFINE_EVENT(cache, bcache_cache_read_only_done, - TP_PROTO(struct bch_dev *ca), - TP_ARGS(ca) -); - -DEFINE_EVENT(cache, 
bcache_cache_read_write, - TP_PROTO(struct bch_dev *ca), - TP_ARGS(ca) -); - -DEFINE_EVENT(cache, bcache_cache_read_write_done, - TP_PROTO(struct bch_dev *ca), - TP_ARGS(ca) -); - -/* Searching */ +/* bset.c: */ DEFINE_EVENT(bpos, bkey_pack_pos_fail, TP_PROTO(struct bpos p), @@ -431,12 +220,12 @@ DECLARE_EVENT_CLASS(btree_node, __entry->inode, __entry->offset) ); -DEFINE_EVENT(btree_node, bcache_btree_read, +DEFINE_EVENT(btree_node, btree_read, TP_PROTO(struct bch_fs *c, struct btree *b), TP_ARGS(c, b) ); -TRACE_EVENT(bcache_btree_write, +TRACE_EVENT(btree_write, TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors), TP_ARGS(b, bytes, sectors), @@ -456,34 +245,17 @@ TRACE_EVENT(bcache_btree_write, __entry->type , __entry->bytes, __entry->sectors) ); -DEFINE_EVENT(btree_node, bcache_btree_node_alloc, +DEFINE_EVENT(btree_node, btree_node_alloc, TP_PROTO(struct bch_fs *c, struct btree *b), TP_ARGS(c, b) ); -TRACE_EVENT(bcache_btree_node_alloc_fail, - TP_PROTO(struct bch_fs *c, enum btree_id id), - TP_ARGS(c, id), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(enum btree_id, id ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); - __entry->id = id; - ), - - TP_printk("%pU id %u", __entry->uuid, __entry->id) -); - -DEFINE_EVENT(btree_node, bcache_btree_node_free, +DEFINE_EVENT(btree_node, btree_node_free, TP_PROTO(struct bch_fs *c, struct btree *b), TP_ARGS(c, b) ); -TRACE_EVENT(bcache_mca_reap, +TRACE_EVENT(btree_node_reap, TP_PROTO(struct bch_fs *c, struct btree *b, int ret), TP_ARGS(c, b, ret), @@ -500,33 +272,7 @@ TRACE_EVENT(bcache_mca_reap, TP_printk("bucket %llu ret %d", __entry->bucket, __entry->ret) ); -TRACE_EVENT(bcache_mca_scan, - TP_PROTO(struct bch_fs *c, unsigned touched, unsigned freed, - unsigned can_free, unsigned long nr), - TP_ARGS(c, touched, freed, can_free, nr), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(unsigned long, touched ) - __field(unsigned long, freed ) - __field(unsigned long, can_free ) - __field(unsigned long, nr ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); - __entry->touched = touched; - __entry->freed = freed; - __entry->can_free = can_free; - __entry->nr = nr; - ), - - TP_printk("%pU touched %lu freed %lu can_free %lu nr %lu", - __entry->uuid, __entry->touched, __entry->freed, - __entry->can_free, __entry->nr) -); - -DECLARE_EVENT_CLASS(mca_cannibalize_lock, +DECLARE_EVENT_CLASS(btree_node_cannibalize_lock, TP_PROTO(struct bch_fs *c), TP_ARGS(c), @@ -541,27 +287,47 @@ DECLARE_EVENT_CLASS(mca_cannibalize_lock, TP_printk("%pU", __entry->uuid) ); -DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock_fail, +DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock_fail, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock, +DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize, +DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(cache_set, bcache_mca_cannibalize_unlock, +DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -TRACE_EVENT(bcache_btree_insert_key, +TRACE_EVENT(btree_reserve_get_fail, + TP_PROTO(struct bch_fs *c, size_t required, struct closure *cl), + TP_ARGS(c, required, cl), + + TP_STRUCT__entry( + __array(char, uuid, 16 ) + __field(size_t, required ) + 
__field(struct closure *, cl ) + ), + + TP_fast_assign( + memcpy(__entry->uuid, c->sb.user_uuid.b, 16); + __entry->required = required; + __entry->cl = cl; + ), + + TP_printk("%pU required %zu by %p", __entry->uuid, + __entry->required, __entry->cl) +); + +TRACE_EVENT(btree_insert_key, TP_PROTO(struct bch_fs *c, struct btree *b, struct bkey_i *k), TP_ARGS(c, b, k), @@ -620,24 +386,24 @@ DECLARE_EVENT_CLASS(btree_split, __entry->inode, __entry->offset, __entry->keys) ); -DEFINE_EVENT(btree_split, bcache_btree_node_split, +DEFINE_EVENT(btree_split, btree_node_split, TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys), TP_ARGS(c, b, keys) ); -DEFINE_EVENT(btree_split, bcache_btree_node_compact, +DEFINE_EVENT(btree_split, btree_node_compact, TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys), TP_ARGS(c, b, keys) ); -DEFINE_EVENT(btree_node, bcache_btree_set_root, +DEFINE_EVENT(btree_node, btree_set_root, TP_PROTO(struct bch_fs *c, struct btree *b), TP_ARGS(c, b) ); /* Garbage collection */ -TRACE_EVENT(bcache_btree_gc_coalesce, +TRACE_EVENT(btree_gc_coalesce, TP_PROTO(struct bch_fs *c, struct btree *b, unsigned nodes), TP_ARGS(c, b, nodes), @@ -664,7 +430,7 @@ TRACE_EVENT(bcache_btree_gc_coalesce, __entry->inode, __entry->offset, __entry->nodes) ); -TRACE_EVENT(bcache_btree_gc_coalesce_fail, +TRACE_EVENT(btree_gc_coalesce_fail, TP_PROTO(struct bch_fs *c, int reason), TP_ARGS(c, reason), @@ -681,119 +447,54 @@ TRACE_EVENT(bcache_btree_gc_coalesce_fail, TP_printk("%pU: %u", __entry->uuid, __entry->reason) ); -TRACE_EVENT(bcache_btree_node_alloc_replacement, - TP_PROTO(struct bch_fs *c, struct btree *old, struct btree *b), - TP_ARGS(c, old, b), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(u64, bucket ) - __field(u64, old_bucket ) - __field(u8, level ) - __field(u8, id ) - __field(u32, inode ) - __field(u64, offset ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); - __entry->old_bucket = PTR_BUCKET_NR_TRACE(c, - &old->key, 0); - __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0); - __entry->level = b->level; - __entry->id = b->btree_id; - __entry->inode = b->key.k.p.inode; - __entry->offset = b->key.k.p.offset; - ), - - TP_printk("%pU for %llu bucket %llu(%u) id %u: %u:%llu", - __entry->uuid, __entry->old_bucket, __entry->bucket, - __entry->level, __entry->id, - __entry->inode, __entry->offset) -); - -DEFINE_EVENT(btree_node, bcache_btree_gc_rewrite_node, +DEFINE_EVENT(btree_node, btree_gc_rewrite_node, TP_PROTO(struct bch_fs *c, struct btree *b), TP_ARGS(c, b) ); -DEFINE_EVENT(btree_node, bcache_btree_gc_rewrite_node_fail, +DEFINE_EVENT(btree_node, btree_gc_rewrite_node_fail, TP_PROTO(struct bch_fs *c, struct btree *b), TP_ARGS(c, b) ); -DEFINE_EVENT(cache_set, bcache_gc_start, +DEFINE_EVENT(bch_fs, gc_start, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(cache_set, bcache_gc_end, +DEFINE_EVENT(bch_fs, gc_end, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(cache_set, bcache_gc_coalesce_start, +DEFINE_EVENT(bch_fs, gc_coalesce_start, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(cache_set, bcache_gc_coalesce_end, +DEFINE_EVENT(bch_fs, gc_coalesce_end, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(cache, bcache_sectors_saturated, +DEFINE_EVENT(bch_dev, sectors_saturated, TP_PROTO(struct bch_dev *ca), TP_ARGS(ca) ); -DEFINE_EVENT(cache_set, bcache_gc_sectors_saturated, - TP_PROTO(struct bch_fs *c), - TP_ARGS(c) -); - -DEFINE_EVENT(cache_set, bcache_gc_cannot_inc_gens, +DEFINE_EVENT(bch_fs, 
gc_sectors_saturated, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DEFINE_EVENT(cache_set, bcache_gc_periodic, +DEFINE_EVENT(bch_fs, gc_cannot_inc_gens, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -TRACE_EVENT(bcache_mark_bucket, - TP_PROTO(struct bch_dev *ca, const struct bkey *k, - const struct bch_extent_ptr *ptr, - int sectors, bool dirty), - TP_ARGS(ca, k, ptr, sectors, dirty), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(u32, inode ) - __field(u64, offset ) - __field(u32, sectors ) - __field(u64, bucket ) - __field(bool, dirty ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, ca->uuid.b, 16); - __entry->inode = k->p.inode; - __entry->offset = k->p.offset; - __entry->sectors = sectors; - __entry->bucket = PTR_BUCKET_NR(ca, ptr); - __entry->dirty = dirty; - ), - - TP_printk("%pU %u:%llu sectors %i bucket %llu dirty %i", - __entry->uuid, __entry->inode, __entry->offset, - __entry->sectors, __entry->bucket, __entry->dirty) -); - /* Allocator */ -TRACE_EVENT(bcache_alloc_batch, +TRACE_EVENT(alloc_batch, TP_PROTO(struct bch_dev *ca, size_t free, size_t total), TP_ARGS(ca, free, total), @@ -813,37 +514,17 @@ TRACE_EVENT(bcache_alloc_batch, __entry->uuid, __entry->free, __entry->total) ); -TRACE_EVENT(bcache_btree_reserve_get_fail, - TP_PROTO(struct bch_fs *c, size_t required, struct closure *cl), - TP_ARGS(c, required, cl), - - TP_STRUCT__entry( - __array(char, uuid, 16 ) - __field(size_t, required ) - __field(struct closure *, cl ) - ), - - TP_fast_assign( - memcpy(__entry->uuid, c->sb.user_uuid.b, 16); - __entry->required = required; - __entry->cl = cl; - ), - - TP_printk("%pU required %zu by %p", __entry->uuid, - __entry->required, __entry->cl) -); - -DEFINE_EVENT(cache, bcache_prio_write_start, +DEFINE_EVENT(bch_dev, prio_write_start, TP_PROTO(struct bch_dev *ca), TP_ARGS(ca) ); -DEFINE_EVENT(cache, bcache_prio_write_end, +DEFINE_EVENT(bch_dev, prio_write_end, TP_PROTO(struct bch_dev *ca), TP_ARGS(ca) ); -TRACE_EVENT(bcache_invalidate, +TRACE_EVENT(invalidate, TP_PROTO(struct bch_dev *ca, size_t bucket, unsigned sectors), TP_ARGS(ca, bucket, sectors), @@ -864,12 +545,12 @@ TRACE_EVENT(bcache_invalidate, MINOR(__entry->dev), __entry->offset) ); -DEFINE_EVENT(cache_set, bcache_rescale_prios, +DEFINE_EVENT(bch_fs, rescale_prios, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -DECLARE_EVENT_CLASS(cache_bucket_alloc, +DECLARE_EVENT_CLASS(bucket_alloc, TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve), TP_ARGS(ca, reserve), @@ -886,17 +567,17 @@ DECLARE_EVENT_CLASS(cache_bucket_alloc, TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve) ); -DEFINE_EVENT(cache_bucket_alloc, bcache_bucket_alloc, +DEFINE_EVENT(bucket_alloc, bucket_alloc, TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve), TP_ARGS(ca, reserve) ); -DEFINE_EVENT(cache_bucket_alloc, bcache_bucket_alloc_fail, +DEFINE_EVENT(bucket_alloc, bucket_alloc_fail, TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve), TP_ARGS(ca, reserve) ); -TRACE_EVENT(bcache_freelist_empty_fail, +TRACE_EVENT(freelist_empty_fail, TP_PROTO(struct bch_fs *c, enum alloc_reserve reserve, struct closure *cl), TP_ARGS(c, reserve, cl), @@ -935,47 +616,16 @@ DECLARE_EVENT_CLASS(open_bucket_alloc, __entry->uuid, __entry->cl) ); -DEFINE_EVENT(open_bucket_alloc, bcache_open_bucket_alloc, +DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc, TP_PROTO(struct bch_fs *c, struct closure *cl), TP_ARGS(c, cl) ); -DEFINE_EVENT(open_bucket_alloc, bcache_open_bucket_alloc_fail, +DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc_fail, TP_PROTO(struct 
bch_fs *c, struct closure *cl), TP_ARGS(c, cl) ); -/* Keylists */ - -TRACE_EVENT(bcache_keyscan, - TP_PROTO(unsigned nr_found, - unsigned start_inode, u64 start_offset, - unsigned end_inode, u64 end_offset), - TP_ARGS(nr_found, - start_inode, start_offset, - end_inode, end_offset), - - TP_STRUCT__entry( - __field(__u32, nr_found ) - __field(__u32, start_inode ) - __field(__u64, start_offset ) - __field(__u32, end_inode ) - __field(__u64, end_offset ) - ), - - TP_fast_assign( - __entry->nr_found = nr_found; - __entry->start_inode = start_inode; - __entry->start_offset = start_offset; - __entry->end_inode = end_inode; - __entry->end_offset = end_offset; - ), - - TP_printk("found %u keys from %u:%llu to %u:%llu", __entry->nr_found, - __entry->start_inode, __entry->start_offset, - __entry->end_inode, __entry->end_offset) -); - /* Moving IO */ DECLARE_EVENT_CLASS(moving_io, @@ -998,44 +648,39 @@ DECLARE_EVENT_CLASS(moving_io, __entry->inode, __entry->offset, __entry->sectors) ); -DEFINE_EVENT(moving_io, bcache_move_read, +DEFINE_EVENT(moving_io, move_read, TP_PROTO(struct bkey *k), TP_ARGS(k) ); -DEFINE_EVENT(moving_io, bcache_move_read_done, +DEFINE_EVENT(moving_io, move_read_done, TP_PROTO(struct bkey *k), TP_ARGS(k) ); -DEFINE_EVENT(moving_io, bcache_move_write, +DEFINE_EVENT(moving_io, move_write, TP_PROTO(struct bkey *k), TP_ARGS(k) ); -DEFINE_EVENT(moving_io, bcache_move_write_done, - TP_PROTO(struct bkey *k), - TP_ARGS(k) -); - -DEFINE_EVENT(moving_io, bcache_copy_collision, +DEFINE_EVENT(moving_io, copy_collision, TP_PROTO(struct bkey *k), TP_ARGS(k) ); /* Copy GC */ -DEFINE_EVENT(page_alloc_fail, bcache_moving_gc_alloc_fail, +DEFINE_EVENT(page_alloc_fail, moving_gc_alloc_fail, TP_PROTO(struct bch_fs *c, u64 size), TP_ARGS(c, size) ); -DEFINE_EVENT(cache, bcache_moving_gc_start, +DEFINE_EVENT(bch_dev, moving_gc_start, TP_PROTO(struct bch_dev *ca), TP_ARGS(ca) ); -TRACE_EVENT(bcache_moving_gc_end, +TRACE_EVENT(moving_gc_end, TP_PROTO(struct bch_dev *ca, u64 sectors_moved, u64 keys_moved, u64 buckets_moved), TP_ARGS(ca, sectors_moved, keys_moved, buckets_moved), @@ -1059,44 +704,24 @@ TRACE_EVENT(bcache_moving_gc_end, __entry->buckets_moved) ); -DEFINE_EVENT(cache, bcache_moving_gc_reserve_empty, - TP_PROTO(struct bch_dev *ca), - TP_ARGS(ca) -); - -DEFINE_EVENT(cache, bcache_moving_gc_no_work, - TP_PROTO(struct bch_dev *ca), - TP_ARGS(ca) -); - -DEFINE_EVENT(bkey, bcache_gc_copy, +DEFINE_EVENT(bkey, gc_copy, TP_PROTO(const struct bkey *k), TP_ARGS(k) ); /* Tiering */ -DEFINE_EVENT(cache_set, bcache_tiering_refill_start, - TP_PROTO(struct bch_fs *c), - TP_ARGS(c) -); - -DEFINE_EVENT(cache_set, bcache_tiering_refill_end, - TP_PROTO(struct bch_fs *c), - TP_ARGS(c) -); - -DEFINE_EVENT(page_alloc_fail, bcache_tiering_alloc_fail, +DEFINE_EVENT(page_alloc_fail, tiering_alloc_fail, TP_PROTO(struct bch_fs *c, u64 size), TP_ARGS(c, size) ); -DEFINE_EVENT(cache_set, bcache_tiering_start, +DEFINE_EVENT(bch_fs, tiering_start, TP_PROTO(struct bch_fs *c), TP_ARGS(c) ); -TRACE_EVENT(bcache_tiering_end, +TRACE_EVENT(tiering_end, TP_PROTO(struct bch_fs *c, u64 sectors_moved, u64 keys_moved), TP_ARGS(c, sectors_moved, keys_moved), @@ -1117,54 +742,11 @@ TRACE_EVENT(bcache_tiering_end, __entry->uuid, __entry->sectors_moved, __entry->keys_moved) ); -DEFINE_EVENT(bkey, bcache_tiering_copy, +DEFINE_EVENT(bkey, tiering_copy, TP_PROTO(const struct bkey *k), TP_ARGS(k) ); -/* Background writeback */ - -DEFINE_EVENT(bkey, bcache_writeback, - TP_PROTO(const struct bkey *k), - TP_ARGS(k) -); - -DEFINE_EVENT(bkey, 
bcache_writeback_collision, - TP_PROTO(const struct bkey *k), - TP_ARGS(k) -); - -TRACE_EVENT(bcache_writeback_error, - TP_PROTO(struct bkey *k, bool write, int error), - TP_ARGS(k, write, error), - - TP_STRUCT__entry( - __field(u32, size ) - __field(u32, inode ) - __field(u64, offset ) - __field(bool, write ) - __field(int, error ) - ), - - TP_fast_assign( - __entry->inode = k->p.inode; - __entry->offset = k->p.offset; - __entry->size = k->size; - __entry->write = write; - __entry->error = error; - ), - - TP_printk("%u:%llu len %u %s error %d", __entry->inode, - __entry->offset, __entry->size, - __entry->write ? "write" : "read", - __entry->error) -); - -DEFINE_EVENT(page_alloc_fail, bcache_writeback_alloc_fail, - TP_PROTO(struct bch_fs *c, u64 size), - TP_ARGS(c, size) -); - #endif /* _TRACE_BCACHE_H */ /* This part must be outside protection */ diff --git a/libbcache/blockdev.c b/libbcache/blockdev.c deleted file mode 100644 index a4522ad2..00000000 --- a/libbcache/blockdev.c +++ /dev/null @@ -1,819 +0,0 @@ - -#include "bcache.h" -#include "blockdev.h" -#include "btree_iter.h" -#include "btree_update.h" -#include "checksum.h" -#include "error.h" -#include "inode.h" -#include "request.h" -#include "super-io.h" -#include "writeback.h" - -#include <linux/kthread.h> -#include <linux/module.h> -#include <linux/random.h> - -static int bch_blockdev_major; -static DEFINE_IDA(bch_blockdev_minor); -static LIST_HEAD(uncached_devices); -static DEFINE_MUTEX(bch_blockdev_lock); - -static struct kmem_cache *bch_search_cache; - -static void write_bdev_super_endio(struct bio *bio) -{ - struct cached_dev *dc = bio->bi_private; - /* XXX: error checking */ - - closure_put(&dc->sb_write); -} - -static void bch_write_bdev_super_unlock(struct closure *cl) -{ - struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); - - up(&dc->sb_write_mutex); -} - -void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) -{ - struct backingdev_sb *sb = dc->disk_sb.sb; - struct closure *cl = &dc->sb_write; - struct bio *bio = dc->disk_sb.bio; - - down(&dc->sb_write_mutex); - closure_init(cl, parent); - - sb->csum = csum_vstruct(NULL, BCH_CSUM_CRC64, - (struct nonce) { 0 }, sb).lo; - - bio_reset(bio); - bio->bi_bdev = dc->disk_sb.bdev; - bio->bi_iter.bi_sector = le64_to_cpu(sb->offset); - bio->bi_iter.bi_size = - roundup(vstruct_bytes(sb), - bdev_logical_block_size(dc->disk_sb.bdev)); - bio->bi_end_io = write_bdev_super_endio; - bio->bi_private = dc; - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FUA|REQ_META); - bch_bio_map(bio, sb); - - closure_get(cl); - - closure_return_with_destructor(cl, bch_write_bdev_super_unlock); -} - -static int open_dev(struct block_device *b, fmode_t mode) -{ - struct bcache_device *d = b->bd_disk->private_data; - - if (test_bit(BCACHE_DEV_CLOSING, &d->flags)) - return -ENXIO; - - closure_get(&d->cl); - return 0; -} - -static void release_dev(struct gendisk *b, fmode_t mode) -{ - struct bcache_device *d = b->private_data; - - closure_put(&d->cl); -} - -static int ioctl_dev(struct block_device *b, fmode_t mode, - unsigned int cmd, unsigned long arg) -{ - struct bcache_device *d = b->bd_disk->private_data; - - return d->ioctl(d, mode, cmd, arg); -} - -static const struct block_device_operations bcache_ops = { - .open = open_dev, - .release = release_dev, - .ioctl = ioctl_dev, - .owner = THIS_MODULE, -}; - -void bch_blockdev_stop(struct bcache_device *d) -{ - if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags)) - closure_queue(&d->cl); -} - -static void 
bcache_device_unlink(struct bcache_device *d) -{ - if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { - sysfs_remove_link(&d->c->kobj, d->name); - sysfs_remove_link(&d->kobj, "cache"); - } -} - -static void bcache_device_link(struct bcache_device *d, struct bch_fs *c, - const char *name) -{ - snprintf(d->name, BCACHEDEVNAME_SIZE, - "%s%llu", name, bcache_dev_inum(d)); - - WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") || - sysfs_create_link(&c->kobj, &d->kobj, d->name), - "Couldn't create device <-> cache set symlinks"); - - clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags); -} - -static void bcache_device_detach(struct bcache_device *d) -{ - if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { - mutex_lock(&d->inode_lock); - bch_inode_rm(d->c, bcache_dev_inum(d)); - mutex_unlock(&d->inode_lock); - } - - bcache_device_unlink(d); - - radix_tree_delete(&d->c->devices, bcache_dev_inum(d)); - - closure_put(&d->c->caching); - d->c = NULL; -} - -static int bcache_device_attach(struct bcache_device *d, struct bch_fs *c) -{ - int ret; - - ret = radix_tree_insert(&c->devices, bcache_dev_inum(d), d); - if (ret) { - pr_err("radix_tree_insert() error for inum %llu", - bcache_dev_inum(d)); - return ret; - } - - d->c = c; - closure_get(&c->caching); - - return ret; -} - -static void bcache_device_free(struct bcache_device *d) -{ - pr_info("%s stopped", d->disk->disk_name); - - if (d->c) - bcache_device_detach(d); - if (d->disk && d->disk->flags & GENHD_FL_UP) - del_gendisk(d->disk); - if (d->disk && d->disk->queue) - blk_cleanup_queue(d->disk->queue); - if (d->disk) { - ida_simple_remove(&bch_blockdev_minor, d->disk->first_minor); - put_disk(d->disk); - } - - bioset_exit(&d->bio_split); - - closure_debug_destroy(&d->cl); -} - -static int bcache_device_init(struct bcache_device *d, unsigned block_size, - sector_t sectors) -{ - struct request_queue *q; - int minor; - - mutex_init(&d->inode_lock); - - minor = ida_simple_get(&bch_blockdev_minor, 0, MINORMASK + 1, GFP_KERNEL); - if (minor < 0) { - pr_err("cannot allocate minor"); - return minor; - } - - if (!(d->disk = alloc_disk(1)) || - bioset_init(&d->bio_split, 4, offsetof(struct bch_read_bio, bio))) { - pr_err("cannot allocate disk"); - ida_simple_remove(&bch_blockdev_minor, minor); - return -ENOMEM; - } - - set_capacity(d->disk, sectors); - snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor); - - d->disk->major = bch_blockdev_major; - d->disk->first_minor = minor; - d->disk->fops = &bcache_ops; - d->disk->private_data = d; - - q = blk_alloc_queue(GFP_KERNEL); - if (!q) { - pr_err("cannot allocate queue"); - return -ENOMEM; - } - - blk_queue_make_request(q, NULL); - d->disk->queue = q; - q->queuedata = d; - q->backing_dev_info.congested_data = d; - q->limits.max_hw_sectors = UINT_MAX; - q->limits.max_sectors = UINT_MAX; - q->limits.max_segment_size = UINT_MAX; - q->limits.max_segments = BIO_MAX_PAGES; - blk_queue_max_discard_sectors(q, UINT_MAX); - q->limits.discard_granularity = 512; - q->limits.io_min = block_size; - q->limits.logical_block_size = block_size; - q->limits.physical_block_size = block_size; - set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags); - clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags); - set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags); - - blk_queue_write_cache(q, true, true); - - return 0; -} - -/* Cached device */ - -static void calc_cached_dev_sectors(struct bch_fs *c) -{ - u64 sectors = 0; - struct cached_dev *dc; - - list_for_each_entry(dc, &c->cached_devs, list) - 
sectors += bdev_sectors(dc->disk_sb.bdev); - - c->cached_dev_sectors = sectors; -} - -void bch_cached_dev_run(struct cached_dev *dc) -{ - struct bcache_device *d = &dc->disk; - char buf[BCH_SB_LABEL_SIZE + 1]; - char *env[] = { - "DRIVER=bcache", - kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", - dc->disk_sb.sb->disk_uuid.b), - NULL, - NULL, - }; - - memcpy(buf, dc->disk_sb.sb->label, BCH_SB_LABEL_SIZE); - buf[BCH_SB_LABEL_SIZE] = '\0'; - env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf); - - if (atomic_xchg(&dc->running, 1)) { - kfree(env[1]); - kfree(env[2]); - return; - } - - if (!d->c && - BDEV_STATE(dc->disk_sb.sb) != BDEV_STATE_NONE) { - struct closure cl; - - closure_init_stack(&cl); - - SET_BDEV_STATE(dc->disk_sb.sb, BDEV_STATE_STALE); - bch_write_bdev_super(dc, &cl); - closure_sync(&cl); - } - - add_disk(d->disk); - bd_link_disk_holder(dc->disk_sb.bdev, dc->disk.disk); - /* won't show up in the uevent file, use udevadm monitor -e instead - * only class / kset properties are persistent */ - kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); - kfree(env[1]); - kfree(env[2]); - - if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || - sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) - pr_debug("error creating sysfs link"); -} - -static void cached_dev_detach_finish(struct work_struct *w) -{ - struct cached_dev *dc = container_of(w, struct cached_dev, detach); - char buf[BDEVNAME_SIZE]; - struct closure cl; - - closure_init_stack(&cl); - - BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); - BUG_ON(atomic_read(&dc->count)); - - mutex_lock(&bch_blockdev_lock); - - memset(&dc->disk_sb.sb->set_uuid, 0, 16); - SET_BDEV_STATE(dc->disk_sb.sb, BDEV_STATE_NONE); - - bch_write_bdev_super(dc, &cl); - closure_sync(&cl); - - bcache_device_detach(&dc->disk); - list_move(&dc->list, &uncached_devices); - - clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); - clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); - - mutex_unlock(&bch_blockdev_lock); - - pr_info("Caching disabled for %s", bdevname(dc->disk_sb.bdev, buf)); - - /* Drop ref we took in cached_dev_detach() */ - closure_put(&dc->disk.cl); -} - -void bch_cached_dev_detach(struct cached_dev *dc) -{ - if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) - return; - - if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) - return; - - /* - * Block the device from being closed and freed until we're finished - * detaching - */ - closure_get(&dc->disk.cl); - - dc->writeback_pd.rate.rate = UINT_MAX; - bch_writeback_queue(dc); - cached_dev_put(dc); -} - -int bch_cached_dev_attach(struct cached_dev *dc, struct bch_fs *c) -{ - __le64 rtime = cpu_to_le64(ktime_get_seconds()); - char buf[BDEVNAME_SIZE]; - bool found; - int ret; - - lockdep_assert_held(&c->state_lock); - - bdevname(dc->disk_sb.bdev, buf); - - if (memcmp(&dc->disk_sb.sb->set_uuid, - &c->sb.uuid, - sizeof(c->sb.uuid))) - return -ENOENT; - - if (dc->disk.c) { - pr_err("Can't attach %s: already attached", buf); - return -EINVAL; - } - - if (!bch_fs_running(c)) { - pr_err("Can't attach %s: not running", buf); - return -EINVAL; - } - - if (le16_to_cpu(dc->disk_sb.sb->block_size) < c->sb.block_size) { - /* Will die */ - pr_err("Couldn't attach %s: block size less than set's block size", - buf); - return -EINVAL; - } - - found = !bch_cached_dev_inode_find_by_uuid(c, - &dc->disk_sb.sb->disk_uuid, - &dc->disk.inode); - - if (!found && BDEV_STATE(dc->disk_sb.sb) == BDEV_STATE_DIRTY) { - pr_err("Couldn't find uuid for %s in set", buf); - 
return -ENOENT; - } - - if (found && - (BDEV_STATE(dc->disk_sb.sb) == BDEV_STATE_STALE || - BDEV_STATE(dc->disk_sb.sb) == BDEV_STATE_NONE)) { - found = false; - bch_inode_rm(c, bcache_dev_inum(&dc->disk)); - } - - /* Deadlocks since we're called via sysfs... - sysfs_remove_file(&dc->kobj, &sysfs_attach); - */ - - if (!found) { - struct closure cl; - - closure_init_stack(&cl); - - bkey_inode_blockdev_init(&dc->disk.inode.k_i); - dc->disk.inode.k.type = BCH_INODE_BLOCKDEV; - SET_CACHED_DEV(&dc->disk.inode.v, true); - dc->disk.inode.v.i_uuid = dc->disk_sb.sb->disk_uuid; - memcpy(dc->disk.inode.v.i_label, - dc->disk_sb.sb->label, BCH_SB_LABEL_SIZE); - dc->disk.inode.v.i_ctime = rtime; - dc->disk.inode.v.i_mtime = rtime; - - ret = bch_inode_create(c, &dc->disk.inode.k_i, - 0, BLOCKDEV_INODE_MAX, - &c->unused_inode_hint); - if (ret) { - pr_err("Error %d, not caching %s", ret, buf); - return ret; - } - - pr_info("attached inode %llu", bcache_dev_inum(&dc->disk)); - - dc->disk_sb.sb->set_uuid = c->sb.uuid; - SET_BDEV_STATE(dc->disk_sb.sb, BDEV_STATE_CLEAN); - - bch_write_bdev_super(dc, &cl); - closure_sync(&cl); - } else { - dc->disk.inode.v.i_mtime = rtime; - bch_btree_update(c, BTREE_ID_INODES, - &dc->disk.inode.k_i, NULL); - } - - /* Count dirty sectors before attaching */ - if (BDEV_STATE(dc->disk_sb.sb) == BDEV_STATE_DIRTY) - bch_sectors_dirty_init(dc, c); - - ret = bcache_device_attach(&dc->disk, c); - if (ret) - return ret; - - list_move(&dc->list, &c->cached_devs); - calc_cached_dev_sectors(c); - - /* - * dc->c must be set before dc->count != 0 - paired with the mb in - * cached_dev_get() - */ - smp_wmb(); - atomic_set(&dc->count, 1); - - if (bch_cached_dev_writeback_start(dc)) - return -ENOMEM; - - if (BDEV_STATE(dc->disk_sb.sb) == BDEV_STATE_DIRTY) { - atomic_set(&dc->has_dirty, 1); - atomic_inc(&dc->count); - } - - bch_cached_dev_run(dc); - bcache_device_link(&dc->disk, c, "bdev"); - - pr_info("Caching %s as %s on set %pU", - bdevname(dc->disk_sb.bdev, buf), dc->disk.disk->disk_name, - dc->disk.c->sb.uuid.b); - return 0; -} - -void bch_attach_backing_devs(struct bch_fs *c) -{ - struct cached_dev *dc, *t; - - lockdep_assert_held(&c->state_lock); - - mutex_lock(&bch_blockdev_lock); - - list_for_each_entry_safe(dc, t, &uncached_devices, list) - bch_cached_dev_attach(dc, c); - - mutex_unlock(&bch_blockdev_lock); -} - -void bch_cached_dev_release(struct kobject *kobj) -{ - struct cached_dev *dc = container_of(kobj, struct cached_dev, - disk.kobj); - kfree(dc); - module_put(THIS_MODULE); -} - -static void cached_dev_free(struct closure *cl) -{ - struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); - - bch_cached_dev_writeback_stop(dc); - bch_cached_dev_writeback_free(dc); - - mutex_lock(&bch_blockdev_lock); - - if (atomic_read(&dc->running)) - bd_unlink_disk_holder(dc->disk_sb.bdev, dc->disk.disk); - bcache_device_free(&dc->disk); - list_del(&dc->list); - - mutex_unlock(&bch_blockdev_lock); - - bch_free_super((void *) &dc->disk_sb); - - kobject_put(&dc->disk.kobj); -} - -static void cached_dev_flush(struct closure *cl) -{ - struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); - struct bcache_device *d = &dc->disk; - - bch_cache_accounting_destroy(&dc->accounting); - bcache_device_unlink(d); - kobject_del(&d->kobj); - - continue_at(cl, cached_dev_free, system_wq); -} - -static int cached_dev_init(struct cached_dev *dc, unsigned block_size) -{ - int ret; - struct io *io; - struct request_queue *q = bdev_get_queue(dc->disk_sb.bdev); - - dc->sequential_cutoff = 4 
<< 20; - - for (io = dc->io; io < dc->io + RECENT_IO; io++) { - list_add(&io->lru, &dc->io_lru); - hlist_add_head(&io->hash, dc->io_hash + RECENT_IO); - } - - dc->disk.stripe_size = q->limits.io_opt >> 9; - - if (dc->disk.stripe_size) - dc->partial_stripes_expensive = - q->limits.raid_partial_stripes_expensive; - - ret = bcache_device_init(&dc->disk, block_size, - dc->disk_sb.bdev->bd_part->nr_sects - - le64_to_cpu(dc->disk_sb.sb->data_offset)); - if (ret) - return ret; - - dc->disk.disk->queue->backing_dev_info.ra_pages = - max(dc->disk.disk->queue->backing_dev_info.ra_pages, - q->backing_dev_info.ra_pages); - - bch_cached_dev_request_init(dc); - ret = bch_cached_dev_writeback_init(dc); - if (ret) - return ret; - - return 0; -} - -/* Cached device - bcache superblock */ - -static const char *bdev_validate_super(struct backingdev_sb *sb) -{ - switch (le64_to_cpu(sb->version)) { - case BCACHE_SB_VERSION_BDEV: - sb->data_offset = cpu_to_le64(BDEV_DATA_START_DEFAULT); - break; - case BCACHE_SB_VERSION_BDEV_WITH_OFFSET: - if (le64_to_cpu(sb->data_offset) < BDEV_DATA_START_DEFAULT) - return "Bad data offset"; - - break; - default: - return"Unsupported superblock version"; - } - - sb->last_mount = cpu_to_le32(get_seconds()); - - return NULL; -} - -const char *bch_backing_dev_register(struct bcache_superblock *sb) -{ - char name[BDEVNAME_SIZE]; - const char *err; - struct bch_fs *c; - struct cached_dev *dc; - - dc = kzalloc(sizeof(*dc), GFP_KERNEL); - if (!dc) - return "cannot allocate memory"; - - __module_get(THIS_MODULE); - INIT_LIST_HEAD(&dc->list); - closure_init(&dc->disk.cl, NULL); - set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq); - kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype); - INIT_WORK(&dc->detach, cached_dev_detach_finish); - sema_init(&dc->sb_write_mutex, 1); - INIT_LIST_HEAD(&dc->io_lru); - spin_lock_init(&dc->io_lock); - bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); - - memcpy(&dc->disk_sb, sb, sizeof(*sb)); - dc->disk_sb.bdev->bd_holder = dc; - memset(sb, 0, sizeof(*sb)); - - err = bdev_validate_super(dc->disk_sb.sb); - if (err) - goto err; - - if (cached_dev_init(dc, le16_to_cpu(dc->disk_sb.sb->block_size) << 9)) - goto err; - - err = "error creating kobject"; - if (kobject_add(&dc->disk.kobj, - &part_to_dev(dc->disk_sb.bdev->bd_part)->kobj, - "bcache")) - goto err; - - err = "error accounting kobject"; - if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj)) - goto err; - - pr_info("registered backing device %s", - bdevname(dc->disk_sb.bdev, name)); - - list_add(&dc->list, &uncached_devices); - c = bch_uuid_to_fs(dc->disk_sb.sb->set_uuid); - if (c) { - bch_cached_dev_attach(dc, c); - closure_put(&c->cl); - } - - if (BDEV_STATE(dc->disk_sb.sb) == BDEV_STATE_NONE || - BDEV_STATE(dc->disk_sb.sb) == BDEV_STATE_STALE) - bch_cached_dev_run(dc); - - return NULL; -err: - bch_blockdev_stop(&dc->disk); - return err; -} - -/* Flash only volumes */ - -void bch_blockdev_volume_release(struct kobject *kobj) -{ - struct bcache_device *d = container_of(kobj, struct bcache_device, - kobj); - kfree(d); -} - -static void blockdev_volume_free(struct closure *cl) -{ - struct bcache_device *d = container_of(cl, struct bcache_device, cl); - - bcache_device_free(d); - kobject_put(&d->kobj); -} - -static void blockdev_volume_flush(struct closure *cl) -{ - struct bcache_device *d = container_of(cl, struct bcache_device, cl); - - bcache_device_unlink(d); - kobject_del(&d->kobj); - continue_at(cl, blockdev_volume_free, system_wq); -} - -static int 
blockdev_volume_run(struct bch_fs *c, - struct bkey_s_c_inode_blockdev inode) -{ - struct bcache_device *d = kzalloc(sizeof(struct bcache_device), - GFP_KERNEL); - int ret = -ENOMEM; - - if (!d) - return ret; - - bkey_reassemble(&d->inode.k_i, inode.s_c); - - closure_init(&d->cl, NULL); - set_closure_fn(&d->cl, blockdev_volume_flush, system_wq); - - kobject_init(&d->kobj, &bch_blockdev_volume_ktype); - - ret = bcache_device_init(d, block_bytes(c), - le64_to_cpu(inode.v->i_size) >> 9); - if (ret) - goto err; - - ret = bcache_device_attach(d, c); - if (ret) - goto err; - - bch_blockdev_volume_request_init(d); - add_disk(d->disk); - - if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache")) - goto err; - - bcache_device_link(d, c, "volume"); - - return 0; -err: - kobject_put(&d->kobj); - return ret; -} - -int bch_blockdev_volumes_start(struct bch_fs *c) -{ - struct btree_iter iter; - struct bkey_s_c k; - struct bkey_s_c_inode_blockdev inode; - int ret = 0; - - if (!bch_fs_running(c)) - return -EINVAL; - - for_each_btree_key(&iter, c, BTREE_ID_INODES, POS_MIN, k) { - if (k.k->p.inode >= BLOCKDEV_INODE_MAX) - break; - - if (k.k->type != BCH_INODE_BLOCKDEV) - continue; - - inode = bkey_s_c_to_inode_blockdev(k); - - ret = blockdev_volume_run(c, inode); - if (ret) - break; - } - bch_btree_iter_unlock(&iter); - - return ret; -} - -int bch_blockdev_volume_create(struct bch_fs *c, u64 size) -{ - __le64 rtime = cpu_to_le64(ktime_get_seconds()); - struct bkey_i_inode_blockdev inode; - int ret; - - bkey_inode_blockdev_init(&inode.k_i); - get_random_bytes(&inode.v.i_uuid, sizeof(inode.v.i_uuid)); - inode.v.i_ctime = rtime; - inode.v.i_mtime = rtime; - inode.v.i_size = cpu_to_le64(size); - - ret = bch_inode_create(c, &inode.k_i, 0, BLOCKDEV_INODE_MAX, - &c->unused_inode_hint); - if (ret) { - pr_err("Can't create volume: %d", ret); - return ret; - } - - return blockdev_volume_run(c, inode_blockdev_i_to_s_c(&inode)); -} - -void bch_blockdevs_stop(struct bch_fs *c) -{ - struct cached_dev *dc; - struct bcache_device *d; - struct radix_tree_iter iter; - void **slot; - - mutex_lock(&bch_blockdev_lock); - rcu_read_lock(); - - radix_tree_for_each_slot(slot, &c->devices, &iter, 0) { - d = radix_tree_deref_slot(slot); - - if (CACHED_DEV(&d->inode.v) && - test_bit(BCH_FS_DETACHING, &c->flags)) { - dc = container_of(d, struct cached_dev, disk); - bch_cached_dev_detach(dc); - } else { - bch_blockdev_stop(d); - } - } - - rcu_read_unlock(); - mutex_unlock(&bch_blockdev_lock); -} - -void bch_fs_blockdev_exit(struct bch_fs *c) -{ - mempool_exit(&c->search); -} - -int bch_fs_blockdev_init(struct bch_fs *c) -{ - return mempool_init_slab_pool(&c->search, 1, bch_search_cache); -} - -void bch_blockdev_exit(void) -{ - kmem_cache_destroy(bch_search_cache); - - if (bch_blockdev_major >= 0) - unregister_blkdev(bch_blockdev_major, "bcache"); -} - -int __init bch_blockdev_init(void) -{ - bch_blockdev_major = register_blkdev(0, "bcache"); - if (bch_blockdev_major < 0) - return bch_blockdev_major; - - bch_search_cache = KMEM_CACHE(search, 0); - if (!bch_search_cache) - return -ENOMEM; - - return 0; -} diff --git a/libbcache/blockdev.h b/libbcache/blockdev.h deleted file mode 100644 index 5423d776..00000000 --- a/libbcache/blockdev.h +++ /dev/null @@ -1,134 +0,0 @@ -#ifndef _BCACHE_BLOCKDEV_H -#define _BCACHE_BLOCKDEV_H - -#include "blockdev_types.h" -#include "io_types.h" - -struct search { - /* Stack frame for bio_complete */ - struct closure cl; - - union { - struct bch_read_bio rbio; - struct bch_write_bio wbio; - }; - /* 
Not modified */ - struct bio *orig_bio; - struct bcache_device *d; - - unsigned inode; - unsigned write:1; - - /* Flags only used for reads */ - unsigned recoverable:1; - unsigned read_dirty_data:1; - unsigned cache_miss:1; - - /* - * For reads: bypass read from cache and insertion into cache - * For writes: discard key range from cache, sending the write to - * the backing device (if there is a backing device) - */ - unsigned bypass:1; - - unsigned long start_time; - - /* - * Mostly only used for writes. For reads, we still make use of - * some trivial fields: - * - c - * - error - */ - struct bch_write_op iop; -}; - -#ifndef NO_BCACHE_BLOCKDEV - -extern struct kobj_type bch_cached_dev_ktype; -extern struct kobj_type bch_blockdev_volume_ktype; - -void bch_write_bdev_super(struct cached_dev *, struct closure *); - -void bch_cached_dev_release(struct kobject *); -void bch_blockdev_volume_release(struct kobject *); - -int bch_cached_dev_attach(struct cached_dev *, struct bch_fs *); -void bch_attach_backing_devs(struct bch_fs *); - -void bch_cached_dev_detach(struct cached_dev *); -void bch_cached_dev_run(struct cached_dev *); -void bch_blockdev_stop(struct bcache_device *); - -const char *bch_backing_dev_register(struct bcache_superblock *); - -int bch_blockdev_volume_create(struct bch_fs *, u64); -int bch_blockdev_volumes_start(struct bch_fs *); - -void bch_blockdevs_stop(struct bch_fs *); - -void bch_fs_blockdev_exit(struct bch_fs *); -int bch_fs_blockdev_init(struct bch_fs *); -void bch_blockdev_exit(void); -int bch_blockdev_init(void); - -#else - -static inline void bch_write_bdev_super(struct cached_dev *dc, - struct closure *cl) {} - -static inline void bch_cached_dev_release(struct kobject *kobj) {} -static inline void bch_blockdev_volume_release(struct kobject *kobj) {} - -static inline int bch_cached_dev_attach(struct cached_dev *dc, struct bch_fs *c) -{ - return 0; -} -static inline void bch_attach_backing_devs(struct bch_fs *c) {} - -static inline void bch_cached_dev_detach(struct cached_dev *dc) {} -static inline void bch_cached_dev_run(struct cached_dev *dc) {} -static inline void bch_blockdev_stop(struct bcache_device *d) {} - -static inline const char *bch_backing_dev_register(struct bcache_superblock *sb) -{ - return "not implemented"; -} - -static inline int bch_blockdev_volume_create(struct bch_fs *c, u64 s) { return 0; } -static inline int bch_blockdev_volumes_start(struct bch_fs *c) { return 0; } - -static inline void bch_blockdevs_stop(struct bch_fs *c) {} -static inline void bch_fs_blockdev_exit(struct bch_fs *c) {} -static inline int bch_fs_blockdev_init(struct bch_fs *c) { return 0; } -static inline void bch_blockdev_exit(void) {} -static inline int bch_blockdev_init(void) { return 0; } - -#endif - -static inline void cached_dev_put(struct cached_dev *dc) -{ - if (atomic_dec_and_test(&dc->count)) - schedule_work(&dc->detach); -} - -static inline bool cached_dev_get(struct cached_dev *dc) -{ - if (!atomic_inc_not_zero(&dc->count)) - return false; - - /* Paired with the mb in cached_dev_attach */ - smp_mb__after_atomic(); - return true; -} - -static inline u64 bcache_dev_inum(struct bcache_device *d) -{ - return d->inode.k.p.inode; -} - -static inline struct bcache_device *bch_dev_find(struct bch_fs *c, u64 inode) -{ - return radix_tree_lookup(&c->devices, inode); -} - -#endif /* _BCACHE_BLOCKDEV_H */ diff --git a/libbcache/blockdev_types.h b/libbcache/blockdev_types.h deleted file mode 100644 index e5172004..00000000 --- a/libbcache/blockdev_types.h +++ /dev/null @@ 
-1,123 +0,0 @@ -#ifndef _BCACHE_BLOCKDEV_TYPES_H -#define _BCACHE_BLOCKDEV_TYPES_H - -#include "keybuf_types.h" -#include "stats_types.h" -#include "super_types.h" -#include "util.h" - -struct bcache_device { - struct closure cl; - - struct kobject kobj; - - struct bch_fs *c; - - struct rb_node node; - struct bkey_i_inode_blockdev inode; - struct mutex inode_lock; - -#define BCACHEDEVNAME_SIZE 12 - char name[BCACHEDEVNAME_SIZE]; - - struct gendisk *disk; - - unsigned long flags; -#define BCACHE_DEV_CLOSING 0 -#define BCACHE_DEV_DETACHING 1 -#define BCACHE_DEV_UNLINK_DONE 2 - - unsigned nr_stripes; - unsigned stripe_size; - atomic_t *stripe_sectors_dirty; - unsigned long *full_dirty_stripes; - - struct bio_set bio_split; - - unsigned data_csum:1; - - int (*ioctl)(struct bcache_device *, fmode_t, unsigned, unsigned long); -}; - -struct io { - /* Used to track sequential IO so it can be skipped */ - struct hlist_node hash; - struct list_head lru; - - unsigned long last_io; - unsigned sequential; - sector_t last; -}; - -struct cached_dev { - struct list_head list; - struct bcache_device disk; - - //struct backingdev_sb sb; - - struct { - struct backingdev_sb *sb; - struct block_device *bdev; - struct bio *bio; - unsigned page_order; - } disk_sb; - struct closure sb_write; - struct semaphore sb_write_mutex; - - /* Refcount on the cache set. Always nonzero when we're caching. */ - atomic_t count; - struct work_struct detach; - - /* - * Device might not be running if it's dirty and the cache set hasn't - * showed up yet. - */ - atomic_t running; - - /* - * Writes take a shared lock from start to finish; scanning for dirty - * data to refill the rb tree requires an exclusive lock. - */ - struct rw_semaphore writeback_lock; - - /* - * Nonzero, and writeback has a refcount (d->count), iff there is dirty - * data in the cache. Protected by writeback_lock; must have an - * shared lock to set and exclusive lock to clear. 
- */ - atomic_t has_dirty; - - /* for dynamic rate control of writeback */ - struct bch_pd_controller writeback_pd; - struct delayed_work writeback_pd_update; - unsigned writeback_pd_update_seconds; - - struct task_struct *writeback_thread; - struct keybuf writeback_keys; - mempool_t writeback_io_pool; - mempool_t writeback_page_pool; - - /* For tracking sequential IO */ -#define RECENT_IO_BITS 7 -#define RECENT_IO (1 << RECENT_IO_BITS) - struct io io[RECENT_IO]; - struct hlist_head io_hash[RECENT_IO + 1]; - struct list_head io_lru; - spinlock_t io_lock; - - struct cache_accounting accounting; - - /* The rest of this all shows up in sysfs */ - unsigned sequential_cutoff; - unsigned readahead; - - unsigned verify:1; - unsigned bypass_torture_test:1; - - unsigned partial_stripes_expensive:1; - unsigned writeback_metadata:1; - unsigned writeback_running:1; - unsigned char writeback_percent; -}; - -#endif /* _BCACHE_BLOCKDEV_TYPES_H */ diff --git a/libbcache/chardev.h b/libbcache/chardev.h deleted file mode 100644 index 61a4c2b5..00000000 --- a/libbcache/chardev.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _BCACHE_CHARDEV_H -#define _BCACHE_CHARDEV_H - -#ifndef NO_BCACHE_CHARDEV - -long bch_fs_ioctl(struct bch_fs *, unsigned, void __user *); - -void bch_fs_chardev_exit(struct bch_fs *); -int bch_fs_chardev_init(struct bch_fs *); - -void bch_chardev_exit(void); -int __init bch_chardev_init(void); - -#else - -static inline long bch_fs_ioctl(struct bch_fs *c, - unsigned cmd, void __user * arg) -{ - return -ENOSYS; -} - -static inline void bch_fs_chardev_exit(struct bch_fs *c) {} -static inline int bch_fs_chardev_init(struct bch_fs *c) { return 0; } - -static inline void bch_chardev_exit(void) {} -static inline int __init bch_chardev_init(void) { return 0; } - -#endif - -#endif /* _BCACHE_CHARDEV_H */ diff --git a/libbcache/clock.h b/libbcache/clock.h deleted file mode 100644 index 9e081d7d..00000000 --- a/libbcache/clock.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _BCACHE_CLOCK_H -#define _BCACHE_CLOCK_H - -void bch_io_timer_add(struct io_clock *, struct io_timer *); -void bch_io_timer_del(struct io_clock *, struct io_timer *); -void bch_kthread_io_clock_wait(struct io_clock *, unsigned long); -void bch_increment_clock(struct bch_fs *, unsigned, int); - -void bch_io_clock_schedule_timeout(struct io_clock *, unsigned long); - -#define bch_kthread_wait_event_ioclock_timeout(condition, clock, timeout)\ -({ \ - long __ret = timeout; \ - might_sleep(); \ - if (!___wait_cond_timeout(condition)) \ - __ret = __wait_event_timeout(wq, condition, timeout); \ - __ret; \ -}) - -void bch_io_clock_exit(struct io_clock *); -int bch_io_clock_init(struct io_clock *); - -#endif /* _BCACHE_CLOCK_H */ diff --git a/libbcache/compress.h b/libbcache/compress.h deleted file mode 100644 index e8d208a0..00000000 --- a/libbcache/compress.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _BCACHE_COMPRESS_H -#define _BCACHE_COMPRESS_H - -int bch_bio_uncompress_inplace(struct bch_fs *, struct bio *, - unsigned, struct bch_extent_crc128); -int bch_bio_uncompress(struct bch_fs *, struct bio *, struct bio *, - struct bvec_iter, struct bch_extent_crc128); -void bch_bio_compress(struct bch_fs *, struct bio *, size_t *, - struct bio *, size_t *, unsigned *); - -int bch_check_set_has_compressed_data(struct bch_fs *, unsigned); -void bch_fs_compress_exit(struct bch_fs *); -int bch_fs_compress_init(struct bch_fs *); - -#endif /* _BCACHE_COMPRESS_H */ diff --git a/libbcache/debug.h b/libbcache/debug.h deleted file mode 100644 index 
63e74304..00000000 --- a/libbcache/debug.h +++ /dev/null @@ -1,65 +0,0 @@ -#ifndef _BCACHE_DEBUG_H -#define _BCACHE_DEBUG_H - -#include "bcache.h" - -struct bio; -struct btree; -struct cached_dev; -struct bch_fs; - -#define BCH_DEBUG_PARAM(name, description) extern bool bch_##name; -BCH_DEBUG_PARAMS() -#undef BCH_DEBUG_PARAM - -#define BCH_DEBUG_PARAM(name, description) \ - static inline bool name(struct bch_fs *c) \ - { return bch_##name || c->name; } -BCH_DEBUG_PARAMS_ALWAYS() -#undef BCH_DEBUG_PARAM - -#ifdef CONFIG_BCACHE_DEBUG - -#define BCH_DEBUG_PARAM(name, description) \ - static inline bool name(struct bch_fs *c) \ - { return bch_##name || c->name; } -BCH_DEBUG_PARAMS_DEBUG() -#undef BCH_DEBUG_PARAM - -void __bch_btree_verify(struct bch_fs *, struct btree *); -void bch_data_verify(struct cached_dev *, struct bio *); - -#define bypass_torture_test(d) ((d)->bypass_torture_test) - -#else /* DEBUG */ - -#define BCH_DEBUG_PARAM(name, description) \ - static inline bool name(struct bch_fs *c) { return false; } -BCH_DEBUG_PARAMS_DEBUG() -#undef BCH_DEBUG_PARAM - -static inline void __bch_btree_verify(struct bch_fs *c, struct btree *b) {} -static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {} - -#define bypass_torture_test(d) 0 - -#endif - -static inline void bch_btree_verify(struct bch_fs *c, struct btree *b) -{ - if (verify_btree_ondisk(c)) - __bch_btree_verify(c, b); -} - -#ifdef CONFIG_DEBUG_FS -void bch_fs_debug_exit(struct bch_fs *); -void bch_fs_debug_init(struct bch_fs *); -#else -static inline void bch_fs_debug_exit(struct bch_fs *c) {} -static inline void bch_fs_debug_init(struct bch_fs *c) {} -#endif - -void bch_debug_exit(void); -int bch_debug_init(void); - -#endif diff --git a/libbcache/dirent.h b/libbcache/dirent.h deleted file mode 100644 index 158d4cae..00000000 --- a/libbcache/dirent.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef _BCACHE_DIRENT_H -#define _BCACHE_DIRENT_H - -extern const struct bkey_ops bch_bkey_dirent_ops; - -struct qstr; -struct file; -struct dir_context; -struct bch_fs; -struct bch_hash_info; - -unsigned bch_dirent_name_bytes(struct bkey_s_c_dirent); -int bch_dirent_create(struct bch_fs *c, u64, const struct bch_hash_info *, - u8, const struct qstr *, u64, u64 *, int); -int bch_dirent_delete(struct bch_fs *, u64, const struct bch_hash_info *, - const struct qstr *, u64 *); - -enum bch_rename_mode { - BCH_RENAME, - BCH_RENAME_OVERWRITE, - BCH_RENAME_EXCHANGE, -}; - -int bch_dirent_rename(struct bch_fs *, - struct inode *, const struct qstr *, - struct inode *, const struct qstr *, - u64 *, enum bch_rename_mode); - -u64 bch_dirent_lookup(struct bch_fs *, u64, const struct bch_hash_info *, - const struct qstr *); - -int bch_empty_dir(struct bch_fs *, u64); -int bch_readdir(struct bch_fs *, struct file *, struct dir_context *); - -#endif /* _BCACHE_DIRENT_H */ - diff --git a/libbcache/error.c b/libbcache/error.c deleted file mode 100644 index ba46d2d1..00000000 --- a/libbcache/error.c +++ /dev/null @@ -1,140 +0,0 @@ -#include "bcache.h" -#include "error.h" -#include "io.h" -#include "notify.h" -#include "super.h" - -void bch_inconsistent_error(struct bch_fs *c) -{ - set_bit(BCH_FS_ERROR, &c->flags); - - switch (c->opts.errors) { - case BCH_ON_ERROR_CONTINUE: - break; - case BCH_ON_ERROR_RO: - if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) { - /* XXX do something better here? 
*/ - bch_fs_stop_async(c); - return; - } - - if (bch_fs_emergency_read_only(c)) - bch_err(c, "emergency read only"); - break; - case BCH_ON_ERROR_PANIC: - panic(bch_fmt(c, "panic after error")); - break; - } -} - -void bch_fatal_error(struct bch_fs *c) -{ - if (bch_fs_emergency_read_only(c)) - bch_err(c, "emergency read only"); -} - -/* Nonfatal IO errors, IO error/latency accounting: */ - -/* Just does IO error accounting: */ -void bch_account_io_completion(struct bch_dev *ca) -{ - /* - * The halflife of an error is: - * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh - */ - - if (ca->fs->error_decay) { - unsigned count = atomic_inc_return(&ca->io_count); - - while (count > ca->fs->error_decay) { - unsigned errors; - unsigned old = count; - unsigned new = count - ca->fs->error_decay; - - /* - * First we subtract refresh from count; each time we - * succesfully do so, we rescale the errors once: - */ - - count = atomic_cmpxchg(&ca->io_count, old, new); - - if (count == old) { - count = new; - - errors = atomic_read(&ca->io_errors); - do { - old = errors; - new = ((uint64_t) errors * 127) / 128; - errors = atomic_cmpxchg(&ca->io_errors, - old, new); - } while (old != errors); - } - } - } -} - -/* IO error accounting and latency accounting: */ -void bch_account_io_completion_time(struct bch_dev *ca, - unsigned submit_time_us, int op) -{ - struct bch_fs *c; - unsigned threshold; - - if (!ca) - return; - - c = ca->fs; - threshold = op_is_write(op) - ? c->congested_write_threshold_us - : c->congested_read_threshold_us; - - if (threshold && submit_time_us) { - unsigned t = local_clock_us(); - - int us = t - submit_time_us; - int congested = atomic_read(&c->congested); - - if (us > (int) threshold) { - int ms = us / 1024; - c->congested_last_us = t; - - ms = min(ms, CONGESTED_MAX + congested); - atomic_sub(ms, &c->congested); - } else if (congested < 0) - atomic_inc(&c->congested); - } - - bch_account_io_completion(ca); -} - -void bch_nonfatal_io_error_work(struct work_struct *work) -{ - struct bch_dev *ca = container_of(work, struct bch_dev, io_error_work); - struct bch_fs *c = ca->fs; - unsigned errors = atomic_read(&ca->io_errors); - bool dev; - - if (errors < c->error_limit) { - bch_notify_dev_error(ca, false); - } else { - bch_notify_dev_error(ca, true); - - mutex_lock(&c->state_lock); - dev = bch_dev_state_allowed(c, ca, BCH_MEMBER_STATE_RO, - BCH_FORCE_IF_DEGRADED); - if (dev - ? __bch_dev_set_state(c, ca, BCH_MEMBER_STATE_RO, - BCH_FORCE_IF_DEGRADED) - : bch_fs_emergency_read_only(c)) - bch_err(ca, - "too many IO errors, setting %s RO", - dev ? 
"device" : "filesystem"); - mutex_unlock(&c->state_lock); - } -} - -void bch_nonfatal_io_error(struct bch_dev *ca) -{ - atomic_add(1 << IO_ERROR_SHIFT, &ca->io_errors); - queue_work(system_long_wq, &ca->io_error_work); -} diff --git a/libbcache/fs-gc.h b/libbcache/fs-gc.h deleted file mode 100644 index ac86fd22..00000000 --- a/libbcache/fs-gc.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _BCACHE_FS_GC_H -#define _BCACHE_FS_GC_H - -s64 bch_count_inode_sectors(struct bch_fs *, u64); -int bch_fsck(struct bch_fs *, bool); - -#endif /* _BCACHE_FS_GC_H */ diff --git a/libbcache/fs-io.h b/libbcache/fs-io.h deleted file mode 100644 index 4c428978..00000000 --- a/libbcache/fs-io.h +++ /dev/null @@ -1,96 +0,0 @@ -#ifndef _BCACHE_FS_IO_H -#define _BCACHE_FS_IO_H - -#include "buckets.h" -#include <linux/uio.h> - -int bch_set_page_dirty(struct page *); - -int bch_writepage(struct page *, struct writeback_control *); -int bch_readpage(struct file *, struct page *); - -int bch_writepages(struct address_space *, struct writeback_control *); -int bch_readpages(struct file *, struct address_space *, - struct list_head *, unsigned); - -int bch_write_begin(struct file *, struct address_space *, loff_t, - unsigned, unsigned, struct page **, void **); -int bch_write_end(struct file *, struct address_space *, loff_t, - unsigned, unsigned, struct page *, void *); - -ssize_t bch_direct_IO(struct kiocb *, struct iov_iter *); - -ssize_t bch_write_iter(struct kiocb *, struct iov_iter *); - -int bch_fsync(struct file *, loff_t, loff_t, int); - -int bch_truncate(struct inode *, struct iattr *); -long bch_fallocate_dispatch(struct file *, int, loff_t, loff_t); - -loff_t bch_llseek(struct file *, loff_t, int); - -int bch_page_mkwrite(struct vm_area_struct *, struct vm_fault *); -void bch_invalidatepage(struct page *, unsigned int, unsigned int); -int bch_releasepage(struct page *, gfp_t); -int bch_migrate_page(struct address_space *, struct page *, - struct page *, enum migrate_mode); - -struct i_sectors_hook { - struct extent_insert_hook hook; - s64 sectors; - struct bch_inode_info *ei; -}; - -struct bchfs_write_op { - struct bch_inode_info *ei; - s64 sectors_added; - bool is_dio; - u64 new_i_size; - struct bch_write_op op; -}; - -struct bch_writepage_io { - struct closure cl; - - struct bchfs_write_op op; - - /* must come last: */ - struct bch_write_bio bio; -}; - -extern struct bio_set *bch_writepage_bioset; - -struct dio_write { - struct closure cl; - struct kiocb *req; - struct bch_fs *c; - long written; - long error; - loff_t offset; - - struct disk_reservation res; - - struct iovec *iovec; - struct iovec inline_vecs[UIO_FASTIOV]; - struct iov_iter iter; - - struct mm_struct *mm; - - struct bchfs_write_op iop; - - /* must be last: */ - struct bch_write_bio bio; -}; - -extern struct bio_set *bch_dio_write_bioset; - -struct dio_read { - struct closure cl; - struct kiocb *req; - long ret; - struct bch_read_bio rbio; -}; - -extern struct bio_set *bch_dio_read_bioset; - -#endif /* _BCACHE_FS_IO_H */ diff --git a/libbcache/io.h b/libbcache/io.h deleted file mode 100644 index 9239ca4a..00000000 --- a/libbcache/io.h +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef _BCACHE_IO_H -#define _BCACHE_IO_H - -#include "io_types.h" - -#define to_wbio(_bio) \ - container_of((_bio), struct bch_write_bio, bio) - -#define to_rbio(_bio) \ - container_of((_bio), struct bch_read_bio, bio) - -void bch_bio_free_pages_pool(struct bch_fs *, struct bio *); -void bch_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t); - -enum bch_write_flags { - 
BCH_WRITE_ALLOC_NOWAIT = (1 << 0), - BCH_WRITE_DISCARD = (1 << 1), - BCH_WRITE_CACHED = (1 << 2), - BCH_WRITE_FLUSH = (1 << 3), - BCH_WRITE_DISCARD_ON_ERROR = (1 << 4), - BCH_WRITE_DATA_COMPRESSED = (1 << 5), - - /* Internal: */ - BCH_WRITE_JOURNAL_SEQ_PTR = (1 << 6), - BCH_WRITE_DONE = (1 << 7), - BCH_WRITE_LOOPED = (1 << 8), -}; - -static inline u64 *op_journal_seq(struct bch_write_op *op) -{ - return (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR) - ? op->journal_seq_p : &op->journal_seq; -} - -static inline struct write_point *foreground_write_point(struct bch_fs *c, - unsigned long v) -{ - return c->write_points + - hash_long(v, ilog2(ARRAY_SIZE(c->write_points))); -} - -void bch_write_op_init(struct bch_write_op *, struct bch_fs *, - struct bch_write_bio *, - struct disk_reservation, struct write_point *, - struct bpos, u64 *, unsigned); -void bch_write(struct closure *); - -struct cache_promote_op; - -struct extent_pick_ptr; - -void bch_read_extent_iter(struct bch_fs *, struct bch_read_bio *, - struct bvec_iter, struct bkey_s_c k, - struct extent_pick_ptr *, unsigned); - -static inline void bch_read_extent(struct bch_fs *c, - struct bch_read_bio *orig, - struct bkey_s_c k, - struct extent_pick_ptr *pick, - unsigned flags) -{ - bch_read_extent_iter(c, orig, orig->bio.bi_iter, - k, pick, flags); -} - -enum bch_read_flags { - BCH_READ_FORCE_BOUNCE = 1 << 0, - BCH_READ_RETRY_IF_STALE = 1 << 1, - BCH_READ_PROMOTE = 1 << 2, - BCH_READ_IS_LAST = 1 << 3, - BCH_READ_MAY_REUSE_BIO = 1 << 4, - BCH_READ_ACCOUNT_TIMES = 1 << 5, - BCH_READ_USER_MAPPED = 1 << 6, -}; - -void bch_read(struct bch_fs *, struct bch_read_bio *, u64); - -void bch_generic_make_request(struct bio *, struct bch_fs *); -void bch_bio_submit_work(struct work_struct *); -void bch_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *, - const struct bkey_i *, bool); - -int bch_discard(struct bch_fs *, struct bpos, struct bpos, - struct bversion, struct disk_reservation *, - struct extent_insert_hook *, u64 *); - -void bch_read_retry_work(struct work_struct *); -void bch_wake_delayed_writes(unsigned long data); - -#endif /* _BCACHE_IO_H */ diff --git a/libbcache/keybuf.c b/libbcache/keybuf.c deleted file mode 100644 index 961fc79a..00000000 --- a/libbcache/keybuf.c +++ /dev/null @@ -1,195 +0,0 @@ - -#include "bcache.h" -#include "btree_gc.h" -#include "btree_iter.h" -#include "keybuf.h" - -#include <trace/events/bcache.h> - -/* - * For buffered iteration over the btree, with predicates and ratelimiting and - * whatnot - */ - -static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r) -{ - /* Overlapping keys compare equal */ - if (bkey_cmp(l->key.k.p, bkey_start_pos(&r->key.k)) <= 0) - return -1; - if (bkey_cmp(bkey_start_pos(&l->key.k), r->key.k.p) >= 0) - return 1; - return 0; -} - -static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l, - struct keybuf_key *r) -{ - return clamp_t(s64, bkey_cmp(l->key.k.p, r->key.k.p), -1, 1); -} - -void bch_refill_keybuf(struct bch_fs *c, struct keybuf *buf, - struct bpos end, keybuf_pred_fn *pred) -{ - struct bpos start = buf->last_scanned; - struct btree_iter iter; - struct bkey_s_c k; - unsigned nr_found = 0; - - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, buf->last_scanned, k) { - if (bkey_cmp(k.k->p, end) >= 0) { - buf->last_scanned = k.k->p; - goto done; - } - - if (pred(buf, k)) { - struct keybuf_key *w; - - spin_lock(&buf->lock); - - w = array_alloc(&buf->freelist); - if (!w) { - spin_unlock(&buf->lock); - goto done; - } - - bkey_reassemble(&w->key, k); - 
atomic_set(&w->ref, -1); /* -1 means hasn't started */ - - if (RB_INSERT(&buf->keys, w, node, keybuf_cmp)) - array_free(&buf->freelist, w); - else - nr_found++; - - spin_unlock(&buf->lock); - } - - buf->last_scanned = k.k->p; - bch_btree_iter_cond_resched(&iter); - } - - /* If we end up here, it means: - * - the map_fn didn't fill up the keybuf - * - the map_fn didn't see the end key - * - there were no more keys to map over - * Therefore, we are at the end of the key space */ - buf->last_scanned = POS_MAX; -done: - bch_btree_iter_unlock(&iter); - - trace_bcache_keyscan(nr_found, - start.inode, start.offset, - buf->last_scanned.inode, - buf->last_scanned.offset); - - spin_lock(&buf->lock); - - if (!RB_EMPTY_ROOT(&buf->keys)) { - struct keybuf_key *w; - - w = RB_FIRST(&buf->keys, struct keybuf_key, node); - buf->start = bkey_start_pos(&w->key.k); - - w = RB_LAST(&buf->keys, struct keybuf_key, node); - buf->end = w->key.k.p; - } else { - buf->start = POS_MAX; - buf->end = POS_MAX; - } - - spin_unlock(&buf->lock); -} - -static void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w) -{ - rb_erase(&w->node, &buf->keys); - array_free(&buf->freelist, w); -} - -void bch_keybuf_put(struct keybuf *buf, struct keybuf_key *w) -{ - BUG_ON(atomic_read(&w->ref) <= 0); - - if (atomic_dec_and_test(&w->ref)) { - up(&buf->in_flight); - - spin_lock(&buf->lock); - bch_keybuf_del(buf, w); - spin_unlock(&buf->lock); - } -} - -void bch_keybuf_recalc_oldest_gens(struct bch_fs *c, struct keybuf *buf) -{ - struct keybuf_key *w, *n; - - spin_lock(&buf->lock); - rbtree_postorder_for_each_entry_safe(w, n, - &buf->keys, node) - bch_btree_key_recalc_oldest_gen(c, bkey_i_to_s_c(&w->key)); - spin_unlock(&buf->lock); -} - -bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bpos start, - struct bpos end) -{ - bool ret = false; - struct keybuf_key *w, *next, s = { .key.k.p = start }; - - if (bkey_cmp(end, buf->start) <= 0 || - bkey_cmp(start, buf->end) >= 0) - return false; - - spin_lock(&buf->lock); - - for (w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp); - w && bkey_cmp(bkey_start_pos(&w->key.k), end) < 0; - w = next) { - next = RB_NEXT(w, node); - - if (atomic_read(&w->ref) == -1) - bch_keybuf_del(buf, w); - else - ret = true; - } - - spin_unlock(&buf->lock); - return ret; -} - -struct keybuf_key *bch_keybuf_next(struct keybuf *buf) -{ - struct keybuf_key *w; - - spin_lock(&buf->lock); - - w = RB_FIRST(&buf->keys, struct keybuf_key, node); - - while (w && atomic_read(&w->ref) != -1) - w = RB_NEXT(w, node); - - if (!w) { - spin_unlock(&buf->lock); - return NULL; - } - - atomic_set(&w->ref, 1); - spin_unlock(&buf->lock); - - down(&buf->in_flight); - - return w; -} - -void bch_keybuf_init(struct keybuf *buf) -{ - sema_init(&buf->in_flight, KEYBUF_REFILL_BATCH / 2); - - buf->last_scanned = POS_MAX; - buf->start = POS_MIN; - buf->end = POS_MIN; - - buf->keys = RB_ROOT; - - spin_lock_init(&buf->lock); - array_allocator_init(&buf->freelist); -} diff --git a/libbcache/keybuf.h b/libbcache/keybuf.h deleted file mode 100644 index dd1402d3..00000000 --- a/libbcache/keybuf.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _BCACHE_KEYBUF_H -#define _BCACHE_KEYBUF_H - -#include "keybuf_types.h" - -typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey_s_c); - -void bch_keybuf_init(struct keybuf *); -void bch_refill_keybuf(struct bch_fs *, struct keybuf *, - struct bpos, keybuf_pred_fn *); -void bch_keybuf_recalc_oldest_gens(struct bch_fs *, struct keybuf *); -bool bch_keybuf_check_overlapping(struct keybuf *, 
struct bpos, struct bpos); -void bch_keybuf_put(struct keybuf *, struct keybuf_key *); -struct keybuf_key *bch_keybuf_next(struct keybuf *); - -#endif /* _BCACHE_KEYBUF_H */ diff --git a/libbcache/keybuf_types.h b/libbcache/keybuf_types.h deleted file mode 100644 index 3facc4a0..00000000 --- a/libbcache/keybuf_types.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _BCACHE_KEYBUF_TYPES_H -#define _BCACHE_KEYBUF_TYPES_H - -struct keybuf_key { - struct rb_node node; - BKEY_PADDED(key); - atomic_t ref; -}; - -#define KEYBUF_REFILL_BATCH 500 - -struct keybuf { - struct bpos last_scanned; - spinlock_t lock; - - /* - * Beginning and end of range in rb tree - so that we can skip taking - * lock and checking the rb tree when we need to check for overlapping - * keys. - */ - struct bpos start; - struct bpos end; - - struct rb_root keys; - - unsigned max_in_flight; - struct semaphore in_flight; - - DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, - KEYBUF_REFILL_BATCH); -}; - -#endif /* _BCACHE_KEYBUF_TYPES_H */ diff --git a/libbcache/migrate.h b/libbcache/migrate.h deleted file mode 100644 index c6a056cb..00000000 --- a/libbcache/migrate.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _BCACHE_MIGRATE_H -#define _BCACHE_MIGRATE_H - -int bch_move_data_off_device(struct bch_dev *); -int bch_move_metadata_off_device(struct bch_dev *); -int bch_flag_data_bad(struct bch_dev *); - -#endif /* _BCACHE_MIGRATE_H */ diff --git a/libbcache/notify.c b/libbcache/notify.c deleted file mode 100644 index b06a8749..00000000 --- a/libbcache/notify.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Code for sending uevent notifications to user-space. - * - * Copyright 2015 Datera, Inc. - */ - -#include "bcache.h" -#include "notify.h" - -#include <linux/kobject.h> - -#define notify_var(c, format, ...) 
\ -({ \ - int ret; \ - lockdep_assert_held(&(c)->uevent_lock); \ - ret = add_uevent_var(&(c)->uevent_env, format, ##__VA_ARGS__); \ - WARN_ON_ONCE(ret); \ -}) - -static void notify_get(struct bch_fs *c) -{ - struct kobj_uevent_env *env = &c->uevent_env; - - mutex_lock(&c->uevent_lock); - env->envp_idx = 0; - env->buflen = 0; - - notify_var(c, "SET_UUID=%pU", c->sb.user_uuid.b); -} - -static void notify_get_cache(struct bch_dev *ca) -{ - struct bch_fs *c = ca->fs; - - notify_get(c); - notify_var(c, "UUID=%pU", ca->uuid.b); - notify_var(c, "BLOCKDEV=%s", ca->name); -} - -static void notify_put(struct bch_fs *c) -{ - struct kobj_uevent_env *env = &c->uevent_env; - - env->envp[env->envp_idx] = NULL; - kobject_uevent_env(&c->kobj, KOBJ_CHANGE, env->envp); - mutex_unlock(&c->uevent_lock); -} - -void bch_notify_fs_read_write(struct bch_fs *c) -{ - notify_get(c); - notify_var(c, "STATE=active"); - notify_put(c); -} - -void bch_notify_fs_read_only(struct bch_fs *c) -{ - notify_get(c); - notify_var(c, "STATE=readonly"); - notify_put(c); -} - -void bch_notify_fs_stopped(struct bch_fs *c) -{ - notify_get(c); - notify_var(c, "STATE=stopped"); - notify_put(c); -} - -void bch_notify_dev_read_write(struct bch_dev *ca) -{ - struct bch_fs *c = ca->fs; - - notify_get_cache(ca); - notify_var(c, "STATE=active"); - notify_put(c); -} - -void bch_notify_dev_read_only(struct bch_dev *ca) -{ - struct bch_fs *c = ca->fs; - - notify_get_cache(ca); - notify_var(c, "STATE=readonly"); - notify_put(c); -} - -void bch_notify_dev_added(struct bch_dev *ca) -{ - struct bch_fs *c = ca->fs; - - notify_get_cache(ca); - notify_var(c, "STATE=removing"); - notify_put(c); -} - -void bch_notify_dev_error(struct bch_dev *ca, bool fatal) -{ - struct bch_fs *c = ca->fs; - - notify_get_cache(ca); - notify_var(c, "STATE=error"); - notify_var(c, "FATAL=%d", fatal); - notify_put(c); -} diff --git a/libbcache/notify.h b/libbcache/notify.h deleted file mode 100644 index 2c1e3679..00000000 --- a/libbcache/notify.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Code for sending uevent notifications to user-space. - * - * Copyright 2015 Datera, Inc. - */ - -#ifndef _NOTIFY_H -#define _NOTIFY_H - -#ifndef NO_BCACHE_NOTIFY - -void bch_notify_fs_read_write(struct bch_fs *); -void bch_notify_fs_read_only(struct bch_fs *); -void bch_notify_fs_stopped(struct bch_fs *); - -void bch_notify_dev_read_write(struct bch_dev *); -void bch_notify_dev_read_only(struct bch_dev *); -void bch_notify_dev_added(struct bch_dev *); -void bch_notify_dev_error(struct bch_dev *, bool); - -#else - -static inline void bch_notify_fs_read_write(struct bch_fs *c) {} -static inline void bch_notify_fs_read_only(struct bch_fs *c) {} -static inline void bch_notify_fs_stopped(struct bch_fs *c) {} - -static inline void bch_notify_dev_read_write(struct bch_dev *ca) {} -static inline void bch_notify_dev_read_only(struct bch_dev *ca) {} -static inline void bch_notify_dev_added(struct bch_dev *ca) {} -static inline void bch_notify_dev_error(struct bch_dev *ca, bool b) {} - -#endif - -#endif /* _NOTIFY_H */ diff --git a/libbcache/request.c b/libbcache/request.c deleted file mode 100644 index b24770bc..00000000 --- a/libbcache/request.c +++ /dev/null @@ -1,809 +0,0 @@ -/* - * Handle a read or a write request and decide what to do with it. - * - * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> - * Copyright 2012 Google, Inc. 
- * - * Main pieces here: - * - * 1) Data insert path, via bch_data_insert() -- writes data to cache and - * updates extents btree - * 2) Read path, via bch_read() -- for now only used by bcachefs and ioctl - * interface - * 3) Read path, via cache_lookup() and struct search -- used by block device - * make_request functions - * 4) Cache promotion -- used by bch_read() and cache_lookup() to copy data to - * the cache, either from a backing device or a cache device in a higher tier - * - * One tricky thing that comes up is a race condition where a bucket may be - * re-used while reads from it are still in flight. To guard against this, we - * save the ptr that is being read and check if it is stale once the read - * completes. If the ptr is stale, the read is retried. - * - * #2 and #3 will be unified further in the future. - */ - -#include "bcache.h" -#include "blockdev.h" -#include "btree_update.h" -#include "btree_iter.h" -#include "clock.h" -#include "debug.h" -#include "error.h" -#include "extents.h" -#include "io.h" -#include "journal.h" -#include "keybuf.h" -#include "request.h" -#include "writeback.h" -#include "stats.h" - -#include <linux/module.h> -#include <linux/hash.h> -#include <linux/random.h> -#include <linux/backing-dev.h> - -#include <trace/events/bcache.h> - -#define CUTOFF_CACHE_ADD 10 -#define CUTOFF_CACHE_READA 15 - -/* Congested? */ - -unsigned bch_get_congested(struct bch_fs *c) -{ - int i; - long rand; - - if (!c->congested_read_threshold_us && - !c->congested_write_threshold_us) - return 0; - - i = (local_clock_us() - c->congested_last_us) / 1024; - if (i < 0) - return 0; - - i += atomic_read(&c->congested); - if (i >= 0) - return 0; - - i += CONGESTED_MAX; - - if (i > 0) - i = fract_exp_two(i, 6); - - rand = get_random_int(); - i -= bitmap_weight(&rand, BITS_PER_LONG); - - return i > 0 ? 
i : 1; -} - -static void add_sequential(struct task_struct *t) -{ - t->sequential_io_avg = ewma_add(t->sequential_io_avg, - t->sequential_io, 3); - t->sequential_io = 0; -} - -static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k) -{ - return &dc->io_hash[hash_64(k, RECENT_IO_BITS)]; -} - -static bool check_should_bypass(struct cached_dev *dc, struct bio *bio, int rw) -{ - struct bch_fs *c = dc->disk.c; - unsigned mode = BDEV_CACHE_MODE(dc->disk_sb.sb); - unsigned sectors, congested = bch_get_congested(c); - struct task_struct *task = current; - struct io *i; - - if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || - sectors_available(c) * 100 < c->capacity * CUTOFF_CACHE_ADD || - (bio_op(bio) == REQ_OP_DISCARD)) - goto skip; - - if (mode == CACHE_MODE_NONE || - (mode == CACHE_MODE_WRITEAROUND && - op_is_write(bio_op(bio)))) - goto skip; - - if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || - bio_sectors(bio) & (c->sb.block_size - 1)) { - pr_debug("skipping unaligned io"); - goto skip; - } - - if (bypass_torture_test(dc)) { - if ((get_random_int() & 3) == 3) - goto skip; - else - goto rescale; - } - - if (!congested && !dc->sequential_cutoff) - goto rescale; - - if (!congested && - mode == CACHE_MODE_WRITEBACK && - op_is_write(bio_op(bio)) && - (bio->bi_opf & REQ_SYNC)) - goto rescale; - - spin_lock(&dc->io_lock); - - hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) - if (i->last == bio->bi_iter.bi_sector && - time_before(jiffies, i->last_io)) - goto found; - - i = list_first_entry(&dc->io_lru, struct io, lru); - - add_sequential(task); - i->sequential = 0; -found: - if (i->sequential + bio->bi_iter.bi_size > i->sequential) - i->sequential += bio->bi_iter.bi_size; - - i->last = bio_end_sector(bio); - i->last_io = jiffies + msecs_to_jiffies(5000); - task->sequential_io = i->sequential; - - hlist_del(&i->hash); - hlist_add_head(&i->hash, iohash(dc, i->last)); - list_move_tail(&i->lru, &dc->io_lru); - - spin_unlock(&dc->io_lock); - - sectors = max(task->sequential_io, - task->sequential_io_avg) >> 9; - - if (dc->sequential_cutoff && - sectors >= dc->sequential_cutoff >> 9) { - trace_bcache_bypass_sequential(bio); - goto skip; - } - - if (congested && sectors >= congested) { - trace_bcache_bypass_congested(bio); - goto skip; - } - -rescale: - return false; -skip: - bch_mark_sectors_bypassed(c, dc, bio_sectors(bio)); - return true; -} - -/* Common code for the make_request functions */ - -/** - * request_endio - endio function for backing device bios - */ -static void request_endio(struct bio *bio) -{ - struct closure *cl = bio->bi_private; - - if (bio->bi_error) { - struct search *s = container_of(cl, struct search, cl); - s->iop.error = bio->bi_error; - /* Only cache read errors are recoverable */ - s->recoverable = false; - } - - bio_put(bio); - closure_put(cl); -} - -static void bio_complete(struct search *s) -{ - if (s->orig_bio) { - generic_end_io_acct(bio_data_dir(s->orig_bio), - &s->d->disk->part0, s->start_time); - - trace_bcache_request_end(s->d, s->orig_bio); - s->orig_bio->bi_error = s->iop.error; - bio_endio(s->orig_bio); - s->orig_bio = NULL; - } -} - -static void do_bio_hook(struct search *s, struct bio *orig_bio) -{ - int rw = bio_data_dir(orig_bio); - struct bio *bio = rw ? 
&s->wbio.bio : &s->rbio.bio; - - bio_init(bio); - __bio_clone_fast(bio, orig_bio); - bio->bi_end_io = request_endio; - bio->bi_private = &s->cl; - - bio_cnt_set(bio, 3); -} - -static void search_free(struct closure *cl) -{ - struct search *s = container_of(cl, struct search, cl); - - bio_complete(s); - - if (s->iop.bio) - bio_put(&s->iop.bio->bio); - - closure_debug_destroy(cl); - mempool_free(s, &s->d->c->search); -} - -static inline struct search *search_alloc(struct bio *bio, - struct bcache_device *d) -{ - struct search *s; - - s = mempool_alloc(&d->c->search, GFP_NOIO); - - closure_init(&s->cl, NULL); - do_bio_hook(s, bio); - - s->orig_bio = bio; - s->d = d; - s->recoverable = 1; - s->bypass = 0; - s->write = op_is_write(bio_op(bio)); - s->read_dirty_data = 0; - s->cache_miss = 0; - s->start_time = jiffies; - s->inode = bcache_dev_inum(d); - - s->iop.c = d->c; - s->iop.bio = NULL; - s->iop.error = 0; - - return s; -} - -/* Cached devices */ - -static void cached_dev_bio_complete(struct closure *cl) -{ - struct search *s = container_of(cl, struct search, cl); - struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - - search_free(cl); - cached_dev_put(dc); -} - -/* Process reads */ - -static void cached_dev_read_error(struct closure *cl) -{ - struct search *s = container_of(cl, struct search, cl); - struct bio *bio = &s->rbio.bio; - - if (s->recoverable) { - /* Read bucket invalidate races are handled here, also plain - * old IO errors from the cache that can be retried from the - * backing device (reads of clean data) */ - trace_bcache_read_retry(s->orig_bio); - - s->iop.error = 0; - do_bio_hook(s, s->orig_bio); - - /* XXX: invalidate cache, don't count twice */ - - closure_bio_submit(bio, cl); - } - - continue_at(cl, cached_dev_bio_complete, NULL); -} - -static void cached_dev_read_done(struct closure *cl) -{ - struct search *s = container_of(cl, struct search, cl); - struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - - if (dc->verify && s->recoverable && !s->read_dirty_data) - bch_data_verify(dc, s->orig_bio); - - continue_at_nobarrier(cl, cached_dev_bio_complete, NULL); -} - -static void cached_dev_read_done_bh(struct closure *cl) -{ - struct search *s = container_of(cl, struct search, cl); - struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - - bch_mark_cache_accounting(s->iop.c, dc, !s->cache_miss, s->bypass); - trace_bcache_read(s->orig_bio, !s->cache_miss, s->bypass); - - if (s->iop.error) - continue_at_nobarrier(cl, cached_dev_read_error, s->iop.c->wq); - else if (dc->verify) - continue_at_nobarrier(cl, cached_dev_read_done, s->iop.c->wq); - else - continue_at_nobarrier(cl, cached_dev_bio_complete, NULL); -} - -/** - * __cache_promote -- insert result of read bio into cache - * - * Used for backing devices and flash-only volumes. - * - * @orig_bio must actually be a bbio with a valid key. - */ -void __cache_promote(struct bch_fs *c, struct bch_read_bio *orig_bio, - struct bkey_s_c old, - struct bkey_s_c new, - unsigned write_flags) -{ -#if 0 - struct cache_promote_op *op; - struct bio *bio; - unsigned pages = DIV_ROUND_UP(orig_bio->bio.bi_iter.bi_size, PAGE_SIZE); - - /* XXX: readahead? 
*/ - - op = kmalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO); - if (!op) - goto out_submit; - - /* clone the bbio */ - memcpy(&op->bio, orig_bio, offsetof(struct bbio, bio)); - - bio = &op->bio.bio.bio; - bio_init(bio); - bio_get(bio); - bio->bi_bdev = orig_bio->bio.bi_bdev; - bio->bi_iter.bi_sector = orig_bio->bio.bi_iter.bi_sector; - bio->bi_iter.bi_size = orig_bio->bio.bi_iter.bi_size; - bio->bi_end_io = cache_promote_endio; - bio->bi_private = &op->cl; - bio->bi_io_vec = bio->bi_inline_vecs; - bch_bio_map(bio, NULL); - - if (bio_alloc_pages(bio, __GFP_NOWARN|GFP_NOIO)) - goto out_free; - - orig_bio->ca = NULL; - - closure_init(&op->cl, &c->cl); - op->orig_bio = &orig_bio->bio; - op->stale = 0; - - bch_write_op_init(&op->iop, c, &op->bio, &c->promote_write_point, - new, old, - BCH_WRITE_ALLOC_NOWAIT|write_flags); - op->iop.nr_replicas = 1; - - //bch_cut_front(bkey_start_pos(&orig_bio->key.k), &op->iop.insert_key); - //bch_cut_back(orig_bio->key.k.p, &op->iop.insert_key.k); - - trace_bcache_promote(&orig_bio->bio); - - op->bio.bio.submit_time_us = local_clock_us(); - closure_bio_submit(bio, &op->cl); - - continue_at(&op->cl, cache_promote_write, c->wq); -out_free: - kfree(op); -out_submit: - generic_make_request(&orig_bio->bio); -#endif -} - -/** - * cached_dev_cache_miss - populate cache with data from backing device - * - * We don't write to the cache if s->bypass is set. - */ -static int cached_dev_cache_miss(struct btree_iter *iter, struct search *s, - struct bio *bio, unsigned sectors) -{ - int ret; - unsigned reada = 0; - struct bio *miss; - BKEY_PADDED(key) replace; - - s->cache_miss = 1; - - if (s->bypass) - goto nopromote; -#if 0 - struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - - /* XXX: broken */ - if (!(bio->bi_opf & REQ_RAHEAD) && - !(bio->bi_opf & REQ_META) && - ((u64) sectors_available(dc->disk.c) * 100 < - (u64) iter->c->capacity * CUTOFF_CACHE_READA)) - reada = min_t(sector_t, dc->readahead >> 9, - bdev_sectors(bio->bi_bdev) - bio_end_sector(bio)); -#endif - sectors = min(sectors, bio_sectors(bio) + reada); - - replace.key.k = KEY(s->inode, - bio->bi_iter.bi_sector + sectors, - sectors); - - ret = bch_btree_insert_check_key(iter, &replace.key); - if (ret == -EINTR) - return ret; - - miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split); - - miss->bi_end_io = request_endio; - miss->bi_private = &s->cl; - - //to_bbio(miss)->key.k = KEY(s->inode, - // bio_end_sector(miss), - // bio_sectors(miss)); - to_rbio(miss)->ca = NULL; - - closure_get(&s->cl); - __cache_promote(s->iop.c, to_rbio(miss), - bkey_i_to_s_c(&replace.key), - bkey_to_s_c(&KEY(replace.key.k.p.inode, - replace.key.k.p.offset, - replace.key.k.size)), - BCH_WRITE_CACHED); - - return 0; -nopromote: - miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split); - - miss->bi_end_io = request_endio; - miss->bi_private = &s->cl; - closure_bio_submit(miss, &s->cl); - - return 0; -} - -static void cached_dev_read(struct cached_dev *dc, struct search *s) -{ - struct bch_fs *c = s->iop.c; - struct closure *cl = &s->cl; - struct bio *bio = &s->rbio.bio; - struct btree_iter iter; - struct bkey_s_c k; - int ret; - - bch_increment_clock(c, bio_sectors(bio), READ); - - for_each_btree_key_with_holes(&iter, c, BTREE_ID_EXTENTS, - POS(s->inode, bio->bi_iter.bi_sector), k) { - BKEY_PADDED(k) tmp; - struct extent_pick_ptr pick; - unsigned sectors, bytes; - bool is_last; -retry: - bkey_reassemble(&tmp.k, k); - bch_btree_iter_unlock(&iter); - k = bkey_i_to_s_c(&tmp.k); - - 
bch_extent_pick_ptr(c, k, &pick); - if (IS_ERR(pick.ca)) { - bcache_io_error(c, bio, "no device to read from"); - goto out; - } - - sectors = min_t(u64, k.k->p.offset, bio_end_sector(bio)) - - bio->bi_iter.bi_sector; - bytes = sectors << 9; - is_last = bytes == bio->bi_iter.bi_size; - swap(bio->bi_iter.bi_size, bytes); - - if (pick.ca) { - PTR_BUCKET(pick.ca, &pick.ptr)->read_prio = - c->prio_clock[READ].hand; - - if (!bkey_extent_is_cached(k.k)) - s->read_dirty_data = true; - - bch_read_extent(c, &s->rbio, k, &pick, - BCH_READ_ACCOUNT_TIMES| - BCH_READ_RETRY_IF_STALE| - (!s->bypass ? BCH_READ_PROMOTE : 0)| - (is_last ? BCH_READ_IS_LAST : 0)); - } else { - /* not present (hole), or stale cached data */ - if (cached_dev_cache_miss(&iter, s, bio, sectors)) { - k = bch_btree_iter_peek_with_holes(&iter); - if (btree_iter_err(k)) - break; - goto retry; - } - } - - swap(bio->bi_iter.bi_size, bytes); - bio_advance(bio, bytes); - - if (is_last) { - bch_btree_iter_unlock(&iter); - goto out; - } - } - - /* - * If we get here, it better have been because there was an error - * reading a btree node - */ - ret = bch_btree_iter_unlock(&iter); - BUG_ON(!ret); - bcache_io_error(c, bio, "btree IO error %i", ret); -out: - continue_at(cl, cached_dev_read_done_bh, NULL); -} - -/* Process writes */ - -static void cached_dev_write_complete(struct closure *cl) -{ - struct search *s = container_of(cl, struct search, cl); - struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); - - up_read_non_owner(&dc->writeback_lock); - cached_dev_bio_complete(cl); -} - -static void cached_dev_write(struct cached_dev *dc, struct search *s) -{ - struct closure *cl = &s->cl; - struct bio *bio = &s->wbio.bio; - bool writeback = false; - bool bypass = s->bypass; - struct bkey insert_key = KEY(s->inode, - bio_end_sector(bio), - bio_sectors(bio)); - unsigned flags = BCH_WRITE_DISCARD_ON_ERROR; - - down_read_non_owner(&dc->writeback_lock); - if (bch_keybuf_check_overlapping(&dc->writeback_keys, - bkey_start_pos(&insert_key), - insert_key.p)) { - /* - * We overlap with some dirty data undergoing background - * writeback, force this write to writeback - */ - bypass = false; - writeback = true; - } - - /* - * Discards aren't _required_ to do anything, so skipping if - * check_overlapping returned true is ok - * - * But check_overlapping drops dirty keys for which io hasn't started, - * so we still want to call it. - */ - if (bio_op(bio) == REQ_OP_DISCARD) - bypass = true; - - if (should_writeback(dc, bio, BDEV_CACHE_MODE(dc->disk_sb.sb), - bypass)) { - bypass = false; - writeback = true; - } - - if (bypass) { - /* - * If this is a bypass-write (as opposed to a discard), send - * it down to the backing device. If this is a discard, only - * send it to the backing device if the backing device - * supports discards. Otherwise, we simply discard the key - * range from the cache and don't touch the backing device. 
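 *
 * For the non-bypass cases below: a writeback write goes to the cache
 * only (dirty data is flushed to the backing device later by the
 * writeback thread, and a REQ_PREFLUSH additionally sends an explicit
 * flush bio to the backing device); anything else is written through --
 * cloned to the backing device and also inserted into the cache as
 * clean (BCH_WRITE_CACHED) data.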
- */ - if ((bio_op(bio) != REQ_OP_DISCARD) || - blk_queue_discard(bdev_get_queue(dc->disk_sb.bdev))) - closure_bio_submit(s->orig_bio, cl); - } else if (writeback) { - bch_writeback_add(dc); - - if (bio->bi_opf & REQ_PREFLUSH) { - /* Also need to send a flush to the backing device */ - struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, - &dc->disk.bio_split); - - flush->bi_bdev = bio->bi_bdev; - flush->bi_end_io = request_endio; - flush->bi_private = cl; - bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH); - - closure_bio_submit(flush, cl); - } - } else { - struct bio *writethrough = - bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split); - - closure_bio_submit(writethrough, cl); - - flags |= BCH_WRITE_CACHED; - flags |= BCH_WRITE_ALLOC_NOWAIT; - } - - if (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) - flags |= BCH_WRITE_FLUSH; - if (bypass) - flags |= BCH_WRITE_DISCARD; - - bch_write_op_init(&s->iop, dc->disk.c, &s->wbio, - (struct disk_reservation) { 0 }, - foreground_write_point(dc->disk.c, - (unsigned long) current), - bkey_start_pos(&insert_key), - NULL, flags); - - closure_call(&s->iop.cl, bch_write, NULL, cl); - continue_at(cl, cached_dev_write_complete, NULL); -} - -/* Cached devices - read & write stuff */ - -static void __cached_dev_make_request(struct request_queue *q, struct bio *bio) -{ - struct search *s; - struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; - struct cached_dev *dc = container_of(d, struct cached_dev, disk); - int rw = bio_data_dir(bio); - - generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0); - - bio->bi_bdev = dc->disk_sb.bdev; - bio->bi_iter.bi_sector += le64_to_cpu(dc->disk_sb.sb->data_offset); - - if (cached_dev_get(dc)) { - struct bio *clone; - - s = search_alloc(bio, d); - trace_bcache_request_start(s->d, bio); - - clone = rw ? 
&s->wbio.bio : &s->rbio.bio; - - if (!bio->bi_iter.bi_size) { - if (s->orig_bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) - bch_journal_flush_async(&s->iop.c->journal, - &s->cl); - - /* - * If it's a flush, we send the flush to the backing - * device too - */ - closure_bio_submit(clone, &s->cl); - - continue_at(&s->cl, cached_dev_bio_complete, NULL); - } else { - s->bypass = check_should_bypass(dc, bio, rw); - - if (rw) - cached_dev_write(dc, s); - else - cached_dev_read(dc, s); - } - } else { - if ((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(dc->disk_sb.bdev))) - bio_endio(bio); - else - generic_make_request(bio); - } -} - -static blk_qc_t cached_dev_make_request(struct request_queue *q, - struct bio *bio) -{ - __cached_dev_make_request(q, bio); - return BLK_QC_T_NONE; -} - -static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, - unsigned int cmd, unsigned long arg) -{ - struct cached_dev *dc = container_of(d, struct cached_dev, disk); - return __blkdev_driver_ioctl(dc->disk_sb.bdev, mode, cmd, arg); -} - -static int cached_dev_congested(void *data, int bits) -{ - struct bcache_device *d = data; - struct cached_dev *dc = container_of(d, struct cached_dev, disk); - struct request_queue *q = bdev_get_queue(dc->disk_sb.bdev); - int ret = 0; - - if (bdi_congested(&q->backing_dev_info, bits)) - return 1; - - if (cached_dev_get(dc)) { - ret |= bch_congested(d->c, bits); - cached_dev_put(dc); - } - - return ret; -} - -void bch_cached_dev_request_init(struct cached_dev *dc) -{ - struct gendisk *g = dc->disk.disk; - - g->queue->make_request_fn = cached_dev_make_request; - g->queue->backing_dev_info.congested_fn = cached_dev_congested; - dc->disk.ioctl = cached_dev_ioctl; -} - -/* Blockdev volumes */ - -static void __blockdev_volume_make_request(struct request_queue *q, - struct bio *bio) -{ - struct search *s; - struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; - int rw = bio_data_dir(bio); - - generic_start_io_acct(rw, bio_sectors(bio), &d->disk->part0); - - trace_bcache_request_start(d, bio); - - s = search_alloc(bio, d); - - if (!bio->bi_iter.bi_size) { - if (s->orig_bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) - bch_journal_flush_async(&s->iop.c->journal, - &s->cl); - - continue_at(&s->cl, search_free, NULL); - } else if (rw) { - struct disk_reservation res = { 0 }; - unsigned flags = 0; - - if (bio_op(bio) != REQ_OP_DISCARD && - bch_disk_reservation_get(d->c, &res, bio_sectors(bio), 0)) { - s->iop.error = -ENOSPC; - continue_at(&s->cl, search_free, NULL); - return; - } - - if (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) - flags |= BCH_WRITE_FLUSH; - if (bio_op(bio) == REQ_OP_DISCARD) - flags |= BCH_WRITE_DISCARD; - - bch_write_op_init(&s->iop, d->c, &s->wbio, res, - foreground_write_point(d->c, - (unsigned long) current), - POS(s->inode, bio->bi_iter.bi_sector), - NULL, flags); - - closure_call(&s->iop.cl, bch_write, NULL, &s->cl); - } else { - closure_get(&s->cl); - bch_read(d->c, &s->rbio, bcache_dev_inum(d)); - } - continue_at(&s->cl, search_free, NULL); -} - -static blk_qc_t blockdev_volume_make_request(struct request_queue *q, - struct bio *bio) -{ - __blockdev_volume_make_request(q, bio); - return BLK_QC_T_NONE; -} - -static int blockdev_volume_ioctl(struct bcache_device *d, fmode_t mode, - unsigned int cmd, unsigned long arg) -{ - return -ENOTTY; -} - -static int blockdev_volume_congested(void *data, int bits) -{ - struct bcache_device *d = data; - - return bch_congested(d->c, bits); -} - -void bch_blockdev_volume_request_init(struct bcache_device *d) -{ 
- struct gendisk *g = d->disk; - - g->queue->make_request_fn = blockdev_volume_make_request; - g->queue->backing_dev_info.congested_fn = blockdev_volume_congested; - d->ioctl = blockdev_volume_ioctl; -} diff --git a/libbcache/request.h b/libbcache/request.h deleted file mode 100644 index 1ee3d16f..00000000 --- a/libbcache/request.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _BCACHE_REQUEST_H_ -#define _BCACHE_REQUEST_H_ - -#include "stats.h" - -struct bch_fs; -struct cached_dev; -struct bcache_device; -struct kmem_cache; - -unsigned bch_get_congested(struct bch_fs *); - -void bch_cached_dev_request_init(struct cached_dev *dc); -void bch_blockdev_volume_request_init(struct bcache_device *d); - -#endif /* _BCACHE_REQUEST_H_ */ diff --git a/libbcache/stats.c b/libbcache/stats.c deleted file mode 100644 index a8a4eb36..00000000 --- a/libbcache/stats.c +++ /dev/null @@ -1,219 +0,0 @@ -/* - * bcache stats code - * - * Copyright 2012 Google, Inc. - */ - -#include "bcache.h" -#include "stats.h" -#include "sysfs.h" - -/* - * We keep absolute totals of various statistics, and addionally a set of three - * rolling averages. - * - * Every so often, a timer goes off and rescales the rolling averages. - * accounting_rescale[] is how many times the timer has to go off before we - * rescale each set of numbers; that gets us half lives of 5 minutes, one hour, - * and one day. - * - * accounting_delay is how often the timer goes off - 22 times in 5 minutes, - * and accounting_weight is what we use to rescale: - * - * pow(31 / 32, 22) ~= 1/2 - * - * So that we don't have to increment each set of numbers every time we (say) - * get a cache hit, we increment a single atomic_t in acc->collector, and when - * the rescale function runs it resets the atomic counter to 0 and adds its - * old value to each of the exported numbers. - * - * To reduce rounding error, the numbers in struct cache_stats are all - * stored left shifted by 16, and scaled back in the sysfs show() function. 
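 *
 * Concretely, assuming ewma_add(x, 0, weight) here evaluates to roughly
 * x - (x >> weight): with accounting_weight = 5 each call to scale_stat()
 * multiplies a counter by about 31/32, and since the timer fires 22 times
 * per five minutes, the five minute numbers decay by (31/32)^22 ~= 1/2
 * per period -- the half life quoted above.  The hour and day sets are
 * only rescaled every 12th and 288th timer tick, which stretches the same
 * decay over one hour and one day respectively.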
- */ - -static const unsigned DAY_RESCALE = 288; -static const unsigned HOUR_RESCALE = 12; -static const unsigned FIVE_MINUTE_RESCALE = 1; -static const unsigned accounting_delay = (HZ * 300) / 22; -static const unsigned accounting_weight = 5; - -/* sysfs reading/writing */ - -read_attribute(cache_hits); -read_attribute(cache_misses); -read_attribute(cache_bypass_hits); -read_attribute(cache_bypass_misses); -read_attribute(cache_hit_ratio); -read_attribute(cache_readaheads); -read_attribute(cache_miss_collisions); -read_attribute(bypassed); -read_attribute(foreground_write_ratio); -read_attribute(foreground_writes); -read_attribute(gc_writes); -read_attribute(discards); - -SHOW(bch_stats) -{ - struct cache_stats *s = - container_of(kobj, struct cache_stats, kobj); -#define var(stat) (s->stat >> 16) - var_print(cache_hits); - var_print(cache_misses); - var_print(cache_bypass_hits); - var_print(cache_bypass_misses); - - sysfs_print(cache_hit_ratio, - DIV_SAFE(var(cache_hits) * 100, - var(cache_hits) + var(cache_misses))); - - var_print(cache_readaheads); - var_print(cache_miss_collisions); - - sysfs_hprint(bypassed, var(sectors_bypassed) << 9); - sysfs_hprint(foreground_writes, var(foreground_write_sectors) << 9); - sysfs_hprint(gc_writes, var(gc_write_sectors) << 9); - sysfs_hprint(discards, var(discard_sectors) << 9); - - sysfs_print(foreground_write_ratio, - DIV_SAFE(var(foreground_write_sectors) * 100, - var(foreground_write_sectors) + - var(gc_write_sectors))); -#undef var - return 0; -} - -STORE(bch_stats) -{ - return size; -} - -static void bch_stats_release(struct kobject *k) -{ -} - -static struct attribute *bch_stats_files[] = { - &sysfs_cache_hits, - &sysfs_cache_misses, - &sysfs_cache_bypass_hits, - &sysfs_cache_bypass_misses, - &sysfs_cache_hit_ratio, - &sysfs_cache_readaheads, - &sysfs_cache_miss_collisions, - &sysfs_bypassed, - &sysfs_foreground_write_ratio, - &sysfs_foreground_writes, - &sysfs_gc_writes, - &sysfs_discards, - NULL -}; -static KTYPE(bch_stats); - -int bch_cache_accounting_add_kobjs(struct cache_accounting *acc, - struct kobject *parent) -{ - int ret = kobject_add(&acc->total.kobj, parent, - "stats_total"); - ret = ret ?: kobject_add(&acc->five_minute.kobj, parent, - "stats_five_minute"); - ret = ret ?: kobject_add(&acc->hour.kobj, parent, - "stats_hour"); - ret = ret ?: kobject_add(&acc->day.kobj, parent, - "stats_day"); - return ret; -} - -void bch_cache_accounting_clear(struct cache_accounting *acc) -{ - memset(&acc->total.cache_hits, - 0, - sizeof(unsigned long) * 9); -} - -void bch_cache_accounting_destroy(struct cache_accounting *acc) -{ - kobject_put(&acc->total.kobj); - kobject_put(&acc->five_minute.kobj); - kobject_put(&acc->hour.kobj); - kobject_put(&acc->day.kobj); - - atomic_set(&acc->closing, 1); - if (del_timer_sync(&acc->timer)) - closure_return(&acc->cl); -} - -/* EWMA scaling */ - -static void scale_stat(unsigned long *stat) -{ - *stat = ewma_add(*stat, 0, accounting_weight); -} - -static void scale_stats(struct cache_stats *stats, unsigned long rescale_at) -{ - if (++stats->rescale == rescale_at) { - stats->rescale = 0; - scale_stat(&stats->cache_hits); - scale_stat(&stats->cache_misses); - scale_stat(&stats->cache_bypass_hits); - scale_stat(&stats->cache_bypass_misses); - scale_stat(&stats->cache_readaheads); - scale_stat(&stats->cache_miss_collisions); - scale_stat(&stats->sectors_bypassed); - scale_stat(&stats->foreground_write_sectors); - scale_stat(&stats->gc_write_sectors); - scale_stat(&stats->discard_sectors); - } -} - -static void 
scale_accounting(unsigned long data) -{ - struct cache_accounting *acc = (struct cache_accounting *) data; - -#define move_stat(name) do { \ - unsigned t = atomic_xchg(&acc->collector.name, 0); \ - t <<= 16; \ - acc->five_minute.name += t; \ - acc->hour.name += t; \ - acc->day.name += t; \ - acc->total.name += t; \ -} while (0) - - move_stat(cache_hits); - move_stat(cache_misses); - move_stat(cache_bypass_hits); - move_stat(cache_bypass_misses); - move_stat(cache_readaheads); - move_stat(cache_miss_collisions); - move_stat(sectors_bypassed); - move_stat(foreground_write_sectors); - move_stat(gc_write_sectors); - move_stat(discard_sectors); - - scale_stats(&acc->total, 0); - scale_stats(&acc->day, DAY_RESCALE); - scale_stats(&acc->hour, HOUR_RESCALE); - scale_stats(&acc->five_minute, FIVE_MINUTE_RESCALE); - - acc->timer.expires += accounting_delay; - - if (!atomic_read(&acc->closing)) - add_timer(&acc->timer); - else - closure_return(&acc->cl); -} - -void bch_cache_accounting_init(struct cache_accounting *acc, - struct closure *parent) -{ - kobject_init(&acc->total.kobj, &bch_stats_ktype); - kobject_init(&acc->five_minute.kobj, &bch_stats_ktype); - kobject_init(&acc->hour.kobj, &bch_stats_ktype); - kobject_init(&acc->day.kobj, &bch_stats_ktype); - - closure_init(&acc->cl, parent); - init_timer(&acc->timer); - acc->timer.expires = jiffies + accounting_delay; - acc->timer.data = (unsigned long) acc; - acc->timer.function = scale_accounting; - add_timer(&acc->timer); -} diff --git a/libbcache/stats.h b/libbcache/stats.h deleted file mode 100644 index a3c7bd26..00000000 --- a/libbcache/stats.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef _BCACHE_STATS_H_ -#define _BCACHE_STATS_H_ - -#include "stats_types.h" - -struct bch_fs; -struct cached_dev; -struct bcache_device; - -#ifndef NO_BCACHE_ACCOUNTING - -void bch_cache_accounting_init(struct cache_accounting *, struct closure *); -int bch_cache_accounting_add_kobjs(struct cache_accounting *, struct kobject *); -void bch_cache_accounting_clear(struct cache_accounting *); -void bch_cache_accounting_destroy(struct cache_accounting *); - -#else - -static inline void bch_cache_accounting_init(struct cache_accounting *acc, - struct closure *cl) {} -static inline int bch_cache_accounting_add_kobjs(struct cache_accounting *acc, - struct kobject *cl) -{ - return 0; -} -static inline void bch_cache_accounting_clear(struct cache_accounting *acc) {} -static inline void bch_cache_accounting_destroy(struct cache_accounting *acc) {} - -#endif - -static inline void mark_cache_stats(struct cache_stat_collector *stats, - bool hit, bool bypass) -{ - atomic_inc(&stats->cache_hit_array[!bypass][!hit]); -} - -static inline void bch_mark_cache_accounting(struct bch_fs *c, - struct cached_dev *dc, - bool hit, bool bypass) -{ - mark_cache_stats(&dc->accounting.collector, hit, bypass); - mark_cache_stats(&c->accounting.collector, hit, bypass); -} - -static inline void bch_mark_sectors_bypassed(struct bch_fs *c, - struct cached_dev *dc, - unsigned sectors) -{ - atomic_add(sectors, &dc->accounting.collector.sectors_bypassed); - atomic_add(sectors, &c->accounting.collector.sectors_bypassed); -} - -static inline void bch_mark_gc_write(struct bch_fs *c, int sectors) -{ - atomic_add(sectors, &c->accounting.collector.gc_write_sectors); -} - -static inline void bch_mark_foreground_write(struct bch_fs *c, int sectors) -{ - atomic_add(sectors, &c->accounting.collector.foreground_write_sectors); -} - -static inline void bch_mark_discard(struct bch_fs *c, int sectors) -{ - 
atomic_add(sectors, &c->accounting.collector.discard_sectors); -} - -#endif /* _BCACHE_STATS_H_ */ diff --git a/libbcache/stats_types.h b/libbcache/stats_types.h deleted file mode 100644 index 28e4c69e..00000000 --- a/libbcache/stats_types.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef _BCACHE_STATS_TYPES_H_ -#define _BCACHE_STATS_TYPES_H_ - -struct cache_stat_collector { - union { - struct { - atomic_t cache_hits; - atomic_t cache_misses; - atomic_t cache_bypass_hits; - atomic_t cache_bypass_misses; - }; - - /* cache_hit_array[!bypass][!hit]: */ - atomic_t cache_hit_array[2][2]; - }; - - - atomic_t cache_readaheads; - atomic_t cache_miss_collisions; - atomic_t sectors_bypassed; - atomic_t foreground_write_sectors; - atomic_t gc_write_sectors; - atomic_t discard_sectors; -}; - -struct cache_stats { - struct kobject kobj; - - unsigned long cache_hits; - unsigned long cache_misses; - unsigned long cache_bypass_hits; - unsigned long cache_bypass_misses; - unsigned long cache_readaheads; - unsigned long cache_miss_collisions; - unsigned long sectors_bypassed; - unsigned long foreground_write_sectors; - unsigned long gc_write_sectors; - unsigned long discard_sectors; - - unsigned rescale; -}; - -struct cache_accounting { - struct closure cl; - struct timer_list timer; - atomic_t closing; - - struct cache_stat_collector collector; - - struct cache_stats total; - struct cache_stats five_minute; - struct cache_stats hour; - struct cache_stats day; -}; - -#endif /* _BCACHE_STATS_TYPES_H_ */ diff --git a/libbcache/super.h b/libbcache/super.h deleted file mode 100644 index 66c34308..00000000 --- a/libbcache/super.h +++ /dev/null @@ -1,136 +0,0 @@ -#ifndef _BCACHE_SUPER_H -#define _BCACHE_SUPER_H - -#include "extents.h" - -#include <linux/bcache-ioctl.h> - -static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s) -{ - return s >> ca->bucket_bits; -} - -static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b) -{ - return ((sector_t) b) << ca->bucket_bits; -} - -static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s) -{ - return s & (ca->mi.bucket_size - 1); -} - -static inline struct bch_dev *__bch_next_dev(struct bch_fs *c, unsigned *iter) -{ - struct bch_dev *ca = NULL; - - while (*iter < c->sb.nr_devices && - !(ca = rcu_dereference_check(c->devs[*iter], - lockdep_is_held(&c->state_lock)))) - (*iter)++; - - return ca; -} - -#define __for_each_member_device(ca, c, iter) \ - for ((iter) = 0; ((ca) = __bch_next_dev((c), &(iter))); (iter)++) - -#define for_each_member_device_rcu(ca, c, iter) \ - __for_each_member_device(ca, c, iter) - -static inline struct bch_dev *bch_get_next_dev(struct bch_fs *c, unsigned *iter) -{ - struct bch_dev *ca; - - rcu_read_lock(); - if ((ca = __bch_next_dev(c, iter))) - percpu_ref_get(&ca->ref); - rcu_read_unlock(); - - return ca; -} - -/* - * If you break early, you must drop your ref on the current device - */ -#define for_each_member_device(ca, c, iter) \ - for ((iter) = 0; \ - (ca = bch_get_next_dev(c, &(iter))); \ - percpu_ref_put(&ca->ref), (iter)++) - -static inline struct bch_dev *bch_get_next_online_dev(struct bch_fs *c, - unsigned *iter, - int state_mask) -{ - struct bch_dev *ca; - - rcu_read_lock(); - while ((ca = __bch_next_dev(c, iter)) && - (!((1 << ca->mi.state) & state_mask) || - !percpu_ref_tryget(&ca->io_ref))) - (*iter)++; - rcu_read_unlock(); - - return ca; -} - -#define __for_each_online_member(ca, c, iter, state_mask) \ - for ((iter) = 0; \ - (ca = bch_get_next_online_dev(c, &(iter), state_mask)); \ 
- percpu_ref_put(&ca->io_ref), (iter)++) - -#define for_each_online_member(ca, c, iter) \ - __for_each_online_member(ca, c, iter, ~0) - -#define for_each_rw_member(ca, c, iter) \ - __for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_RW) - -#define for_each_readable_member(ca, c, iter) \ - __for_each_online_member(ca, c, iter, \ - (1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO)) - -struct bch_fs *bch_bdev_to_fs(struct block_device *); -struct bch_fs *bch_uuid_to_fs(uuid_le); -int bch_congested(struct bch_fs *, int); - -void bch_dev_release(struct kobject *); - -bool bch_dev_state_allowed(struct bch_fs *, struct bch_dev *, - enum bch_member_state, int); -int __bch_dev_set_state(struct bch_fs *, struct bch_dev *, - enum bch_member_state, int); -int bch_dev_set_state(struct bch_fs *, struct bch_dev *, - enum bch_member_state, int); - -int bch_dev_fail(struct bch_dev *, int); -int bch_dev_remove(struct bch_fs *, struct bch_dev *, int); -int bch_dev_add(struct bch_fs *, const char *); -int bch_dev_online(struct bch_fs *, const char *); -int bch_dev_offline(struct bch_fs *, struct bch_dev *, int); -int bch_dev_evacuate(struct bch_fs *, struct bch_dev *); - -void bch_fs_detach(struct bch_fs *); - -bool bch_fs_emergency_read_only(struct bch_fs *); -void bch_fs_read_only(struct bch_fs *); -const char *bch_fs_read_write(struct bch_fs *); - -void bch_fs_release(struct kobject *); -void bch_fs_stop_async(struct bch_fs *); -void bch_fs_stop(struct bch_fs *); - -const char *bch_fs_start(struct bch_fs *); -const char *bch_fs_open(char * const *, unsigned, struct bch_opts, - struct bch_fs **); -const char *bch_fs_open_incremental(const char *path); - -extern struct workqueue_struct *bcache_io_wq; -extern struct crypto_shash *bch_sha256; - -extern struct kobj_type bch_fs_ktype; -extern struct kobj_type bch_fs_internal_ktype; -extern struct kobj_type bch_fs_time_stats_ktype; -extern struct kobj_type bch_fs_opts_dir_ktype; -extern struct kobj_type bch_dev_ktype; - -#endif /* _BCACHE_SUPER_H */ diff --git a/libbcache/tier.h b/libbcache/tier.h deleted file mode 100644 index b6f8d4a2..00000000 --- a/libbcache/tier.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _BCACHE_TIER_H -#define _BCACHE_TIER_H - -void bch_tiering_stop(struct bch_fs *); -int bch_tiering_start(struct bch_fs *); -void bch_fs_tiering_init(struct bch_fs *); - -#endif diff --git a/libbcache/writeback.c b/libbcache/writeback.c deleted file mode 100644 index 279cfe67..00000000 --- a/libbcache/writeback.c +++ /dev/null @@ -1,657 +0,0 @@ -/* - * background writeback - scan btree for dirty data and write it to the backing - * device - * - * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> - * Copyright 2012 Google, Inc. 
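 *
 * Rough flow of the code below: the writeback thread refills
 * dc->writeback_keys with dirty extents from the extents btree,
 * read_dirty() reads the data out of the cache, write_dirty() writes it
 * to the backing device, and write_dirty_finish() re-inserts the extent
 * marked cached (clean) via a cmpxchg-style btree update, so that a
 * foreground write racing with writeback wins.  The writeback rate is
 * throttled by a pd controller aiming to keep the configured
 * writeback_percent of the cache dirty.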
- */ - -#include "bcache.h" -#include "btree_update.h" -#include "clock.h" -#include "debug.h" -#include "error.h" -#include "extents.h" -#include "io.h" -#include "keybuf.h" -#include "keylist.h" -#include "writeback.h" - -#include <linux/delay.h> -#include <linux/freezer.h> -#include <linux/kthread.h> -#include <trace/events/bcache.h> - -/* Rate limiting */ - -static void __update_writeback_rate(struct cached_dev *dc) -{ - struct bch_fs *c = dc->disk.c; - u64 cache_dirty_target = - div_u64(c->capacity * dc->writeback_percent, 100); - s64 target = div64_u64(cache_dirty_target * - bdev_sectors(dc->disk_sb.bdev), - c->cached_dev_sectors); - s64 dirty = bcache_dev_sectors_dirty(&dc->disk); - - bch_pd_controller_update(&dc->writeback_pd, target << 9, - dirty << 9, -1); -} - -static void update_writeback_rate(struct work_struct *work) -{ - struct cached_dev *dc = container_of(to_delayed_work(work), - struct cached_dev, - writeback_pd_update); - - down_read(&dc->writeback_lock); - - if (atomic_read(&dc->has_dirty) && - dc->writeback_percent && - !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) - __update_writeback_rate(dc); - else - dc->writeback_pd.rate.rate = UINT_MAX; - - up_read(&dc->writeback_lock); - - schedule_delayed_work(&dc->writeback_pd_update, - dc->writeback_pd_update_seconds * HZ); -} - -struct dirty_io { - struct closure cl; - struct bch_replace_info replace; - struct cached_dev *dc; - struct bch_dev *ca; - struct keybuf_key *w; - struct bch_extent_ptr ptr; - int error; - bool from_mempool; - /* Must be last */ - struct bio bio; -}; - -#define DIRTY_IO_MEMPOOL_BVECS 64 -#define DIRTY_IO_MEMPOOL_SECTORS (DIRTY_IO_MEMPOOL_BVECS * PAGE_SECTORS) - -static void dirty_init(struct dirty_io *io) -{ - struct bio *bio = &io->bio; - - bio_init(bio); - if (!io->dc->writeback_percent) - bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); - - bio->bi_iter.bi_size = io->replace.key.k.size << 9; - bio->bi_max_vecs = - DIV_ROUND_UP(io->replace.key.k.size, PAGE_SECTORS); - bio->bi_io_vec = bio->bi_inline_vecs; - bch_bio_map(bio, NULL); -} - -static void dirty_io_destructor(struct closure *cl) -{ - struct dirty_io *io = container_of(cl, struct dirty_io, cl); - - if (io->from_mempool) - mempool_free(io, &io->dc->writeback_io_pool); - else - kfree(io); -} - -static void write_dirty_finish(struct closure *cl) -{ - struct dirty_io *io = container_of(cl, struct dirty_io, cl); - struct cached_dev *dc = io->dc; - struct bio_vec *bv; - int i; - - bio_for_each_segment_all(bv, &io->bio, i) - mempool_free(bv->bv_page, &dc->writeback_page_pool); - - if (!io->error) { - BKEY_PADDED(k) tmp; - int ret; - - bkey_copy(&tmp.k, &io->replace.key); - io->replace.hook.fn = bch_extent_cmpxchg; - bkey_extent_set_cached(&tmp.k.k, true); - - ret = bch_btree_insert(dc->disk.c, BTREE_ID_EXTENTS, &tmp.k, - NULL, &io->replace.hook, NULL, 0); - if (io->replace.successes == 0) - trace_bcache_writeback_collision(&io->replace.key.k); - - atomic_long_inc(ret - ? 
&dc->disk.c->writeback_keys_failed - : &dc->disk.c->writeback_keys_done); - } - - bch_keybuf_put(&dc->writeback_keys, io->w); - - closure_return_with_destructor(cl, dirty_io_destructor); -} - -static void dirty_endio(struct bio *bio) -{ - struct dirty_io *io = container_of(bio, struct dirty_io, bio); - - if (bio->bi_error) { - trace_bcache_writeback_error(&io->replace.key.k, - op_is_write(bio_op(&io->bio)), - bio->bi_error); - io->error = bio->bi_error; - } - - closure_put(&io->cl); -} - -static void write_dirty(struct closure *cl) -{ - struct dirty_io *io = container_of(cl, struct dirty_io, cl); - - if (!io->error) { - dirty_init(io); - bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0); - io->bio.bi_iter.bi_sector = - bkey_start_offset(&io->replace.key.k); - io->bio.bi_bdev = io->dc->disk_sb.bdev; - io->bio.bi_end_io = dirty_endio; - - closure_bio_submit(&io->bio, cl); - } - - continue_at(cl, write_dirty_finish, io->dc->disk.c->wq); -} - -static void read_dirty_endio(struct bio *bio) -{ - struct dirty_io *io = container_of(bio, struct dirty_io, bio); - - bch_dev_nonfatal_io_err_on(bio->bi_error, io->ca, "writeback read"); - - bch_account_io_completion(io->ca); - - if (ptr_stale(io->ca, &io->ptr)) - bio->bi_error = -EINTR; - - dirty_endio(bio); -} - -static void read_dirty_submit(struct closure *cl) -{ - struct dirty_io *io = container_of(cl, struct dirty_io, cl); - - closure_bio_submit(&io->bio, cl); - - continue_at(cl, write_dirty, system_freezable_wq); -} - -static u64 read_dirty(struct cached_dev *dc) -{ - struct keybuf_key *w; - struct dirty_io *io; - struct closure cl; - unsigned i; - struct bio_vec *bv; - u64 sectors_written = 0; - BKEY_PADDED(k) tmp; - - closure_init_stack(&cl); - - while (!bch_ratelimit_wait_freezable_stoppable(&dc->writeback_pd.rate)) { - w = bch_keybuf_next(&dc->writeback_keys); - if (!w) - break; - - sectors_written += w->key.k.size; - bkey_copy(&tmp.k, &w->key); - - while (tmp.k.k.size) { - struct extent_pick_ptr pick; - - bch_extent_pick_ptr(dc->disk.c, - bkey_i_to_s_c(&tmp.k), - &pick); - if (IS_ERR_OR_NULL(pick.ca)) - break; - - io = kzalloc(sizeof(*io) + sizeof(struct bio_vec) * - DIV_ROUND_UP(tmp.k.k.size, - PAGE_SECTORS), - GFP_KERNEL); - if (!io) { - trace_bcache_writeback_alloc_fail(pick.ca->fs, - tmp.k.k.size); - io = mempool_alloc(&dc->writeback_io_pool, - GFP_KERNEL); - memset(io, 0, sizeof(*io) + - sizeof(struct bio_vec) * - DIRTY_IO_MEMPOOL_BVECS); - io->from_mempool = true; - - bkey_copy(&io->replace.key, &tmp.k); - - if (DIRTY_IO_MEMPOOL_SECTORS < - io->replace.key.k.size) - bch_key_resize(&io->replace.key.k, - DIRTY_IO_MEMPOOL_SECTORS); - } else { - bkey_copy(&io->replace.key, &tmp.k); - } - - io->dc = dc; - io->ca = pick.ca; - io->w = w; - io->ptr = pick.ptr; - atomic_inc(&w->ref); - - dirty_init(io); - bio_set_op_attrs(&io->bio, REQ_OP_READ, 0); - io->bio.bi_iter.bi_sector = pick.ptr.offset; - io->bio.bi_bdev = pick.ca->disk_sb.bdev; - io->bio.bi_end_io = read_dirty_endio; - - bio_for_each_segment_all(bv, &io->bio, i) { - bv->bv_page = - mempool_alloc(&dc->writeback_page_pool, - i ? 
GFP_NOWAIT - : GFP_KERNEL); - if (!bv->bv_page) { - BUG_ON(!i); - io->bio.bi_vcnt = i; - - io->bio.bi_iter.bi_size = - io->bio.bi_vcnt * PAGE_SIZE; - - bch_key_resize(&io->replace.key.k, - bio_sectors(&io->bio)); - break; - } - } - - bch_cut_front(io->replace.key.k.p, &tmp.k); - trace_bcache_writeback(&io->replace.key.k); - - bch_ratelimit_increment(&dc->writeback_pd.rate, - io->replace.key.k.size << 9); - - closure_call(&io->cl, read_dirty_submit, NULL, &cl); - } - - bch_keybuf_put(&dc->writeback_keys, w); - } - - /* - * Wait for outstanding writeback IOs to finish (and keybuf slots to be - * freed) before refilling again - */ - closure_sync(&cl); - - return sectors_written; -} - -/* Scan for dirty data */ - -static void __bcache_dev_sectors_dirty_add(struct bcache_device *d, - u64 offset, int nr_sectors) -{ - unsigned stripe_offset, stripe, sectors_dirty; - - if (!d) - return; - - if (!d->stripe_sectors_dirty) - return; - - stripe = offset_to_stripe(d, offset); - stripe_offset = offset & (d->stripe_size - 1); - - while (nr_sectors) { - int s = min_t(unsigned, abs(nr_sectors), - d->stripe_size - stripe_offset); - - if (nr_sectors < 0) - s = -s; - - if (stripe >= d->nr_stripes) - return; - - sectors_dirty = atomic_add_return(s, - d->stripe_sectors_dirty + stripe); - if (sectors_dirty == d->stripe_size) - set_bit(stripe, d->full_dirty_stripes); - else - clear_bit(stripe, d->full_dirty_stripes); - - nr_sectors -= s; - stripe_offset = 0; - stripe++; - } -} - -void bcache_dev_sectors_dirty_add(struct bch_fs *c, unsigned inode, - u64 offset, int nr_sectors) -{ - struct bcache_device *d; - - rcu_read_lock(); - d = bch_dev_find(c, inode); - if (d) - __bcache_dev_sectors_dirty_add(d, offset, nr_sectors); - rcu_read_unlock(); -} - -static bool dirty_pred(struct keybuf *buf, struct bkey_s_c k) -{ - struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys); - - BUG_ON(k.k->p.inode != bcache_dev_inum(&dc->disk)); - - return bkey_extent_is_data(k.k) && - !bkey_extent_is_cached(k.k); -} - -static void refill_full_stripes(struct cached_dev *dc) -{ - struct keybuf *buf = &dc->writeback_keys; - unsigned inode = bcache_dev_inum(&dc->disk); - unsigned start_stripe, stripe, next_stripe; - bool wrapped = false; - - stripe = offset_to_stripe(&dc->disk, buf->last_scanned.offset); - - if (stripe >= dc->disk.nr_stripes) - stripe = 0; - - start_stripe = stripe; - - while (1) { - stripe = find_next_bit(dc->disk.full_dirty_stripes, - dc->disk.nr_stripes, stripe); - - if (stripe == dc->disk.nr_stripes) - goto next; - - next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes, - dc->disk.nr_stripes, stripe); - - buf->last_scanned = POS(inode, - stripe * dc->disk.stripe_size); - - bch_refill_keybuf(dc->disk.c, buf, - POS(inode, - next_stripe * dc->disk.stripe_size), - dirty_pred); - - if (array_freelist_empty(&buf->freelist)) - return; - - stripe = next_stripe; -next: - if (wrapped && stripe > start_stripe) - return; - - if (stripe == dc->disk.nr_stripes) { - stripe = 0; - wrapped = true; - } - } -} - -static u64 bch_writeback(struct cached_dev *dc) -{ - struct keybuf *buf = &dc->writeback_keys; - unsigned inode = bcache_dev_inum(&dc->disk); - struct bpos start = POS(inode, 0); - struct bpos end = POS(inode, KEY_OFFSET_MAX); - struct bpos start_pos; - u64 sectors_written = 0; - - buf->last_scanned = POS(inode, 0); - - while (bkey_cmp(buf->last_scanned, end) < 0 && - !kthread_should_stop()) { - down_write(&dc->writeback_lock); - - if (!atomic_read(&dc->has_dirty)) { - up_write(&dc->writeback_lock); 
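			/*
			 * Nothing is dirty: sleep until bch_writeback_add()
			 * wakes the thread again, then return so the main
			 * thread loop can restart the scan.
			 */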
- set_current_state(TASK_INTERRUPTIBLE); - - if (kthread_should_stop()) - return sectors_written; - - schedule(); - try_to_freeze(); - return sectors_written; - } - - if (bkey_cmp(buf->last_scanned, end) >= 0) - buf->last_scanned = POS(inode, 0); - - if (dc->partial_stripes_expensive) { - refill_full_stripes(dc); - if (array_freelist_empty(&buf->freelist)) - goto refill_done; - } - - start_pos = buf->last_scanned; - bch_refill_keybuf(dc->disk.c, buf, end, dirty_pred); - - if (bkey_cmp(buf->last_scanned, end) >= 0) { - /* - * If we get to the end start scanning again from the - * beginning, and only scan up to where we initially - * started scanning from: - */ - buf->last_scanned = start; - bch_refill_keybuf(dc->disk.c, buf, start_pos, - dirty_pred); - } - - if (RB_EMPTY_ROOT(&dc->writeback_keys.keys)) { - atomic_set(&dc->has_dirty, 0); - cached_dev_put(dc); - SET_BDEV_STATE(dc->disk_sb.sb, BDEV_STATE_CLEAN); - bch_write_bdev_super(dc, NULL); - } - -refill_done: - up_write(&dc->writeback_lock); - - bch_ratelimit_reset(&dc->writeback_pd.rate); - sectors_written += read_dirty(dc); - } - - return sectors_written; -} - -static int bch_writeback_thread(void *arg) -{ - struct cached_dev *dc = arg; - struct bch_fs *c = dc->disk.c; - struct io_clock *clock = &c->io_clock[WRITE]; - unsigned long last; - u64 sectors_written; - - set_freezable(); - - while (!kthread_should_stop()) { - if (kthread_wait_freezable(dc->writeback_running || - test_bit(BCACHE_DEV_DETACHING, - &dc->disk.flags))) - break; - - last = atomic_long_read(&clock->now); - - sectors_written = bch_writeback(dc); - - if (sectors_written < c->capacity >> 4) - bch_kthread_io_clock_wait(clock, - last + (c->capacity >> 5)); - } - - return 0; -} - -/** - * bch_keylist_recalc_oldest_gens - update oldest_gen pointers from writeback keys - * - * This prevents us from wrapping around gens for a bucket only referenced from - * writeback keybufs. We don't actually care that the data in those buckets is - * marked live, only that we don't wrap the gens. 
- */ -void bch_writeback_recalc_oldest_gens(struct bch_fs *c) -{ - struct radix_tree_iter iter; - void **slot; - - rcu_read_lock(); - - radix_tree_for_each_slot(slot, &c->devices, &iter, 0) { - struct bcache_device *d; - struct cached_dev *dc; - - d = radix_tree_deref_slot(slot); - - if (!CACHED_DEV(&d->inode.v)) - continue; - dc = container_of(d, struct cached_dev, disk); - - bch_keybuf_recalc_oldest_gens(c, &dc->writeback_keys); - } - - rcu_read_unlock(); -} - -/* Init */ - -void bch_sectors_dirty_init(struct cached_dev *dc, struct bch_fs *c) -{ - struct bcache_device *d = &dc->disk; - struct btree_iter iter; - struct bkey_s_c k; - - /* - * We have to do this before the disk is added to the radix tree or we - * race with moving GC - */ - for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, - POS(bcache_dev_inum(d), 0), k) { - if (k.k->p.inode > bcache_dev_inum(d)) - break; - - if (bkey_extent_is_data(k.k) && - !bkey_extent_is_cached(k.k)) - __bcache_dev_sectors_dirty_add(d, - bkey_start_offset(k.k), - k.k->size); - - bch_btree_iter_cond_resched(&iter); - } - bch_btree_iter_unlock(&iter); - - dc->writeback_pd.last_actual = bcache_dev_sectors_dirty(d); -} - -void bch_cached_dev_writeback_stop(struct cached_dev *dc) -{ - cancel_delayed_work_sync(&dc->writeback_pd_update); - if (!IS_ERR_OR_NULL(dc->writeback_thread)) { - kthread_stop(dc->writeback_thread); - dc->writeback_thread = NULL; - } -} - -void bch_cached_dev_writeback_free(struct cached_dev *dc) -{ - struct bcache_device *d = &dc->disk; - - mempool_exit(&dc->writeback_page_pool); - mempool_exit(&dc->writeback_io_pool); - kvfree(d->full_dirty_stripes); - kvfree(d->stripe_sectors_dirty); -} - -int bch_cached_dev_writeback_init(struct cached_dev *dc) -{ - struct bcache_device *d = &dc->disk; - sector_t sectors; - size_t n; - - sectors = get_capacity(dc->disk.disk); - - if (!d->stripe_size) { -#ifdef CONFIG_BCACHE_DEBUG - d->stripe_size = 1 << 0; -#else - d->stripe_size = 1 << 31; -#endif - } - - pr_debug("stripe size: %d sectors", d->stripe_size); - d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size); - - if (!d->nr_stripes || - d->nr_stripes > INT_MAX || - d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { - pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", - (unsigned)d->nr_stripes); - return -ENOMEM; - } - - n = d->nr_stripes * sizeof(atomic_t); - d->stripe_sectors_dirty = n < PAGE_SIZE << 6 - ? kzalloc(n, GFP_KERNEL) - : vzalloc(n); - if (!d->stripe_sectors_dirty) { - pr_err("cannot allocate stripe_sectors_dirty"); - return -ENOMEM; - } - - n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long); - d->full_dirty_stripes = n < PAGE_SIZE << 6 - ? 
kzalloc(n, GFP_KERNEL) - : vzalloc(n); - if (!d->full_dirty_stripes) { - pr_err("cannot allocate full_dirty_stripes"); - return -ENOMEM; - } - - if (mempool_init_kmalloc_pool(&dc->writeback_io_pool, 4, - sizeof(struct dirty_io) + - sizeof(struct bio_vec) * - DIRTY_IO_MEMPOOL_BVECS) || - mempool_init_page_pool(&dc->writeback_page_pool, - (64 << 10) / PAGE_SIZE, 0)) - return -ENOMEM; - - init_rwsem(&dc->writeback_lock); - bch_keybuf_init(&dc->writeback_keys); - - dc->writeback_metadata = true; - dc->writeback_running = true; - dc->writeback_percent = 10; - dc->writeback_pd_update_seconds = 5; - - bch_pd_controller_init(&dc->writeback_pd); - INIT_DELAYED_WORK(&dc->writeback_pd_update, update_writeback_rate); - - return 0; -} - -int bch_cached_dev_writeback_start(struct cached_dev *dc) -{ - dc->writeback_thread = kthread_create(bch_writeback_thread, dc, - "bcache_writeback"); - if (IS_ERR(dc->writeback_thread)) - return PTR_ERR(dc->writeback_thread); - - schedule_delayed_work(&dc->writeback_pd_update, - dc->writeback_pd_update_seconds * HZ); - - bch_writeback_queue(dc); - - return 0; -} diff --git a/libbcache/writeback.h b/libbcache/writeback.h deleted file mode 100644 index 82ce306e..00000000 --- a/libbcache/writeback.h +++ /dev/null @@ -1,122 +0,0 @@ -#ifndef _BCACHE_WRITEBACK_H -#define _BCACHE_WRITEBACK_H - -#include "blockdev.h" -#include "buckets.h" - -#define CUTOFF_WRITEBACK 60 -#define CUTOFF_WRITEBACK_SYNC 30 - -static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) -{ - uint64_t i, ret = 0; - - for (i = 0; i < d->nr_stripes; i++) - ret += atomic_read(d->stripe_sectors_dirty + i); - - return ret; -} - -static inline unsigned offset_to_stripe(struct bcache_device *d, - uint64_t offset) -{ - do_div(offset, d->stripe_size); - return offset; -} - -static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, - uint64_t offset, - unsigned nr_sectors) -{ - unsigned stripe = offset_to_stripe(&dc->disk, offset); - - while (1) { - if (atomic_read(dc->disk.stripe_sectors_dirty + stripe)) - return true; - - if (nr_sectors <= dc->disk.stripe_size) - return false; - - nr_sectors -= dc->disk.stripe_size; - stripe++; - } -} - -static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, - unsigned cache_mode, bool would_skip) -{ - struct bch_fs *c = dc->disk.c; - u64 available = sectors_available(c); - - if (cache_mode != CACHE_MODE_WRITEBACK || - test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || - available * 100 < c->capacity * CUTOFF_WRITEBACK_SYNC) - return false; - - if (dc->partial_stripes_expensive && - bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, - bio_sectors(bio))) - return true; - - if (would_skip) - return false; - - return bio->bi_opf & REQ_SYNC || - available * 100 < c->capacity * CUTOFF_WRITEBACK; -} - -static inline void bch_writeback_queue(struct cached_dev *dc) -{ - if (!IS_ERR_OR_NULL(dc->writeback_thread)) - wake_up_process(dc->writeback_thread); -} - -static inline void bch_writeback_add(struct cached_dev *dc) -{ - if (!atomic_read(&dc->has_dirty) && - !atomic_xchg(&dc->has_dirty, 1)) { - atomic_inc(&dc->count); - - if (BDEV_STATE(dc->disk_sb.sb) != BDEV_STATE_DIRTY) { - SET_BDEV_STATE(dc->disk_sb.sb, BDEV_STATE_DIRTY); - /* XXX: should do this synchronously */ - bch_write_bdev_super(dc, NULL); - } - - bch_writeback_queue(dc); - } -} - -#ifndef NO_BCACHE_WRITEBACK - -void bcache_dev_sectors_dirty_add(struct bch_fs *, unsigned, u64, int); - -void bch_writeback_recalc_oldest_gens(struct bch_fs *); -void bch_sectors_dirty_init(struct 
cached_dev *, struct bch_fs *c); - -void bch_cached_dev_writeback_stop(struct cached_dev *); -void bch_cached_dev_writeback_free(struct cached_dev *); -int bch_cached_dev_writeback_init(struct cached_dev *); -int bch_cached_dev_writeback_start(struct cached_dev *); - -#else - -static inline void bcache_dev_sectors_dirty_add(struct bch_fs *c, - unsigned i, u64 o, int n) {} -static inline void bch_writeback_recalc_oldest_gens(struct bch_fs *c) {} -static inline void bch_sectors_dirty_init(struct cached_dev *dc, - struct bch_fs *c) {} -static inline void bch_cached_dev_writeback_stop(struct cached_dev *dc) {} -static inline void bch_cached_dev_writeback_free(struct cached_dev *dc) {} -static inline int bch_cached_dev_writeback_init(struct cached_dev *dc) -{ - return 0; -} -static inline int bch_cached_dev_writeback_start(struct cached_dev *dc) -{ - return 0; -} - -#endif - -#endif diff --git a/libbcache/xattr.h b/libbcache/xattr.h deleted file mode 100644 index c48c7acf..00000000 --- a/libbcache/xattr.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _BCACHE_XATTR_H -#define _BCACHE_XATTR_H - -extern const struct bkey_ops bch_bkey_xattr_ops; - -struct dentry; -struct xattr_handler; -struct bch_hash_info; - -int bch_xattr_get(struct bch_fs *, struct inode *, - const char *, void *, size_t, int); -int __bch_xattr_set(struct bch_fs *, u64, const struct bch_hash_info *, - const char *, const void *, size_t, int, int, u64 *); -int bch_xattr_set(struct bch_fs *, struct inode *, - const char *, const void *, size_t, int, int); -ssize_t bch_xattr_list(struct dentry *, char *, size_t); - -extern const struct xattr_handler *bch_xattr_handlers[]; - -#endif /* _BCACHE_XATTR_H */ diff --git a/libbcache.c b/libbcachefs.c index 1278fdfb..9e46ff9d 100644 --- a/libbcache.c +++ b/libbcachefs.c @@ -12,10 +12,10 @@ #include <uuid/uuid.h> -#include "linux/bcache.h" -#include "libbcache.h" +#include "bcachefs_format.h" #include "checksum.h" #include "crypto.h" +#include "libbcachefs.h" #include "opts.h" #include "super-io.h" @@ -277,7 +277,7 @@ struct bch_sb *__bcache_super_read(int fd, u64 sector) xpread(fd, &sb, sizeof(sb), sector << 9); if (memcmp(&sb.magic, &BCACHE_MAGIC, sizeof(sb.magic))) - die("not a bcache superblock"); + die("not a bcachefs superblock"); size_t bytes = vstruct_bytes(&sb); @@ -340,7 +340,7 @@ void bcache_super_print(struct bch_sb *sb, int units) pr_units(1U << BCH_SB_JOURNAL_ENTRY_SIZE(sb), units), BCH_SB_ERROR_ACTION(sb) < BCH_NR_ERROR_ACTIONS - ? bch_error_actions[BCH_SB_ERROR_ACTION(sb)] + ? bch2_error_actions[BCH_SB_ERROR_ACTION(sb)] : "unknown", BCH_SB_CLEAN(sb), @@ -351,19 +351,19 @@ void bcache_super_print(struct bch_sb *sb, int units) BCH_SB_DATA_REPLICAS_WANT(sb), BCH_SB_META_CSUM_TYPE(sb) < BCH_CSUM_NR - ? bch_csum_types[BCH_SB_META_CSUM_TYPE(sb)] + ? bch2_csum_types[BCH_SB_META_CSUM_TYPE(sb)] : "unknown", BCH_SB_DATA_CSUM_TYPE(sb) < BCH_CSUM_NR - ? bch_csum_types[BCH_SB_DATA_CSUM_TYPE(sb)] + ? bch2_csum_types[BCH_SB_DATA_CSUM_TYPE(sb)] : "unknown", BCH_SB_COMPRESSION_TYPE(sb) < BCH_COMPRESSION_NR - ? bch_compression_types[BCH_SB_COMPRESSION_TYPE(sb)] + ? bch2_compression_types[BCH_SB_COMPRESSION_TYPE(sb)] : "unknown", BCH_SB_STR_HASH_TYPE(sb) < BCH_STR_HASH_NR - ? bch_str_hash_types[BCH_SB_STR_HASH_TYPE(sb)] + ? 
bch2_str_hash_types[BCH_SB_STR_HASH_TYPE(sb)] : "unknown", BCH_SB_INODE_32BIT(sb), @@ -372,7 +372,7 @@ void bcache_super_print(struct bch_sb *sb, int units) sb->nr_devices); - mi = bch_sb_get_members(sb); + mi = bch2_sb_get_members(sb); if (!mi) { printf("Member info section missing\n"); return; @@ -407,7 +407,7 @@ void bcache_super_print(struct bch_sb *sb, int units) last_mount ? ctime(&last_mount) : "(never)", BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR - ? bch_dev_state[BCH_MEMBER_STATE(m)] + ? bch2_dev_state[BCH_MEMBER_STATE(m)] : "unknown", BCH_MEMBER_TIER(m), @@ -415,7 +415,7 @@ void bcache_super_print(struct bch_sb *sb, int units) BCH_MEMBER_HAS_DATA(m), BCH_MEMBER_REPLACEMENT(m) < CACHE_REPLACEMENT_NR - ? bch_cache_replacement_policies[BCH_MEMBER_REPLACEMENT(m)] + ? bch2_cache_replacement_policies[BCH_MEMBER_REPLACEMENT(m)] : "unknown", BCH_MEMBER_DISCARD(m)); diff --git a/libbcache.h b/libbcachefs.h index 965f09c0..f9cf8fe4 100644 --- a/libbcache.h +++ b/libbcachefs.h @@ -1,13 +1,12 @@ #ifndef _LIBBCACHE_H #define _LIBBCACHE_H -#include <linux/bcache.h> #include <linux/uuid.h> -#include "tools-util.h" -#include "vstructs.h" -#include "stdbool.h" +#include <stdbool.h> +#include "bcachefs_format.h" #include "tools-util.h" +#include "vstructs.h" struct cache_sb; diff --git a/libbcache/acl.c b/libbcachefs/acl.c index 4363c57e..6fcac72c 100644 --- a/libbcache/acl.c +++ b/libbcachefs/acl.c @@ -1,4 +1,4 @@ -#include "bcache.h" +#include "bcachefs.h" #include <linux/init.h> #include <linux/sched.h> @@ -11,7 +11,7 @@ /* * Convert from filesystem to in-memory representation. */ -static struct posix_acl *bch_acl_from_disk(const void *value, size_t size) +static struct posix_acl *bch2_acl_from_disk(const void *value, size_t size) { const char *end = (char *)value + size; int n, count; @@ -25,7 +25,7 @@ static struct posix_acl *bch_acl_from_disk(const void *value, size_t size) cpu_to_le32(BCH_ACL_VERSION)) return ERR_PTR(-EINVAL); value = (char *)value + sizeof(bch_acl_header); - count = bch_acl_count(size); + count = bch2_acl_count(size); if (count < 0) return ERR_PTR(-EINVAL); if (count == 0) @@ -82,13 +82,13 @@ fail: /* * Convert from in-memory to filesystem representation. 
*/ -static void *bch_acl_to_disk(const struct posix_acl *acl, size_t *size) +static void *bch2_acl_to_disk(const struct posix_acl *acl, size_t *size) { bch_acl_header *ext_acl; char *e; size_t n; - *size = bch_acl_size(acl->a_count); + *size = bch2_acl_size(acl->a_count); ext_acl = kmalloc(sizeof(bch_acl_header) + acl->a_count * sizeof(bch_acl_entry), GFP_KERNEL); if (!ext_acl) @@ -131,7 +131,7 @@ fail: return ERR_PTR(-EINVAL); } -struct posix_acl *bch_get_acl(struct inode *inode, int type) +struct posix_acl *bch2_get_acl(struct inode *inode, int type) { struct bch_fs *c = inode->i_sb->s_fs_info; int name_index; @@ -149,16 +149,16 @@ struct posix_acl *bch_get_acl(struct inode *inode, int type) default: BUG(); } - ret = bch_xattr_get(c, inode, "", NULL, 0, name_index); + ret = bch2_xattr_get(c, inode, "", NULL, 0, name_index); if (ret > 0) { value = kmalloc(ret, GFP_KERNEL); if (!value) return ERR_PTR(-ENOMEM); - ret = bch_xattr_get(c, inode, "", value, + ret = bch2_xattr_get(c, inode, "", value, ret, name_index); } if (ret > 0) - acl = bch_acl_from_disk(value, ret); + acl = bch2_acl_from_disk(value, ret); else if (ret == -ENODATA || ret == -ENOSYS) acl = NULL; else @@ -171,7 +171,7 @@ struct posix_acl *bch_get_acl(struct inode *inode, int type) return acl; } -int bch_set_acl(struct inode *inode, struct posix_acl *acl, int type) +int bch2_set_acl(struct inode *inode, struct posix_acl *acl, int type) { struct bch_fs *c = inode->i_sb->s_fs_info; int name_index; @@ -206,12 +206,12 @@ int bch_set_acl(struct inode *inode, struct posix_acl *acl, int type) } if (acl) { - value = bch_acl_to_disk(acl, &size); + value = bch2_acl_to_disk(acl, &size); if (IS_ERR(value)) return (int)PTR_ERR(value); } - ret = bch_xattr_set(c, inode, "", value, size, 0, name_index); + ret = bch2_xattr_set(c, inode, "", value, size, 0, name_index); kfree(value); diff --git a/libbcache/acl.h b/libbcachefs/acl.h index 079e5689..2e51726f 100644 --- a/libbcache/acl.h +++ b/libbcachefs/acl.h @@ -23,7 +23,7 @@ typedef struct { __le32 a_version; } bch_acl_header; -static inline size_t bch_acl_size(int count) +static inline size_t bch2_acl_size(int count) { if (count <= 4) { return sizeof(bch_acl_header) + @@ -35,7 +35,7 @@ static inline size_t bch_acl_size(int count) } } -static inline int bch_acl_count(size_t size) +static inline int bch2_acl_count(size_t size) { ssize_t s; @@ -52,5 +52,5 @@ static inline int bch_acl_count(size_t size) } } -extern struct posix_acl *bch_get_acl(struct inode *, int); -extern int bch_set_acl(struct inode *, struct posix_acl *, int); +extern struct posix_acl *bch2_get_acl(struct inode *, int); +extern int bch2_set_acl(struct inode *, struct posix_acl *, int); diff --git a/libbcache/alloc.c b/libbcachefs/alloc.c index 2f892914..3067181c 100644 --- a/libbcache/alloc.c +++ b/libbcachefs/alloc.c @@ -39,13 +39,13 @@ * time around, and we garbage collect or rewrite the priorities sooner than we * would have otherwise. * - * bch_bucket_alloc() allocates a single bucket from a specific device. + * bch2_bucket_alloc() allocates a single bucket from a specific device. * - * bch_bucket_alloc_set() allocates one or more buckets from different devices + * bch2_bucket_alloc_set() allocates one or more buckets from different devices * in a given filesystem. * * invalidate_buckets() drives all the processes described above. It's called - * from bch_bucket_alloc() and a few other places that need to make sure free + * from bch2_bucket_alloc() and a few other places that need to make sure free * buckets are ready. 
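As the allocator comment above sketches, a single bucket comes off one device's freelist and a replicated allocation takes one bucket from each of several different devices. A rough illustration of that two-level shape, with invented toy types and none of the real code's reserves, tiers or blocking:

#include <stddef.h>

struct toy_dev {
        size_t free[16];        /* tiny freelist of bucket indices */
        size_t nr_free;
};

/* One bucket from one device; 0 means "none available"
 * (bucket 0 is reserved, as in the real allocator). */
static size_t toy_bucket_alloc(struct toy_dev *ca)
{
        return ca->nr_free ? ca->free[--ca->nr_free] : 0;
}

/* One bucket from each of nr_replicas *different* devices. */
static int toy_bucket_alloc_set(struct toy_dev *devs, unsigned nr_devs,
                                unsigned nr_replicas,
                                size_t *buckets, unsigned *dev_idx)
{
        unsigned found = 0;

        for (unsigned i = 0; i < nr_devs && found < nr_replicas; i++) {
                size_t b = toy_bucket_alloc(&devs[i]);

                if (b) {
                        buckets[found] = b;
                        dev_idx[found] = i;
                        found++;
                }
        }
        return found == nr_replicas ? 0 : -1;   /* caller would wait or invalidate */
}

int main(void)
{
        struct toy_dev devs[3] = {
                { .free = { 7, 12 }, .nr_free = 2 },
                { .nr_free = 0 },                       /* empty device, skipped */
                { .free = { 9 },     .nr_free = 1 },
        };
        size_t   buckets[2];
        unsigned dev_idx[2];

        return toy_bucket_alloc_set(devs, 3, 2, buckets, dev_idx);
}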
* * invalidate_buckets_(lru|fifo)() find buckets that are available to be @@ -53,7 +53,7 @@ * in either lru or fifo order. */ -#include "bcache.h" +#include "bcachefs.h" #include "alloc.h" #include "btree_update.h" #include "buckets.h" @@ -71,14 +71,14 @@ #include <linux/math64.h> #include <linux/random.h> #include <linux/rcupdate.h> -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> -static void __bch_bucket_free(struct bch_dev *, struct bucket *); -static void bch_recalc_min_prio(struct bch_dev *, int); +static void __bch2_bucket_free(struct bch_dev *, struct bucket *); +static void bch2_recalc_min_prio(struct bch_dev *, int); /* Allocation groups: */ -void bch_dev_group_remove(struct dev_group *grp, struct bch_dev *ca) +void bch2_dev_group_remove(struct dev_group *grp, struct bch_dev *ca) { unsigned i; @@ -96,7 +96,7 @@ void bch_dev_group_remove(struct dev_group *grp, struct bch_dev *ca) spin_unlock(&grp->lock); } -void bch_dev_group_add(struct dev_group *grp, struct bch_dev *ca) +void bch2_dev_group_add(struct dev_group *grp, struct bch_dev *ca) { unsigned i; @@ -132,7 +132,7 @@ static void pd_controllers_update(struct work_struct *work) rcu_read_lock(); for (i = 0; i < ARRAY_SIZE(c->tiers); i++) { - bch_pd_controller_update(&c->tiers[i].pd, + bch2_pd_controller_update(&c->tiers[i].pd, div_u64(faster_tiers_size * c->tiering_percent, 100), faster_tiers_dirty, @@ -140,7 +140,7 @@ static void pd_controllers_update(struct work_struct *work) spin_lock(&c->tiers[i].devs.lock); group_for_each_dev(ca, &c->tiers[i].devs, iter) { - struct bch_dev_usage stats = bch_dev_usage_read(ca); + struct bch_dev_usage stats = bch2_dev_usage_read(ca); unsigned bucket_bits = ca->bucket_bits + 9; u64 size = (ca->mi.nbuckets - @@ -159,7 +159,7 @@ static void pd_controllers_update(struct work_struct *work) fragmented = max(0LL, fragmented); - bch_pd_controller_update(&ca->moving_gc_pd, + bch2_pd_controller_update(&ca->moving_gc_pd, free, fragmented, -1); faster_tiers_size += size; @@ -192,7 +192,7 @@ static void pd_controllers_update(struct work_struct *work) if (c->fastest_tier) copygc_can_free = U64_MAX; - bch_pd_controller_update(&c->foreground_write_pd, + bch2_pd_controller_update(&c->foreground_write_pd, min(copygc_can_free, div_u64(fastest_tier_size * c->foreground_target_percent, @@ -241,7 +241,7 @@ static int prio_io(struct bch_dev *ca, uint64_t bucket, int op) ca->bio_prio->bi_iter.bi_sector = bucket * ca->mi.bucket_size; ca->bio_prio->bi_bdev = ca->disk_sb.bdev; ca->bio_prio->bi_iter.bi_size = bucket_bytes(ca); - bch_bio_map(ca->bio_prio, ca->disk_buckets); + bch2_bio_map(ca->bio_prio, ca->disk_buckets); return submit_bio_wait(ca->bio_prio); } @@ -256,7 +256,7 @@ static struct nonce prio_nonce(struct prio_set *p) }}; } -static int bch_prio_write(struct bch_dev *ca) +static int bch2_prio_write(struct bch_dev *ca) { struct bch_fs *c = ca->fs; struct journal *j = &c->journal; @@ -267,7 +267,7 @@ static int bch_prio_write(struct bch_dev *ca) if (c->opts.nochanges) return 0; - trace_bcache_prio_write_start(ca); + trace_prio_write_start(ca); atomic64_add(ca->mi.bucket_size * prio_buckets(ca), &ca->meta_sectors_written); @@ -293,7 +293,7 @@ static int bch_prio_write(struct bch_dev *ca) get_random_bytes(&p->nonce, sizeof(p->nonce)); spin_lock(&ca->prio_buckets_lock); - r = bch_bucket_alloc(ca, RESERVE_PRIO); + r = bch2_bucket_alloc(ca, RESERVE_PRIO); BUG_ON(!r); /* @@ -301,27 +301,27 @@ static int bch_prio_write(struct bch_dev *ca) * it getting gc'd from under us */ ca->prio_buckets[i] = r; 
- bch_mark_metadata_bucket(ca, ca->buckets + r, + bch2_mark_metadata_bucket(ca, ca->buckets + r, BUCKET_PRIOS, false); spin_unlock(&ca->prio_buckets_lock); - SET_PSET_CSUM_TYPE(p, bch_meta_checksum_type(c)); + SET_PSET_CSUM_TYPE(p, bch2_meta_checksum_type(c)); - bch_encrypt(c, PSET_CSUM_TYPE(p), + bch2_encrypt(c, PSET_CSUM_TYPE(p), prio_nonce(p), p->encrypted_start, bucket_bytes(ca) - offsetof(struct prio_set, encrypted_start)); - p->csum = bch_checksum(c, PSET_CSUM_TYPE(p), + p->csum = bch2_checksum(c, PSET_CSUM_TYPE(p), prio_nonce(p), (void *) p + sizeof(p->csum), bucket_bytes(ca) - sizeof(p->csum)); ret = prio_io(ca, r, REQ_OP_WRITE); - if (bch_dev_fatal_io_err_on(ret, ca, + if (bch2_dev_fatal_io_err_on(ret, ca, "prio write to bucket %zu", r) || - bch_meta_write_fault("prio")) + bch2_meta_write_fault("prio")) return ret; } @@ -338,15 +338,15 @@ static int bch_prio_write(struct bch_dev *ca) if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) break; - ret = bch_journal_res_get(j, &res, u64s, u64s); + ret = bch2_journal_res_get(j, &res, u64s, u64s); if (ret) return ret; need_new_journal_entry = j->buf[res.idx].nr_prio_buckets < ca->dev_idx + 1; - bch_journal_res_put(j, &res); + bch2_journal_res_put(j, &res); - ret = bch_journal_flush_seq(j, res.seq); + ret = bch2_journal_flush_seq(j, res.seq); if (ret) return ret; } while (need_new_journal_entry); @@ -360,7 +360,7 @@ static int bch_prio_write(struct bch_dev *ca) for (i = 0; i < prio_buckets(ca); i++) { if (ca->prio_last_buckets[i]) - __bch_bucket_free(ca, + __bch2_bucket_free(ca, &ca->buckets[ca->prio_last_buckets[i]]); ca->prio_last_buckets[i] = ca->prio_buckets[i]; @@ -368,11 +368,11 @@ static int bch_prio_write(struct bch_dev *ca) spin_unlock(&ca->prio_buckets_lock); - trace_bcache_prio_write_end(ca); + trace_prio_write_end(ca); return 0; } -int bch_prio_read(struct bch_dev *ca) +int bch2_prio_read(struct bch_dev *ca) { struct bch_fs *c = ca->fs; struct prio_set *p = ca->disk_buckets; @@ -404,10 +404,10 @@ int bch_prio_read(struct bch_dev *ca) bucket_nr++; ret = prio_io(ca, bucket, REQ_OP_READ); - if (bch_dev_fatal_io_err_on(ret, ca, + if (bch2_dev_fatal_io_err_on(ret, ca, "prior read from bucket %llu", bucket) || - bch_meta_read_fault("prio")) + bch2_meta_read_fault("prio")) return -EIO; got = le64_to_cpu(p->magic); @@ -420,15 +420,15 @@ int bch_prio_read(struct bch_dev *ca) "prio bucket with unknown csum type %llu bucket %lluu", PSET_CSUM_TYPE(p), bucket); - csum = bch_checksum(c, PSET_CSUM_TYPE(p), + csum = bch2_checksum(c, PSET_CSUM_TYPE(p), prio_nonce(p), (void *) p + sizeof(p->csum), bucket_bytes(ca) - sizeof(p->csum)); - unfixable_fsck_err_on(bch_crc_cmp(csum, p->csum), c, + unfixable_fsck_err_on(bch2_crc_cmp(csum, p->csum), c, "bad checksum reading prios from bucket %llu", bucket); - bch_encrypt(c, PSET_CSUM_TYPE(p), + bch2_encrypt(c, PSET_CSUM_TYPE(p), prio_nonce(p), p->encrypted_start, bucket_bytes(ca) - @@ -445,8 +445,8 @@ int bch_prio_read(struct bch_dev *ca) } mutex_lock(&c->bucket_lock); - bch_recalc_min_prio(ca, READ); - bch_recalc_min_prio(ca, WRITE); + bch2_recalc_min_prio(ca, READ); + bch2_recalc_min_prio(ca, WRITE); mutex_unlock(&c->bucket_lock); ret = 0; @@ -476,7 +476,7 @@ static int wait_buckets_available(struct bch_dev *ca) if (ca->inc_gen_needs_gc >= fifo_free(&ca->free_inc)) { if (c->gc_thread) { - trace_bcache_gc_cannot_inc_gens(ca->fs); + trace_gc_cannot_inc_gens(ca->fs); atomic_inc(&c->kick_gc); wake_up_process(ca->fs->gc_thread); } @@ -521,7 +521,7 @@ static void verify_not_on_freelist(struct bch_dev *ca, 
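Note the layout convention in the prio write and read paths above: the csum field sits first in the on-disk struct and is computed over everything after itself, then recomputed and compared on read. A small sketch of that pattern, using FNV-1a purely as a stand-in for the real bch2_checksum() and an invented struct:

#include <stddef.h>
#include <stdint.h>

struct toy_pset {
        uint64_t csum;          /* covers everything after this field */
        uint64_t magic;
        uint8_t  data[4096 - 16];
};

static uint64_t fnv1a_64(const void *buf, size_t len)
{
        const uint8_t *p = buf;
        uint64_t h = 0xcbf29ce484222325ULL;

        while (len--) {
                h ^= *p++;
                h *= 0x100000001b3ULL;
        }
        return h;
}

static void toy_pset_seal(struct toy_pset *p)
{
        p->csum = fnv1a_64((const char *) p + sizeof(p->csum),
                           sizeof(*p) - sizeof(p->csum));
}

static int toy_pset_verify(const struct toy_pset *p)
{
        return p->csum == fnv1a_64((const char *) p + sizeof(p->csum),
                                   sizeof(*p) - sizeof(p->csum));
}

int main(void)
{
        struct toy_pset p = { .magic = 0x1234 };

        toy_pset_seal(&p);
        return toy_pset_verify(&p) ? 0 : 1;
}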
size_t bucket) /* Bucket heap / gen */ -void bch_recalc_min_prio(struct bch_dev *ca, int rw) +void bch2_recalc_min_prio(struct bch_dev *ca, int rw) { struct bch_fs *c = ca->fs; struct prio_clock *clock = &c->prio_clock[rw]; @@ -550,25 +550,25 @@ void bch_recalc_min_prio(struct bch_dev *ca, int rw) clock->min_prio = clock->hand - max_delta; } -static void bch_rescale_prios(struct bch_fs *c, int rw) +static void bch2_rescale_prios(struct bch_fs *c, int rw) { struct prio_clock *clock = &c->prio_clock[rw]; struct bch_dev *ca; struct bucket *g; unsigned i; - trace_bcache_rescale_prios(c); + trace_rescale_prios(c); for_each_member_device(ca, c, i) { for_each_bucket(g, ca) g->prio[rw] = clock->hand - (clock->hand - g->prio[rw]) / 2; - bch_recalc_min_prio(ca, rw); + bch2_recalc_min_prio(ca, rw); } } -static void bch_inc_clock_hand(struct io_timer *timer) +static void bch2_inc_clock_hand(struct io_timer *timer) { struct prio_clock *clock = container_of(timer, struct prio_clock, rescale); @@ -582,7 +582,7 @@ static void bch_inc_clock_hand(struct io_timer *timer) /* if clock cannot be advanced more, rescale prio */ if (clock->hand == (u16) (clock->min_prio - 1)) - bch_rescale_prios(c, clock->rw); + bch2_rescale_prios(c, clock->rw); mutex_unlock(&c->bucket_lock); @@ -601,16 +601,16 @@ static void bch_inc_clock_hand(struct io_timer *timer) */ timer->expire += capacity >> 10; - bch_io_timer_add(&c->io_clock[clock->rw], timer); + bch2_io_timer_add(&c->io_clock[clock->rw], timer); } -static void bch_prio_timer_init(struct bch_fs *c, int rw) +static void bch2_prio_timer_init(struct bch_fs *c, int rw) { struct prio_clock *clock = &c->prio_clock[rw]; struct io_timer *timer = &clock->rescale; clock->rw = rw; - timer->fn = bch_inc_clock_hand; + timer->fn = bch2_inc_clock_hand; timer->expire = c->capacity >> 10; } @@ -626,7 +626,7 @@ static inline bool can_inc_bucket_gen(struct bch_dev *ca, struct bucket *g) return bucket_gc_gen(ca, g) < BUCKET_GC_GEN_MAX; } -static bool bch_can_invalidate_bucket(struct bch_dev *ca, struct bucket *g) +static bool bch2_can_invalidate_bucket(struct bch_dev *ca, struct bucket *g) { if (!is_available_bucket(READ_ONCE(g->mark))) return false; @@ -637,11 +637,11 @@ static bool bch_can_invalidate_bucket(struct bch_dev *ca, struct bucket *g) return can_inc_bucket_gen(ca, g); } -static void bch_invalidate_one_bucket(struct bch_dev *ca, struct bucket *g) +static void bch2_invalidate_one_bucket(struct bch_dev *ca, struct bucket *g) { spin_lock(&ca->freelist_lock); - bch_invalidate_bucket(ca, g); + bch2_invalidate_bucket(ca, g); g->read_prio = ca->fs->prio_clock[READ].hand; g->write_prio = ca->fs->prio_clock[WRITE].hand; @@ -689,8 +689,8 @@ static void invalidate_buckets_lru(struct bch_dev *ca) ca->heap.used = 0; mutex_lock(&ca->fs->bucket_lock); - bch_recalc_min_prio(ca, READ); - bch_recalc_min_prio(ca, WRITE); + bch2_recalc_min_prio(ca, READ); + bch2_recalc_min_prio(ca, WRITE); /* * Find buckets with lowest read priority, by building a maxheap sorted @@ -698,7 +698,7 @@ static void invalidate_buckets_lru(struct bch_dev *ca) * all buckets have been visited. 
*/ for_each_bucket(g, ca) { - if (!bch_can_invalidate_bucket(ca, g)) + if (!bch2_can_invalidate_bucket(ca, g)) continue; bucket_heap_push(ca, g, bucket_sort_key(g)); @@ -714,13 +714,13 @@ static void invalidate_buckets_lru(struct bch_dev *ca) heap_resort(&ca->heap, bucket_max_cmp); /* - * If we run out of buckets to invalidate, bch_allocator_thread() will + * If we run out of buckets to invalidate, bch2_allocator_thread() will * kick stuff and retry us */ while (!fifo_full(&ca->free_inc) && heap_pop(&ca->heap, e, bucket_max_cmp)) { - BUG_ON(!bch_can_invalidate_bucket(ca, e.g)); - bch_invalidate_one_bucket(ca, e.g); + BUG_ON(!bch2_can_invalidate_bucket(ca, e.g)); + bch2_invalidate_one_bucket(ca, e.g); } mutex_unlock(&ca->fs->bucket_lock); @@ -739,8 +739,8 @@ static void invalidate_buckets_fifo(struct bch_dev *ca) g = ca->buckets + ca->fifo_last_bucket++; - if (bch_can_invalidate_bucket(ca, g)) - bch_invalidate_one_bucket(ca, g); + if (bch2_can_invalidate_bucket(ca, g)) + bch2_invalidate_one_bucket(ca, g); if (++checked >= ca->mi.nbuckets) return; @@ -753,14 +753,14 @@ static void invalidate_buckets_random(struct bch_dev *ca) size_t checked = 0; while (!fifo_full(&ca->free_inc)) { - size_t n = bch_rand_range(ca->mi.nbuckets - + size_t n = bch2_rand_range(ca->mi.nbuckets - ca->mi.first_bucket) + ca->mi.first_bucket; g = ca->buckets + n; - if (bch_can_invalidate_bucket(ca, g)) - bch_invalidate_one_bucket(ca, g); + if (bch2_can_invalidate_bucket(ca, g)) + bch2_invalidate_one_bucket(ca, g); if (++checked >= ca->mi.nbuckets / 2) return; @@ -784,7 +784,7 @@ static void invalidate_buckets(struct bch_dev *ca) } } -static bool __bch_allocator_push(struct bch_dev *ca, long bucket) +static bool __bch2_allocator_push(struct bch_dev *ca, long bucket) { if (fifo_push(&ca->free[RESERVE_PRIO], bucket)) goto success; @@ -804,12 +804,12 @@ success: return true; } -static bool bch_allocator_push(struct bch_dev *ca, long bucket) +static bool bch2_allocator_push(struct bch_dev *ca, long bucket) { bool ret; spin_lock(&ca->freelist_lock); - ret = __bch_allocator_push(ca, bucket); + ret = __bch2_allocator_push(ca, bucket); if (ret) fifo_pop(&ca->free_inc, bucket); spin_unlock(&ca->freelist_lock); @@ -817,7 +817,7 @@ static bool bch_allocator_push(struct bch_dev *ca, long bucket) return ret; } -static void bch_find_empty_buckets(struct bch_fs *c, struct bch_dev *ca) +static void bch2_find_empty_buckets(struct bch_fs *c, struct bch_dev *ca) { u16 last_seq_ondisk = c->journal.last_seq_ondisk; struct bucket *g; @@ -831,7 +831,7 @@ static void bch_find_empty_buckets(struct bch_fs *c, struct bch_dev *ca) !bucket_needs_journal_commit(m, last_seq_ondisk)) { spin_lock(&ca->freelist_lock); - bch_mark_alloc_bucket(ca, g, true); + bch2_mark_alloc_bucket(ca, g, true); g->read_prio = c->prio_clock[READ].hand; g->write_prio = c->prio_clock[WRITE].hand; @@ -854,7 +854,7 @@ static void bch_find_empty_buckets(struct bch_fs *c, struct bch_dev *ca) * of free_inc, try to invalidate some buckets and write out * prios and gens. 
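Once buckets have been invalidated onto free_inc, __bch2_allocator_push() above offers each one to the reserve freelists in priority order, the prio reserve first. A compact sketch of that step, with a fixed-size array standing in for the kernel fifo and reserve names other than RESERVE_PRIO invented:

#include <stdbool.h>
#include <stddef.h>

enum toy_reserve { RES_PRIO, RES_BTREE, RES_MOVINGGC, RES_NONE, RES_NR };

struct toy_fifo {
        size_t buf[32];
        size_t used;
};

static bool toy_fifo_push(struct toy_fifo *f, size_t v)
{
        if (f->used == sizeof(f->buf) / sizeof(f->buf[0]))
                return false;
        f->buf[f->used++] = v;
        return true;
}

/* Offer the bucket to each reserve in turn; true if one took it. */
static bool toy_allocator_push(struct toy_fifo reserves[RES_NR], size_t bucket)
{
        for (int r = RES_PRIO; r < RES_NR; r++)
                if (toy_fifo_push(&reserves[r], bucket))
                        return true;
        return false;
}

int main(void)
{
        struct toy_fifo reserves[RES_NR] = { { .used = 0 } };

        return toy_allocator_push(reserves, 42) ? 0 : 1;
}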
*/ -static int bch_allocator_thread(void *arg) +static int bch2_allocator_thread(void *arg) { struct bch_dev *ca = arg; struct bch_fs *c = ca->fs; @@ -862,7 +862,7 @@ static int bch_allocator_thread(void *arg) set_freezable(); - bch_find_empty_buckets(c, ca); + bch2_find_empty_buckets(c, ca); while (1) { /* @@ -888,7 +888,7 @@ static int bch_allocator_thread(void *arg) while (1) { set_current_state(TASK_INTERRUPTIBLE); - if (bch_allocator_push(ca, bucket)) + if (bch2_allocator_push(ca, bucket)) break; if (kthread_should_stop()) { @@ -908,7 +908,7 @@ static int bch_allocator_thread(void *arg) * See if we have buckets we can reuse without invalidating them * or forcing a journal commit: */ - //bch_find_empty_buckets(c, ca); + //bch2_find_empty_buckets(c, ca); if (fifo_used(&ca->free_inc) * 2 > ca->free_inc.size) { up_read(&c->gc_lock); @@ -931,7 +931,7 @@ static int bch_allocator_thread(void *arg) */ invalidate_buckets(ca); - trace_bcache_alloc_batch(ca, fifo_used(&ca->free_inc), + trace_alloc_batch(ca, fifo_used(&ca->free_inc), ca->free_inc.size); } @@ -941,7 +941,7 @@ static int bch_allocator_thread(void *arg) * free_inc is full of newly-invalidated buckets, must write out * prios and gens before they can be re-used */ - ret = bch_prio_write(ca); + ret = bch2_prio_write(ca); if (ret) { /* * Emergency read only - allocator thread has to @@ -959,7 +959,7 @@ static int bch_allocator_thread(void *arg) long bucket; fifo_pop(&ca->free_inc, bucket); - bch_mark_free_bucket(ca, ca->buckets + bucket); + bch2_mark_free_bucket(ca, ca->buckets + bucket); } spin_unlock(&ca->freelist_lock); goto out; @@ -967,7 +967,7 @@ static int bch_allocator_thread(void *arg) } out: /* - * Avoid a race with bch_usage_update() trying to wake us up after + * Avoid a race with bch2_usage_update() trying to wake us up after * we've exited: */ synchronize_rcu(); @@ -981,7 +981,7 @@ out: * * Returns index of bucket on success, 0 on failure * */ -size_t bch_bucket_alloc(struct bch_dev *ca, enum alloc_reserve reserve) +size_t bch2_bucket_alloc(struct bch_dev *ca, enum alloc_reserve reserve) { struct bucket *g; long r; @@ -993,15 +993,15 @@ size_t bch_bucket_alloc(struct bch_dev *ca, enum alloc_reserve reserve) spin_unlock(&ca->freelist_lock); - trace_bcache_bucket_alloc_fail(ca, reserve); + trace_bucket_alloc_fail(ca, reserve); return 0; out: verify_not_on_freelist(ca, r); spin_unlock(&ca->freelist_lock); - trace_bcache_bucket_alloc(ca, reserve); + trace_bucket_alloc(ca, reserve); - bch_wake_allocator(ca); + bch2_wake_allocator(ca); g = ca->buckets + r; @@ -1011,9 +1011,9 @@ out: return r; } -static void __bch_bucket_free(struct bch_dev *ca, struct bucket *g) +static void __bch2_bucket_free(struct bch_dev *ca, struct bucket *g) { - bch_mark_free_bucket(ca, g); + bch2_mark_free_bucket(ca, g); g->read_prio = ca->fs->prio_clock[READ].hand; g->write_prio = ca->fs->prio_clock[WRITE].hand; @@ -1053,7 +1053,7 @@ static void recalc_alloc_group_weights(struct bch_fs *c, } } -static enum bucket_alloc_ret bch_bucket_alloc_group(struct bch_fs *c, +static enum bucket_alloc_ret bch2_bucket_alloc_group(struct bch_fs *c, struct open_bucket *ob, enum alloc_reserve reserve, unsigned nr_replicas, @@ -1104,7 +1104,7 @@ static enum bucket_alloc_ret bch_bucket_alloc_group(struct bch_fs *c, get_random_int() > devs->d[i].weight) continue; - bucket = bch_bucket_alloc(ca, reserve); + bucket = bch2_bucket_alloc(ca, reserve); if (!bucket) { if (fail_idx == -1) fail_idx = i; @@ -1141,7 +1141,7 @@ err: return ret; } -static enum bucket_alloc_ret 
__bch_bucket_alloc_set(struct bch_fs *c, +static enum bucket_alloc_ret __bch2_bucket_alloc_set(struct bch_fs *c, struct write_point *wp, struct open_bucket *ob, unsigned nr_replicas, @@ -1156,20 +1156,20 @@ static enum bucket_alloc_ret __bch_bucket_alloc_set(struct bch_fs *c, * XXX: switch off wp->type and do something more intelligent here */ if (wp->group) - return bch_bucket_alloc_group(c, ob, reserve, nr_replicas, + return bch2_bucket_alloc_group(c, ob, reserve, nr_replicas, wp->group, devs_used); /* foreground writes: prefer fastest tier: */ tier = READ_ONCE(c->fastest_tier); if (tier) - bch_bucket_alloc_group(c, ob, reserve, nr_replicas, + bch2_bucket_alloc_group(c, ob, reserve, nr_replicas, &tier->devs, devs_used); - return bch_bucket_alloc_group(c, ob, reserve, nr_replicas, + return bch2_bucket_alloc_group(c, ob, reserve, nr_replicas, &c->all_devs, devs_used); } -static int bch_bucket_alloc_set(struct bch_fs *c, struct write_point *wp, +static int bch2_bucket_alloc_set(struct bch_fs *c, struct write_point *wp, struct open_bucket *ob, unsigned nr_replicas, enum alloc_reserve reserve, long *devs_used, struct closure *cl) @@ -1177,7 +1177,7 @@ static int bch_bucket_alloc_set(struct bch_fs *c, struct write_point *wp, bool waiting = false; while (1) { - switch (__bch_bucket_alloc_set(c, wp, ob, nr_replicas, + switch (__bch2_bucket_alloc_set(c, wp, ob, nr_replicas, reserve, devs_used)) { case ALLOC_SUCCESS: if (waiting) @@ -1192,7 +1192,7 @@ static int bch_bucket_alloc_set(struct bch_fs *c, struct write_point *wp, case FREELIST_EMPTY: if (!cl || waiting) - trace_bcache_freelist_empty_fail(c, + trace_freelist_empty_fail(c, reserve, cl); if (!cl) @@ -1229,7 +1229,7 @@ static int bch_bucket_alloc_set(struct bch_fs *c, struct write_point *wp, * reference _after_ doing the index update that makes its allocation reachable. 
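Open buckets are pinned by a reference count while writes and their index updates are in flight; the last put moves the bucket back onto the free list and wakes waiters. A C11-atomics sketch of that put path, with locking reduced to a pthread mutex, the wakeup left as a comment, and all names invented:

#include <pthread.h>
#include <stdatomic.h>

struct toy_open_bucket {
        atomic_int               pin;
        struct toy_open_bucket  *next_free;
};

struct toy_fs {
        pthread_mutex_t          open_buckets_lock;
        struct toy_open_bucket  *open_buckets_free;
};

static void toy_open_bucket_put(struct toy_fs *c, struct toy_open_bucket *b)
{
        /* Last reference: move the bucket to the free list under the lock. */
        if (atomic_fetch_sub(&b->pin, 1) == 1) {
                pthread_mutex_lock(&c->open_buckets_lock);
                b->next_free         = c->open_buckets_free;
                c->open_buckets_free = b;
                pthread_mutex_unlock(&c->open_buckets_lock);
                /* ...then wake anyone waiting for a free open bucket. */
        }
}

int main(void)
{
        struct toy_fs c = { .open_buckets_lock = PTHREAD_MUTEX_INITIALIZER };
        struct toy_open_bucket ob = { .pin = 1 };

        toy_open_bucket_put(&c, &ob);           /* last ref: goes on the free list */
        return c.open_buckets_free == &ob ? 0 : 1;
}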
*/ -static void __bch_open_bucket_put(struct bch_fs *c, struct open_bucket *ob) +static void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob) { const struct bch_extent_ptr *ptr; @@ -1238,7 +1238,7 @@ static void __bch_open_bucket_put(struct bch_fs *c, struct open_bucket *ob) open_bucket_for_each_ptr(ob, ptr) { struct bch_dev *ca = c->devs[ptr->dev]; - bch_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), false); + bch2_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), false); } ob->nr_ptrs = 0; @@ -1248,16 +1248,16 @@ static void __bch_open_bucket_put(struct bch_fs *c, struct open_bucket *ob) closure_wake_up(&c->open_buckets_wait); } -void bch_open_bucket_put(struct bch_fs *c, struct open_bucket *b) +void bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *b) { if (atomic_dec_and_test(&b->pin)) { spin_lock(&c->open_buckets_lock); - __bch_open_bucket_put(c, b); + __bch2_open_bucket_put(c, b); spin_unlock(&c->open_buckets_lock); } } -static struct open_bucket *bch_open_bucket_get(struct bch_fs *c, +static struct open_bucket *bch2_open_bucket_get(struct bch_fs *c, unsigned nr_reserved, struct closure *cl) { @@ -1276,9 +1276,9 @@ static struct open_bucket *bch_open_bucket_get(struct bch_fs *c, ret->has_full_ptrs = false; c->open_buckets_nr_free--; - trace_bcache_open_bucket_alloc(c, cl); + trace_open_bucket_alloc(c, cl); } else { - trace_bcache_open_bucket_alloc_fail(c, cl); + trace_open_bucket_alloc_fail(c, cl); if (cl) { closure_wait(&c->open_buckets_wait, cl); @@ -1339,7 +1339,7 @@ static void open_bucket_copy_unused_ptrs(struct bch_fs *c, static void verify_not_stale(struct bch_fs *c, const struct open_bucket *ob) { -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG const struct bch_extent_ptr *ptr; open_bucket_for_each_ptr(ob, ptr) { @@ -1396,7 +1396,7 @@ static int open_bucket_add_buckets(struct bch_fs *c, for (i = 0; i < ob->nr_ptrs; i++) __set_bit(ob->ptrs[i].dev, devs_used); - ret = bch_bucket_alloc_set(c, wp, ob, nr_replicas, + ret = bch2_bucket_alloc_set(c, wp, ob, nr_replicas, reserve, devs_used, cl); if (ret == -EROFS && @@ -1409,12 +1409,12 @@ static int open_bucket_add_buckets(struct bch_fs *c, /* * Get us an open_bucket we can allocate from, return with it locked: */ -struct open_bucket *bch_alloc_sectors_start(struct bch_fs *c, - struct write_point *wp, - unsigned nr_replicas, - unsigned nr_replicas_required, - enum alloc_reserve reserve, - struct closure *cl) +struct open_bucket *bch2_alloc_sectors_start(struct bch_fs *c, + struct write_point *wp, + unsigned nr_replicas, + unsigned nr_replicas_required, + enum alloc_reserve reserve, + struct closure *cl) { struct open_bucket *ob; unsigned open_buckets_reserved = wp == &c->btree_write_point @@ -1435,7 +1435,7 @@ retry: if (!ob || ob->has_full_ptrs) { struct open_bucket *new_ob; - new_ob = bch_open_bucket_get(c, open_buckets_reserved, cl); + new_ob = bch2_open_bucket_get(c, open_buckets_reserved, cl); if (IS_ERR(new_ob)) return new_ob; @@ -1449,7 +1449,7 @@ retry: cmpxchg(&wp->b, ob, new_ob) != ob) { /* We raced: */ mutex_unlock(&new_ob->lock); - bch_open_bucket_put(c, new_ob); + bch2_open_bucket_put(c, new_ob); if (ob) mutex_unlock(&ob->lock); @@ -1459,7 +1459,7 @@ retry: if (ob) { open_bucket_copy_unused_ptrs(c, new_ob, ob); mutex_unlock(&ob->lock); - bch_open_bucket_put(c, ob); + bch2_open_bucket_put(c, ob); } ob = new_ob; @@ -1485,9 +1485,9 @@ retry: * Append pointers to the space we just allocated to @k, and mark @sectors space * as allocated out of @ob */ -void bch_alloc_sectors_append_ptrs(struct bch_fs 
*c, struct bkey_i_extent *e, - unsigned nr_replicas, struct open_bucket *ob, - unsigned sectors) +void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct bkey_i_extent *e, + unsigned nr_replicas, struct open_bucket *ob, + unsigned sectors) { struct bch_extent_ptr tmp; bool has_data = false; @@ -1495,7 +1495,7 @@ void bch_alloc_sectors_append_ptrs(struct bch_fs *c, struct bkey_i_extent *e, /* * We're keeping any existing pointer k has, and appending new pointers: - * __bch_write() will only write to the pointers we add here: + * __bch2_write() will only write to the pointers we add here: */ BUG_ON(sectors > ob->sectors_free); @@ -1505,7 +1505,7 @@ void bch_alloc_sectors_append_ptrs(struct bch_fs *c, struct bkey_i_extent *e, has_data = true; for (i = 0; i < min(ob->nr_ptrs, nr_replicas); i++) { - EBUG_ON(bch_extent_has_device(extent_i_to_s_c(e), ob->ptrs[i].dev)); + EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptrs[i].dev)); tmp = ob->ptrs[i]; tmp.cached = bkey_extent_is_cached(&e->k); @@ -1522,7 +1522,7 @@ void bch_alloc_sectors_append_ptrs(struct bch_fs *c, struct bkey_i_extent *e, * Append pointers to the space we just allocated to @k, and mark @sectors space * as allocated out of @ob */ -void bch_alloc_sectors_done(struct bch_fs *c, struct write_point *wp, +void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp, struct open_bucket *ob) { bool has_data = false; @@ -1560,35 +1560,35 @@ void bch_alloc_sectors_done(struct bch_fs *c, struct write_point *wp, * @k - key to return the allocated space information. * @cl - closure to wait for a bucket */ -struct open_bucket *bch_alloc_sectors(struct bch_fs *c, - struct write_point *wp, - struct bkey_i_extent *e, - unsigned nr_replicas, - unsigned nr_replicas_required, - enum alloc_reserve reserve, - struct closure *cl) +struct open_bucket *bch2_alloc_sectors(struct bch_fs *c, + struct write_point *wp, + struct bkey_i_extent *e, + unsigned nr_replicas, + unsigned nr_replicas_required, + enum alloc_reserve reserve, + struct closure *cl) { struct open_bucket *ob; - ob = bch_alloc_sectors_start(c, wp, nr_replicas, + ob = bch2_alloc_sectors_start(c, wp, nr_replicas, nr_replicas_required, reserve, cl); if (IS_ERR_OR_NULL(ob)) return ob; if (e->k.size > ob->sectors_free) - bch_key_resize(&e->k, ob->sectors_free); + bch2_key_resize(&e->k, ob->sectors_free); - bch_alloc_sectors_append_ptrs(c, e, nr_replicas, ob, e->k.size); + bch2_alloc_sectors_append_ptrs(c, e, nr_replicas, ob, e->k.size); - bch_alloc_sectors_done(c, wp, ob); + bch2_alloc_sectors_done(c, wp, ob); return ob; } /* Startup/shutdown (ro/rw): */ -void bch_recalc_capacity(struct bch_fs *c) +void bch2_recalc_capacity(struct bch_fs *c) { struct bch_tier *fastest_tier = NULL, *slowest_tier = NULL, *tier; struct bch_dev *ca; @@ -1676,14 +1676,14 @@ set_capacity: c->capacity = capacity; if (c->capacity) { - bch_io_timer_add(&c->io_clock[READ], + bch2_io_timer_add(&c->io_clock[READ], &c->prio_clock[READ].rescale); - bch_io_timer_add(&c->io_clock[WRITE], + bch2_io_timer_add(&c->io_clock[WRITE], &c->prio_clock[WRITE].rescale); } else { - bch_io_timer_del(&c->io_clock[READ], + bch2_io_timer_del(&c->io_clock[READ], &c->prio_clock[READ].rescale); - bch_io_timer_del(&c->io_clock[WRITE], + bch2_io_timer_del(&c->io_clock[WRITE], &c->prio_clock[WRITE].rescale); } @@ -1691,7 +1691,7 @@ set_capacity: closure_wake_up(&c->freelist_wait); } -static void bch_stop_write_point(struct bch_dev *ca, +static void bch2_stop_write_point(struct bch_dev *ca, struct write_point *wp) { struct 
bch_fs *c = ca->fs; @@ -1713,10 +1713,10 @@ found: mutex_unlock(&ob->lock); /* Drop writepoint's ref: */ - bch_open_bucket_put(c, ob); + bch2_open_bucket_put(c, ob); } -static bool bch_dev_has_open_write_point(struct bch_dev *ca) +static bool bch2_dev_has_open_write_point(struct bch_dev *ca) { struct bch_fs *c = ca->fs; struct bch_extent_ptr *ptr; @@ -1739,7 +1739,7 @@ static bool bch_dev_has_open_write_point(struct bch_dev *ca) } /* device goes ro: */ -void bch_dev_allocator_stop(struct bch_dev *ca) +void bch2_dev_allocator_stop(struct bch_dev *ca) { struct bch_fs *c = ca->fs; struct dev_group *tier = &c->tiers[ca->mi.tier].devs; @@ -1751,10 +1751,10 @@ void bch_dev_allocator_stop(struct bch_dev *ca) /* First, remove device from allocation groups: */ - bch_dev_group_remove(tier, ca); - bch_dev_group_remove(&c->all_devs, ca); + bch2_dev_group_remove(tier, ca); + bch2_dev_group_remove(&c->all_devs, ca); - bch_recalc_capacity(c); + bch2_recalc_capacity(c); /* * Stopping the allocator thread comes after removing from allocation @@ -1767,7 +1767,7 @@ void bch_dev_allocator_stop(struct bch_dev *ca) /* * We need an rcu barrier between setting ca->alloc_thread = NULL and - * the thread shutting down to avoid a race with bch_usage_update() - + * the thread shutting down to avoid a race with bch2_usage_update() - * the allocator thread itself does a synchronize_rcu() on exit. * * XXX: it would be better to have the rcu barrier be asynchronous @@ -1781,20 +1781,20 @@ void bch_dev_allocator_stop(struct bch_dev *ca) /* Next, close write points that point to this device... */ for (i = 0; i < ARRAY_SIZE(c->write_points); i++) - bch_stop_write_point(ca, &c->write_points[i]); + bch2_stop_write_point(ca, &c->write_points[i]); - bch_stop_write_point(ca, &ca->copygc_write_point); - bch_stop_write_point(ca, &c->promote_write_point); - bch_stop_write_point(ca, &ca->tiering_write_point); - bch_stop_write_point(ca, &c->migration_write_point); - bch_stop_write_point(ca, &c->btree_write_point); + bch2_stop_write_point(ca, &ca->copygc_write_point); + bch2_stop_write_point(ca, &c->promote_write_point); + bch2_stop_write_point(ca, &ca->tiering_write_point); + bch2_stop_write_point(ca, &c->migration_write_point); + bch2_stop_write_point(ca, &c->btree_write_point); mutex_lock(&c->btree_reserve_cache_lock); while (c->btree_reserve_cache_nr) { struct btree_alloc *a = &c->btree_reserve_cache[--c->btree_reserve_cache_nr]; - bch_open_bucket_put(c, a->ob); + bch2_open_bucket_put(c, a->ob); } mutex_unlock(&c->btree_reserve_cache_lock); @@ -1808,7 +1808,7 @@ void bch_dev_allocator_stop(struct bch_dev *ca) while (1) { closure_wait(&c->open_buckets_wait, &cl); - if (!bch_dev_has_open_write_point(ca)) { + if (!bch2_dev_has_open_write_point(ca)) { closure_wake_up(&c->open_buckets_wait); break; } @@ -1820,7 +1820,7 @@ void bch_dev_allocator_stop(struct bch_dev *ca) /* * Startup the allocator thread for transition to RW mode: */ -int bch_dev_allocator_start(struct bch_dev *ca) +int bch2_dev_allocator_start(struct bch_dev *ca) { struct bch_fs *c = ca->fs; struct dev_group *tier = &c->tiers[ca->mi.tier].devs; @@ -1834,26 +1834,26 @@ int bch_dev_allocator_start(struct bch_dev *ca) if (ca->alloc_thread) return 0; - k = kthread_create(bch_allocator_thread, ca, "bcache_allocator"); + k = kthread_create(bch2_allocator_thread, ca, "bcache_allocator"); if (IS_ERR(k)) return 0; get_task_struct(k); ca->alloc_thread = k; - bch_dev_group_add(tier, ca); - bch_dev_group_add(&c->all_devs, ca); + bch2_dev_group_add(tier, ca); + 
bch2_dev_group_add(&c->all_devs, ca); mutex_lock(&c->sb_lock); - journal_buckets = bch_sb_get_journal(ca->disk_sb.sb); - has_journal = bch_nr_journal_buckets(journal_buckets) >= + journal_buckets = bch2_sb_get_journal(ca->disk_sb.sb); + has_journal = bch2_nr_journal_buckets(journal_buckets) >= BCH_JOURNAL_BUCKETS_MIN; mutex_unlock(&c->sb_lock); if (has_journal) - bch_dev_group_add(&c->journal.devs, ca); + bch2_dev_group_add(&c->journal.devs, ca); - bch_recalc_capacity(c); + bch2_recalc_capacity(c); /* * Don't wake up allocator thread until after adding device to @@ -1864,15 +1864,15 @@ int bch_dev_allocator_start(struct bch_dev *ca) return 0; } -void bch_fs_allocator_init(struct bch_fs *c) +void bch2_fs_allocator_init(struct bch_fs *c) { unsigned i; INIT_LIST_HEAD(&c->open_buckets_open); INIT_LIST_HEAD(&c->open_buckets_free); spin_lock_init(&c->open_buckets_lock); - bch_prio_timer_init(c, READ); - bch_prio_timer_init(c, WRITE); + bch2_prio_timer_init(c, READ); + bch2_prio_timer_init(c, WRITE); /* open bucket 0 is a sentinal NULL: */ mutex_init(&c->open_buckets[0].lock); @@ -1896,12 +1896,12 @@ void bch_fs_allocator_init(struct bch_fs *c) INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update); spin_lock_init(&c->foreground_write_pd_lock); - bch_pd_controller_init(&c->foreground_write_pd); + bch2_pd_controller_init(&c->foreground_write_pd); /* * We do not want the write rate to have an effect on the computed * rate, for two reasons: * - * We do not call bch_ratelimit_delay() at all if the write rate + * We do not call bch2_ratelimit_delay() at all if the write rate * exceeds 1GB/s. In this case, the PD controller will think we are * not "keeping up" and not change the rate. */ @@ -1909,5 +1909,5 @@ void bch_fs_allocator_init(struct bch_fs *c) init_timer(&c->foreground_write_wakeup); c->foreground_write_wakeup.data = (unsigned long) c; - c->foreground_write_wakeup.function = bch_wake_delayed_writes; + c->foreground_write_wakeup.function = bch2_wake_delayed_writes; } diff --git a/libbcache/alloc.h b/libbcachefs/alloc.h index f8aa762d..08638b25 100644 --- a/libbcache/alloc.h +++ b/libbcachefs/alloc.h @@ -20,31 +20,31 @@ static inline size_t prio_buckets(const struct bch_dev *ca) return DIV_ROUND_UP((size_t) (ca)->mi.nbuckets, prios_per_bucket(ca)); } -void bch_dev_group_remove(struct dev_group *, struct bch_dev *); -void bch_dev_group_add(struct dev_group *, struct bch_dev *); +void bch2_dev_group_remove(struct dev_group *, struct bch_dev *); +void bch2_dev_group_add(struct dev_group *, struct bch_dev *); -int bch_prio_read(struct bch_dev *); +int bch2_prio_read(struct bch_dev *); -size_t bch_bucket_alloc(struct bch_dev *, enum alloc_reserve); +size_t bch2_bucket_alloc(struct bch_dev *, enum alloc_reserve); -void bch_open_bucket_put(struct bch_fs *, struct open_bucket *); +void bch2_open_bucket_put(struct bch_fs *, struct open_bucket *); -struct open_bucket *bch_alloc_sectors_start(struct bch_fs *, +struct open_bucket *bch2_alloc_sectors_start(struct bch_fs *, struct write_point *, unsigned, unsigned, enum alloc_reserve, struct closure *); -void bch_alloc_sectors_append_ptrs(struct bch_fs *, struct bkey_i_extent *, +void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct bkey_i_extent *, unsigned, struct open_bucket *, unsigned); -void bch_alloc_sectors_done(struct bch_fs *, struct write_point *, +void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *, struct open_bucket *); -struct open_bucket *bch_alloc_sectors(struct bch_fs *, struct write_point *, +struct 
open_bucket *bch2_alloc_sectors(struct bch_fs *, struct write_point *, struct bkey_i_extent *, unsigned, unsigned, enum alloc_reserve, struct closure *); -static inline void bch_wake_allocator(struct bch_dev *ca) +static inline void bch2_wake_allocator(struct bch_dev *ca) { struct task_struct *p; @@ -77,9 +77,9 @@ static inline struct bch_dev *dev_group_next(struct dev_group *devs, (_ptr) < (_ob)->ptrs + (_ob)->nr_ptrs; \ (_ptr)++) -void bch_recalc_capacity(struct bch_fs *); -void bch_dev_allocator_stop(struct bch_dev *); -int bch_dev_allocator_start(struct bch_dev *); -void bch_fs_allocator_init(struct bch_fs *); +void bch2_recalc_capacity(struct bch_fs *); +void bch2_dev_allocator_stop(struct bch_dev *); +int bch2_dev_allocator_start(struct bch_dev *); +void bch2_fs_allocator_init(struct bch_fs *); #endif /* _BCACHE_ALLOC_H */ diff --git a/libbcache/alloc_types.h b/libbcachefs/alloc_types.h index 1bf48ef9..1bf48ef9 100644 --- a/libbcache/alloc_types.h +++ b/libbcachefs/alloc_types.h diff --git a/libbcache/bcache.h b/libbcachefs/bcachefs.h index 1d0e998c..6e08947c 100644 --- a/libbcache/bcache.h +++ b/libbcachefs/bcachefs.h @@ -176,11 +176,11 @@ */ #undef pr_fmt -#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ +#define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__ #include <linux/bug.h> -#include <linux/bcache.h> #include <linux/bio.h> +#include <linux/closure.h> #include <linux/kobject.h> #include <linux/lglock.h> #include <linux/list.h> @@ -195,33 +195,33 @@ #include <linux/types.h> #include <linux/workqueue.h> +#include "bcachefs_format.h" #include "bset.h" #include "fifo.h" -#include "util.h" -#include "closure.h" #include "opts.h" +#include "util.h" #include <linux/dynamic_fault.h> -#define bch_fs_init_fault(name) \ - dynamic_fault("bcache:bch_fs_init:" name) -#define bch_meta_read_fault(name) \ - dynamic_fault("bcache:meta:read:" name) -#define bch_meta_write_fault(name) \ - dynamic_fault("bcache:meta:write:" name) +#define bch2_fs_init_fault(name) \ + dynamic_fault("bcachefs:bch_fs_init:" name) +#define bch2_meta_read_fault(name) \ + dynamic_fault("bcachefs:meta:read:" name) +#define bch2_meta_write_fault(name) \ + dynamic_fault("bcachefs:meta:write:" name) -#ifndef bch_fmt -#define bch_fmt(_c, fmt) "bcache (%s): " fmt "\n", ((_c)->name) +#ifndef bch2_fmt +#define bch2_fmt(_c, fmt) "bcachefs (%s): " fmt "\n", ((_c)->name) #endif #define bch_info(c, fmt, ...) \ - printk(KERN_INFO bch_fmt(c, fmt), ##__VA_ARGS__) + printk(KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__) #define bch_notice(c, fmt, ...) \ - printk(KERN_NOTICE bch_fmt(c, fmt), ##__VA_ARGS__) + printk(KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__) #define bch_warn(c, fmt, ...) \ - printk(KERN_WARNING bch_fmt(c, fmt), ##__VA_ARGS__) + printk(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__) #define bch_err(c, fmt, ...) \ - printk(KERN_ERR bch_fmt(c, fmt), ##__VA_ARGS__) + printk(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__) #define bch_verbose(c, fmt, ...) 
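The message helpers above all funnel through bch2_fmt(), which pastes the filesystem name into the format string and smuggles it in as the first vararg. A userspace sketch of the same trick, with toy_* names standing in and ##__VA_ARGS__ being the GNU/Clang extension the kernel macros also rely on:

#include <stdio.h>

#define toy_fmt(fs_name, fmt)   "bcachefs (%s): " fmt "\n", (fs_name)
#define toy_info(fs_name, fmt, ...) \
        fprintf(stderr, toy_fmt(fs_name, fmt), ##__VA_ARGS__)

int main(void)
{
        toy_info("sda1", "mounted with %u replicas", 2U);
        return 0;
}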
\ do { \ @@ -261,7 +261,7 @@ do { \ #define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG() -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALL() #else #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS() @@ -269,8 +269,7 @@ do { \ /* name, frequency_units, duration_units */ #define BCH_TIME_STATS() \ - BCH_TIME_STAT(mca_alloc, sec, us) \ - BCH_TIME_STAT(mca_scan, sec, ms) \ + BCH_TIME_STAT(btree_node_mem_alloc, sec, us) \ BCH_TIME_STAT(btree_gc, sec, ms) \ BCH_TIME_STAT(btree_coalesce, sec, ms) \ BCH_TIME_STAT(btree_split, sec, us) \ @@ -282,15 +281,12 @@ do { \ BCH_TIME_STAT(journal_flush_seq, us, us) #include "alloc_types.h" -#include "blockdev_types.h" #include "buckets_types.h" #include "clock_types.h" #include "io_types.h" #include "journal_types.h" #include "keylist_types.h" -#include "keybuf_types.h" #include "move_types.h" -#include "stats_types.h" #include "super_types.h" /* 256k, in sectors */ @@ -353,7 +349,7 @@ struct bch_dev { u8 dev_idx; /* * Cached version of this device's member info from superblock - * Committed by bch_write_super() -> bch_fs_mi_update() + * Committed by bch2_write_super() -> bch_fs_mi_update() */ struct bch_member_cpu mi; uuid_le uuid; @@ -435,10 +431,6 @@ struct bch_dev { struct work_struct io_error_work; /* The rest of this all shows up in sysfs */ -#define IO_ERROR_SHIFT 20 - atomic_t io_errors; - atomic_t io_count; - atomic64_t meta_sectors_written; atomic64_t btree_sectors_written; u64 __percpu *sectors_written; @@ -454,7 +446,6 @@ struct bch_dev { */ enum { BCH_FS_INITIAL_GC_DONE, - BCH_FS_DETACHING, BCH_FS_EMERGENCY_RO, BCH_FS_WRITE_DISABLE_COMPLETE, BCH_FS_GC_STOPPING, @@ -513,7 +504,7 @@ struct bch_fs { struct bch_opts opts; - /* Updated by bch_sb_update():*/ + /* Updated by bch2_sb_update():*/ struct { uuid_le uuid; uuid_le user_uuid; @@ -717,16 +708,12 @@ struct bch_fs { struct mutex zlib_workspace_lock; mempool_t compression_bounce[2]; + struct crypto_shash *sha256; struct crypto_blkcipher *chacha20; struct crypto_shash *poly1305; atomic64_t key_version; - /* For punting bio submissions to workqueue, io.c */ - struct bio_list bio_submit_list; - struct work_struct bio_submit_work; - spinlock_t bio_submit_lock; - struct bio_list read_retry_list; struct work_struct read_retry_work; spinlock_t read_retry_lock; @@ -737,14 +724,10 @@ struct bch_fs { unsigned writeback_pages_max; atomic_long_t nr_inodes; - /* NOTIFICATIONS */ - struct mutex uevent_lock; - struct kobj_uevent_env uevent_env; - /* DEBUG JUNK */ struct dentry *debug; struct btree_debug btree_debug[BTREE_ID_NR]; -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG struct btree *verify_data; struct btree_node *verify_ondisk; struct mutex verify_lock; @@ -764,28 +747,8 @@ struct bch_fs { unsigned bucket_journal_seq; - /* CACHING OTHER BLOCK DEVICES */ - mempool_t search; - struct radix_tree_root devices; - struct list_head cached_devs; - u64 cached_dev_sectors; - struct closure caching; - -#define CONGESTED_MAX 1024 - unsigned congested_last_us; - atomic_t congested; - /* The rest of this all shows up in sysfs */ - unsigned congested_read_threshold_us; - unsigned congested_write_threshold_us; - - struct cache_accounting accounting; atomic_long_t cache_read_races; - atomic_long_t writeback_keys_done; - atomic_long_t writeback_keys_failed; - - unsigned error_limit; - unsigned error_decay; unsigned foreground_write_ratelimit_enabled:1; unsigned copy_gc_enabled:1; @@ -808,7 +771,7 @@ struct bch_fs { #undef 
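BCH_TIME_STATS() above is an X-macro list: each BCH_TIME_STAT() entry is re-expanded at several call sites so one table generates struct fields, sysfs names and so on. A reduced illustration of the technique, with made-up stat names:

#include <stdio.h>

#define TOY_TIME_STATS()                        \
        TOY_TIME_STAT(btree_split)              \
        TOY_TIME_STAT(journal_write)

/* Expansion 1: one counter field per stat. */
struct toy_stats {
#define TOY_TIME_STAT(name)     unsigned long name##_count;
        TOY_TIME_STATS()
#undef TOY_TIME_STAT
};

/* Expansion 2: print every stat by name from the same list. */
static void toy_stats_print(const struct toy_stats *s)
{
#define TOY_TIME_STAT(name) \
        printf("%-16s %lu\n", #name, s->name##_count);
        TOY_TIME_STATS()
#undef TOY_TIME_STAT
}

int main(void)
{
        struct toy_stats s = { .btree_split_count = 3, .journal_write_count = 7 };

        toy_stats_print(&s);
        return 0;
}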
BCH_TIME_STAT }; -static inline bool bch_fs_running(struct bch_fs *c) +static inline bool bch2_fs_running(struct bch_fs *c) { return c->state == BCH_FS_RO || c->state == BCH_FS_RW; } diff --git a/include/linux/bcache.h b/libbcachefs/bcachefs_format.h index c221747b..0a0dc870 100644 --- a/include/linux/bcache.h +++ b/libbcachefs/bcachefs_format.h @@ -391,7 +391,7 @@ struct bch_csum { #define BCH_CSUM_CHACHA20_POLY1305_128 4U #define BCH_CSUM_NR 5U -static inline _Bool bch_csum_type_is_encryption(unsigned type) +static inline _Bool bch2_csum_type_is_encryption(unsigned type) { switch (type) { case BCH_CSUM_CHACHA20_POLY1305_80: @@ -805,7 +805,7 @@ enum cache_replacement { }; struct bch_sb_layout { - uuid_le magic; /* bcache superblock UUID */ + uuid_le magic; /* bcachefs superblock UUID */ __u8 layout_type; __u8 sb_max_size_bits; /* base 2 of 512 byte sectors */ __u8 nr_superblocks; @@ -893,7 +893,7 @@ struct bch_sb_field_replication { /* * @offset - sector where this sb was written * @version - on disk format version - * @magic - identifies as a bcache superblock (BCACHE_MAGIC) + * @magic - identifies as a bcachefs superblock (BCACHE_MAGIC) * @seq - incremented each time superblock is written * @uuid - used for generating various magic numbers and identifying * member devices, never changes @@ -1035,7 +1035,7 @@ struct backingdev_sb { __le64 offset; /* sector where this sb was written */ __le64 version; /* of on disk format */ - uuid_le magic; /* bcache superblock UUID */ + uuid_le magic; /* bcachefs superblock UUID */ uuid_le disk_uuid; @@ -1116,7 +1116,7 @@ static inline _Bool SB_IS_BDEV(const struct bch_sb *sb) #define PSET_MAGIC __cpu_to_le64(0x6750e15f87337f91ULL) #define BSET_MAGIC __cpu_to_le64(0x90135c78b99e07f5ULL) -static inline __le64 __bch_sb_magic(struct bch_sb *sb) +static inline __le64 __bch2_sb_magic(struct bch_sb *sb) { __le64 ret; memcpy(&ret, &sb->uuid, sizeof(ret)); @@ -1125,17 +1125,17 @@ static inline __le64 __bch_sb_magic(struct bch_sb *sb) static inline __u64 __jset_magic(struct bch_sb *sb) { - return __le64_to_cpu(__bch_sb_magic(sb) ^ JSET_MAGIC); + return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC); } static inline __u64 __pset_magic(struct bch_sb *sb) { - return __le64_to_cpu(__bch_sb_magic(sb) ^ PSET_MAGIC); + return __le64_to_cpu(__bch2_sb_magic(sb) ^ PSET_MAGIC); } static inline __u64 __bset_magic(struct bch_sb *sb) { - return __le64_to_cpu(__bch_sb_magic(sb) ^ BSET_MAGIC); + return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC); } /* Journal */ @@ -1344,103 +1344,6 @@ struct btree_node_entry { }; } __attribute__((packed)); -/* OBSOLETE */ - -#define BITMASK(name, type, field, offset, end) \ -static const unsigned name##_OFFSET = offset; \ -static const unsigned name##_BITS = (end - offset); \ -static const __u64 name##_MAX = (1ULL << (end - offset)) - 1; \ - \ -static inline __u64 name(const type *k) \ -{ return (k->field >> offset) & ~(~0ULL << (end - offset)); } \ - \ -static inline void SET_##name(type *k, __u64 v) \ -{ \ - k->field &= ~(~(~0ULL << (end - offset)) << offset); \ - k->field |= (v & ~(~0ULL << (end - offset))) << offset; \ -} - -struct bkey_v0 { - __u64 high; - __u64 low; - __u64 ptr[]; -}; - -#define KEY0_FIELD(name, field, offset, size) \ - BITMASK(name, struct bkey_v0, field, offset, size) - -KEY0_FIELD(KEY0_PTRS, high, 60, 63) -KEY0_FIELD(KEY0_CSUM, high, 56, 58) -KEY0_FIELD(KEY0_DIRTY, high, 36, 37) - -KEY0_FIELD(KEY0_SIZE, high, 20, 36) -KEY0_FIELD(KEY0_INODE, high, 0, 20) - -static inline unsigned long bkey_v0_u64s(const struct 
bkey_v0 *k) -{ - return (sizeof(struct bkey_v0) / sizeof(__u64)) + KEY0_PTRS(k); -} - -static inline struct bkey_v0 *bkey_v0_next(const struct bkey_v0 *k) -{ - __u64 *d = (__u64 *) k; - - return (struct bkey_v0 *) (d + bkey_v0_u64s(k)); -} - -struct jset_v0 { - __u64 csum; - __u64 magic; - __u64 seq; - __u32 version; - __u32 keys; - - __u64 last_seq; - - __BKEY_PADDED(uuid_bucket, 4); - __BKEY_PADDED(btree_root, 4); - __u16 btree_level; - __u16 pad[3]; - - __u64 prio_bucket[64]; - - union { - struct bkey start[0]; - __u64 d[0]; - }; -}; - -/* UUIDS - per backing device/flash only volume metadata */ - -struct uuid_entry_v0 { - uuid_le uuid; - __u8 label[32]; - __u32 first_reg; - __u32 last_reg; - __u32 invalidated; - __u32 pad; -}; - -struct uuid_entry { - union { - struct { - uuid_le uuid; - __u8 label[32]; - __u32 first_reg; - __u32 last_reg; - __u32 invalidated; - - __u32 flags; - /* Size of flash only volumes */ - __u64 sectors; - }; - - __u8 pad[128]; - }; -}; - -BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1); - #ifdef __cplusplus } #endif diff --git a/include/linux/bcache-ioctl.h b/libbcachefs/bcachefs_ioctl.h index ca769369..2218a00b 100644 --- a/include/linux/bcache-ioctl.h +++ b/libbcachefs/bcachefs_ioctl.h @@ -1,8 +1,8 @@ #ifndef _LINUX_BCACHE_IOCTL_H #define _LINUX_BCACHE_IOCTL_H -#include <linux/bcache.h> #include <linux/uuid.h> +#include "bcachefs_format.h" #ifdef __cplusplus extern "C" { diff --git a/libbcache/bkey.c b/libbcachefs/bkey.c index 374237e2..b9ceb6ea 100644 --- a/libbcache/bkey.c +++ b/libbcachefs/bkey.c @@ -1,18 +1,15 @@ -#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ - -#include <linux/kernel.h> - +#include "bcachefs.h" #include "bkey.h" #include "bset.h" #include "util.h" -const struct bkey_format bch_bkey_format_current = BKEY_FORMAT_CURRENT; +const struct bkey_format bch2_bkey_format_current = BKEY_FORMAT_CURRENT; -struct bkey __bkey_unpack_key(const struct bkey_format *, +struct bkey __bch2_bkey_unpack_key(const struct bkey_format *, const struct bkey_packed *); -void bch_to_binary(char *out, const u64 *p, unsigned nr_bits) +void bch2_to_binary(char *out, const u64 *p, unsigned nr_bits) { unsigned bit = high_bit_offset, done = 0; @@ -34,9 +31,9 @@ void bch_to_binary(char *out, const u64 *p, unsigned nr_bits) } } -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG -static void bch_bkey_pack_verify(const struct bkey_packed *packed, +static void bch2_bkey_pack_verify(const struct bkey_packed *packed, const struct bkey *unpacked, const struct bkey_format *format) { @@ -47,16 +44,16 @@ static void bch_bkey_pack_verify(const struct bkey_packed *packed, BUG_ON(packed->u64s < bkeyp_key_u64s(format, packed)); - tmp = __bkey_unpack_key(format, packed); + tmp = __bch2_bkey_unpack_key(format, packed); if (memcmp(&tmp, unpacked, sizeof(struct bkey))) { char buf1[160], buf2[160]; char buf3[160], buf4[160]; - bch_bkey_to_text(buf1, sizeof(buf1), unpacked); - bch_bkey_to_text(buf2, sizeof(buf2), &tmp); - bch_to_binary(buf3, (void *) unpacked, 80); - bch_to_binary(buf4, high_word(format, packed), 80); + bch2_bkey_to_text(buf1, sizeof(buf1), unpacked); + bch2_bkey_to_text(buf2, sizeof(buf2), &tmp); + bch2_to_binary(buf3, (void *) unpacked, 80); + bch2_to_binary(buf4, high_word(format, packed), 80); panic("keys differ: format u64s %u fields %u %u %u %u %u\n%s\n%s\n%s\n%s\n", format->key_u64s, @@ -70,12 +67,12 @@ static void bch_bkey_pack_verify(const struct bkey_packed *packed, } #else -static inline void bch_bkey_pack_verify(const struct bkey_packed 
*packed, +static inline void bch2_bkey_pack_verify(const struct bkey_packed *packed, const struct bkey *unpacked, const struct bkey_format *format) {} #endif -int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k) +int bch2_bkey_to_text(char *buf, size_t size, const struct bkey *k) { char *out = buf, *end = buf + size; @@ -218,7 +215,7 @@ static bool set_inc_field(struct pack_state *state, unsigned field, u64 v) * Also: doesn't work on extents - it doesn't preserve the invariant that * if k is packed bkey_start_pos(k) will successfully pack */ -static bool bch_bkey_transform_key(const struct bkey_format *out_f, +static bool bch2_bkey_transform_key(const struct bkey_format *out_f, struct bkey_packed *out, const struct bkey_format *in_f, const struct bkey_packed *in) @@ -244,12 +241,12 @@ static bool bch_bkey_transform_key(const struct bkey_format *out_f, return true; } -bool bch_bkey_transform(const struct bkey_format *out_f, +bool bch2_bkey_transform(const struct bkey_format *out_f, struct bkey_packed *out, const struct bkey_format *in_f, const struct bkey_packed *in) { - if (!bch_bkey_transform_key(out_f, out, in_f, in)) + if (!bch2_bkey_transform_key(out_f, out, in_f, in)) return false; memcpy_u64s((u64 *) out + out_f->key_u64s, @@ -266,7 +263,7 @@ bool bch_bkey_transform(const struct bkey_format *out_f, x(BKEY_FIELD_VERSION_HI, version.hi) \ x(BKEY_FIELD_VERSION_LO, version.lo) -struct bkey __bkey_unpack_key(const struct bkey_format *format, +struct bkey __bch2_bkey_unpack_key(const struct bkey_format *format, const struct bkey_packed *in) { struct unpack_state state = unpack_state_init(format, in); @@ -310,9 +307,9 @@ struct bpos __bkey_unpack_pos(const struct bkey_format *format, #endif /** - * bkey_pack_key -- pack just the key, not the value + * bch2_bkey_pack_key -- pack just the key, not the value */ -bool bkey_pack_key(struct bkey_packed *out, const struct bkey *in, +bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in, const struct bkey_format *format) { struct pack_state state = pack_state_init(format, out); @@ -340,14 +337,14 @@ bool bkey_pack_key(struct bkey_packed *out, const struct bkey *in, out->needs_whiteout = in->needs_whiteout; out->type = in->type; - bch_bkey_pack_verify(out, in, format); + bch2_bkey_pack_verify(out, in, format); return true; } /** - * bkey_unpack -- unpack the key and the value + * bch2_bkey_unpack -- unpack the key and the value */ -void bkey_unpack(const struct btree *b, struct bkey_i *dst, +void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst, const struct bkey_packed *src) { dst->k = bkey_unpack_key(b, src); @@ -358,14 +355,14 @@ void bkey_unpack(const struct btree *b, struct bkey_i *dst, } /** - * bkey_pack -- pack the key and the value + * bch2_bkey_pack -- pack the key and the value */ -bool bkey_pack(struct bkey_packed *out, const struct bkey_i *in, +bool bch2_bkey_pack(struct bkey_packed *out, const struct bkey_i *in, const struct bkey_format *format) { struct bkey_packed tmp; - if (!bkey_pack_key(&tmp, &in->k, format)) + if (!bch2_bkey_pack_key(&tmp, &in->k, format)) return false; memmove_u64s((u64 *) out + format->key_u64s, @@ -407,7 +404,7 @@ static bool set_inc_field_lossy(struct pack_state *state, unsigned field, u64 v) return ret; } -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG static bool bkey_packed_successor(struct bkey_packed *out, const struct btree *b, struct bkey_packed k) @@ -456,13 +453,13 @@ static bool bkey_packed_successor(struct bkey_packed *out, * legal to use a packed 
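bch2_bkey_pack_verify() above packs a key and immediately unpacks it again, comparing the result against the original to catch format bugs. A toy round trip in the same spirit, with two made-up fields and fixed bit widths (the real format carries per-field offsets and more fields, and falls back to the unpacked representation when a key does not fit):

#include <assert.h>
#include <stdint.h>

struct toy_key    { uint64_t inode, offset; };
struct toy_format { unsigned inode_bits, offset_bits; };  /* each < 64, sum <= 64 */

static int toy_pack(uint64_t *out, struct toy_key k, struct toy_format f)
{
        if (k.inode >> f.inode_bits || k.offset >> f.offset_bits)
                return 0;                       /* doesn't fit this format */
        *out = (k.inode << f.offset_bits) | k.offset;
        return 1;
}

static struct toy_key toy_unpack(uint64_t in, struct toy_format f)
{
        return (struct toy_key) {
                .inode  = in >> f.offset_bits,
                .offset = in & ((UINT64_C(1) << f.offset_bits) - 1),
        };
}

int main(void)
{
        struct toy_format f = { .inode_bits = 20, .offset_bits = 36 };
        struct toy_key    k = { .inode = 42, .offset = 123456 };
        uint64_t packed;

        if (!toy_pack(&packed, k, f))
                return 1;

        /* Pack, unpack, and check we got the original key back. */
        struct toy_key u = toy_unpack(packed, f);
        assert(u.inode == k.inode && u.offset == k.offset);
        return 0;
}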
pos that isn't equivalent to the original pos, * _provided_ it compares <= to the original pos. */ -enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out, +enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out, struct bpos in, const struct btree *b) { const struct bkey_format *f = &b->format; struct pack_state state = pack_state_init(f, out); -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG struct bpos orig = in; #endif bool exact = true; @@ -510,7 +507,7 @@ enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out, out->format = KEY_FORMAT_LOCAL_BTREE; out->type = KEY_TYPE_DELETED; -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG if (exact) { BUG_ON(bkey_cmp_left_packed(b, out, &orig)); } else { @@ -525,7 +522,7 @@ enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out, return exact ? BKEY_PACK_POS_EXACT : BKEY_PACK_POS_SMALLER; } -void bch_bkey_format_init(struct bkey_format_state *s) +void bch2_bkey_format_init(struct bkey_format_state *s) { unsigned i; @@ -549,7 +546,7 @@ static void __bkey_format_add(struct bkey_format_state *s, /* * Changes @format so that @k can be successfully packed with @format */ -void bch_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k) +void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k) { #define x(id, field) __bkey_format_add(s, id, k->field); bkey_fields() @@ -557,7 +554,7 @@ void bch_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k) __bkey_format_add(s, BKEY_FIELD_OFFSET, bkey_start_offset(k)); } -void bch_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p) +void bch2_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p) { unsigned field = 0; @@ -580,7 +577,7 @@ static void set_format_field(struct bkey_format *f, enum bch_bkey_fields i, f->field_offset[i] = cpu_to_le64(offset); } -struct bkey_format bch_bkey_format_done(struct bkey_format_state *s) +struct bkey_format bch2_bkey_format_done(struct bkey_format_state *s) { unsigned i, bits = KEY_PACKED_BITS_START; struct bkey_format ret = { @@ -620,11 +617,11 @@ struct bkey_format bch_bkey_format_done(struct bkey_format_state *s) } } - EBUG_ON(bch_bkey_format_validate(&ret)); + EBUG_ON(bch2_bkey_format_validate(&ret)); return ret; } -const char *bch_bkey_format_validate(struct bkey_format *f) +const char *bch2_bkey_format_validate(struct bkey_format *f) { unsigned i, bits = KEY_PACKED_BITS_START; @@ -657,9 +654,9 @@ const char *bch_bkey_format_validate(struct bkey_format *f) * Bits are indexed from 0 - return is [0, nr_key_bits) */ __pure -unsigned bkey_greatest_differing_bit(const struct btree *b, - const struct bkey_packed *l_k, - const struct bkey_packed *r_k) +unsigned bch2_bkey_greatest_differing_bit(const struct btree *b, + const struct bkey_packed *l_k, + const struct bkey_packed *r_k) { const u64 *l = high_word(&b->format, l_k); const u64 *r = high_word(&b->format, r_k); @@ -701,8 +698,7 @@ unsigned bkey_greatest_differing_bit(const struct btree *b, * Bits are indexed from 0 - return is [0, nr_key_bits) */ __pure -unsigned bkey_ffs(const struct btree *b, - const struct bkey_packed *k) +unsigned bch2_bkey_ffs(const struct btree *b, const struct bkey_packed *k) { const u64 *p = high_word(&b->format, k); unsigned nr_key_bits = b->nr_key_bits; @@ -957,7 +953,7 @@ set_field: return out; } -int bch_compile_bkey_format(const struct bkey_format *format, void *_out) +int bch2_compile_bkey_format(const struct bkey_format *format, void *_out) { bool 
eax_zeroed = false; u8 *out = _out; @@ -1034,9 +1030,9 @@ static inline int __bkey_cmp_bits(const u64 *l, const u64 *r, #endif __pure -int __bkey_cmp_packed_format_checked(const struct bkey_packed *l, - const struct bkey_packed *r, - const struct btree *b) +int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *l, + const struct bkey_packed *r, + const struct btree *b) { const struct bkey_format *f = &b->format; int ret; @@ -1054,33 +1050,33 @@ int __bkey_cmp_packed_format_checked(const struct bkey_packed *l, } __pure __flatten -int __bkey_cmp_left_packed_format_checked(const struct btree *b, - const struct bkey_packed *l, - const struct bpos *r) +int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *b, + const struct bkey_packed *l, + const struct bpos *r) { return bkey_cmp(bkey_unpack_pos_format_checked(b, l), *r); } __pure __flatten -int __bkey_cmp_packed(const struct bkey_packed *l, - const struct bkey_packed *r, - const struct btree *b) +int __bch2_bkey_cmp_packed(const struct bkey_packed *l, + const struct bkey_packed *r, + const struct btree *b) { int packed = bkey_lr_packed(l, r); if (likely(packed == BKEY_PACKED_BOTH)) - return __bkey_cmp_packed_format_checked(l, r, b); + return __bch2_bkey_cmp_packed_format_checked(l, r, b); switch (packed) { case BKEY_PACKED_NONE: return bkey_cmp(((struct bkey *) l)->p, ((struct bkey *) r)->p); case BKEY_PACKED_LEFT: - return __bkey_cmp_left_packed_format_checked(b, + return __bch2_bkey_cmp_left_packed_format_checked(b, (struct bkey_packed *) l, &((struct bkey *) r)->p); case BKEY_PACKED_RIGHT: - return -__bkey_cmp_left_packed_format_checked(b, + return -__bch2_bkey_cmp_left_packed_format_checked(b, (struct bkey_packed *) r, &((struct bkey *) l)->p); default: @@ -1089,17 +1085,18 @@ int __bkey_cmp_packed(const struct bkey_packed *l, } __pure __flatten -int bkey_cmp_left_packed(const struct btree *b, - const struct bkey_packed *l, const struct bpos *r) +int __bch2_bkey_cmp_left_packed(const struct btree *b, + const struct bkey_packed *l, + const struct bpos *r) { const struct bkey *l_unpacked; return unlikely(l_unpacked = packed_to_bkey_c(l)) ? bkey_cmp(l_unpacked->p, *r) - : __bkey_cmp_left_packed_format_checked(b, l, r); + : __bch2_bkey_cmp_left_packed_format_checked(b, l, r); } -void bch_bpos_swab(struct bpos *p) +void bch2_bpos_swab(struct bpos *p) { u8 *l = (u8 *) p; u8 *h = ((u8 *) &p[1]) - 1; @@ -1111,9 +1108,9 @@ void bch_bpos_swab(struct bpos *p) } } -void bch_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k) +void bch2_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k) { - const struct bkey_format *f = bkey_packed(k) ? _f : &bch_bkey_format_current; + const struct bkey_format *f = bkey_packed(k) ? 
_f : &bch2_bkey_format_current; u8 *l = k->key_start; u8 *h = (u8 *) (k->_data + f->key_u64s) - 1; @@ -1124,8 +1121,8 @@ void bch_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k) } } -#ifdef CONFIG_BCACHE_DEBUG -void bkey_pack_test(void) +#ifdef CONFIG_BCACHEFS_DEBUG +void bch2_bkey_pack_test(void) { struct bkey t = KEY(4134ULL, 1250629070527416633ULL, 0); struct bkey_packed p; @@ -1140,7 +1137,7 @@ void bkey_pack_test(void) }; struct unpack_state in_s = - unpack_state_init(&bch_bkey_format_current, (void *) &t); + unpack_state_init(&bch2_bkey_format_current, (void *) &t); struct pack_state out_s = pack_state_init(&test_format, &p); unsigned i; @@ -1162,6 +1159,6 @@ void bkey_pack_test(void) panic("failed at %u\n", i); } - BUG_ON(!bkey_pack_key(&p, &t, &test_format)); + BUG_ON(!bch2_bkey_pack_key(&p, &t, &test_format)); } #endif diff --git a/libbcache/bkey.h b/libbcachefs/bkey.h index 0893134f..1383c96b 100644 --- a/libbcache/bkey.h +++ b/libbcachefs/bkey.h @@ -2,13 +2,13 @@ #define _BCACHE_BKEY_H #include <linux/bug.h> -#include <linux/bcache.h> +#include "bcachefs_format.h" #include "util.h" #include "vstructs.h" -void bch_to_binary(char *, const u64 *, unsigned); -int bch_bkey_to_text(char *, size_t, const struct bkey *); +void bch2_to_binary(char *, const u64 *, unsigned); +int bch2_bkey_to_text(char *, size_t, const struct bkey *); #define BKEY_PADDED(key) __BKEY_PADDED(key, BKEY_EXTENT_VAL_U64s_MAX) @@ -112,38 +112,45 @@ struct bkey_format_state { u64 field_max[BKEY_NR_FIELDS]; }; -void bch_bkey_format_init(struct bkey_format_state *); -void bch_bkey_format_add_key(struct bkey_format_state *, const struct bkey *); -void bch_bkey_format_add_pos(struct bkey_format_state *, struct bpos); -struct bkey_format bch_bkey_format_done(struct bkey_format_state *); -const char *bch_bkey_format_validate(struct bkey_format *); +void bch2_bkey_format_init(struct bkey_format_state *); +void bch2_bkey_format_add_key(struct bkey_format_state *, const struct bkey *); +void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos); +struct bkey_format bch2_bkey_format_done(struct bkey_format_state *); +const char *bch2_bkey_format_validate(struct bkey_format *); __pure -unsigned bkey_greatest_differing_bit(const struct btree *, - const struct bkey_packed *, - const struct bkey_packed *); +unsigned bch2_bkey_greatest_differing_bit(const struct btree *, + const struct bkey_packed *, + const struct bkey_packed *); __pure -unsigned bkey_ffs(const struct btree *, const struct bkey_packed *); +unsigned bch2_bkey_ffs(const struct btree *, const struct bkey_packed *); __pure -int __bkey_cmp_packed_format_checked(const struct bkey_packed *, +int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *, const struct bkey_packed *, const struct btree *); __pure -int __bkey_cmp_left_packed_format_checked(const struct btree *, +int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *, const struct bkey_packed *, const struct bpos *); __pure -int __bkey_cmp_packed(const struct bkey_packed *, - const struct bkey_packed *, - const struct btree *); +int __bch2_bkey_cmp_packed(const struct bkey_packed *, + const struct bkey_packed *, + const struct btree *); __pure -int bkey_cmp_left_packed(const struct btree *, - const struct bkey_packed *, - const struct bpos *); +int __bch2_bkey_cmp_left_packed(const struct btree *, + const struct bkey_packed *, + const struct bpos *); + +static inline __pure +int bkey_cmp_left_packed(const struct btree *b, + const struct bkey_packed *l, 
const struct bpos *r) +{ + return __bch2_bkey_cmp_left_packed(b, l, r); +} /* * we prefer to pass bpos by ref, but it's often enough terribly convenient to @@ -181,7 +188,7 @@ static inline int bkey_cmp_left_packed_byval(const struct btree *b, &((struct bkey *) (_l))->p); \ break; \ case BKEY_PACKED_BOTH: \ - _cmp = __bkey_cmp_packed((void *) (_l), \ + _cmp = __bch2_bkey_cmp_packed((void *) (_l), \ (void *) (_r), (_b)); \ break; \ } \ @@ -208,8 +215,8 @@ static inline struct bpos bpos_min(struct bpos l, struct bpos r) return bkey_cmp(l, r) < 0 ? l : r; } -void bch_bpos_swab(struct bpos *); -void bch_bkey_swab_key(const struct bkey_format *, struct bkey_packed *); +void bch2_bpos_swab(struct bpos *); +void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *); static __always_inline int bversion_cmp(struct bversion l, struct bversion r) { @@ -227,7 +234,7 @@ static __always_inline int bversion_zero(struct bversion v) return !bversion_cmp(v, ZERO_VERSION); } -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG /* statement expressions confusing unlikely()? */ #define bkey_packed(_k) \ ({ EBUG_ON((_k)->format > KEY_FORMAT_CURRENT); \ @@ -328,22 +335,22 @@ static inline void set_bkeyp_val_u64s(const struct bkey_format *format, #define bkeyp_val(_format, _k) \ ((struct bch_val *) ((_k)->_data + bkeyp_key_u64s(_format, _k))) -extern const struct bkey_format bch_bkey_format_current; +extern const struct bkey_format bch2_bkey_format_current; -bool bch_bkey_transform(const struct bkey_format *, - struct bkey_packed *, - const struct bkey_format *, - const struct bkey_packed *); +bool bch2_bkey_transform(const struct bkey_format *, + struct bkey_packed *, + const struct bkey_format *, + const struct bkey_packed *); -struct bkey __bkey_unpack_key(const struct bkey_format *, - const struct bkey_packed *); +struct bkey __bch2_bkey_unpack_key(const struct bkey_format *, + const struct bkey_packed *); #ifndef HAVE_BCACHE_COMPILED_UNPACK struct bpos __bkey_unpack_pos(const struct bkey_format *, const struct bkey_packed *); #endif -bool bkey_pack_key(struct bkey_packed *, const struct bkey *, +bool bch2_bkey_pack_key(struct bkey_packed *, const struct bkey *, const struct bkey_format *); enum bkey_pack_pos_ret { @@ -352,18 +359,18 @@ enum bkey_pack_pos_ret { BKEY_PACK_POS_FAIL, }; -enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *, struct bpos, +enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *, struct bpos, const struct btree *); static inline bool bkey_pack_pos(struct bkey_packed *out, struct bpos in, const struct btree *b) { - return bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT; + return bch2_bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT; } -void bkey_unpack(const struct btree *, struct bkey_i *, +void bch2_bkey_unpack(const struct btree *, struct bkey_i *, const struct bkey_packed *); -bool bkey_pack(struct bkey_packed *, const struct bkey_i *, +bool bch2_bkey_pack(struct bkey_packed *, const struct bkey_i *, const struct bkey_format *); static inline u64 bkey_field_max(const struct bkey_format *f, @@ -377,11 +384,11 @@ static inline u64 bkey_field_max(const struct bkey_format *f, #ifdef CONFIG_X86_64 #define HAVE_BCACHE_COMPILED_UNPACK 1 -int bch_compile_bkey_format(const struct bkey_format *, void *); +int bch2_compile_bkey_format(const struct bkey_format *, void *); #else -static inline int bch_compile_bkey_format(const struct bkey_format *format, +static inline int bch2_compile_bkey_format(const struct bkey_format *format, void 
*out) { return 0; } #endif @@ -558,12 +565,12 @@ static inline struct bkey_i_##name *bkey_##name##_init(struct bkey_i *_k)\ BKEY_VAL_ACCESSORS(cookie, KEY_TYPE_COOKIE); -static inline void __bch_extent_assert(u8 type, u8 nr) +static inline void __bch2_extent_assert(u8 type, u8 nr) { EBUG_ON(type != BCH_EXTENT && type != BCH_EXTENT_CACHED); } -__BKEY_VAL_ACCESSORS(extent, BCH_EXTENT, __bch_extent_assert); +__BKEY_VAL_ACCESSORS(extent, BCH_EXTENT, __bch2_extent_assert); BKEY_VAL_ACCESSORS(reservation, BCH_RESERVATION); BKEY_VAL_ACCESSORS(inode, BCH_INODE_FS); @@ -597,10 +604,10 @@ BKEY_VAL_ACCESSORS(xattr, BCH_XATTR); #define next_word(p) nth_word(p, 1) #define prev_word(p) nth_word(p, -1) -#ifdef CONFIG_BCACHE_DEBUG -void bkey_pack_test(void); +#ifdef CONFIG_BCACHEFS_DEBUG +void bch2_bkey_pack_test(void); #else -static inline void bkey_pack_test(void) {} +static inline void bch2_bkey_pack_test(void) {} #endif #endif /* _BCACHE_BKEY_H */ diff --git a/libbcache/bkey_methods.c b/libbcachefs/bkey_methods.c index 2908489c..51a13fca 100644 --- a/libbcache/bkey_methods.c +++ b/libbcachefs/bkey_methods.c @@ -1,5 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" #include "bkey_methods.h" #include "btree_types.h" #include "dirent.h" @@ -8,19 +8,19 @@ #include "inode.h" #include "xattr.h" -const struct bkey_ops *bch_bkey_ops[] = { - [BKEY_TYPE_EXTENTS] = &bch_bkey_extent_ops, - [BKEY_TYPE_INODES] = &bch_bkey_inode_ops, - [BKEY_TYPE_DIRENTS] = &bch_bkey_dirent_ops, - [BKEY_TYPE_XATTRS] = &bch_bkey_xattr_ops, - [BKEY_TYPE_BTREE] = &bch_bkey_btree_ops, +const struct bkey_ops *bch2_bkey_ops[] = { + [BKEY_TYPE_EXTENTS] = &bch2_bkey_extent_ops, + [BKEY_TYPE_INODES] = &bch2_bkey_inode_ops, + [BKEY_TYPE_DIRENTS] = &bch2_bkey_dirent_ops, + [BKEY_TYPE_XATTRS] = &bch2_bkey_xattr_ops, + [BKEY_TYPE_BTREE] = &bch2_bkey_btree_ops, }; /* Returns string indicating reason for being invalid, or NULL if valid: */ -const char *bkey_invalid(struct bch_fs *c, enum bkey_type type, +const char *bch2_bkey_invalid(struct bch_fs *c, enum bkey_type type, struct bkey_s_c k) { - const struct bkey_ops *ops = bch_bkey_ops[type]; + const struct bkey_ops *ops = bch2_bkey_ops[type]; if (k.k->u64s < BKEY_U64s) return "u64s too small"; @@ -52,8 +52,8 @@ const char *bkey_invalid(struct bch_fs *c, enum bkey_type type, } } -const char *btree_bkey_invalid(struct bch_fs *c, struct btree *b, - struct bkey_s_c k) +const char *bch2_btree_bkey_invalid(struct bch_fs *c, struct btree *b, + struct bkey_s_c k) { if (bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0) return "key before start of btree node"; @@ -64,23 +64,23 @@ const char *btree_bkey_invalid(struct bch_fs *c, struct btree *b, if (k.k->p.snapshot) return "nonzero snapshot"; - return bkey_invalid(c, btree_node_type(b), k); + return bch2_bkey_invalid(c, btree_node_type(b), k); } -void bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k) +void bch2_bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k) { enum bkey_type type = btree_node_type(b); - const struct bkey_ops *ops = bch_bkey_ops[type]; + const struct bkey_ops *ops = bch2_bkey_ops[type]; const char *invalid; BUG_ON(!k.k->u64s); - invalid = btree_bkey_invalid(c, b, k); + invalid = bch2_btree_bkey_invalid(c, b, k); if (invalid) { char buf[160]; - bch_bkey_val_to_text(c, type, buf, sizeof(buf), k); - bch_fs_bug(c, "invalid bkey %s: %s", buf, invalid); + bch2_bkey_val_to_text(c, type, buf, sizeof(buf), k); + bch2_fs_bug(c, "invalid bkey %s: %s", buf, invalid); return; } @@ -89,23 +89,23 @@ void 
bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k) ops->key_debugcheck(c, b, k); } -void bch_val_to_text(struct bch_fs *c, enum bkey_type type, +void bch2_val_to_text(struct bch_fs *c, enum bkey_type type, char *buf, size_t size, struct bkey_s_c k) { - const struct bkey_ops *ops = bch_bkey_ops[type]; + const struct bkey_ops *ops = bch2_bkey_ops[type]; if (k.k->type >= KEY_TYPE_GENERIC_NR && ops->val_to_text) ops->val_to_text(c, buf, size, k); } -void bch_bkey_val_to_text(struct bch_fs *c, enum bkey_type type, +void bch2_bkey_val_to_text(struct bch_fs *c, enum bkey_type type, char *buf, size_t size, struct bkey_s_c k) { - const struct bkey_ops *ops = bch_bkey_ops[type]; + const struct bkey_ops *ops = bch2_bkey_ops[type]; char *out = buf, *end = buf + size; - out += bch_bkey_to_text(out, end - out, k.k); + out += bch2_bkey_to_text(out, end - out, k.k); if (k.k->type >= KEY_TYPE_GENERIC_NR && ops->val_to_text) { @@ -114,13 +114,13 @@ void bch_bkey_val_to_text(struct bch_fs *c, enum bkey_type type, } } -void bch_bkey_swab(enum bkey_type type, +void bch2_bkey_swab(enum bkey_type type, const struct bkey_format *f, struct bkey_packed *k) { - const struct bkey_ops *ops = bch_bkey_ops[type]; + const struct bkey_ops *ops = bch2_bkey_ops[type]; - bch_bkey_swab_key(f, k); + bch2_bkey_swab_key(f, k); if (ops->swab) ops->swab(f, k); diff --git a/libbcache/bkey_methods.h b/libbcachefs/bkey_methods.h index 111b1789..d372fa61 100644 --- a/libbcache/bkey_methods.h +++ b/libbcachefs/bkey_methods.h @@ -62,20 +62,20 @@ struct bkey_ops { bool is_extents; }; -const char *bkey_invalid(struct bch_fs *, enum bkey_type, struct bkey_s_c); -const char *btree_bkey_invalid(struct bch_fs *, struct btree *, - struct bkey_s_c); +const char *bch2_bkey_invalid(struct bch_fs *, enum bkey_type, struct bkey_s_c); +const char *bch2_btree_bkey_invalid(struct bch_fs *, struct btree *, + struct bkey_s_c); -void bkey_debugcheck(struct bch_fs *, struct btree *, struct bkey_s_c); -void bch_val_to_text(struct bch_fs *, enum bkey_type, - char *, size_t, struct bkey_s_c); -void bch_bkey_val_to_text(struct bch_fs *, enum bkey_type, - char *, size_t, struct bkey_s_c); +void bch2_bkey_debugcheck(struct bch_fs *, struct btree *, struct bkey_s_c); +void bch2_val_to_text(struct bch_fs *, enum bkey_type, + char *, size_t, struct bkey_s_c); +void bch2_bkey_val_to_text(struct bch_fs *, enum bkey_type, + char *, size_t, struct bkey_s_c); -void bch_bkey_swab(enum bkey_type, const struct bkey_format *, - struct bkey_packed *); +void bch2_bkey_swab(enum bkey_type, const struct bkey_format *, + struct bkey_packed *); -extern const struct bkey_ops *bch_bkey_ops[]; +extern const struct bkey_ops *bch2_bkey_ops[]; #undef DEF_BTREE_ID diff --git a/libbcache/bset.c b/libbcachefs/bset.c index a88d8017..280dcf3e 100644 --- a/libbcache/bset.c +++ b/libbcachefs/bset.c @@ -5,11 +5,10 @@ * Copyright 2012 Google, Inc. */ -#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__ - +#include "bcachefs.h" +#include "bset.h" #include "eytzinger.h" #include "util.h" -#include "bset.h" #include <asm/unaligned.h> #include <linux/dynamic_fault.h> @@ -19,9 +18,9 @@ /* hack.. 
*/ #include "alloc_types.h" -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> -struct bset_tree *bch_bkey_to_bset(struct btree *b, struct bkey_packed *k) +struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k) { struct bset_tree *t; @@ -49,7 +48,7 @@ struct bset_tree *bch_bkey_to_bset(struct btree *b, struct bkey_packed *k) * by the time we actually do the insert will all be deleted. */ -void bch_dump_bset(struct btree *b, struct bset *i, unsigned set) +void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set) { struct bkey_packed *_k, *_n; struct bkey k, n; @@ -63,7 +62,7 @@ void bch_dump_bset(struct btree *b, struct bset *i, unsigned set) _k = _n, k = n) { _n = bkey_next(_k); - bch_bkey_to_text(buf, sizeof(buf), &k); + bch2_bkey_to_text(buf, sizeof(buf), &k); printk(KERN_ERR "block %u key %zi/%u: %s\n", set, _k->_data - i->_data, i->u64s, buf); @@ -90,17 +89,17 @@ void bch_dump_bset(struct btree *b, struct bset *i, unsigned set) } } -void bch_dump_btree_node(struct btree *b) +void bch2_dump_btree_node(struct btree *b) { struct bset_tree *t; console_lock(); for_each_bset(b, t) - bch_dump_bset(b, bset(b, t), t - b->set); + bch2_dump_bset(b, bset(b, t), t - b->set); console_unlock(); } -void bch_dump_btree_node_iter(struct btree *b, +void bch2_dump_btree_node_iter(struct btree *b, struct btree_node_iter *iter) { struct btree_node_iter_set *set; @@ -109,17 +108,17 @@ void bch_dump_btree_node_iter(struct btree *b, btree_node_iter_for_each(iter, set) { struct bkey_packed *k = __btree_node_offset_to_key(b, set->k); - struct bset_tree *t = bch_bkey_to_bset(b, k); + struct bset_tree *t = bch2_bkey_to_bset(b, k); struct bkey uk = bkey_unpack_key(b, k); char buf[100]; - bch_bkey_to_text(buf, sizeof(buf), &uk); + bch2_bkey_to_text(buf, sizeof(buf), &uk); printk(KERN_ERR "set %zu key %zi/%u: %s\n", t - b->set, k->_data - bset(b, t)->_data, bset(b, t)->u64s, buf); } } -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG static bool keys_out_of_order(struct btree *b, const struct bkey_packed *prev, @@ -135,7 +134,7 @@ static bool keys_out_of_order(struct btree *b, !bkey_cmp_packed(b, prev, next)); } -void __bch_verify_btree_nr_keys(struct btree *b) +void __bch2_verify_btree_nr_keys(struct btree *b) { struct bset_tree *t; struct bkey_packed *k; @@ -151,11 +150,11 @@ void __bch_verify_btree_nr_keys(struct btree *b) BUG_ON(memcmp(&nr, &b->nr, sizeof(nr))); } -static void bch_btree_node_iter_next_check(struct btree_node_iter *iter, +static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter, struct btree *b, struct bkey_packed *k) { - const struct bkey_packed *n = bch_btree_node_iter_peek_all(iter, b); + const struct bkey_packed *n = bch2_btree_node_iter_peek_all(iter, b); bkey_unpack_key(b, k); @@ -165,14 +164,14 @@ static void bch_btree_node_iter_next_check(struct btree_node_iter *iter, struct bkey nu = bkey_unpack_key(b, n); char buf1[80], buf2[80]; - bch_dump_btree_node(b); - bch_bkey_to_text(buf1, sizeof(buf1), &ku); - bch_bkey_to_text(buf2, sizeof(buf2), &nu); + bch2_dump_btree_node(b); + bch2_bkey_to_text(buf1, sizeof(buf1), &ku); + bch2_bkey_to_text(buf2, sizeof(buf2), &nu); panic("out of order/overlapping:\n%s\n%s\n", buf1, buf2); } } -void bch_btree_node_iter_verify(struct btree_node_iter *iter, +void bch2_btree_node_iter_verify(struct btree_node_iter *iter, struct btree *b) { struct btree_node_iter_set *set; @@ -186,7 +185,7 @@ void bch_btree_node_iter_verify(struct btree_node_iter *iter, btree_node_iter_for_each(iter, set) { k 
= __btree_node_offset_to_key(b, set->k); - t = bch_bkey_to_bset(b, k); + t = bch2_bkey_to_bset(b, k); BUG_ON(__btree_node_offset_to_key(b, set->end) != btree_bkey_last(b, t)); @@ -198,30 +197,30 @@ void bch_btree_node_iter_verify(struct btree_node_iter *iter, first = __btree_node_offset_to_key(b, iter->data[0].k); for_each_bset(b, t) - if (bch_btree_node_iter_bset_pos(iter, b, t) == + if (bch2_btree_node_iter_bset_pos(iter, b, t) == btree_bkey_last(b, t) && - (k = bkey_prev_all(b, t, btree_bkey_last(b, t)))) + (k = bch2_bkey_prev_all(b, t, btree_bkey_last(b, t)))) BUG_ON(__btree_node_iter_cmp(iter->is_extents, b, k, first) > 0); } -void bch_verify_key_order(struct btree *b, +void bch2_verify_key_order(struct btree *b, struct btree_node_iter *iter, struct bkey_packed *where) { - struct bset_tree *t = bch_bkey_to_bset(b, where); + struct bset_tree *t = bch2_bkey_to_bset(b, where); struct bkey_packed *k, *prev; struct bkey uk, uw = bkey_unpack_key(b, where); - k = bkey_prev_all(b, t, where); + k = bch2_bkey_prev_all(b, t, where); if (k && keys_out_of_order(b, k, where, iter->is_extents)) { char buf1[100], buf2[100]; - bch_dump_btree_node(b); + bch2_dump_btree_node(b); uk = bkey_unpack_key(b, k); - bch_bkey_to_text(buf1, sizeof(buf1), &uk); - bch_bkey_to_text(buf2, sizeof(buf2), &uw); + bch2_bkey_to_text(buf1, sizeof(buf1), &uk); + bch2_bkey_to_text(buf2, sizeof(buf2), &uw); panic("out of order with prev:\n%s\n%s\n", buf1, buf2); } @@ -235,13 +234,13 @@ void bch_verify_key_order(struct btree *b, where < btree_bkey_last(b, t)) continue; - k = bch_btree_node_iter_bset_pos(iter, b, t); + k = bch2_btree_node_iter_bset_pos(iter, b, t); if (k == btree_bkey_last(b, t)) - k = bkey_prev_all(b, t, k); + k = bch2_bkey_prev_all(b, t, k); while (bkey_cmp_left_packed_byval(b, k, bkey_start_pos(&uw)) > 0 && - (prev = bkey_prev_all(b, t, k))) + (prev = bch2_bkey_prev_all(b, t, k))) k = prev; for (; @@ -265,7 +264,7 @@ void bch_verify_key_order(struct btree *b, #else -static void bch_btree_node_iter_next_check(struct btree_node_iter *iter, +static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter, struct btree *b, struct bkey_packed *k) {} @@ -414,7 +413,7 @@ static struct bkey_float *bkey_float(const struct btree *b, static void bset_aux_tree_verify(struct btree *b) { -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG struct bset_tree *t; for_each_bset(b, t) { @@ -433,13 +432,13 @@ static void bset_aux_tree_verify(struct btree *b) /* Memory allocation */ -void bch_btree_keys_free(struct btree *b) +void bch2_btree_keys_free(struct btree *b) { vfree(b->aux_data); b->aux_data = NULL; } -int bch_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp) +int bch2_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp) { b->page_order = page_order; b->aux_data = __vmalloc(btree_aux_data_bytes(b), gfp, @@ -450,19 +449,19 @@ int bch_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp) return 0; } -void bch_btree_keys_init(struct btree *b, bool *expensive_debug_checks) +void bch2_btree_keys_init(struct btree *b, bool *expensive_debug_checks) { unsigned i; b->nsets = 0; memset(&b->nr, 0, sizeof(b->nr)); -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG b->expensive_debug_checks = expensive_debug_checks; #endif for (i = 0; i < MAX_BSETS; i++) b->set[i].data_offset = U16_MAX; - bch_bset_set_no_aux_tree(b, b->set); + bch2_bset_set_no_aux_tree(b, b->set); } /* Binary tree stuff for auxiliary search trees */ @@ -578,7 +577,7 @@ static void rw_aux_tree_set(const 
struct btree *b, struct bset_tree *t, }; } -static void bch_bset_verify_rw_aux_tree(struct btree *b, +static void bch2_bset_verify_rw_aux_tree(struct btree *b, struct bset_tree *t) { struct bkey_packed *k = btree_bkey_first(b, t); @@ -753,7 +752,7 @@ static void make_bfloat(struct btree *b, struct bset_tree *t, * Note that this may be negative - we may be running off the low end * of the key: we handle this later: */ - exponent = (int) bkey_greatest_differing_bit(b, l, r) - (bits - 1); + exponent = (int) bch2_bkey_greatest_differing_bit(b, l, r) - (bits - 1); /* * Then we calculate the actual shift value, from the start of the key @@ -803,7 +802,7 @@ static void make_bfloat(struct btree *b, struct bset_tree *t, * the comparison in bset_search_tree. If we're dropping set bits, * increment it: */ - if (exponent > (int) bkey_ffs(b, m)) { + if (exponent > (int) bch2_bkey_ffs(b, m)) { if (j < BFLOAT_32BIT_NR ? f->mantissa32 == U32_MAX : f->mantissa16 == U16_MAX) @@ -917,7 +916,7 @@ static void bset_alloc_tree(struct btree *b, struct bset_tree *t) for (i = b->set; i != t; i++) BUG_ON(bset_has_rw_aux_tree(i)); - bch_bset_set_no_aux_tree(b, t); + bch2_bset_set_no_aux_tree(b, t); /* round up to next cacheline: */ t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t), @@ -926,7 +925,7 @@ static void bset_alloc_tree(struct btree *b, struct bset_tree *t) bset_aux_tree_verify(b); } -void bch_bset_build_aux_tree(struct btree *b, struct bset_tree *t, +void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t, bool writeable) { if (writeable @@ -947,7 +946,7 @@ void bch_bset_build_aux_tree(struct btree *b, struct bset_tree *t, bset_aux_tree_verify(b); } -void bch_bset_init_first(struct btree *b, struct bset *i) +void bch2_bset_init_first(struct btree *b, struct bset *i) { struct bset_tree *t; @@ -961,7 +960,7 @@ void bch_bset_init_first(struct btree *b, struct bset *i) set_btree_bset(b, t, i); } -void bch_bset_init_next(struct btree *b, struct bset *i) +void bch2_bset_init_next(struct btree *b, struct bset *i) { struct bset_tree *t; @@ -1013,8 +1012,8 @@ static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t, return p; } -struct bkey_packed *bkey_prev_all(struct btree *b, struct bset_tree *t, - struct bkey_packed *k) +struct bkey_packed *bch2_bkey_prev_all(struct btree *b, struct bset_tree *t, + struct bkey_packed *k) { struct bkey_packed *p; @@ -1028,8 +1027,8 @@ struct bkey_packed *bkey_prev_all(struct btree *b, struct bset_tree *t, return p; } -struct bkey_packed *bkey_prev(struct btree *b, struct bset_tree *t, - struct bkey_packed *k) +struct bkey_packed *bch2_bkey_prev(struct btree *b, struct bset_tree *t, + struct bkey_packed *k) { while (1) { struct bkey_packed *p, *i, *ret = NULL; @@ -1062,7 +1061,7 @@ static void rw_aux_tree_fix_invalidated_key(struct btree *b, rw_aux_tree(b, t)[j].offset == offset) rw_aux_tree_set(b, t, j, k); - bch_bset_verify_rw_aux_tree(b, t); + bch2_bset_verify_rw_aux_tree(b, t); } static void ro_aux_tree_fix_invalidated_key(struct btree *b, @@ -1118,12 +1117,12 @@ static void ro_aux_tree_fix_invalidated_key(struct btree *b, } /** - * bch_bset_fix_invalidated_key() - given an existing key @k that has been + * bch2_bset_fix_invalidated_key() - given an existing key @k that has been * modified, fix any auxiliary search tree by remaking all the nodes in the * auxiliary search tree that @k corresponds to */ -void bch_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t, - struct bkey_packed *k) +void 
bch2_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t, + struct bkey_packed *k) { switch (bset_aux_tree_type(t)) { case BSET_NO_AUX_TREE: @@ -1137,11 +1136,11 @@ void bch_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t, } } -static void bch_bset_fix_lookup_table(struct btree *b, - struct bset_tree *t, - struct bkey_packed *_where, - unsigned clobber_u64s, - unsigned new_u64s) +static void bch2_bset_fix_lookup_table(struct btree *b, + struct bset_tree *t, + struct bkey_packed *_where, + unsigned clobber_u64s, + unsigned new_u64s) { int shift = new_u64s - clobber_u64s; unsigned l, j, where = __btree_node_key_to_offset(b, _where); @@ -1220,23 +1219,23 @@ static void bch_bset_fix_lookup_table(struct btree *b, } } - bch_bset_verify_rw_aux_tree(b, t); + bch2_bset_verify_rw_aux_tree(b, t); bset_aux_tree_verify(b); } -void bch_bset_insert(struct btree *b, - struct btree_node_iter *iter, - struct bkey_packed *where, - struct bkey_i *insert, - unsigned clobber_u64s) +void bch2_bset_insert(struct btree *b, + struct btree_node_iter *iter, + struct bkey_packed *where, + struct bkey_i *insert, + unsigned clobber_u64s) { struct bkey_format *f = &b->format; struct bset_tree *t = bset_tree_last(b); struct bkey_packed packed, *src = bkey_to_packed(insert); - bch_bset_verify_rw_aux_tree(b, t); + bch2_bset_verify_rw_aux_tree(b, t); - if (bkey_pack_key(&packed, &insert->k, f)) + if (bch2_bkey_pack_key(&packed, &insert->k, f)) src = &packed; if (!bkey_whiteout(&insert->k)) @@ -1259,21 +1258,21 @@ void bch_bset_insert(struct btree *b, memcpy_u64s(bkeyp_val(f, where), &insert->v, bkeyp_val_u64s(f, src)); - bch_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s); + bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s); - bch_verify_key_order(b, iter, where); - bch_verify_btree_nr_keys(b); + bch2_verify_key_order(b, iter, where); + bch2_verify_btree_nr_keys(b); } -void bch_bset_delete(struct btree *b, - struct bkey_packed *where, - unsigned clobber_u64s) +void bch2_bset_delete(struct btree *b, + struct bkey_packed *where, + unsigned clobber_u64s) { struct bset_tree *t = bset_tree_last(b); u64 *src_p = where->_data + clobber_u64s; u64 *dst_p = where->_data; - bch_bset_verify_rw_aux_tree(b, t); + bch2_bset_verify_rw_aux_tree(b, t); BUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s); @@ -1281,7 +1280,7 @@ void bch_bset_delete(struct btree *b, le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s); set_btree_bset_end(b, t); - bch_bset_fix_lookup_table(b, t, where, clobber_u64s, 0); + bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0); } /* Lookup */ @@ -1385,7 +1384,7 @@ static struct bkey_packed *bset_search_tree(const struct btree *b, * Returns the first key greater than or equal to @search */ __always_inline __flatten -static struct bkey_packed *bch_bset_search(struct btree *b, +static struct bkey_packed *bch2_bset_search(struct btree *b, struct bset_tree *t, struct bpos search, struct bkey_packed *packed_search, @@ -1442,8 +1441,8 @@ static struct bkey_packed *bch_bset_search(struct btree *b, !btree_iter_pos_cmp_packed(b, &search, m, strictly_greater)) m = bkey_next(m); - if (IS_ENABLED(CONFIG_BCACHE_DEBUG)) { - struct bkey_packed *prev = bkey_prev_all(b, t, m); + if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) { + struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m); BUG_ON(prev && btree_iter_pos_cmp_p_or_unp(b, search, packed_search, @@ -1455,10 +1454,10 @@ static struct bkey_packed *bch_bset_search(struct btree *b, /* Btree node iterator */ -void 
bch_btree_node_iter_push(struct btree_node_iter *iter, - struct btree *b, - const struct bkey_packed *k, - const struct bkey_packed *end) +void bch2_btree_node_iter_push(struct btree_node_iter *iter, + struct btree *b, + const struct bkey_packed *k, + const struct bkey_packed *end) { if (k != end) { struct btree_node_iter_set *pos, n = @@ -1488,12 +1487,12 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter, trace_bkey_pack_pos_fail(search); for_each_bset(b, t) - __bch_btree_node_iter_push(iter, b, - bch_bset_search(b, t, search, NULL, NULL, + __bch2_btree_node_iter_push(iter, b, + bch2_bset_search(b, t, search, NULL, NULL, strictly_greater), btree_bkey_last(b, t)); - bch_btree_node_iter_sort(iter, b); + bch2_btree_node_iter_sort(iter, b); } /** @@ -1536,9 +1535,9 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter, * So we've got to search for start_of_range, then after the lookup iterate * past any extents that compare equal to the position we searched for. */ -void bch_btree_node_iter_init(struct btree_node_iter *iter, - struct btree *b, struct bpos search, - bool strictly_greater, bool is_extents) +void bch2_btree_node_iter_init(struct btree_node_iter *iter, + struct btree *b, struct bpos search, + bool strictly_greater, bool is_extents) { struct bset_tree *t; struct bkey_packed p, *packed_search = NULL; @@ -1546,12 +1545,12 @@ void bch_btree_node_iter_init(struct btree_node_iter *iter, EBUG_ON(bkey_cmp(search, b->data->min_key) < 0); bset_aux_tree_verify(b); - __bch_btree_node_iter_init(iter, is_extents); + __bch2_btree_node_iter_init(iter, is_extents); //if (bkey_cmp(search, b->curr_max_key) > 0) // return; - switch (bkey_pack_pos_lossy(&p, search, b)) { + switch (bch2_bkey_pack_pos_lossy(&p, search, b)) { case BKEY_PACK_POS_EXACT: packed_search = &p; break; @@ -1565,33 +1564,33 @@ void bch_btree_node_iter_init(struct btree_node_iter *iter, } for_each_bset(b, t) - __bch_btree_node_iter_push(iter, b, - bch_bset_search(b, t, search, + __bch2_btree_node_iter_push(iter, b, + bch2_bset_search(b, t, search, packed_search, &p, strictly_greater), btree_bkey_last(b, t)); - bch_btree_node_iter_sort(iter, b); + bch2_btree_node_iter_sort(iter, b); } -void bch_btree_node_iter_init_from_start(struct btree_node_iter *iter, - struct btree *b, - bool is_extents) +void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter, + struct btree *b, + bool is_extents) { struct bset_tree *t; - __bch_btree_node_iter_init(iter, is_extents); + __bch2_btree_node_iter_init(iter, is_extents); for_each_bset(b, t) - __bch_btree_node_iter_push(iter, b, + __bch2_btree_node_iter_push(iter, b, btree_bkey_first(b, t), btree_bkey_last(b, t)); - bch_btree_node_iter_sort(iter, b); + bch2_btree_node_iter_sort(iter, b); } -struct bkey_packed *bch_btree_node_iter_bset_pos(struct btree_node_iter *iter, - struct btree *b, - struct bset_tree *t) +struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter, + struct btree *b, + struct bset_tree *t) { struct btree_node_iter_set *set; @@ -1629,8 +1628,8 @@ static inline void btree_node_iter_sort_two(struct btree_node_iter *iter, swap(iter->data[first], iter->data[first + 1]); } -void bch_btree_node_iter_sort(struct btree_node_iter *iter, - struct btree *b) +void bch2_btree_node_iter_sort(struct btree_node_iter *iter, + struct btree *b) { EBUG_ON(iter->used > 3); @@ -1644,7 +1643,6 @@ void bch_btree_node_iter_sort(struct btree_node_iter *iter, if (iter->used > 1) btree_node_iter_sort_two(iter, b, 0); } 
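/*
 * Illustrative sketch only (not part of this diff): how the renamed node
 * iterator entry points fit together, mirroring the for_each_btree_node_key()
 * macro defined in bset.h. The helper name example_walk_node_keys() is
 * hypothetical; it assumes @b is a btree node already in memory and
 * @is_extents matches the node's btree type.
 */
static void example_walk_node_keys(struct btree *b, bool is_extents)
{
	struct btree_node_iter iter;
	struct bkey_packed *k;

	/* position the iterator at the first key across all bsets in the node */
	bch2_btree_node_iter_init_from_start(&iter, b, is_extents);

	/* peek skips deleted keys; advance steps to the next key in sort order */
	while ((k = bch2_btree_node_iter_peek(&iter, b))) {
		struct bkey uk = bkey_unpack_key(b, k);

		pr_info("key %llu:%llu\n", uk.p.inode, uk.p.offset);
		bch2_btree_node_iter_advance(&iter, b);
	}
}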
-EXPORT_SYMBOL(bch_btree_node_iter_sort); /** * bch_btree_node_iter_advance - advance @iter by one key @@ -1652,12 +1650,12 @@ EXPORT_SYMBOL(bch_btree_node_iter_sort); * Doesn't do debugchecks - for cases where (insert_fixup_extent()) a bset might * momentarily have out of order extents. */ -void bch_btree_node_iter_advance(struct btree_node_iter *iter, - struct btree *b) +void bch2_btree_node_iter_advance(struct btree_node_iter *iter, + struct btree *b) { - struct bkey_packed *k = bch_btree_node_iter_peek_all(iter, b); + struct bkey_packed *k = bch2_btree_node_iter_peek_all(iter, b); - iter->data->k += __bch_btree_node_iter_peek_all(iter, b)->u64s; + iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s; BUG_ON(iter->data->k > iter->data->end); @@ -1668,14 +1666,14 @@ void bch_btree_node_iter_advance(struct btree_node_iter *iter, btree_node_iter_sift(iter, b, 0); - bch_btree_node_iter_next_check(iter, b, k); + bch2_btree_node_iter_next_check(iter, b, k); } /* * Expensive: */ -struct bkey_packed *bch_btree_node_iter_prev_all(struct btree_node_iter *iter, - struct btree *b) +struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter, + struct btree *b) { struct bkey_packed *k, *prev = NULL; struct btree_node_iter_set *set; @@ -1683,11 +1681,11 @@ struct bkey_packed *bch_btree_node_iter_prev_all(struct btree_node_iter *iter, struct bset_tree *prev_t; unsigned end; - bch_btree_node_iter_verify(iter, b); + bch2_btree_node_iter_verify(iter, b); for_each_bset(b, t) { - k = bkey_prev_all(b, t, - bch_btree_node_iter_bset_pos(iter, b, t)); + k = bch2_bkey_prev_all(b, t, + bch2_btree_node_iter_bset_pos(iter, b, t)); if (k && (!prev || __btree_node_iter_cmp(iter->is_extents, b, k, prev) > 0)) { @@ -1723,31 +1721,30 @@ out: return prev; } -struct bkey_packed *bch_btree_node_iter_prev(struct btree_node_iter *iter, - struct btree *b) +struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter, + struct btree *b) { struct bkey_packed *k; do { - k = bch_btree_node_iter_prev_all(iter, b); + k = bch2_btree_node_iter_prev_all(iter, b); } while (k && bkey_deleted(k)); return k; } -struct bkey_s_c bch_btree_node_iter_peek_unpack(struct btree_node_iter *iter, - struct btree *b, - struct bkey *u) +struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter, + struct btree *b, + struct bkey *u) { - struct bkey_packed *k = bch_btree_node_iter_peek(iter, b); + struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b); return k ? bkey_disassemble(b, k, u) : bkey_s_c_null; } -EXPORT_SYMBOL(bch_btree_node_iter_peek_unpack); /* Mergesort */ -void bch_btree_keys_stats(struct btree *b, struct bset_stats *stats) +void bch2_btree_keys_stats(struct btree *b, struct bset_stats *stats) { struct bset_tree *t; @@ -1778,10 +1775,10 @@ void bch_btree_keys_stats(struct btree *b, struct bset_stats *stats) } } -int bch_bkey_print_bfloat(struct btree *b, struct bkey_packed *k, - char *buf, size_t size) +int bch2_bkey_print_bfloat(struct btree *b, struct bkey_packed *k, + char *buf, size_t size) { - struct bset_tree *t = bch_bkey_to_bset(b, k); + struct bset_tree *t = bch2_bkey_to_bset(b, k); struct bkey_packed *l, *r, *p; struct bkey uk, up; char buf1[200], buf2[200]; @@ -1811,13 +1808,13 @@ int bch_bkey_print_bfloat(struct btree *b, struct bkey_packed *k, ? btree_bkey_first(b, t) : tree_to_prev_bkey(b, t, j >> ffs(j)); r = is_power_of_2(j + 1) - ? bkey_prev_all(b, t, btree_bkey_last(b, t)) + ? 
bch2_bkey_prev_all(b, t, btree_bkey_last(b, t)) : tree_to_bkey(b, t, j >> (ffz(j) + 1)); up = bkey_unpack_key(b, p); uk = bkey_unpack_key(b, k); - bch_to_binary(buf1, high_word(&b->format, p), b->nr_key_bits); - bch_to_binary(buf2, high_word(&b->format, k), b->nr_key_bits); + bch2_to_binary(buf1, high_word(&b->format, p), b->nr_key_bits); + bch2_to_binary(buf2, high_word(&b->format, k), b->nr_key_bits); return scnprintf(buf, size, " failed prev at depth %u\n" @@ -1827,8 +1824,8 @@ int bch_bkey_print_bfloat(struct btree *b, struct bkey_packed *k, "\t%s\n" "\t%s\n", ilog2(j), - bkey_greatest_differing_bit(b, l, r), - bkey_greatest_differing_bit(b, p, k), + bch2_bkey_greatest_differing_bit(b, l, r), + bch2_bkey_greatest_differing_bit(b, p, k), uk.p.inode, uk.p.offset, up.p.inode, up.p.offset, buf1, buf2); diff --git a/libbcache/bset.h b/libbcachefs/bset.h index 70868c51..76a83fcb 100644 --- a/libbcache/bset.h +++ b/libbcachefs/bset.h @@ -1,10 +1,10 @@ #ifndef _BCACHE_BSET_H #define _BCACHE_BSET_H -#include <linux/bcache.h> #include <linux/kernel.h> #include <linux/types.h> +#include "bcachefs_format.h" #include "bkey.h" #include "bkey_methods.h" #include "btree_types.h" @@ -188,14 +188,14 @@ bkey_unpack_key_format_checked(const struct btree *b, compiled_unpack_fn unpack_fn = b->aux_data; unpack_fn(&dst, src); - if (IS_ENABLED(CONFIG_BCACHE_DEBUG)) { - struct bkey dst2 = __bkey_unpack_key(&b->format, src); + if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) { + struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src); BUG_ON(memcmp(&dst, &dst2, sizeof(dst))); } } #else - dst = __bkey_unpack_key(&b->format, src); + dst = __bch2_bkey_unpack_key(&b->format, src); #endif return dst; } @@ -254,12 +254,12 @@ static inline struct bkey_s __bkey_disassemble(struct btree *b, #define for_each_bset(_b, _t) \ for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++) -extern bool bch_expensive_debug_checks; +extern bool bch2_expensive_debug_checks; static inline bool btree_keys_expensive_checks(struct btree *b) { -#ifdef CONFIG_BCACHE_DEBUG - return bch_expensive_debug_checks || *b->expensive_debug_checks; +#ifdef CONFIG_BCACHEFS_DEBUG + return bch2_expensive_debug_checks || *b->expensive_debug_checks; #else return false; #endif @@ -275,7 +275,7 @@ static inline bool bset_has_rw_aux_tree(struct bset_tree *t) return bset_aux_tree_type(t) == BSET_RW_AUX_TREE; } -static inline void bch_bset_set_no_aux_tree(struct btree *b, +static inline void bch2_bset_set_no_aux_tree(struct btree *b, struct bset_tree *t) { BUG_ON(t < b->set); @@ -295,12 +295,12 @@ static inline void btree_node_set_format(struct btree *b, b->format = f; b->nr_key_bits = bkey_format_key_bits(&f); - len = bch_compile_bkey_format(&b->format, b->aux_data); + len = bch2_compile_bkey_format(&b->format, b->aux_data); BUG_ON(len < 0 || len > U8_MAX); b->unpack_fn_len = len; - bch_bset_set_no_aux_tree(b, b->set); + bch2_bset_set_no_aux_tree(b, b->set); } static inline struct bset *bset_next_set(struct btree *b, @@ -313,19 +313,19 @@ static inline struct bset *bset_next_set(struct btree *b, return ((void *) i) + round_up(vstruct_bytes(i), block_bytes); } -void bch_btree_keys_free(struct btree *); -int bch_btree_keys_alloc(struct btree *, unsigned, gfp_t); -void bch_btree_keys_init(struct btree *, bool *); +void bch2_btree_keys_free(struct btree *); +int bch2_btree_keys_alloc(struct btree *, unsigned, gfp_t); +void bch2_btree_keys_init(struct btree *, bool *); -void bch_bset_init_first(struct btree *, struct bset *); -void bch_bset_init_next(struct btree *, struct 
bset *); -void bch_bset_build_aux_tree(struct btree *, struct bset_tree *, bool); -void bch_bset_fix_invalidated_key(struct btree *, struct bset_tree *, +void bch2_bset_init_first(struct btree *, struct bset *); +void bch2_bset_init_next(struct btree *, struct bset *); +void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool); +void bch2_bset_fix_invalidated_key(struct btree *, struct bset_tree *, struct bkey_packed *); -void bch_bset_insert(struct btree *, struct btree_node_iter *, +void bch2_bset_insert(struct btree *, struct btree_node_iter *, struct bkey_packed *, struct bkey_i *, unsigned); -void bch_bset_delete(struct btree *, struct bkey_packed *, unsigned); +void bch2_bset_delete(struct btree *, struct bkey_packed *, unsigned); /* Bkey utility code */ @@ -341,9 +341,9 @@ static inline int bkey_cmp_p_or_unp(const struct btree *b, return bkey_cmp(packed_to_bkey_c(l)->p, *r); if (likely(r_packed)) - return __bkey_cmp_packed_format_checked(l, r_packed, b); + return __bch2_bkey_cmp_packed_format_checked(l, r_packed, b); - return __bkey_cmp_left_packed_format_checked(b, l, r); + return __bch2_bkey_cmp_left_packed_format_checked(b, l, r); } /* Returns true if @k is after iterator position @pos */ @@ -379,11 +379,11 @@ static inline bool btree_iter_pos_cmp_p_or_unp(const struct btree *b, (cmp == 0 && !strictly_greater && !bkey_deleted(k)); } -struct bset_tree *bch_bkey_to_bset(struct btree *, struct bkey_packed *); -struct bkey_packed *bkey_prev_all(struct btree *, struct bset_tree *, +struct bset_tree *bch2_bkey_to_bset(struct btree *, struct bkey_packed *); +struct bkey_packed *bch2_bkey_prev_all(struct btree *, struct bset_tree *, struct bkey_packed *); -struct bkey_packed *bkey_prev(struct btree *, struct bset_tree *, - struct bkey_packed *); +struct bkey_packed *bch2_bkey_prev(struct btree *, struct bset_tree *, + struct bkey_packed *); enum bch_extent_overlap { BCH_EXTENT_OVERLAP_ALL = 0, @@ -393,7 +393,7 @@ enum bch_extent_overlap { }; /* Returns how k overlaps with m */ -static inline enum bch_extent_overlap bch_extent_overlap(const struct bkey *k, +static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k, const struct bkey *m) { int cmp1 = bkey_cmp(k->p, m->p) < 0; @@ -414,33 +414,33 @@ struct btree_node_iter { } data[MAX_BSETS]; }; -static inline void __bch_btree_node_iter_init(struct btree_node_iter *iter, +static inline void __bch2_btree_node_iter_init(struct btree_node_iter *iter, bool is_extents) { iter->used = 0; iter->is_extents = is_extents; } -void bch_btree_node_iter_push(struct btree_node_iter *, struct btree *, +void bch2_btree_node_iter_push(struct btree_node_iter *, struct btree *, const struct bkey_packed *, const struct bkey_packed *); -void bch_btree_node_iter_init(struct btree_node_iter *, struct btree *, +void bch2_btree_node_iter_init(struct btree_node_iter *, struct btree *, struct bpos, bool, bool); -void bch_btree_node_iter_init_from_start(struct btree_node_iter *, +void bch2_btree_node_iter_init_from_start(struct btree_node_iter *, struct btree *, bool); -struct bkey_packed *bch_btree_node_iter_bset_pos(struct btree_node_iter *, +struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *, struct btree *, struct bset_tree *); -void bch_btree_node_iter_sort(struct btree_node_iter *, struct btree *); -void bch_btree_node_iter_advance(struct btree_node_iter *, struct btree *); +void bch2_btree_node_iter_sort(struct btree_node_iter *, struct btree *); +void bch2_btree_node_iter_advance(struct btree_node_iter 
*, struct btree *); #define btree_node_iter_for_each(_iter, _set) \ for (_set = (_iter)->data; \ _set < (_iter)->data + (_iter)->used; \ _set++) -static inline bool bch_btree_node_iter_end(struct btree_node_iter *iter) +static inline bool bch2_btree_node_iter_end(struct btree_node_iter *iter) { return !iter->used; } @@ -452,7 +452,7 @@ static inline int __btree_node_iter_cmp(bool is_extents, { /* * For non extents, when keys compare equal the deleted keys have to - * come first - so that bch_btree_node_iter_next_check() can detect + * come first - so that bch2_btree_node_iter_next_check() can detect * duplicate nondeleted keys (and possibly other reasons?) * * For extents, bkey_deleted() is used as a proxy for k->size == 0, so @@ -473,7 +473,7 @@ static inline int btree_node_iter_cmp(struct btree_node_iter *iter, __btree_node_offset_to_key(b, r.k)); } -static inline void __bch_btree_node_iter_push(struct btree_node_iter *iter, +static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter, struct btree *b, const struct bkey_packed *k, const struct bkey_packed *end) @@ -486,47 +486,47 @@ static inline void __bch_btree_node_iter_push(struct btree_node_iter *iter, } static inline struct bkey_packed * -__bch_btree_node_iter_peek_all(struct btree_node_iter *iter, +__bch2_btree_node_iter_peek_all(struct btree_node_iter *iter, struct btree *b) { return __btree_node_offset_to_key(b, iter->data->k); } static inline struct bkey_packed * -bch_btree_node_iter_peek_all(struct btree_node_iter *iter, +bch2_btree_node_iter_peek_all(struct btree_node_iter *iter, struct btree *b) { - return bch_btree_node_iter_end(iter) + return bch2_btree_node_iter_end(iter) ? NULL - : __bch_btree_node_iter_peek_all(iter, b); + : __bch2_btree_node_iter_peek_all(iter, b); } static inline struct bkey_packed * -bch_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b) +bch2_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b) { struct bkey_packed *ret; - while ((ret = bch_btree_node_iter_peek_all(iter, b)) && + while ((ret = bch2_btree_node_iter_peek_all(iter, b)) && bkey_deleted(ret)) - bch_btree_node_iter_advance(iter, b); + bch2_btree_node_iter_advance(iter, b); return ret; } static inline struct bkey_packed * -bch_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b) +bch2_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b) { - struct bkey_packed *ret = bch_btree_node_iter_peek_all(iter, b); + struct bkey_packed *ret = bch2_btree_node_iter_peek_all(iter, b); if (ret) - bch_btree_node_iter_advance(iter, b); + bch2_btree_node_iter_advance(iter, b); return ret; } -struct bkey_packed *bch_btree_node_iter_prev_all(struct btree_node_iter *, +struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *, struct btree *); -struct bkey_packed *bch_btree_node_iter_prev(struct btree_node_iter *, +struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *, struct btree *); /* @@ -534,18 +534,18 @@ struct bkey_packed *bch_btree_node_iter_prev(struct btree_node_iter *, * overlapping) keys */ #define for_each_btree_node_key(b, k, iter, _is_extents) \ - for (bch_btree_node_iter_init_from_start((iter), (b), (_is_extents));\ - ((k) = bch_btree_node_iter_peek(iter, b)); \ - bch_btree_node_iter_advance(iter, b)) + for (bch2_btree_node_iter_init_from_start((iter), (b), (_is_extents));\ + ((k) = bch2_btree_node_iter_peek(iter, b)); \ + bch2_btree_node_iter_advance(iter, b)) -struct bkey_s_c bch_btree_node_iter_peek_unpack(struct 
btree_node_iter *, +struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *, struct btree *, struct bkey *); #define for_each_btree_node_key_unpack(b, k, iter, _is_extents, unpacked)\ - for (bch_btree_node_iter_init_from_start((iter), (b), (_is_extents));\ - (k = bch_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\ - bch_btree_node_iter_advance(iter, b)) + for (bch2_btree_node_iter_init_from_start((iter), (b), (_is_extents));\ + (k = bch2_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\ + bch2_btree_node_iter_advance(iter, b)) /* Accounting: */ @@ -579,37 +579,37 @@ struct bset_stats { size_t failed_overflow; }; -void bch_btree_keys_stats(struct btree *, struct bset_stats *); -int bch_bkey_print_bfloat(struct btree *, struct bkey_packed *, +void bch2_btree_keys_stats(struct btree *, struct bset_stats *); +int bch2_bkey_print_bfloat(struct btree *, struct bkey_packed *, char *, size_t); /* Debug stuff */ -void bch_dump_bset(struct btree *, struct bset *, unsigned); -void bch_dump_btree_node(struct btree *); -void bch_dump_btree_node_iter(struct btree *, struct btree_node_iter *); +void bch2_dump_bset(struct btree *, struct bset *, unsigned); +void bch2_dump_btree_node(struct btree *); +void bch2_dump_btree_node_iter(struct btree *, struct btree_node_iter *); -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG -void __bch_verify_btree_nr_keys(struct btree *); -void bch_btree_node_iter_verify(struct btree_node_iter *, struct btree *); -void bch_verify_key_order(struct btree *, struct btree_node_iter *, +void __bch2_verify_btree_nr_keys(struct btree *); +void bch2_btree_node_iter_verify(struct btree_node_iter *, struct btree *); +void bch2_verify_key_order(struct btree *, struct btree_node_iter *, struct bkey_packed *); #else -static inline void __bch_verify_btree_nr_keys(struct btree *b) {} -static inline void bch_btree_node_iter_verify(struct btree_node_iter *iter, +static inline void __bch2_verify_btree_nr_keys(struct btree *b) {} +static inline void bch2_btree_node_iter_verify(struct btree_node_iter *iter, struct btree *b) {} -static inline void bch_verify_key_order(struct btree *b, +static inline void bch2_verify_key_order(struct btree *b, struct btree_node_iter *iter, struct bkey_packed *where) {} #endif -static inline void bch_verify_btree_nr_keys(struct btree *b) +static inline void bch2_verify_btree_nr_keys(struct btree *b) { if (btree_keys_expensive_checks(b)) - __bch_verify_btree_nr_keys(b); + __bch2_verify_btree_nr_keys(b); } #endif diff --git a/libbcache/btree_cache.c b/libbcachefs/btree_cache.c index a43e12da..c4cc26f9 100644 --- a/libbcache/btree_cache.c +++ b/libbcachefs/btree_cache.c @@ -1,5 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" #include "btree_cache.h" #include "btree_io.h" #include "btree_iter.h" @@ -7,18 +7,18 @@ #include "debug.h" #include "extents.h" -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> #define DEF_BTREE_ID(kwd, val, name) name, -const char * const bch_btree_ids[] = { +const char * const bch2_btree_ids[] = { DEFINE_BCH_BTREE_IDS() NULL }; #undef DEF_BTREE_ID -void bch_recalc_btree_reserve(struct bch_fs *c) +void bch2_recalc_btree_reserve(struct bch_fs *c) { unsigned i, reserve = 16; @@ -42,7 +42,7 @@ static void __mca_data_free(struct bch_fs *c, struct btree *b) free_pages((unsigned long) b->data, btree_page_order(c)); b->data = NULL; - bch_btree_keys_free(b); + bch2_btree_keys_free(b); } static void mca_data_free(struct bch_fs *c, struct btree *b) @@ -68,7 +68,7 @@ static void 
mca_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp) if (!b->data) goto err; - if (bch_btree_keys_alloc(b, order, gfp)) + if (bch2_btree_keys_alloc(b, order, gfp)) goto err; c->btree_cache_used++; @@ -96,7 +96,7 @@ static struct btree *mca_bucket_alloc(struct bch_fs *c, gfp_t gfp) /* Btree in memory cache - hash table */ -void mca_hash_remove(struct bch_fs *c, struct btree *b) +void bch2_btree_node_hash_remove(struct bch_fs *c, struct btree *b) { BUG_ON(btree_node_dirty(b)); @@ -109,7 +109,7 @@ void mca_hash_remove(struct bch_fs *c, struct btree *b) bkey_i_to_extent(&b->key)->v._data[0] = 0; } -int mca_hash_insert(struct bch_fs *c, struct btree *b, +int bch2_btree_node_hash_insert(struct bch_fs *c, struct btree *b, unsigned level, enum btree_id id) { int ret; @@ -170,9 +170,9 @@ static int mca_reap_notrace(struct bch_fs *c, struct btree *b, bool flush) */ if (btree_node_dirty(b)) { if (verify_btree_ondisk(c)) - bch_btree_node_write(c, b, NULL, SIX_LOCK_intent, -1); + bch2_btree_node_write(c, b, NULL, SIX_LOCK_intent, -1); else - __bch_btree_node_write(c, b, NULL, SIX_LOCK_read, -1); + __bch2_btree_node_write(c, b, NULL, SIX_LOCK_read, -1); } /* wait for any in flight btree write */ @@ -191,12 +191,12 @@ static int mca_reap(struct bch_fs *c, struct btree *b, bool flush) { int ret = mca_reap_notrace(c, b, flush); - trace_bcache_mca_reap(c, b, ret); + trace_btree_node_reap(c, b, ret); return ret; } -static unsigned long bch_mca_scan(struct shrinker *shrink, - struct shrink_control *sc) +static unsigned long bch2_mca_scan(struct shrinker *shrink, + struct shrink_control *sc) { struct bch_fs *c = container_of(shrink, struct bch_fs, btree_cache_shrink); @@ -207,8 +207,6 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, unsigned long freed = 0; unsigned i; - u64 start_time = local_clock(); - if (btree_shrinker_disabled(c)) return SHRINK_STOP; @@ -260,7 +258,7 @@ restart: if (!btree_node_accessed(b) && !mca_reap(c, b, false)) { - /* can't call mca_hash_remove under btree_cache_lock */ + /* can't call bch2_btree_node_hash_remove under btree_cache_lock */ freed++; if (&t->list != &c->btree_cache) list_move_tail(&c->btree_cache, &t->list); @@ -268,7 +266,7 @@ restart: mca_data_free(c, b); mutex_unlock(&c->btree_cache_lock); - mca_hash_remove(c, b); + bch2_btree_node_hash_remove(c, b); six_unlock_write(&b->lock); six_unlock_intent(&b->lock); @@ -286,19 +284,11 @@ restart: mutex_unlock(&c->btree_cache_lock); out: - bch_time_stats_update(&c->mca_scan_time, start_time); - - trace_bcache_mca_scan(c, - touched * btree_pages(c), - freed * btree_pages(c), - can_free * btree_pages(c), - sc->nr_to_scan); - return (unsigned long) freed * btree_pages(c); } -static unsigned long bch_mca_count(struct shrinker *shrink, - struct shrink_control *sc) +static unsigned long bch2_mca_count(struct shrinker *shrink, + struct shrink_control *sc) { struct bch_fs *c = container_of(shrink, struct bch_fs, btree_cache_shrink); @@ -312,7 +302,7 @@ static unsigned long bch_mca_count(struct shrinker *shrink, return mca_can_free(c) * btree_pages(c); } -void bch_fs_btree_exit(struct bch_fs *c) +void bch2_fs_btree_exit(struct bch_fs *c) { struct btree *b; unsigned i; @@ -322,7 +312,7 @@ void bch_fs_btree_exit(struct bch_fs *c) mutex_lock(&c->btree_cache_lock); -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG if (c->verify_data) list_move(&c->verify_data->list, &c->btree_cache); @@ -340,7 +330,7 @@ void bch_fs_btree_exit(struct bch_fs *c) b = list_first_entry(&c->btree_cache, struct btree, list); if 
(btree_node_dirty(b)) - bch_btree_complete_write(c, b, btree_current_write(b)); + bch2_btree_complete_write(c, b, btree_current_write(b)); clear_btree_node_dirty(b); mca_data_free(c, b); @@ -359,7 +349,7 @@ void bch_fs_btree_exit(struct bch_fs *c) rhashtable_destroy(&c->btree_cache_table); } -int bch_fs_btree_init(struct bch_fs *c) +int bch2_fs_btree_init(struct bch_fs *c) { unsigned i; int ret; @@ -370,7 +360,7 @@ int bch_fs_btree_init(struct bch_fs *c) c->btree_cache_table_init_done = true; - bch_recalc_btree_reserve(c); + bch2_recalc_btree_reserve(c); for (i = 0; i < c->btree_cache_reserve; i++) if (!mca_bucket_alloc(c, GFP_KERNEL)) @@ -379,7 +369,7 @@ int bch_fs_btree_init(struct bch_fs *c) list_splice_init(&c->btree_cache, &c->btree_cache_freeable); -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG mutex_init(&c->verify_lock); c->verify_ondisk = (void *) @@ -394,8 +384,8 @@ int bch_fs_btree_init(struct bch_fs *c) list_del_init(&c->verify_data->list); #endif - c->btree_cache_shrink.count_objects = bch_mca_count; - c->btree_cache_shrink.scan_objects = bch_mca_scan; + c->btree_cache_shrink.count_objects = bch2_mca_count; + c->btree_cache_shrink.scan_objects = bch2_mca_scan; c->btree_cache_shrink.seeks = 4; c->btree_cache_shrink.batch = btree_pages(c) * 2; register_shrinker(&c->btree_cache_shrink); @@ -409,16 +399,16 @@ int bch_fs_btree_init(struct bch_fs *c) * cannibalize_bucket() will take. This means every time we unlock the root of * the btree, we need to release this lock if we have it held. */ -void mca_cannibalize_unlock(struct bch_fs *c) +void bch2_btree_node_cannibalize_unlock(struct bch_fs *c) { if (c->btree_cache_alloc_lock == current) { - trace_bcache_mca_cannibalize_unlock(c); + trace_btree_node_cannibalize_unlock(c); c->btree_cache_alloc_lock = NULL; closure_wake_up(&c->mca_wait); } } -int mca_cannibalize_lock(struct bch_fs *c, struct closure *cl) +int bch2_btree_node_cannibalize_lock(struct bch_fs *c, struct closure *cl) { struct task_struct *old; @@ -427,7 +417,7 @@ int mca_cannibalize_lock(struct bch_fs *c, struct closure *cl) goto success; if (!cl) { - trace_bcache_mca_cannibalize_lock_fail(c); + trace_btree_node_cannibalize_lock_fail(c); return -ENOMEM; } @@ -441,11 +431,11 @@ int mca_cannibalize_lock(struct bch_fs *c, struct closure *cl) goto success; } - trace_bcache_mca_cannibalize_lock_fail(c); + trace_btree_node_cannibalize_lock_fail(c); return -EAGAIN; success: - trace_bcache_mca_cannibalize_lock(c); + trace_btree_node_cannibalize_lock(c); return 0; } @@ -471,7 +461,7 @@ static struct btree *mca_cannibalize(struct bch_fs *c) } } -struct btree *mca_alloc(struct bch_fs *c) +struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c) { struct btree *b; u64 start_time = local_clock(); @@ -521,9 +511,9 @@ out: b->sib_u64s[1] = 0; b->whiteout_u64s = 0; b->uncompacted_whiteout_u64s = 0; - bch_btree_keys_init(b, &c->expensive_debug_checks); + bch2_btree_keys_init(b, &c->expensive_debug_checks); - bch_time_stats_update(&c->mca_alloc_time, start_time); + bch2_time_stats_update(&c->btree_node_mem_alloc_time, start_time); return b; err: @@ -533,9 +523,9 @@ err: list_del_init(&b->list); mutex_unlock(&c->btree_cache_lock); - mca_hash_remove(c, b); + bch2_btree_node_hash_remove(c, b); - trace_bcache_mca_cannibalize(c); + trace_btree_node_cannibalize(c); goto out; } @@ -544,20 +534,20 @@ err: } /* Slowpath, don't want it inlined into btree_iter_traverse() */ -static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter, - const struct bkey_i *k, - unsigned 
level, - enum six_lock_type lock_type) +static noinline struct btree *bch2_btree_node_fill(struct btree_iter *iter, + const struct bkey_i *k, + unsigned level, + enum six_lock_type lock_type) { struct bch_fs *c = iter->c; struct btree *b; - b = mca_alloc(c); + b = bch2_btree_node_mem_alloc(c); if (IS_ERR(b)) return b; bkey_copy(&b->key, k); - if (mca_hash_insert(c, b, level, iter->btree_id)) { + if (bch2_btree_node_hash_insert(c, b, level, iter->btree_id)) { /* raced with another fill: */ /* mark as unhashed... */ @@ -584,7 +574,7 @@ static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter, if (btree_node_read_locked(iter, level + 1)) btree_node_unlock(iter, level + 1); - bch_btree_node_read(c, b); + bch2_btree_node_read(c, b); six_unlock_write(&b->lock); if (lock_type == SIX_LOCK_read) @@ -602,9 +592,9 @@ static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter, * The btree node will have either a read or a write lock held, depending on * the @write parameter. */ -struct btree *bch_btree_node_get(struct btree_iter *iter, - const struct bkey_i *k, unsigned level, - enum six_lock_type lock_type) +struct btree *bch2_btree_node_get(struct btree_iter *iter, + const struct bkey_i *k, unsigned level, + enum six_lock_type lock_type) { struct btree *b; struct bset_tree *t; @@ -617,11 +607,11 @@ retry: if (unlikely(!b)) { /* - * We must have the parent locked to call bch_btree_node_fill(), + * We must have the parent locked to call bch2_btree_node_fill(), * else we could read in a btree node from disk that's been * freed: */ - b = bch_btree_node_fill(iter, k, level, lock_type); + b = bch2_btree_node_fill(iter, k, level, lock_type); /* We raced and found the btree node in the cache */ if (!b) @@ -654,7 +644,7 @@ retry: * when they're freed - and PTR_HASH() is zeroed out, which we * check for after we lock the node. 
 *
- * Then, btree_node_relock() on the parent will fail - because
+ * Then, bch2_btree_node_relock() on the parent will fail - because
 * the parent was modified, when the pointer to the node we want
 * was removed - and we'll bail out:
 */
@@ -668,7 +658,7 @@ retry:
 		     b->level != level ||
 		     race_fault())) {
 		six_unlock_type(&b->lock, lock_type);
-		if (btree_node_relock(iter, level + 1))
+		if (bch2_btree_node_relock(iter, level + 1))
 			goto retry;
 
 		return ERR_PTR(-EINTR);
@@ -702,8 +692,8 @@ retry:
 	return b;
 }
 
-int bch_print_btree_node(struct bch_fs *c, struct btree *b,
-			 char *buf, size_t len)
+int bch2_print_btree_node(struct bch_fs *c, struct btree *b,
+			  char *buf, size_t len)
 {
 	const struct bkey_format *f = &b->format;
 	struct bset_stats stats;
@@ -711,9 +701,9 @@ int bch_print_btree_node(struct bch_fs *c, struct btree *b,
 
 	memset(&stats, 0, sizeof(stats));
 
-	bch_val_to_text(c, BKEY_TYPE_BTREE, ptrs, sizeof(ptrs),
+	bch2_val_to_text(c, BKEY_TYPE_BTREE, ptrs, sizeof(ptrs),
 			 bkey_i_to_s_c(&b->key));
-	bch_btree_keys_stats(b, &stats);
+	bch2_btree_keys_stats(b, &stats);
 
 	return scnprintf(buf, len,
 			 "l %u %llu:%llu - %llu:%llu:\n"
diff --git a/libbcache/btree_cache.h b/libbcachefs/btree_cache.h
index 0d1c00c4..23f637ab 100644
--- a/libbcache/btree_cache.h
+++ b/libbcachefs/btree_cache.h
@@ -1,29 +1,29 @@
 #ifndef _BCACHE_BTREE_CACHE_H
 #define _BCACHE_BTREE_CACHE_H
 
-#include "bcache.h"
+#include "bcachefs.h"
 #include "btree_types.h"
 
 struct btree_iter;
 
-extern const char * const bch_btree_ids[];
+extern const char * const bch2_btree_ids[];
 
-void bch_recalc_btree_reserve(struct bch_fs *);
+void bch2_recalc_btree_reserve(struct bch_fs *);
 
-void mca_hash_remove(struct bch_fs *, struct btree *);
-int mca_hash_insert(struct bch_fs *, struct btree *,
-		    unsigned, enum btree_id);
+void bch2_btree_node_hash_remove(struct bch_fs *, struct btree *);
+int bch2_btree_node_hash_insert(struct bch_fs *, struct btree *,
+				unsigned, enum btree_id);
 
-void mca_cannibalize_unlock(struct bch_fs *);
-int mca_cannibalize_lock(struct bch_fs *, struct closure *);
+void bch2_btree_node_cannibalize_unlock(struct bch_fs *);
+int bch2_btree_node_cannibalize_lock(struct bch_fs *, struct closure *);
 
-struct btree *mca_alloc(struct bch_fs *);
+struct btree *bch2_btree_node_mem_alloc(struct bch_fs *);
 
-struct btree *bch_btree_node_get(struct btree_iter *, const struct bkey_i *,
-				 unsigned, enum six_lock_type);
+struct btree *bch2_btree_node_get(struct btree_iter *, const struct bkey_i *,
+				  unsigned, enum six_lock_type);
 
-void bch_fs_btree_exit(struct bch_fs *);
-int bch_fs_btree_init(struct bch_fs *);
+void bch2_fs_btree_exit(struct bch_fs *);
+int bch2_fs_btree_init(struct bch_fs *);
 
 #define for_each_cached_btree(_b, _c, _tbl, _iter, _pos)		\
 	for ((_tbl) = rht_dereference_rcu((_c)->btree_cache_table.tbl,	\
@@ -65,7 +65,7 @@ static inline unsigned btree_blocks(struct bch_fs *c)
 
 #define btree_node_root(_c, _b) ((_c)->btree_roots[(_b)->btree_id].b)
 
-int bch_print_btree_node(struct bch_fs *, struct btree *,
+int bch2_print_btree_node(struct bch_fs *, struct btree *,
 			  char *, size_t);
 
 #endif /* _BCACHE_BTREE_CACHE_H */
diff --git a/libbcache/btree_gc.c b/libbcachefs/btree_gc.c
index 5270d442..0883b9b4 100644
--- a/libbcache/btree_gc.c
+++ b/libbcachefs/btree_gc.c
@@ -3,7 +3,7 @@
  * Copyright (C) 2014 Datera Inc.
*/ -#include "bcache.h" +#include "bcachefs.h" #include "alloc.h" #include "bkey_methods.h" #include "btree_locking.h" @@ -19,14 +19,13 @@ #include "keylist.h" #include "move.h" #include "super-io.h" -#include "writeback.h" #include <linux/slab.h> #include <linux/bitops.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/rcupdate.h> -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> struct range_checks { struct range_level { @@ -54,7 +53,7 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b, ? btree_type_successor(b->btree_id, l->max) : l->max; - bch_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c, + bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c, "btree node has incorrect min key: %llu:%llu != %llu:%llu", b->data->min_key.inode, b->data->min_key.offset, @@ -66,14 +65,14 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b, if (b->level > r->depth) { l = &r->l[b->level - 1]; - bch_fs_inconsistent_on(bkey_cmp(b->data->min_key, l->min), c, + bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, l->min), c, "btree node min doesn't match min of child nodes: %llu:%llu != %llu:%llu", b->data->min_key.inode, b->data->min_key.offset, l->min.inode, l->min.offset); - bch_fs_inconsistent_on(bkey_cmp(b->data->max_key, l->max), c, + bch2_fs_inconsistent_on(bkey_cmp(b->data->max_key, l->max), c, "btree node max doesn't match max of child nodes: %llu:%llu != %llu:%llu", b->data->max_key.inode, b->data->max_key.offset, @@ -87,7 +86,7 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b, } } -u8 bch_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k) +u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k) { const struct bch_extent_ptr *ptr; u8 max_stale = 0; @@ -112,29 +111,29 @@ u8 bch_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k) /* * For runtime mark and sweep: */ -static u8 bch_btree_mark_key(struct bch_fs *c, enum bkey_type type, +static u8 bch2_btree_mark_key(struct bch_fs *c, enum bkey_type type, struct bkey_s_c k) { switch (type) { case BKEY_TYPE_BTREE: - bch_gc_mark_key(c, k, c->sb.btree_node_size, true); + bch2_gc_mark_key(c, k, c->sb.btree_node_size, true); return 0; case BKEY_TYPE_EXTENTS: - bch_gc_mark_key(c, k, k.k->size, false); - return bch_btree_key_recalc_oldest_gen(c, k); + bch2_gc_mark_key(c, k, k.k->size, false); + return bch2_btree_key_recalc_oldest_gen(c, k); default: BUG(); } } -u8 bch_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type, - struct bkey_s_c k) +u8 bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type, + struct bkey_s_c k) { atomic64_set(&c->key_version, max_t(u64, k.k->version.lo, atomic64_read(&c->key_version))); - return bch_btree_mark_key(c, type, k); + return bch2_btree_mark_key(c, type, k); } static bool btree_gc_mark_node(struct bch_fs *c, struct btree *b) @@ -148,8 +147,8 @@ static bool btree_gc_mark_node(struct bch_fs *c, struct btree *b) for_each_btree_node_key_unpack(b, k, &iter, btree_node_is_extents(b), &unpacked) { - bkey_debugcheck(c, b, k); - stale = max(stale, bch_btree_mark_key(c, + bch2_bkey_debugcheck(c, b, k); + stale = max(stale, bch2_btree_mark_key(c, btree_node_type(b), k)); } @@ -179,7 +178,7 @@ static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos) __gc_pos_set(c, new_pos); } -static int bch_gc_btree(struct bch_fs *c, enum btree_id btree_id) +static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id) { struct 
btree_iter iter; struct btree *b; @@ -199,32 +198,32 @@ static int bch_gc_btree(struct bch_fs *c, enum btree_id btree_id) for_each_btree_node(&iter, c, btree_id, POS_MIN, depth, b) { btree_node_range_checks(c, b, &r); - bch_verify_btree_nr_keys(b); + bch2_verify_btree_nr_keys(b); should_rewrite = btree_gc_mark_node(c, b); gc_pos_set(c, gc_pos_btree_node(b)); if (should_rewrite) - bch_btree_node_rewrite(&iter, b, NULL); + bch2_btree_node_rewrite(&iter, b, NULL); - bch_btree_iter_cond_resched(&iter); + bch2_btree_iter_cond_resched(&iter); } - ret = bch_btree_iter_unlock(&iter); + ret = bch2_btree_iter_unlock(&iter); if (ret) return ret; mutex_lock(&c->btree_root_lock); b = c->btree_roots[btree_id].b; - bch_btree_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key)); + bch2_btree_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key)); gc_pos_set(c, gc_pos_btree_root(b->btree_id)); mutex_unlock(&c->btree_root_lock); return 0; } -static void bch_mark_allocator_buckets(struct bch_fs *c) +static void bch2_mark_allocator_buckets(struct bch_fs *c) { struct bch_dev *ca; struct open_bucket *ob; @@ -235,11 +234,11 @@ static void bch_mark_allocator_buckets(struct bch_fs *c) spin_lock(&ca->freelist_lock); fifo_for_each_entry(i, &ca->free_inc, iter) - bch_mark_alloc_bucket(ca, &ca->buckets[i], true); + bch2_mark_alloc_bucket(ca, &ca->buckets[i], true); for (j = 0; j < RESERVE_NR; j++) fifo_for_each_entry(i, &ca->free[j], iter) - bch_mark_alloc_bucket(ca, &ca->buckets[i], true); + bch2_mark_alloc_bucket(ca, &ca->buckets[i], true); spin_unlock(&ca->freelist_lock); } @@ -252,7 +251,7 @@ static void bch_mark_allocator_buckets(struct bch_fs *c) mutex_lock(&ob->lock); open_bucket_for_each_ptr(ob, ptr) { ca = c->devs[ptr->dev]; - bch_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), true); + bch2_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), true); } mutex_unlock(&ob->lock); } @@ -264,12 +263,12 @@ static void mark_metadata_sectors(struct bch_dev *ca, u64 start, u64 end, u64 b = start >> ca->bucket_bits; do { - bch_mark_metadata_bucket(ca, ca->buckets + b, type, true); + bch2_mark_metadata_bucket(ca, ca->buckets + b, type, true); b++; } while (b < end >> ca->bucket_bits); } -static void bch_dev_mark_superblocks(struct bch_dev *ca) +static void bch2_dev_mark_superblocks(struct bch_dev *ca) { struct bch_sb_layout *layout = &ca->disk_sb.sb->layout; unsigned i; @@ -290,20 +289,20 @@ static void bch_dev_mark_superblocks(struct bch_dev *ca) /* * Mark non btree metadata - prios, journal */ -void bch_mark_dev_metadata(struct bch_fs *c, struct bch_dev *ca) +void bch2_mark_dev_metadata(struct bch_fs *c, struct bch_dev *ca) { unsigned i; u64 b; lockdep_assert_held(&c->sb_lock); - bch_dev_mark_superblocks(ca); + bch2_dev_mark_superblocks(ca); spin_lock(&c->journal.lock); for (i = 0; i < ca->journal.nr; i++) { b = ca->journal.buckets[i]; - bch_mark_metadata_bucket(ca, ca->buckets + b, + bch2_mark_metadata_bucket(ca, ca->buckets + b, BUCKET_JOURNAL, true); } @@ -314,14 +313,14 @@ void bch_mark_dev_metadata(struct bch_fs *c, struct bch_dev *ca) for (i = 0; i < prio_buckets(ca) * 2; i++) { b = ca->prio_buckets[i]; if (b) - bch_mark_metadata_bucket(ca, ca->buckets + b, + bch2_mark_metadata_bucket(ca, ca->buckets + b, BUCKET_PRIOS, true); } spin_unlock(&ca->prio_buckets_lock); } -static void bch_mark_metadata(struct bch_fs *c) +static void bch2_mark_metadata(struct bch_fs *c) { struct bch_dev *ca; unsigned i; @@ -330,12 +329,12 @@ static void bch_mark_metadata(struct bch_fs *c) gc_pos_set(c, gc_phase(GC_PHASE_SB_METADATA)); 
for_each_online_member(ca, c, i) - bch_mark_dev_metadata(c, ca); + bch2_mark_dev_metadata(c, ca); mutex_unlock(&c->sb_lock); } -/* Also see bch_pending_btree_node_free_insert_done() */ -static void bch_mark_pending_btree_node_frees(struct bch_fs *c) +/* Also see bch2_pending_btree_node_free_insert_done() */ +static void bch2_mark_pending_btree_node_frees(struct bch_fs *c) { struct bch_fs_usage stats = { 0 }; struct btree_interior_update *as; @@ -346,7 +345,7 @@ static void bch_mark_pending_btree_node_frees(struct bch_fs *c) for_each_pending_btree_node_free(c, as, d) if (d->index_update_done) - __bch_gc_mark_key(c, bkey_i_to_s_c(&d->key), + __bch2_gc_mark_key(c, bkey_i_to_s_c(&d->key), c->sb.btree_node_size, true, &stats); /* @@ -360,7 +359,7 @@ static void bch_mark_pending_btree_node_frees(struct bch_fs *c) /** * bch_gc - recompute bucket marks and oldest_gen, rewrite btree nodes */ -void bch_gc(struct bch_fs *c) +void bch2_gc(struct bch_fs *c) { struct bch_dev *ca; struct bucket *g; @@ -391,13 +390,13 @@ void bch_gc(struct bch_fs *c) if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) return; - trace_bcache_gc_start(c); + trace_gc_start(c); /* - * Do this before taking gc_lock - bch_disk_reservation_get() blocks on + * Do this before taking gc_lock - bch2_disk_reservation_get() blocks on * gc_lock if sectors_available goes to 0: */ - bch_recalc_sectors_available(c); + bch2_recalc_sectors_available(c); down_write(&c->gc_lock); @@ -405,13 +404,13 @@ void bch_gc(struct bch_fs *c) /* * Indicates to buckets code that gc is now in progress - done under - * usage_lock to avoid racing with bch_mark_key(): + * usage_lock to avoid racing with bch2_mark_key(): */ __gc_pos_set(c, GC_POS_MIN); /* Save a copy of the existing bucket stats while we recompute them: */ for_each_member_device(ca, c, i) { - ca->usage_cached = __bch_dev_usage_read(ca); + ca->usage_cached = __bch2_dev_usage_read(ca); for_each_possible_cpu(cpu) { struct bch_dev_usage *p = per_cpu_ptr(ca->usage_percpu, cpu); @@ -419,7 +418,7 @@ void bch_gc(struct bch_fs *c) } } - c->usage_cached = __bch_fs_usage_read(c); + c->usage_cached = __bch2_fs_usage_read(c); for_each_possible_cpu(cpu) { struct bch_fs_usage *p = per_cpu_ptr(c->usage_percpu, cpu); @@ -443,12 +442,12 @@ void bch_gc(struct bch_fs *c) } /* Walk allocator's references: */ - bch_mark_allocator_buckets(c); + bch2_mark_allocator_buckets(c); /* Walk btree: */ while (c->gc_pos.phase < (int) BTREE_ID_NR) { int ret = c->btree_roots[c->gc_pos.phase].b - ? bch_gc_btree(c, (int) c->gc_pos.phase) + ? 
bch2_gc_btree(c, (int) c->gc_pos.phase) : 0; if (ret) { @@ -461,9 +460,8 @@ void bch_gc(struct bch_fs *c) gc_pos_set(c, gc_phase(c->gc_pos.phase + 1)); } - bch_mark_metadata(c); - bch_mark_pending_btree_node_frees(c); - bch_writeback_recalc_oldest_gens(c); + bch2_mark_metadata(c); + bch2_mark_pending_btree_node_frees(c); for_each_member_device(ca, c, i) atomic_long_set(&ca->saturated_count, 0); @@ -472,15 +470,15 @@ void bch_gc(struct bch_fs *c) gc_pos_set(c, gc_phase(GC_PHASE_DONE)); up_write(&c->gc_lock); - trace_bcache_gc_end(c); - bch_time_stats_update(&c->btree_gc_time, start_time); + trace_gc_end(c); + bch2_time_stats_update(&c->btree_gc_time, start_time); /* * Wake up allocator in case it was waiting for buckets * because of not being able to inc gens */ for_each_member_device(ca, c, i) - bch_wake_allocator(ca); + bch2_wake_allocator(ca); } /* Btree coalescing */ @@ -499,8 +497,8 @@ static void recalc_packed_keys(struct btree *b) btree_keys_account_key_add(&b->nr, 0, k); } -static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], - struct btree_iter *iter) +static void bch2_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], + struct btree_iter *iter) { struct btree *parent = iter->nodes[old_nodes[0]->level + 1]; struct bch_fs *c = iter->c; @@ -514,7 +512,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], struct bkey_format new_format; memset(new_nodes, 0, sizeof(new_nodes)); - bch_keylist_init(&keylist, NULL, 0); + bch2_keylist_init(&keylist, NULL, 0); /* Count keys that are not deleted */ for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++) @@ -528,50 +526,51 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks) return; - res = bch_btree_reserve_get(c, parent, nr_old_nodes, + res = bch2_btree_reserve_get(c, parent, nr_old_nodes, BTREE_INSERT_NOFAIL| BTREE_INSERT_USE_RESERVE, NULL); if (IS_ERR(res)) { - trace_bcache_btree_gc_coalesce_fail(c, + trace_btree_gc_coalesce_fail(c, BTREE_GC_COALESCE_FAIL_RESERVE_GET); return; } - if (bch_keylist_realloc(&keylist, NULL, 0, + if (bch2_keylist_realloc(&keylist, NULL, 0, (BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) { - trace_bcache_btree_gc_coalesce_fail(c, + trace_btree_gc_coalesce_fail(c, BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC); goto out; } /* Find a format that all keys in @old_nodes can pack into */ - bch_bkey_format_init(&format_state); + bch2_bkey_format_init(&format_state); for (i = 0; i < nr_old_nodes; i++) - __bch_btree_calc_format(&format_state, old_nodes[i]); + __bch2_btree_calc_format(&format_state, old_nodes[i]); - new_format = bch_bkey_format_done(&format_state); + new_format = bch2_bkey_format_done(&format_state); /* Check if repacking would make any nodes too big to fit */ for (i = 0; i < nr_old_nodes; i++) - if (!bch_btree_node_format_fits(c, old_nodes[i], &new_format)) { - trace_bcache_btree_gc_coalesce_fail(c, + if (!bch2_btree_node_format_fits(c, old_nodes[i], &new_format)) { + trace_btree_gc_coalesce_fail(c, BTREE_GC_COALESCE_FAIL_FORMAT_FITS); goto out; } - trace_bcache_btree_gc_coalesce(c, parent, nr_old_nodes); + trace_btree_gc_coalesce(c, parent, nr_old_nodes); - as = bch_btree_interior_update_alloc(c); + as = bch2_btree_interior_update_alloc(c); for (i = 0; i < nr_old_nodes; i++) - bch_btree_interior_update_will_free_node(c, as, old_nodes[i]); + bch2_btree_interior_update_will_free_node(c, as, old_nodes[i]); /* Repack everything with @new_format and sort down to one bset */ for (i = 0; i < 
nr_old_nodes; i++) - new_nodes[i] = __btree_node_alloc_replacement(c, old_nodes[i], - new_format, res); + new_nodes[i] = + __bch2_btree_node_alloc_replacement(c, old_nodes[i], + new_format, res); /* * Conceptually we concatenate the nodes together and slice them @@ -609,7 +608,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], set_btree_bset_end(n1, n1->set); six_unlock_write(&n2->lock); - bch_btree_node_free_never_inserted(c, n2); + bch2_btree_node_free_never_inserted(c, n2); six_unlock_intent(&n2->lock); memmove(new_nodes + i - 1, @@ -645,10 +644,10 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], recalc_packed_keys(n); btree_node_reset_sib_u64s(n); - bch_btree_build_aux_trees(n); + bch2_btree_build_aux_trees(n); six_unlock_write(&n->lock); - bch_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1); + bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1); } /* @@ -670,35 +669,35 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES], bkey_init(&delete.k); delete.k.p = old_nodes[i]->key.k.p; - bch_keylist_add_in_order(&keylist, &delete); + bch2_keylist_add_in_order(&keylist, &delete); next: i = i; } /* - * Keys for the new nodes get inserted: bch_btree_insert_keys() only + * Keys for the new nodes get inserted: bch2_btree_insert_keys() only * does the lookup once and thus expects the keys to be in sorted order * so we have to make sure the new keys are correctly ordered with * respect to the deleted keys added in the previous loop */ for (i = 0; i < nr_new_nodes; i++) - bch_keylist_add_in_order(&keylist, &new_nodes[i]->key); + bch2_keylist_add_in_order(&keylist, &new_nodes[i]->key); /* Insert the newly coalesced nodes */ - bch_btree_insert_node(parent, iter, &keylist, res, as); + bch2_btree_insert_node(parent, iter, &keylist, res, as); - BUG_ON(!bch_keylist_empty(&keylist)); + BUG_ON(!bch2_keylist_empty(&keylist)); BUG_ON(iter->nodes[old_nodes[0]->level] != old_nodes[0]); - BUG_ON(!bch_btree_iter_node_replace(iter, new_nodes[0])); + BUG_ON(!bch2_btree_iter_node_replace(iter, new_nodes[0])); for (i = 0; i < nr_new_nodes; i++) - btree_open_bucket_put(c, new_nodes[i]); + bch2_btree_open_bucket_put(c, new_nodes[i]); /* Free the old nodes and update our sliding window */ for (i = 0; i < nr_old_nodes; i++) { - bch_btree_node_free_inmem(iter, old_nodes[i]); + bch2_btree_node_free_inmem(iter, old_nodes[i]); six_unlock_intent(&old_nodes[i]->lock); /* @@ -716,11 +715,11 @@ next: } } out: - bch_keylist_free(&keylist, NULL); - bch_btree_reserve_put(c, res); + bch2_keylist_free(&keylist, NULL); + bch2_btree_reserve_put(c, res); } -static int bch_coalesce_btree(struct bch_fs *c, enum btree_id btree_id) +static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id) { struct btree_iter iter; struct btree *b; @@ -759,7 +758,7 @@ static int bch_coalesce_btree(struct bch_fs *c, enum btree_id btree_id) } memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0])); - bch_coalesce_nodes(merge, &iter); + bch2_coalesce_nodes(merge, &iter); for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) { lock_seq[i] = merge[i]->lock.state.seq; @@ -769,11 +768,11 @@ static int bch_coalesce_btree(struct bch_fs *c, enum btree_id btree_id) lock_seq[0] = merge[0]->lock.state.seq; if (test_bit(BCH_FS_GC_STOPPING, &c->flags)) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return -ESHUTDOWN; } - bch_btree_iter_cond_resched(&iter); + bch2_btree_iter_cond_resched(&iter); /* * If the parent node wasn't relocked, it might have been 
split @@ -785,13 +784,13 @@ static int bch_coalesce_btree(struct bch_fs *c, enum btree_id btree_id) memset(merge + 1, 0, (GC_MERGE_NODES - 1) * sizeof(merge[0])); } - return bch_btree_iter_unlock(&iter); + return bch2_btree_iter_unlock(&iter); } /** * bch_coalesce - coalesce adjacent nodes with low occupancy */ -void bch_coalesce(struct bch_fs *c) +void bch2_coalesce(struct bch_fs *c) { u64 start_time; enum btree_id id; @@ -800,12 +799,12 @@ void bch_coalesce(struct bch_fs *c) return; down_read(&c->gc_lock); - trace_bcache_gc_coalesce_start(c); + trace_gc_coalesce_start(c); start_time = local_clock(); for (id = 0; id < BTREE_ID_NR; id++) { int ret = c->btree_roots[id].b - ? bch_coalesce_btree(c, id) + ? bch2_coalesce_btree(c, id) : 0; if (ret) { @@ -816,12 +815,12 @@ void bch_coalesce(struct bch_fs *c) } } - bch_time_stats_update(&c->btree_coalesce_time, start_time); - trace_bcache_gc_coalesce_end(c); + bch2_time_stats_update(&c->btree_coalesce_time, start_time); + trace_gc_coalesce_end(c); up_read(&c->gc_lock); } -static int bch_gc_thread(void *arg) +static int bch2_gc_thread(void *arg) { struct bch_fs *c = arg; struct io_clock *clock = &c->io_clock[WRITE]; @@ -846,16 +845,16 @@ static int bch_gc_thread(void *arg) break; } - bch_io_clock_schedule_timeout(clock, next); + bch2_io_clock_schedule_timeout(clock, next); try_to_freeze(); } last = atomic_long_read(&clock->now); last_kick = atomic_read(&c->kick_gc); - bch_gc(c); + bch2_gc(c); if (!btree_gc_coalesce_disabled(c)) - bch_coalesce(c); + bch2_coalesce(c); debug_check_no_locks_held(); } @@ -863,7 +862,7 @@ static int bch_gc_thread(void *arg) return 0; } -void bch_gc_thread_stop(struct bch_fs *c) +void bch2_gc_thread_stop(struct bch_fs *c) { set_bit(BCH_FS_GC_STOPPING, &c->flags); @@ -874,13 +873,13 @@ void bch_gc_thread_stop(struct bch_fs *c) clear_bit(BCH_FS_GC_STOPPING, &c->flags); } -int bch_gc_thread_start(struct bch_fs *c) +int bch2_gc_thread_start(struct bch_fs *c) { struct task_struct *p; BUG_ON(c->gc_thread); - p = kthread_create(bch_gc_thread, c, "bcache_gc"); + p = kthread_create(bch2_gc_thread, c, "bcache_gc"); if (IS_ERR(p)) return PTR_ERR(p); @@ -891,7 +890,7 @@ int bch_gc_thread_start(struct bch_fs *c) /* Initial GC computes bucket marks during startup */ -static void bch_initial_gc_btree(struct bch_fs *c, enum btree_id id) +static void bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id) { struct btree_iter iter; struct btree *b; @@ -917,29 +916,29 @@ static void bch_initial_gc_btree(struct bch_fs *c, enum btree_id id) for_each_btree_node_key_unpack(b, k, &node_iter, btree_node_is_extents(b), &unpacked) - bch_btree_mark_key_initial(c, btree_node_type(b), k); + bch2_btree_mark_key_initial(c, btree_node_type(b), k); } - bch_btree_iter_cond_resched(&iter); + bch2_btree_iter_cond_resched(&iter); } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); - bch_btree_mark_key(c, BKEY_TYPE_BTREE, + bch2_btree_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&c->btree_roots[id].b->key)); } -int bch_initial_gc(struct bch_fs *c, struct list_head *journal) +int bch2_initial_gc(struct bch_fs *c, struct list_head *journal) { enum btree_id id; for (id = 0; id < BTREE_ID_NR; id++) - bch_initial_gc_btree(c, id); + bch2_initial_gc_btree(c, id); if (journal) - bch_journal_mark(c, journal); + bch2_journal_mark(c, journal); - bch_mark_metadata(c); + bch2_mark_metadata(c); /* * Skip past versions that might have possibly been used (as nonces), diff --git a/libbcache/btree_gc.h b/libbcachefs/btree_gc.h index f1794fdf..07210d33 100644 --- 
a/libbcache/btree_gc.h
+++ b/libbcachefs/btree_gc.h
@@ -5,15 +5,15 @@
 
 enum bkey_type;
 
-void bch_coalesce(struct bch_fs *);
-void bch_gc(struct bch_fs *);
-void bch_gc_thread_stop(struct bch_fs *);
-int bch_gc_thread_start(struct bch_fs *);
-int bch_initial_gc(struct bch_fs *, struct list_head *);
-u8 bch_btree_key_recalc_oldest_gen(struct bch_fs *, struct bkey_s_c);
-u8 bch_btree_mark_key_initial(struct bch_fs *, enum bkey_type,
+void bch2_coalesce(struct bch_fs *);
+void bch2_gc(struct bch_fs *);
+void bch2_gc_thread_stop(struct bch_fs *);
+int bch2_gc_thread_start(struct bch_fs *);
+int bch2_initial_gc(struct bch_fs *, struct list_head *);
+u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *, struct bkey_s_c);
+u8 bch2_btree_mark_key_initial(struct bch_fs *, enum bkey_type,
 			      struct bkey_s_c);
-void bch_mark_dev_metadata(struct bch_fs *, struct bch_dev *);
+void bch2_mark_dev_metadata(struct bch_fs *, struct bch_dev *);
 
 /*
  * For concurrent mark and sweep (with other index updates), we define a total
@@ -28,7 +28,7 @@ void bch_mark_dev_metadata(struct bch_fs *, struct bch_dev *);
 * between the updater adding/removing the reference and updating the GC marks;
 * without that, we would at best double count sometimes.
 *
- * That part is important - whenever calling bch_mark_pointers(), a lock _must_
+ * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
 * be held that prevents GC from passing the position the updater is at.
 *
 * (What about the start of gc, when we're clearing all the marks? GC clears the
diff --git a/libbcache/btree_io.c b/libbcachefs/btree_io.c
index 737e54ec..728cbcd9 100644
--- a/libbcache/btree_io.c
+++ b/libbcachefs/btree_io.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
 #include "bkey_methods.h"
 #include "btree_cache.h"
 #include "btree_update.h"
@@ -15,13 +15,13 @@
 #include "journal.h"
 #include "super-io.h"
 
-#include <trace/events/bcache.h>
+#include <trace/events/bcachefs.h>
 
 static void verify_no_dups(struct btree *b,
 			   struct bkey_packed *start,
 			   struct bkey_packed *end)
 {
-#ifdef CONFIG_BCACHE_DEBUG
+#ifdef CONFIG_BCACHEFS_DEBUG
 	struct bkey_packed *k;
 
 	for (k = start; k != end && bkey_next(k) != end; k = bkey_next(k)) {
@@ -231,16 +231,16 @@ static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
 
 		BUG_ON(new_size < l.k.size);
 
-		bch_key_resize(&l.k, new_size);
+		bch2_key_resize(&l.k, new_size);
 
 		if (bkey_cmp(l.k.p, r.k.p) >= 0)
 			continue;
 
-		bch_cut_front(l.k.p, &r);
+		bch2_cut_front(l.k.p, &r);
 	}
 
 	if (prev) {
-		if (!bkey_pack(out, &l, f)) {
+		if (!bch2_bkey_pack(out, &l, f)) {
 			BUG_ON(l_packed);
 			bkey_copy(out, &l);
 		}
@@ -253,7 +253,7 @@ static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
 	}
 
 	if (prev) {
-		if (!bkey_pack(out, &l, f)) {
+		if (!bch2_bkey_pack(out, &l, f)) {
 			BUG_ON(l_packed);
 			bkey_copy(out, &l);
 		}
@@ -285,7 +285,7 @@ static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
 	return 0;
 }
 
-bool __bch_compact_whiteouts(struct bch_fs *c, struct btree *b,
+bool __bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
 			     enum compact_mode mode)
 {
 	const struct bkey_format *f = &b->format;
@@ -377,7 +377,7 @@ bool __bch_compact_whiteouts(struct bch_fs *c, struct btree *b,
 		if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
 			i->u64s = cpu_to_le16((u64 *) out - i->_data);
 			set_btree_bset_end(b, t);
-			bch_bset_set_no_aux_tree(b, t);
+			bch2_bset_set_no_aux_tree(b, t);
 		}
 	}
 
@@ -410,15 +410,15 @@ bool __bch_compact_whiteouts(struct bch_fs *c, struct btree *b,
 	btree_bounce_free(c, order, used_mempool, whiteouts);
 
 	if
(mode != COMPACT_WRITTEN_NO_WRITE_LOCK) - bch_btree_build_aux_trees(b); + bch2_btree_build_aux_trees(b); bch_btree_keys_u64s_remaining(c, b); - bch_verify_btree_nr_keys(b); + bch2_verify_btree_nr_keys(b); return true; } -static bool bch_drop_whiteouts(struct btree *b) +static bool bch2_drop_whiteouts(struct btree *b) { struct bset_tree *t; bool ret = false; @@ -456,11 +456,11 @@ static bool bch_drop_whiteouts(struct btree *b) } i->u64s = cpu_to_le16((u64 *) out - i->_data); - bch_bset_set_no_aux_tree(b, t); + bch2_bset_set_no_aux_tree(b, t); ret = true; } - bch_verify_btree_nr_keys(b); + bch2_verify_btree_nr_keys(b); return ret; } @@ -593,7 +593,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b, BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order)); if (sorting_entire_node) - bch_time_stats_update(&c->btree_sort_time, start_time); + bch2_time_stats_update(&c->btree_sort_time, start_time); /* Make sure we preserve bset journal_seq: */ for (t = b->set + start_idx + 1; @@ -639,11 +639,11 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b, b->nr.bset_u64s[i] = 0; set_btree_bset_end(b, &b->set[start_idx]); - bch_bset_set_no_aux_tree(b, &b->set[start_idx]); + bch2_bset_set_no_aux_tree(b, &b->set[start_idx]); btree_bounce_free(c, order, used_mempool, out); - bch_verify_btree_nr_keys(b); + bch2_verify_btree_nr_keys(b); } /* Sort + repack in a new format: */ @@ -659,15 +659,15 @@ static struct btree_nr_keys sort_repack(struct bset *dst, memset(&nr, 0, sizeof(nr)); - while ((in = bch_btree_node_iter_next_all(src_iter, src))) { + while ((in = bch2_btree_node_iter_next_all(src_iter, src))) { if (filter_whiteouts && bkey_whiteout(in)) continue; - if (bch_bkey_transform(out_f, out, bkey_packed(in) - ? in_f : &bch_bkey_format_current, in)) + if (bch2_bkey_transform(out_f, out, bkey_packed(in) + ? 
in_f : &bch2_bkey_format_current, in)) out->format = KEY_FORMAT_LOCAL_BTREE; else - bkey_unpack(src, (void *) out, in); + bch2_bkey_unpack(src, (void *) out, in); btree_keys_account_key_add(&nr, 0, out); out = bkey_next(out); @@ -693,7 +693,7 @@ static struct btree_nr_keys sort_repack_merge(struct bch_fs *c, memset(&nr, 0, sizeof(nr)); - while ((k = bch_btree_node_iter_next_all(iter, src))) { + while ((k = bch2_btree_node_iter_next_all(iter, src))) { if (filter_whiteouts && bkey_whiteout(k)) continue; @@ -701,7 +701,7 @@ static struct btree_nr_keys sort_repack_merge(struct bch_fs *c, * The filter might modify pointers, so we have to unpack the * key and values to &tmp.k: */ - bkey_unpack(src, &tmp.k, k); + bch2_bkey_unpack(src, &tmp.k, k); if (filter && filter(c, src, bkey_i_to_s(&tmp.k))) continue; @@ -718,7 +718,7 @@ static struct btree_nr_keys sort_repack_merge(struct bch_fs *c, * copy the current key - but first pack prev (in place): */ if (prev) { - bkey_pack(prev, (void *) prev, out_f); + bch2_bkey_pack(prev, (void *) prev, out_f); btree_keys_account_key_add(&nr, 0, prev); prev = bkey_next(prev); @@ -730,7 +730,7 @@ static struct btree_nr_keys sort_repack_merge(struct bch_fs *c, } if (prev) { - bkey_pack(prev, (void *) prev, out_f); + bch2_bkey_pack(prev, (void *) prev, out_f); btree_keys_account_key_add(&nr, 0, prev); out = bkey_next(prev); } else { @@ -741,7 +741,7 @@ static struct btree_nr_keys sort_repack_merge(struct bch_fs *c, return nr; } -void bch_btree_sort_into(struct bch_fs *c, +void bch2_btree_sort_into(struct bch_fs *c, struct btree *dst, struct btree *src) { @@ -751,9 +751,9 @@ void bch_btree_sort_into(struct bch_fs *c, BUG_ON(dst->nsets != 1); - bch_bset_set_no_aux_tree(dst, dst->set); + bch2_bset_set_no_aux_tree(dst, dst->set); - bch_btree_node_iter_init_from_start(&src_iter, src, + bch2_btree_node_iter_init_from_start(&src_iter, src, btree_node_is_extents(src)); if (btree_node_ops(src)->key_normalize || @@ -770,7 +770,7 @@ void bch_btree_sort_into(struct bch_fs *c, &dst->format, true); - bch_time_stats_update(&c->btree_sort_time, start_time); + bch2_time_stats_update(&c->btree_sort_time, start_time); set_btree_bset_end(dst, dst->set); @@ -779,7 +779,7 @@ void bch_btree_sort_into(struct bch_fs *c, dst->nr.packed_keys += nr.packed_keys; dst->nr.unpacked_keys += nr.unpacked_keys; - bch_verify_btree_nr_keys(dst); + bch2_verify_btree_nr_keys(dst); } #define SORT_CRIT (4096 / sizeof(u64)) @@ -814,12 +814,12 @@ static bool btree_node_compact(struct bch_fs *c, struct btree *b, return ret; } -void bch_btree_build_aux_trees(struct btree *b) +void bch2_btree_build_aux_trees(struct btree *b) { struct bset_tree *t; for_each_bset(b, t) - bch_bset_build_aux_tree(b, t, + bch2_bset_build_aux_tree(b, t, bset_unwritten(b, bset(b, t)) && t == bset_tree_last(b)); } @@ -833,7 +833,7 @@ void bch_btree_build_aux_trees(struct btree *b) * * Returns true if we sorted (i.e. 
invalidated iterators */ -void bch_btree_init_next(struct bch_fs *c, struct btree *b, +void bch2_btree_init_next(struct bch_fs *c, struct btree *b, struct btree_iter *iter) { struct btree_node_entry *bne; @@ -846,12 +846,12 @@ void bch_btree_init_next(struct bch_fs *c, struct btree *b, bne = want_new_bset(c, b); if (bne) - bch_bset_init_next(b, &bne->keys); + bch2_bset_init_next(b, &bne->keys); - bch_btree_build_aux_trees(b); + bch2_btree_build_aux_trees(b); if (iter && did_sort) - bch_btree_iter_reinit_node(iter, b); + bch2_btree_iter_reinit_node(iter, b); } static struct nonce btree_nonce(struct btree *b, @@ -868,12 +868,12 @@ static struct nonce btree_nonce(struct btree *b, static void bset_encrypt(struct bch_fs *c, struct bset *i, struct nonce nonce) { - bch_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data, + bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data, vstruct_end(i) - (void *) i->_data); } #define btree_node_error(b, c, ptr, fmt, ...) \ - bch_fs_inconsistent(c, \ + bch2_fs_inconsistent(c, \ "btree node error at btree %u level %u/%u bucket %zu block %u u64s %u: " fmt,\ (b)->btree_id, (b)->level, btree_node_root(c, b) \ ? btree_node_root(c, b)->level : -1, \ @@ -938,15 +938,15 @@ static const char *validate_bset(struct bch_fs *c, struct btree *b, } if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) - bch_bkey_swab(btree_node_type(b), &b->format, k); + bch2_bkey_swab(btree_node_type(b), &b->format, k); u = bkey_disassemble(b, k, &tmp); - invalid = btree_bkey_invalid(c, b, u); + invalid = bch2_btree_bkey_invalid(c, b, u); if (invalid) { char buf[160]; - bch_bkey_val_to_text(c, btree_node_type(b), + bch2_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), u); btree_node_error(b, c, ptr, "invalid bkey %s: %s", buf, invalid); @@ -999,7 +999,7 @@ static bool extent_contains_ptr(struct bkey_s_c_extent e, return false; } -void bch_btree_node_read_done(struct bch_fs *c, struct btree *b, +void bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, struct bch_dev *ca, const struct bch_extent_ptr *ptr) { @@ -1015,10 +1015,10 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b, int ret; iter = mempool_alloc(&c->fill_iter, GFP_NOIO); - __bch_btree_node_iter_init(iter, btree_node_is_extents(b)); + __bch2_btree_node_iter_init(iter, btree_node_is_extents(b)); err = "dynamic fault"; - if (bch_meta_read_fault("btree")) + if (bch2_meta_read_fault("btree")) goto err; while (b->written < c->sb.btree_node_size) { @@ -1036,7 +1036,7 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b, goto err; err = "unknown checksum type"; - if (!bch_checksum_type_valid(c, BSET_CSUM_TYPE(i))) + if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) goto err; /* XXX: retry checksum errors */ @@ -1045,10 +1045,10 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b, csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data); err = "bad checksum"; - if (bch_crc_cmp(csum, b->data->csum)) + if (bch2_crc_cmp(csum, b->data->csum)) goto err; - bch_encrypt(c, BSET_CSUM_TYPE(i), nonce, + bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &b->data->flags, (void *) &b->data->keys - (void *) &b->data->flags); @@ -1064,8 +1064,8 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b, u64 *p = (u64 *) &b->data->ptr; *p = swab64(*p); - bch_bpos_swab(&b->data->min_key); - bch_bpos_swab(&b->data->max_key); + bch2_bpos_swab(&b->data->min_key); + bch2_bpos_swab(&b->data->max_key); } err = "incorrect btree id"; @@ -1085,7 +1085,7 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree 
*b, b->data->ptr)) goto err; - err = bch_bkey_format_validate(&b->data->format); + err = bch2_bkey_format_validate(&b->data->format); if (err) goto err; @@ -1100,7 +1100,7 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b, break; err = "unknown checksum type"; - if (!bch_checksum_type_valid(c, BSET_CSUM_TYPE(i))) + if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) goto err; nonce = btree_nonce(b, i, b->written << 9); @@ -1122,18 +1122,18 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b, b->written += sectors; err = "insufficient memory"; - ret = bch_journal_seq_should_ignore(c, le64_to_cpu(i->journal_seq), b); + ret = bch2_journal_seq_should_ignore(c, le64_to_cpu(i->journal_seq), b); if (ret < 0) goto err; if (ret) continue; - __bch_btree_node_iter_push(iter, b, + __bch2_btree_node_iter_push(iter, b, i->start, vstruct_idx(i, whiteout_u64s)); - __bch_btree_node_iter_push(iter, b, + __bch2_btree_node_iter_push(iter, b, vstruct_idx(i, whiteout_u64s), vstruct_last(i)); } @@ -1149,8 +1149,8 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b, sorted->keys.u64s = 0; b->nr = btree_node_is_extents(b) - ? bch_extent_sort_fix_overlapping(c, &sorted->keys, b, iter) - : bch_key_sort_fix_overlapping(&sorted->keys, b, iter); + ? bch2_extent_sort_fix_overlapping(c, &sorted->keys, b, iter) + : bch2_key_sort_fix_overlapping(&sorted->keys, b, iter); u64s = le16_to_cpu(sorted->keys.u64s); *sorted = *b->data; @@ -1163,7 +1163,7 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b, btree_bounce_free(c, ilog2(btree_pages(c)), used_mempool, sorted); - bch_bset_build_aux_tree(b, b->set, false); + bch2_bset_build_aux_tree(b, b->set, false); set_needs_whiteout(btree_bset_first(b)); @@ -1177,24 +1177,16 @@ err: goto out; } -static void btree_node_read_endio(struct bio *bio) -{ - closure_put(bio->bi_private); -} - -void bch_btree_node_read(struct bch_fs *c, struct btree *b) +void bch2_btree_node_read(struct bch_fs *c, struct btree *b) { uint64_t start_time = local_clock(); - struct closure cl; struct bio *bio; struct extent_pick_ptr pick; - trace_bcache_btree_read(c, b); + trace_btree_read(c, b); - closure_init_stack(&cl); - - pick = bch_btree_pick_ptr(c, b); - if (bch_fs_fatal_err_on(!pick.ca, c, + pick = bch2_btree_pick_ptr(c, b); + if (bch2_fs_fatal_err_on(!pick.ca, c, "no cache device for btree node")) { set_btree_node_read_error(b); return; @@ -1204,32 +1196,27 @@ void bch_btree_node_read(struct bch_fs *c, struct btree *b) bio->bi_bdev = pick.ca->disk_sb.bdev; bio->bi_iter.bi_sector = pick.ptr.offset; bio->bi_iter.bi_size = btree_bytes(c); - bio->bi_end_io = btree_node_read_endio; - bio->bi_private = &cl; bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC); + bch2_bio_map(bio, b->data); - bch_bio_map(bio, b->data); - - closure_get(&cl); - bch_generic_make_request(bio, c); - closure_sync(&cl); + submit_bio_wait(bio); - if (bch_dev_fatal_io_err_on(bio->bi_error, + if (bch2_dev_fatal_io_err_on(bio->bi_error, pick.ca, "IO error reading bucket %zu", PTR_BUCKET_NR(pick.ca, &pick.ptr)) || - bch_meta_read_fault("btree")) { + bch2_meta_read_fault("btree")) { set_btree_node_read_error(b); goto out; } - bch_btree_node_read_done(c, b, pick.ca, &pick.ptr); - bch_time_stats_update(&c->btree_read_time, start_time); + bch2_btree_node_read_done(c, b, pick.ca, &pick.ptr); + bch2_time_stats_update(&c->btree_read_time, start_time); out: bio_put(bio); percpu_ref_put(&pick.ca->io_ref); } -int bch_btree_root_read(struct bch_fs *c, enum btree_id id, +int 
bch2_btree_root_read(struct bch_fs *c, enum btree_id id, const struct bkey_i *k, unsigned level) { struct closure cl; @@ -1239,19 +1226,19 @@ int bch_btree_root_read(struct bch_fs *c, enum btree_id id, closure_init_stack(&cl); do { - ret = mca_cannibalize_lock(c, &cl); + ret = bch2_btree_node_cannibalize_lock(c, &cl); closure_sync(&cl); } while (ret); - b = mca_alloc(c); - mca_cannibalize_unlock(c); + b = bch2_btree_node_mem_alloc(c); + bch2_btree_node_cannibalize_unlock(c); BUG_ON(IS_ERR(b)); bkey_copy(&b->key, k); - BUG_ON(mca_hash_insert(c, b, level, id)); + BUG_ON(bch2_btree_node_hash_insert(c, b, level, id)); - bch_btree_node_read(c, b); + bch2_btree_node_read(c, b); six_unlock_write(&b->lock); if (btree_node_read_error(b)) { @@ -1259,16 +1246,16 @@ int bch_btree_root_read(struct bch_fs *c, enum btree_id id, return -EIO; } - bch_btree_set_root_initial(c, b, NULL); + bch2_btree_set_root_initial(c, b, NULL); six_unlock_intent(&b->lock); return 0; } -void bch_btree_complete_write(struct bch_fs *c, struct btree *b, +void bch2_btree_complete_write(struct bch_fs *c, struct btree *b, struct btree_write *w) { - bch_journal_pin_drop(&c->journal, &w->journal); + bch2_journal_pin_drop(&c->journal, &w->journal); closure_wake_up(&w->wait); } @@ -1277,14 +1264,14 @@ static void btree_node_write_done(struct bch_fs *c, struct btree *b) struct btree_write *w = btree_prev_write(b); /* - * Before calling bch_btree_complete_write() - if the write errored, we + * Before calling bch2_btree_complete_write() - if the write errored, we * have to halt new journal writes before they see this btree node * write as completed: */ if (btree_node_write_error(b)) - bch_journal_halt(&c->journal); + bch2_journal_halt(&c->journal); - bch_btree_complete_write(c, b, w); + bch2_btree_complete_write(c, b, w); btree_node_io_unlock(b); } @@ -1297,8 +1284,8 @@ static void btree_node_write_endio(struct bio *bio) struct closure *cl = !wbio->split ? 
wbio->cl : NULL; struct bch_dev *ca = wbio->ca; - if (bch_dev_fatal_io_err_on(bio->bi_error, ca, "btree write") || - bch_meta_write_fault("btree")) + if (bch2_dev_fatal_io_err_on(bio->bi_error, ca, "btree write") || + bch2_meta_write_fault("btree")) set_btree_node_write_error(b); if (wbio->bounce) @@ -1322,7 +1309,7 @@ static void btree_node_write_endio(struct bio *bio) percpu_ref_put(&ca->io_ref); } -void __bch_btree_node_write(struct bch_fs *c, struct btree *b, +void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, struct closure *parent, enum six_lock_type lock_type_held, int idx_to_write) @@ -1383,10 +1370,10 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b, if (lock_type_held == SIX_LOCK_intent) { six_lock_write(&b->lock); - __bch_compact_whiteouts(c, b, COMPACT_WRITTEN); + __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN); six_unlock_write(&b->lock); } else { - __bch_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK); + __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK); } BUG_ON(b->uncompacted_whiteout_u64s); @@ -1461,12 +1448,12 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b, BUG_ON(i->seq != b->data->keys.seq); i->version = cpu_to_le16(BCACHE_BSET_VERSION); - SET_BSET_CSUM_TYPE(i, bch_meta_checksum_type(c)); + SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c)); nonce = btree_nonce(b, i, b->written << 9); if (bn) { - bch_encrypt(c, BSET_CSUM_TYPE(i), nonce, + bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags, (void *) &b->data->keys - (void *) &b->data->flags); @@ -1492,7 +1479,7 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b, BUG_ON(b->written + sectors_to_write > c->sb.btree_node_size); - trace_bcache_btree_write(b, bytes_to_write, sectors_to_write); + trace_btree_write(b, bytes_to_write, sectors_to_write); /* * We handle btree write errors by immediately halting the journal - @@ -1504,10 +1491,10 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b, * reflect that those writes were done and the data flushed from the * journal: * - * Make sure to update b->written so bch_btree_init_next() doesn't + * Make sure to update b->written so bch2_btree_init_next() doesn't * break: */ - if (bch_journal_error(&c->journal) || + if (bch2_journal_error(&c->journal) || c->opts.nochanges) { set_btree_node_noevict(b); b->written += sectors_to_write; @@ -1533,7 +1520,7 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b, if (parent) closure_get(parent); - bch_bio_map(bio, data); + bch2_bio_map(bio, data); /* * If we're appending to a leaf node, we don't technically need FUA - @@ -1562,13 +1549,13 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b, b->written += sectors_to_write; - bch_submit_wbio_replicas(wbio, c, &k.key, true); + bch2_submit_wbio_replicas(wbio, c, &k.key); } /* * Work that must be done with write lock held: */ -bool bch_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) +bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) { bool invalidated_iter = false; struct btree_node_entry *bne; @@ -1599,13 +1586,13 @@ bool bch_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) btree_node_sort(c, b, NULL, 0, b->nsets, true); invalidated_iter = true; } else { - invalidated_iter = bch_drop_whiteouts(b); + invalidated_iter = bch2_drop_whiteouts(b); } for_each_bset(b, t) set_needs_whiteout(bset(b, t)); - bch_btree_verify(c, b); + bch2_btree_verify(c, b); /* * If later we don't unconditionally sort down to a single bset, we have @@ -1615,9 
+1602,9 @@ bool bch_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) bne = want_new_bset(c, b); if (bne) - bch_bset_init_next(b, &bne->keys); + bch2_bset_init_next(b, &bne->keys); - bch_btree_build_aux_trees(b); + bch2_btree_build_aux_trees(b); return invalidated_iter; } @@ -1625,7 +1612,7 @@ bool bch_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) /* * Use this one if the node is intent locked: */ -void bch_btree_node_write(struct bch_fs *c, struct btree *b, +void bch2_btree_node_write(struct bch_fs *c, struct btree *b, struct closure *parent, enum six_lock_type lock_type_held, int idx_to_write) @@ -1635,33 +1622,33 @@ void bch_btree_node_write(struct bch_fs *c, struct btree *b, if (lock_type_held == SIX_LOCK_intent || six_trylock_convert(&b->lock, SIX_LOCK_read, SIX_LOCK_intent)) { - __bch_btree_node_write(c, b, parent, SIX_LOCK_intent, idx_to_write); + __bch2_btree_node_write(c, b, parent, SIX_LOCK_intent, idx_to_write); six_lock_write(&b->lock); - bch_btree_post_write_cleanup(c, b); + bch2_btree_post_write_cleanup(c, b); six_unlock_write(&b->lock); if (lock_type_held == SIX_LOCK_read) six_lock_downgrade(&b->lock); } else { - __bch_btree_node_write(c, b, parent, SIX_LOCK_read, idx_to_write); + __bch2_btree_node_write(c, b, parent, SIX_LOCK_read, idx_to_write); } } -static void bch_btree_node_write_dirty(struct bch_fs *c, struct btree *b, +static void bch2_btree_node_write_dirty(struct bch_fs *c, struct btree *b, struct closure *parent) { six_lock_read(&b->lock); BUG_ON(b->level); - bch_btree_node_write(c, b, parent, SIX_LOCK_read, -1); + bch2_btree_node_write(c, b, parent, SIX_LOCK_read, -1); six_unlock_read(&b->lock); } /* * Write all dirty btree nodes to disk, including roots */ -void bch_btree_flush(struct bch_fs *c) +void bch2_btree_flush(struct bch_fs *c) { struct closure cl; struct btree *b; @@ -1685,11 +1672,11 @@ restart: rht_for_each_entry_rcu(b, pos, tbl, i, hash) /* * XXX - locking for b->level, when called from - * bch_journal_move() + * bch2_journal_move() */ if (!b->level && btree_node_dirty(b)) { rcu_read_unlock(); - bch_btree_node_write_dirty(c, b, &cl); + bch2_btree_node_write_dirty(c, b, &cl); dropped_lock = true; rcu_read_lock(); goto restart; @@ -1715,7 +1702,7 @@ restart: * that the journal has been flushed so that all the bsets we compacted should * be visible. 
 */
-void bch_btree_node_flush_journal_entries(struct bch_fs *c,
+void bch2_btree_node_flush_journal_entries(struct bch_fs *c,
 					   struct btree *b,
 					   struct closure *cl)
 {
@@ -1731,7 +1718,7 @@ void bch_btree_node_flush_journal_entries(struct bch_fs *c,
 		u64 seq = le64_to_cpu(bset(b, &b->set[i])->journal_seq);
 
 		if (seq) {
-			bch_journal_flush_seq_async(&c->journal, seq, cl);
+			bch2_journal_flush_seq_async(&c->journal, seq, cl);
 			break;
 		}
 	}
diff --git a/libbcache/btree_io.h b/libbcachefs/btree_io.h
index 0f75f456..290fb5d7 100644
--- a/libbcache/btree_io.h
+++ b/libbcachefs/btree_io.h
@@ -25,9 +25,9 @@ enum compact_mode {
 	COMPACT_WRITTEN_NO_WRITE_LOCK,
 };
 
-bool __bch_compact_whiteouts(struct bch_fs *, struct btree *, enum compact_mode);
+bool __bch2_compact_whiteouts(struct bch_fs *, struct btree *, enum compact_mode);
 
-static inline bool bch_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
+static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
 {
 	struct bset_tree *t;
 
@@ -41,33 +41,33 @@ static inline bool bch_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
 	return false;
 compact:
-	return __bch_compact_whiteouts(c, b, COMPACT_LAZY);
+	return __bch2_compact_whiteouts(c, b, COMPACT_LAZY);
 }
 
-void bch_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
+void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
 
-void bch_btree_build_aux_trees(struct btree *);
-void bch_btree_init_next(struct bch_fs *, struct btree *,
+void bch2_btree_build_aux_trees(struct btree *);
+void bch2_btree_init_next(struct bch_fs *, struct btree *,
 			  struct btree_iter *);
 
-void bch_btree_node_read_done(struct bch_fs *, struct btree *,
+void bch2_btree_node_read_done(struct bch_fs *, struct btree *,
 			       struct bch_dev *, const struct bch_extent_ptr *);
-void bch_btree_node_read(struct bch_fs *, struct btree *);
-int bch_btree_root_read(struct bch_fs *, enum btree_id,
+void bch2_btree_node_read(struct bch_fs *, struct btree *);
+int bch2_btree_root_read(struct bch_fs *, enum btree_id,
 			 const struct bkey_i *, unsigned);
 
-void bch_btree_complete_write(struct bch_fs *, struct btree *,
+void bch2_btree_complete_write(struct bch_fs *, struct btree *,
 			       struct btree_write *);
 
-void __bch_btree_node_write(struct bch_fs *, struct btree *,
+void __bch2_btree_node_write(struct bch_fs *, struct btree *,
 			     struct closure *, enum six_lock_type, int);
-bool bch_btree_post_write_cleanup(struct bch_fs *, struct btree *);
+bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
 
-void bch_btree_node_write(struct bch_fs *, struct btree *,
+void bch2_btree_node_write(struct bch_fs *, struct btree *,
 			   struct closure *, enum six_lock_type, int);
 
-void bch_btree_flush(struct bch_fs *);
-void bch_btree_node_flush_journal_entries(struct bch_fs *, struct btree *,
+void bch2_btree_flush(struct bch_fs *);
+void bch2_btree_node_flush_journal_entries(struct bch_fs *, struct btree *,
 					    struct closure *);
 
 #endif /* _BCACHE_BTREE_IO_H */
diff --git a/libbcache/btree_iter.c b/libbcachefs/btree_iter.c
index 04b4bc2e..fb5c507e 100644
--- a/libbcache/btree_iter.c
+++ b/libbcachefs/btree_iter.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
 #include "bkey_methods.h"
 #include "btree_cache.h"
 #include "btree_iter.h"
@@ -7,7 +7,7 @@
 #include "debug.h"
 #include "extents.h"
 
-#include <trace/events/bcache.h>
+#include <trace/events/bcachefs.h>
 
 #define BTREE_ITER_NOT_END ((struct btree *) 1)
 
@@ -19,10 +19,10 @@ static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
 
 /*
Btree node locking: */ /* - * Updates the saved lock sequence number, so that btree_node_relock() will + * Updates the saved lock sequence number, so that bch2_btree_node_relock() will * succeed: */ -void btree_node_unlock_write(struct btree *b, struct btree_iter *iter) +void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter) { struct btree_iter *linked; @@ -37,7 +37,7 @@ void btree_node_unlock_write(struct btree *b, struct btree_iter *iter) six_unlock_write(&b->lock); } -void btree_node_lock_write(struct btree *b, struct btree_iter *iter) +void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter) { struct btree_iter *linked; unsigned readers = 0; @@ -70,24 +70,7 @@ void btree_node_lock_write(struct btree *b, struct btree_iter *iter) } } -/* versions that allow iter to be null: */ -void __btree_node_unlock_write(struct btree *b, struct btree_iter *iter) -{ - if (likely(iter)) - btree_node_unlock_write(b, iter); - else - six_unlock_write(&b->lock); -} - -void __btree_node_lock_write(struct btree *b, struct btree_iter *iter) -{ - if (likely(iter)) - btree_node_lock_write(b, iter); - else - six_lock_write(&b->lock); -} - -bool btree_node_relock(struct btree_iter *iter, unsigned level) +bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level) { struct btree_iter *linked; struct btree *b = iter->nodes[level]; @@ -125,7 +108,7 @@ success: } /* Slowpath: */ -bool __bch_btree_node_lock(struct btree *b, struct bpos pos, +bool __bch2_btree_node_lock(struct btree *b, struct bpos pos, unsigned level, struct btree_iter *iter, enum six_lock_type type) @@ -224,7 +207,7 @@ static void btree_iter_drop_extra_locks(struct btree_iter *iter) } } -bool __bch_btree_iter_set_locks_want(struct btree_iter *iter, +bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter, unsigned new_locks_want) { struct btree_iter *linked; @@ -243,7 +226,7 @@ bool __bch_btree_iter_set_locks_want(struct btree_iter *iter, btree_iter_drop_extra_locks(iter); for (l = iter->level; l < iter->locks_want && iter->nodes[l]; l++) - if (!btree_node_relock(iter, l)) + if (!bch2_btree_node_relock(iter, l)) goto fail; return true; @@ -261,7 +244,7 @@ fail: return false; } -static int __bch_btree_iter_unlock(struct btree_iter *iter) +static int __bch2_btree_iter_unlock(struct btree_iter *iter) { BUG_ON(iter->error == -EINTR); @@ -271,71 +254,71 @@ static int __bch_btree_iter_unlock(struct btree_iter *iter) return iter->error; } -int bch_btree_iter_unlock(struct btree_iter *iter) +int bch2_btree_iter_unlock(struct btree_iter *iter) { struct btree_iter *linked; for_each_linked_btree_iter(iter, linked) - __bch_btree_iter_unlock(linked); - return __bch_btree_iter_unlock(iter); + __bch2_btree_iter_unlock(linked); + return __bch2_btree_iter_unlock(iter); } /* Btree iterator: */ -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG -static void __bch_btree_iter_verify(struct btree_iter *iter, +static void __bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b) { struct btree_node_iter *node_iter = &iter->node_iters[b->level]; struct btree_node_iter tmp = *node_iter; struct bkey_packed *k; - bch_btree_node_iter_verify(node_iter, b); + bch2_btree_node_iter_verify(node_iter, b); /* * For interior nodes, the iterator will have skipped past * deleted keys: */ k = b->level - ? bch_btree_node_iter_prev(&tmp, b) - : bch_btree_node_iter_prev_all(&tmp, b); + ? 
bch2_btree_node_iter_prev(&tmp, b) + : bch2_btree_node_iter_prev_all(&tmp, b); if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k, iter->is_extents)) { char buf[100]; struct bkey uk = bkey_unpack_key(b, k); - bch_bkey_to_text(buf, sizeof(buf), &uk); + bch2_bkey_to_text(buf, sizeof(buf), &uk); panic("prev key should be before after pos:\n%s\n%llu:%llu\n", buf, iter->pos.inode, iter->pos.offset); } - k = bch_btree_node_iter_peek_all(node_iter, b); + k = bch2_btree_node_iter_peek_all(node_iter, b); if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k, iter->is_extents)) { char buf[100]; struct bkey uk = bkey_unpack_key(b, k); - bch_bkey_to_text(buf, sizeof(buf), &uk); + bch2_bkey_to_text(buf, sizeof(buf), &uk); panic("next key should be before iter pos:\n%llu:%llu\n%s\n", iter->pos.inode, iter->pos.offset, buf); } } -void bch_btree_iter_verify(struct btree_iter *iter, struct btree *b) +void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b) { struct btree_iter *linked; if (iter->nodes[b->level] == b) - __bch_btree_iter_verify(iter, b); + __bch2_btree_iter_verify(iter, b); for_each_linked_btree_node(iter, b, linked) - __bch_btree_iter_verify(iter, b); + __bch2_btree_iter_verify(iter, b); } #endif -static void __bch_btree_node_iter_fix(struct btree_iter *iter, +static void __bch2_btree_node_iter_fix(struct btree_iter *iter, struct btree *b, struct btree_node_iter *node_iter, struct bset_tree *t, @@ -357,7 +340,7 @@ static void __bch_btree_node_iter_fix(struct btree_iter *iter, if (new_u64s && btree_iter_pos_cmp_packed(b, &iter->pos, where, iter->is_extents)) - bch_btree_node_iter_push(node_iter, b, where, end); + bch2_btree_node_iter_push(node_iter, b, where, end); return; found: set->end = (int) set->end + shift; @@ -370,12 +353,12 @@ found: btree_iter_pos_cmp_packed(b, &iter->pos, where, iter->is_extents)) { set->k = offset; - bch_btree_node_iter_sort(node_iter, b); + bch2_btree_node_iter_sort(node_iter, b); } else if (set->k < offset + clobber_u64s) { set->k = offset + new_u64s; if (set->k == set->end) *set = node_iter->data[--node_iter->used]; - bch_btree_node_iter_sort(node_iter, b); + bch2_btree_node_iter_sort(node_iter, b); } else { set->k = (int) set->k + shift; } @@ -409,11 +392,11 @@ found: struct bkey_packed *k; for_each_bset(b, t) { - if (bch_bkey_to_bset(b, where) == t) + if (bch2_bkey_to_bset(b, where) == t) continue; - k = bkey_prev_all(b, t, - bch_btree_node_iter_bset_pos(node_iter, b, t)); + k = bch2_bkey_prev_all(b, t, + bch2_btree_node_iter_bset_pos(node_iter, b, t)); if (k && __btree_node_iter_cmp(node_iter, b, k, where) > 0) { @@ -424,11 +407,11 @@ found: btree_node_iter_for_each(node_iter, set) if (set->k == offset) { set->k = __btree_node_key_to_offset(b, k); - bch_btree_node_iter_sort(node_iter, b); + bch2_btree_node_iter_sort(node_iter, b); goto next_bset; } - bch_btree_node_iter_push(node_iter, b, k, + bch2_btree_node_iter_push(node_iter, b, k, btree_bkey_last(b, t)); } next_bset: @@ -437,7 +420,7 @@ next_bset: } } -void bch_btree_node_iter_fix(struct btree_iter *iter, +void bch2_btree_node_iter_fix(struct btree_iter *iter, struct btree *b, struct btree_node_iter *node_iter, struct bset_tree *t, @@ -448,22 +431,22 @@ void bch_btree_node_iter_fix(struct btree_iter *iter, struct btree_iter *linked; if (node_iter != &iter->node_iters[b->level]) - __bch_btree_node_iter_fix(iter, b, node_iter, t, + __bch2_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, new_u64s); if (iter->nodes[b->level] == b) - __bch_btree_node_iter_fix(iter, b, + 
__bch2_btree_node_iter_fix(iter, b, &iter->node_iters[b->level], t, where, clobber_u64s, new_u64s); for_each_linked_btree_node(iter, b, linked) - __bch_btree_node_iter_fix(linked, b, + __bch2_btree_node_iter_fix(linked, b, &linked->node_iters[b->level], t, where, clobber_u64s, new_u64s); /* interior node iterators are... special... */ if (!b->level) - bch_btree_iter_verify(iter, b); + bch2_btree_iter_verify(iter, b); } /* peek_all() doesn't skip deleted keys */ @@ -471,7 +454,7 @@ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter) { struct btree *b = iter->nodes[iter->level]; struct bkey_packed *k = - bch_btree_node_iter_peek_all(&iter->node_iters[iter->level], b); + bch2_btree_node_iter_peek_all(&iter->node_iters[iter->level], b); struct bkey_s_c ret; EBUG_ON(!btree_node_locked(iter, iter->level)); @@ -482,7 +465,7 @@ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter) ret = bkey_disassemble(b, k, &iter->k); if (debug_check_bkeys(iter->c)) - bkey_debugcheck(iter->c, b, ret); + bch2_bkey_debugcheck(iter->c, b, ret); return ret; } @@ -491,7 +474,7 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter) { struct btree *b = iter->nodes[iter->level]; struct bkey_packed *k = - bch_btree_node_iter_peek(&iter->node_iters[iter->level], b); + bch2_btree_node_iter_peek(&iter->node_iters[iter->level], b); struct bkey_s_c ret; EBUG_ON(!btree_node_locked(iter, iter->level)); @@ -502,14 +485,14 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter) ret = bkey_disassemble(b, k, &iter->k); if (debug_check_bkeys(iter->c)) - bkey_debugcheck(iter->c, b, ret); + bch2_bkey_debugcheck(iter->c, b, ret); return ret; } static inline void __btree_iter_advance(struct btree_iter *iter) { - bch_btree_node_iter_advance(&iter->node_iters[iter->level], + bch2_btree_node_iter_advance(&iter->node_iters[iter->level], iter->nodes[iter->level]); } @@ -521,16 +504,16 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b) bool parent_locked; struct bkey_packed *k; - if (!IS_ENABLED(CONFIG_BCACHE_DEBUG) || + if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) || !iter->nodes[b->level + 1]) return; parent_locked = btree_node_locked(iter, b->level + 1); - if (!btree_node_relock(iter, b->level + 1)) + if (!bch2_btree_node_relock(iter, b->level + 1)) return; - k = bch_btree_node_iter_peek_all(&iter->node_iters[b->level + 1], + k = bch2_btree_node_iter_peek_all(&iter->node_iters[b->level + 1], iter->nodes[b->level + 1]); if (!k || bkey_deleted(k) || @@ -539,7 +522,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b) char buf[100]; struct bkey uk = bkey_unpack_key(b, k); - bch_bkey_to_text(buf, sizeof(buf), &uk); + bch2_bkey_to_text(buf, sizeof(buf), &uk); panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n", buf, b->key.k.p.inode, b->key.k.p.offset); } @@ -551,13 +534,13 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b) static inline void __btree_iter_init(struct btree_iter *iter, struct btree *b) { - bch_btree_node_iter_init(&iter->node_iters[b->level], b, + bch2_btree_node_iter_init(&iter->node_iters[b->level], b, iter->pos, iter->is_extents, btree_node_is_extents(b)); /* Skip to first non whiteout: */ if (b->level) - bch_btree_node_iter_peek(&iter->node_iters[b->level], b); + bch2_btree_node_iter_peek(&iter->node_iters[b->level], b); } static inline bool btree_iter_pos_in_node(struct btree_iter *iter, @@ -585,14 +568,14 @@ static inline void 
btree_iter_node_set(struct btree_iter *iter, * A btree node is being replaced - update the iterator to point to the new * node: */ -bool bch_btree_iter_node_replace(struct btree_iter *iter, struct btree *b) +bool bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b) { struct btree_iter *linked; for_each_linked_btree_iter(iter, linked) if (btree_iter_pos_in_node(linked, b)) { /* - * bch_btree_iter_node_drop() has already been called - + * bch2_btree_iter_node_drop() has already been called - * the old node we're replacing has already been * unlocked and the pointer invalidated */ @@ -606,7 +589,7 @@ bool bch_btree_iter_node_replace(struct btree_iter *iter, struct btree *b) * progress... * * Instead, btree_iter_node_set() sets things up so - * btree_node_relock() will succeed: + * bch2_btree_node_relock() will succeed: */ if (btree_want_intent(linked, b->level)) { @@ -627,7 +610,7 @@ bool bch_btree_iter_node_replace(struct btree_iter *iter, struct btree *b) return true; } -void bch_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b) +void bch2_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b) { struct btree_iter *linked; unsigned level = b->level; @@ -639,7 +622,7 @@ void bch_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b) } } -void bch_btree_iter_node_drop(struct btree_iter *iter, struct btree *b) +void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b) { unsigned level = b->level; @@ -654,7 +637,7 @@ void bch_btree_iter_node_drop(struct btree_iter *iter, struct btree *b) * A btree node has been modified in such a way as to invalidate iterators - fix * them: */ -void bch_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b) +void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b) { struct btree_iter *linked; @@ -721,7 +704,7 @@ static inline int btree_iter_down(struct btree_iter *iter) bkey_reassemble(&tmp.k, k); - b = bch_btree_node_get(iter, &tmp.k, level, lock_type); + b = bch2_btree_node_get(iter, &tmp.k, level, lock_type); if (unlikely(IS_ERR(b))) return PTR_ERR(b); @@ -736,14 +719,14 @@ static void btree_iter_up(struct btree_iter *iter) btree_node_unlock(iter, iter->level++); } -int __must_check __bch_btree_iter_traverse(struct btree_iter *); +int __must_check __bch2_btree_iter_traverse(struct btree_iter *); static int btree_iter_traverse_error(struct btree_iter *iter, int ret) { struct bch_fs *c = iter->c; struct btree_iter *linked, *sorted_iters, **i; retry_all: - bch_btree_iter_unlock(iter); + bch2_btree_iter_unlock(iter); if (ret != -ENOMEM && ret != -EINTR) goto io_error; @@ -754,7 +737,7 @@ retry_all: closure_init_stack(&cl); do { - ret = mca_cannibalize_lock(c, &cl); + ret = bch2_btree_node_cannibalize_lock(c, &cl); closure_sync(&cl); } while (ret); } @@ -790,7 +773,7 @@ retry_all: iter = sorted_iters; do { retry: - ret = __bch_btree_iter_traverse(iter); + ret = __bch2_btree_iter_traverse(iter); if (unlikely(ret)) { if (ret == -EINTR) goto retry; @@ -802,7 +785,7 @@ retry: ret = btree_iter_linked(iter) ? -EINTR : 0; out: - mca_cannibalize_unlock(c); + bch2_btree_node_cannibalize_unlock(c); return ret; io_error: BUG_ON(ret != -EIO); @@ -819,9 +802,9 @@ io_error: * Returns 0 on success, -EIO on error (error reading in a btree node). * * On error, caller (peek_node()/peek_key()) must return NULL; the error is - * stashed in the iterator and returned from bch_btree_iter_unlock(). + * stashed in the iterator and returned from bch2_btree_iter_unlock(). 
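
A hedged usage sketch of the error-stashing convention just described, separate from the rename patch itself: the caller simply iterates, and any traversal error ends up as the return value of bch2_btree_iter_unlock(). The surrounding pieces (c as a struct bch_fs *, the loop body) are assumed for illustration; the macros and functions are the ones declared in btree_iter.h further down.

        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, k) {
                /* use k; on a traversal error peek() returns an error key
                 * and the loop terminates */
        }

        /* pick up any error stashed in the iterator during traversal: */
        ret = bch2_btree_iter_unlock(&iter);
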
*/ -int __must_check __bch_btree_iter_traverse(struct btree_iter *iter) +int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter) { unsigned depth_want = iter->level; @@ -833,7 +816,7 @@ int __must_check __bch_btree_iter_traverse(struct btree_iter *iter) for (i = iter->level + 1; i < iter->locks_want && iter->nodes[i]; i++) - if (!btree_node_relock(iter, i)) { + if (!bch2_btree_node_relock(iter, i)) { while (iter->nodes[iter->level] && iter->level + 1 < iter->locks_want) btree_iter_up(iter); @@ -847,7 +830,7 @@ int __must_check __bch_btree_iter_traverse(struct btree_iter *iter) */ while (iter->nodes[iter->level] && !(is_btree_node(iter, iter->level) && - btree_node_relock(iter, iter->level) && + bch2_btree_node_relock(iter, iter->level) && btree_iter_pos_cmp(iter->pos, &iter->nodes[iter->level]->key.k, iter->is_extents))) @@ -884,7 +867,7 @@ int __must_check __bch_btree_iter_traverse(struct btree_iter *iter) return 0; } -int __must_check bch_btree_iter_traverse(struct btree_iter *iter) +int __must_check bch2_btree_iter_traverse(struct btree_iter *iter) { int ret; @@ -893,7 +876,7 @@ int __must_check bch_btree_iter_traverse(struct btree_iter *iter) iter->at_end_of_leaf = false; - ret = __bch_btree_iter_traverse(iter); + ret = __bch2_btree_iter_traverse(iter); if (unlikely(ret)) ret = btree_iter_traverse_error(iter, ret); @@ -902,14 +885,14 @@ int __must_check bch_btree_iter_traverse(struct btree_iter *iter) /* Iterate across nodes (leaf and interior nodes) */ -struct btree *bch_btree_iter_peek_node(struct btree_iter *iter) +struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter) { struct btree *b; int ret; EBUG_ON(iter->is_extents); - ret = bch_btree_iter_traverse(iter); + ret = bch2_btree_iter_traverse(iter); if (ret) return NULL; @@ -923,7 +906,7 @@ struct btree *bch_btree_iter_peek_node(struct btree_iter *iter) return b; } -struct btree *bch_btree_iter_next_node(struct btree_iter *iter, unsigned depth) +struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth) { struct btree *b; int ret; @@ -936,7 +919,7 @@ struct btree *bch_btree_iter_next_node(struct btree_iter *iter, unsigned depth) return NULL; /* parent node usually won't be locked: redo traversal if necessary */ - ret = bch_btree_iter_traverse(iter); + ret = bch2_btree_iter_traverse(iter); if (ret) return NULL; @@ -953,7 +936,7 @@ struct btree *bch_btree_iter_next_node(struct btree_iter *iter, unsigned depth) : bkey_successor(iter->pos); iter->level = depth; - ret = bch_btree_iter_traverse(iter); + ret = bch2_btree_iter_traverse(iter); if (ret) return NULL; @@ -967,7 +950,7 @@ struct btree *bch_btree_iter_next_node(struct btree_iter *iter, unsigned depth) /* Iterate across keys (in leaf nodes only) */ -void bch_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos) +void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos) { struct btree *b = iter->nodes[0]; struct btree_node_iter *node_iter = &iter->node_iters[0]; @@ -978,10 +961,10 @@ void bch_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_p EBUG_ON(!btree_node_locked(iter, 0)); EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0); - while ((k = bch_btree_node_iter_peek_all(node_iter, b)) && + while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) && !btree_iter_pos_cmp_packed(b, &new_pos, k, iter->is_extents)) - bch_btree_node_iter_advance(node_iter, b); + bch2_btree_node_iter_advance(node_iter, b); if (!k && !btree_iter_pos_cmp(new_pos, &b->key.k, iter->is_extents)) @@ 
-990,25 +973,25 @@ void bch_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_p iter->pos = new_pos; } -void bch_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos) +void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos) { EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); /* XXX handle this */ iter->pos = new_pos; } -void bch_btree_iter_advance_pos(struct btree_iter *iter) +void bch2_btree_iter_advance_pos(struct btree_iter *iter) { /* * We use iter->k instead of iter->pos for extents: iter->pos will be * equal to the start of the extent we returned, but we need to advance * to the end of the extent we returned. */ - bch_btree_iter_set_pos(iter, + bch2_btree_iter_set_pos(iter, btree_type_successor(iter->btree_id, iter->k.p)); } /* XXX: expensive */ -void bch_btree_iter_rewind(struct btree_iter *iter, struct bpos pos) +void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos) { /* incapable of rewinding across nodes: */ BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0); @@ -1017,13 +1000,13 @@ void bch_btree_iter_rewind(struct btree_iter *iter, struct bpos pos) __btree_iter_init(iter, iter->nodes[iter->level]); } -struct bkey_s_c bch_btree_iter_peek(struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) { struct bkey_s_c k; int ret; while (1) { - ret = bch_btree_iter_traverse(iter); + ret = bch2_btree_iter_traverse(iter); if (unlikely(ret)) { iter->k = KEY(iter->pos.inode, iter->pos.offset, 0); return bkey_s_c_err(ret); @@ -1037,7 +1020,7 @@ struct bkey_s_c bch_btree_iter_peek(struct btree_iter *iter) */ if (!iter->is_extents || bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) - bch_btree_iter_set_pos(iter, bkey_start_pos(k.k)); + bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k)); return k; } @@ -1045,7 +1028,7 @@ struct bkey_s_c bch_btree_iter_peek(struct btree_iter *iter) if (!bkey_cmp(iter->pos, POS_MAX)) { iter->k = KEY(iter->pos.inode, iter->pos.offset, 0); - bch_btree_iter_unlock(iter); + bch2_btree_iter_unlock(iter); return bkey_s_c_null; } @@ -1053,14 +1036,14 @@ struct bkey_s_c bch_btree_iter_peek(struct btree_iter *iter) } } -struct bkey_s_c bch_btree_iter_peek_with_holes(struct btree_iter *iter) +struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter) { struct bkey_s_c k; struct bkey n; int ret; while (1) { - ret = bch_btree_iter_traverse(iter); + ret = bch2_btree_iter_traverse(iter); if (unlikely(ret)) { iter->k = KEY(iter->pos.inode, iter->pos.offset, 0); return bkey_s_c_err(ret); @@ -1082,7 +1065,7 @@ recheck: if (!k.k) k.k = &iter->nodes[0]->key.k; - bch_key_resize(&n, + bch2_key_resize(&n, min_t(u64, KEY_SIZE_MAX, (k.k->p.inode == n.p.inode ? 
bkey_start_offset(k.k) @@ -1102,13 +1085,13 @@ recheck: } } -void __bch_btree_iter_init(struct btree_iter *iter, struct bch_fs *c, +void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c, enum btree_id btree_id, struct bpos pos, unsigned locks_want, unsigned depth) { iter->level = depth; - /* bch_bkey_ops isn't used much, this would be a cache miss */ - /* iter->is_extents = bch_bkey_ops[btree_id]->is_extents; */ + /* bch2_bkey_ops isn't used much, this would be a cache miss */ + /* iter->is_extents = bch2_bkey_ops[btree_id]->is_extents; */ iter->is_extents = btree_id == BTREE_ID_EXTENTS; iter->nodes_locked = 0; iter->nodes_intent_locked = 0; @@ -1125,14 +1108,14 @@ void __bch_btree_iter_init(struct btree_iter *iter, struct bch_fs *c, prefetch(c->btree_roots[btree_id].b); } -void bch_btree_iter_link(struct btree_iter *iter, struct btree_iter *new) +void bch2_btree_iter_link(struct btree_iter *iter, struct btree_iter *new) { BUG_ON(btree_iter_linked(new)); new->next = iter->next; iter->next = new; - if (IS_ENABLED(CONFIG_BCACHE_DEBUG)) { + if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) { unsigned nr_iters = 1; for_each_linked_btree_iter(iter, new) @@ -1142,9 +1125,9 @@ void bch_btree_iter_link(struct btree_iter *iter, struct btree_iter *new) } } -void bch_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src) +void bch2_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src) { - bch_btree_iter_unlock(dst); + bch2_btree_iter_unlock(dst); memcpy(dst, src, offsetof(struct btree_iter, next)); dst->nodes_locked = dst->nodes_intent_locked = 0; } diff --git a/libbcache/btree_iter.h b/libbcachefs/btree_iter.h index acca2c68..39731f0b 100644 --- a/libbcache/btree_iter.h +++ b/libbcachefs/btree_iter.h @@ -8,7 +8,7 @@ struct btree_iter { u8 level; /* - * Used in bch_btree_iter_traverse(), to indicate whether we're + * Used in bch2_btree_iter_traverse(), to indicate whether we're * searching for @pos or the first key strictly greater than @pos */ u8 is_extents; @@ -23,7 +23,7 @@ struct btree_iter { enum btree_id btree_id:8; /* - * indicates we need to call bch_btree_iter_traverse() to revalidate + * indicates we need to call bch2_btree_iter_traverse() to revalidate * iterator: */ u8 at_end_of_leaf; @@ -44,7 +44,7 @@ struct btree_iter { * btree_iter_next_node() knows that it's finished with a depth first * traversal. Just unlocking a node (with btree_node_unlock()) is fine, * and if you really don't want that node used again (e.g. btree_split() - * freed it) decrementing lock_seq will cause btree_node_relock() to + * freed it) decrementing lock_seq will cause bch2_btree_node_relock() to * always fail (but since freeing a btree node takes a write lock on the * node, which increments the node's lock seq, that's not actually * necessary in that example). @@ -55,8 +55,8 @@ struct btree_iter { struct btree_node_iter node_iters[BTREE_MAX_DEPTH]; /* - * Current unpacked key - so that bch_btree_iter_next()/ - * bch_btree_iter_next_with_holes() can correctly advance pos. + * Current unpacked key - so that bch2_btree_iter_next()/ + * bch2_btree_iter_next_with_holes() can correctly advance pos. */ struct bkey k; @@ -115,27 +115,27 @@ __next_linked_btree_node(struct btree_iter *iter, struct btree *b, * @_b is assumed to be locked by @_iter * * Filters out iterators that don't have a valid btree_node iterator for @_b - - * i.e. iterators for which btree_node_relock() would not succeed. + * i.e. iterators for which bch2_btree_node_relock() would not succeed. 
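
A minimal standalone sketch of the lock-sequence idea described in the comment above, using invented names (toy_node, toy_iter, toy_relock are not bcachefs identifiers): taking the write lock bumps the node's sequence number, so a later relock attempt succeeds only if the sequence saved in the iterator still matches.

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        struct toy_node {
                uint32_t lock_seq;      /* bumped whenever the node is write locked */
        };

        struct toy_iter {
                struct toy_node *node;
                uint32_t saved_seq;     /* sequence observed when the lock was last held */
        };

        /* writer side: taking the write lock invalidates saved sequences */
        static void toy_write_lock(struct toy_node *n)
        {
                n->lock_seq++;
        }

        /* reader side: relock succeeds only if nothing wrote to the node meanwhile */
        static bool toy_relock(struct toy_iter *iter)
        {
                return iter->node->lock_seq == iter->saved_seq;
        }

        int main(void)
        {
                struct toy_node n = { .lock_seq = 0 };
                struct toy_iter it = { .node = &n, .saved_seq = n.lock_seq };

                printf("relock before write: %d\n", toy_relock(&it));   /* prints 1 */
                toy_write_lock(&n);
                printf("relock after write:  %d\n", toy_relock(&it));   /* prints 0 */
                return 0;
        }
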
*/ #define for_each_linked_btree_node(_iter, _b, _linked) \ for ((_linked) = (_iter); \ ((_linked) = __next_linked_btree_node(_iter, _b, _linked));) -#ifdef CONFIG_BCACHE_DEBUG -void bch_btree_iter_verify(struct btree_iter *, struct btree *); +#ifdef CONFIG_BCACHEFS_DEBUG +void bch2_btree_iter_verify(struct btree_iter *, struct btree *); #else -static inline void bch_btree_iter_verify(struct btree_iter *iter, +static inline void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b) {} #endif -void bch_btree_node_iter_fix(struct btree_iter *, struct btree *, +void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *, struct btree_node_iter *, struct bset_tree *, struct bkey_packed *, unsigned, unsigned); -int bch_btree_iter_unlock(struct btree_iter *); -bool __bch_btree_iter_set_locks_want(struct btree_iter *, unsigned); +int bch2_btree_iter_unlock(struct btree_iter *); +bool __bch2_btree_iter_set_locks_want(struct btree_iter *, unsigned); -static inline bool bch_btree_iter_set_locks_want(struct btree_iter *iter, +static inline bool bch2_btree_iter_set_locks_want(struct btree_iter *iter, unsigned new_locks_want) { new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH); @@ -144,48 +144,48 @@ static inline bool bch_btree_iter_set_locks_want(struct btree_iter *iter, iter->nodes_intent_locked == (1 << new_locks_want) - 1) return true; - return __bch_btree_iter_set_locks_want(iter, new_locks_want); + return __bch2_btree_iter_set_locks_want(iter, new_locks_want); } -bool bch_btree_iter_node_replace(struct btree_iter *, struct btree *); -void bch_btree_iter_node_drop_linked(struct btree_iter *, struct btree *); -void bch_btree_iter_node_drop(struct btree_iter *, struct btree *); +bool bch2_btree_iter_node_replace(struct btree_iter *, struct btree *); +void bch2_btree_iter_node_drop_linked(struct btree_iter *, struct btree *); +void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *); -void bch_btree_iter_reinit_node(struct btree_iter *, struct btree *); +void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *); -int __must_check bch_btree_iter_traverse(struct btree_iter *); +int __must_check bch2_btree_iter_traverse(struct btree_iter *); -struct btree *bch_btree_iter_peek_node(struct btree_iter *); -struct btree *bch_btree_iter_next_node(struct btree_iter *, unsigned); +struct btree *bch2_btree_iter_peek_node(struct btree_iter *); +struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned); -struct bkey_s_c bch_btree_iter_peek(struct btree_iter *); -struct bkey_s_c bch_btree_iter_peek_with_holes(struct btree_iter *); -void bch_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos); -void bch_btree_iter_set_pos(struct btree_iter *, struct bpos); -void bch_btree_iter_advance_pos(struct btree_iter *); -void bch_btree_iter_rewind(struct btree_iter *, struct bpos); +struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *); +struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *); +void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos); +void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos); +void bch2_btree_iter_advance_pos(struct btree_iter *); +void bch2_btree_iter_rewind(struct btree_iter *, struct bpos); -void __bch_btree_iter_init(struct btree_iter *, struct bch_fs *, +void __bch2_btree_iter_init(struct btree_iter *, struct bch_fs *, enum btree_id, struct bpos, unsigned , unsigned); -static inline void bch_btree_iter_init(struct btree_iter *iter, +static inline void bch2_btree_iter_init(struct 
btree_iter *iter, struct bch_fs *c, enum btree_id btree_id, struct bpos pos) { - __bch_btree_iter_init(iter, c, btree_id, pos, 0, 0); + __bch2_btree_iter_init(iter, c, btree_id, pos, 0, 0); } -static inline void bch_btree_iter_init_intent(struct btree_iter *iter, +static inline void bch2_btree_iter_init_intent(struct btree_iter *iter, struct bch_fs *c, enum btree_id btree_id, struct bpos pos) { - __bch_btree_iter_init(iter, c, btree_id, pos, 1, 0); + __bch2_btree_iter_init(iter, c, btree_id, pos, 1, 0); } -void bch_btree_iter_link(struct btree_iter *, struct btree_iter *); -void bch_btree_iter_copy(struct btree_iter *, struct btree_iter *); +void bch2_btree_iter_link(struct btree_iter *, struct btree_iter *); +void bch2_btree_iter_copy(struct btree_iter *, struct btree_iter *); static inline struct bpos btree_type_successor(enum btree_id id, struct bpos pos) @@ -217,22 +217,22 @@ static inline int btree_iter_cmp(const struct btree_iter *l, #define __for_each_btree_node(_iter, _c, _btree_id, _start, _depth, \ _b, _locks_want) \ - for (__bch_btree_iter_init((_iter), (_c), (_btree_id), \ + for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), \ _start, _locks_want, _depth), \ (_iter)->is_extents = false, \ - _b = bch_btree_iter_peek_node(_iter); \ + _b = bch2_btree_iter_peek_node(_iter); \ (_b); \ - (_b) = bch_btree_iter_next_node(_iter, _depth)) + (_b) = bch2_btree_iter_next_node(_iter, _depth)) #define for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b) \ __for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b, 0) #define __for_each_btree_key(_iter, _c, _btree_id, _start, \ _k, _locks_want) \ - for (__bch_btree_iter_init((_iter), (_c), (_btree_id), \ + for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), \ _start, _locks_want, 0); \ - !IS_ERR_OR_NULL(((_k) = bch_btree_iter_peek(_iter)).k); \ - bch_btree_iter_advance_pos(_iter)) + !IS_ERR_OR_NULL(((_k) = bch2_btree_iter_peek(_iter)).k); \ + bch2_btree_iter_advance_pos(_iter)) #define for_each_btree_key(_iter, _c, _btree_id, _start, _k) \ __for_each_btree_key(_iter, _c, _btree_id, _start, _k, 0) @@ -242,10 +242,10 @@ static inline int btree_iter_cmp(const struct btree_iter *l, #define __for_each_btree_key_with_holes(_iter, _c, _btree_id, \ _start, _k, _locks_want) \ - for (__bch_btree_iter_init((_iter), (_c), (_btree_id), \ + for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), \ _start, _locks_want, 0); \ - !IS_ERR_OR_NULL(((_k) = bch_btree_iter_peek_with_holes(_iter)).k);\ - bch_btree_iter_advance_pos(_iter)) + !IS_ERR_OR_NULL(((_k) = bch2_btree_iter_peek_with_holes(_iter)).k);\ + bch2_btree_iter_advance_pos(_iter)) #define for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k) \ __for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, 0) @@ -263,19 +263,19 @@ static inline int btree_iter_err(struct bkey_s_c k) * Unlocks before scheduling * Note: does not revalidate iterator */ -static inline void bch_btree_iter_cond_resched(struct btree_iter *iter) +static inline void bch2_btree_iter_cond_resched(struct btree_iter *iter) { struct btree_iter *linked; if (need_resched()) { for_each_linked_btree_iter(iter, linked) - bch_btree_iter_unlock(linked); - bch_btree_iter_unlock(iter); + bch2_btree_iter_unlock(linked); + bch2_btree_iter_unlock(iter); schedule(); } else if (race_fault()) { for_each_linked_btree_iter(iter, linked) - bch_btree_iter_unlock(linked); - bch_btree_iter_unlock(iter); + bch2_btree_iter_unlock(linked); + bch2_btree_iter_unlock(iter); } } diff --git a/libbcache/btree_locking.h 
b/libbcachefs/btree_locking.h index 76f85c0d..27709d1d 100644 --- a/libbcache/btree_locking.h +++ b/libbcachefs/btree_locking.h @@ -96,7 +96,7 @@ static inline void btree_node_unlock(struct btree_iter *iter, unsigned level) mark_btree_node_unlocked(iter, level); } -bool __bch_btree_node_lock(struct btree *, struct bpos, unsigned, +bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned, struct btree_iter *, enum six_lock_type); static inline bool btree_node_lock(struct btree *b, struct bpos pos, @@ -105,15 +105,12 @@ static inline bool btree_node_lock(struct btree *b, struct bpos pos, enum six_lock_type type) { return likely(six_trylock_type(&b->lock, type)) || - __bch_btree_node_lock(b, pos, level, iter, type); + __bch2_btree_node_lock(b, pos, level, iter, type); } -bool btree_node_relock(struct btree_iter *, unsigned); +bool bch2_btree_node_relock(struct btree_iter *, unsigned); -void btree_node_unlock_write(struct btree *, struct btree_iter *); -void btree_node_lock_write(struct btree *, struct btree_iter *); - -void __btree_node_unlock_write(struct btree *, struct btree_iter *); -void __btree_node_lock_write(struct btree *, struct btree_iter *); +void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *); +void bch2_btree_node_lock_write(struct btree *, struct btree_iter *); #endif /* _BCACHE_BTREE_LOCKING_H */ diff --git a/libbcache/btree_types.h b/libbcachefs/btree_types.h index cfca12ea..915e42c2 100644 --- a/libbcache/btree_types.h +++ b/libbcachefs/btree_types.h @@ -1,8 +1,6 @@ #ifndef _BCACHE_BTREE_TYPES_H #define _BCACHE_BTREE_TYPES_H -#include <linux/bcache.h> -#include <linux/kernel.h> #include <linux/list.h> #include <linux/rhashtable.h> #include <linux/semaphore.h> @@ -95,10 +93,10 @@ struct btree { u8 unpack_fn_len; /* - * XXX: add a delete sequence number, so when btree_node_relock() fails - * because the lock sequence number has changed - i.e. the contents were - * modified - we can still relock the node if it's still the one we - * want, without redoing the traversal + * XXX: add a delete sequence number, so when bch2_btree_node_relock() + * fails because the lock sequence number has changed - i.e. 
the + * contents were modified - we can still relock the node if it's still + * the one we want, without redoing the traversal */ /* @@ -119,7 +117,7 @@ struct btree { struct btree_write writes[2]; -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG bool *expensive_debug_checks; #endif }; @@ -241,7 +239,7 @@ static inline enum bkey_type btree_node_type(struct btree *b) static inline const struct bkey_ops *btree_node_ops(struct btree *b) { - return bch_bkey_ops[btree_node_type(b)]; + return bch2_bkey_ops[btree_node_type(b)]; } static inline bool btree_node_has_ptrs(struct btree *b) diff --git a/libbcache/btree_update.c b/libbcachefs/btree_update.c index 751a51c2..51dff1b7 100644 --- a/libbcache/btree_update.c +++ b/libbcachefs/btree_update.c @@ -1,5 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" #include "alloc.h" #include "bkey_methods.h" #include "btree_cache.h" @@ -16,7 +16,7 @@ #include <linux/random.h> #include <linux/sort.h> -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> static void btree_interior_update_updated_root(struct bch_fs *, struct btree_interior_update *, @@ -24,13 +24,13 @@ static void btree_interior_update_updated_root(struct bch_fs *, /* Calculate ideal packed bkey format for new btree nodes: */ -void __bch_btree_calc_format(struct bkey_format_state *s, struct btree *b) +void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b) { struct bkey_packed *k; struct bset_tree *t; struct bkey uk; - bch_bkey_format_add_pos(s, b->data->min_key); + bch2_bkey_format_add_pos(s, b->data->min_key); for_each_bset(b, t) for (k = btree_bkey_first(b, t); @@ -38,18 +38,18 @@ void __bch_btree_calc_format(struct bkey_format_state *s, struct btree *b) k = bkey_next(k)) if (!bkey_whiteout(k)) { uk = bkey_unpack_key(b, k); - bch_bkey_format_add_key(s, &uk); + bch2_bkey_format_add_key(s, &uk); } } -static struct bkey_format bch_btree_calc_format(struct btree *b) +static struct bkey_format bch2_btree_calc_format(struct btree *b) { struct bkey_format_state s; - bch_bkey_format_init(&s); - __bch_btree_calc_format(&s, b); + bch2_bkey_format_init(&s); + __bch2_btree_calc_format(&s, b); - return bch_bkey_format_done(&s); + return bch2_bkey_format_done(&s); } static size_t btree_node_u64s_with_format(struct btree *b, @@ -75,7 +75,7 @@ static size_t btree_node_u64s_with_format(struct btree *b, * This assumes all keys can pack with the new format -- it just checks if * the re-packed keys would fit inside the node itself. 
*/ -bool bch_btree_node_format_fits(struct bch_fs *c, struct btree *b, +bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b, struct bkey_format *new_f) { size_t u64s = btree_node_u64s_with_format(b, new_f); @@ -92,7 +92,7 @@ bool bch_btree_node_format_fits(struct bch_fs *c, struct btree *b, * Must be called _before_ btree_interior_update_updated_root() or * btree_interior_update_updated_btree: */ -static void bch_btree_node_free_index(struct bch_fs *c, struct btree *b, +static void bch2_btree_node_free_index(struct bch_fs *c, struct btree *b, enum btree_id id, struct bkey_s_c k, struct bch_fs_usage *stats) { @@ -136,13 +136,13 @@ found: */ /* - * bch_mark_key() compares the current gc pos to the pos we're + * bch2_mark_key() compares the current gc pos to the pos we're * moving this reference from, hence one comparison here: */ if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) { struct bch_fs_usage tmp = { 0 }; - bch_mark_key(c, bkey_i_to_s_c(&d->key), + bch2_mark_key(c, bkey_i_to_s_c(&d->key), -c->sb.btree_node_size, true, b ? gc_pos_btree_node(b) : gc_pos_btree_root(id), @@ -159,7 +159,7 @@ found: static void __btree_node_free(struct bch_fs *c, struct btree *b, struct btree_iter *iter) { - trace_bcache_btree_node_free(c, b); + trace_btree_node_free(c, b); BUG_ON(b == btree_node_root(c, b)); BUG_ON(b->ob); @@ -168,10 +168,10 @@ static void __btree_node_free(struct bch_fs *c, struct btree *b, six_lock_write(&b->lock); if (btree_node_dirty(b)) - bch_btree_complete_write(c, b, btree_current_write(b)); + bch2_btree_complete_write(c, b, btree_current_write(b)); clear_btree_node_dirty(b); - mca_hash_remove(c, b); + bch2_btree_node_hash_remove(c, b); mutex_lock(&c->btree_cache_lock); list_move(&b->list, &c->btree_cache_freeable); @@ -179,13 +179,14 @@ static void __btree_node_free(struct bch_fs *c, struct btree *b, /* * By using six_unlock_write() directly instead of - * btree_node_unlock_write(), we don't update the iterator's sequence - * numbers and cause future btree_node_relock() calls to fail: + * bch2_btree_node_unlock_write(), we don't update the iterator's + * sequence numbers and cause future bch2_btree_node_relock() calls to + * fail: */ six_unlock_write(&b->lock); } -void bch_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b) +void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b) { struct open_bucket *ob = b->ob; @@ -193,26 +194,26 @@ void bch_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b) __btree_node_free(c, b, NULL); - bch_open_bucket_put(c, ob); + bch2_open_bucket_put(c, ob); } -void bch_btree_node_free_inmem(struct btree_iter *iter, struct btree *b) +void bch2_btree_node_free_inmem(struct btree_iter *iter, struct btree *b) { - bch_btree_iter_node_drop_linked(iter, b); + bch2_btree_iter_node_drop_linked(iter, b); __btree_node_free(iter->c, b, iter); - bch_btree_iter_node_drop(iter, b); + bch2_btree_iter_node_drop(iter, b); } -static void bch_btree_node_free_ondisk(struct bch_fs *c, +static void bch2_btree_node_free_ondisk(struct bch_fs *c, struct pending_btree_node_free *pending) { struct bch_fs_usage stats = { 0 }; BUG_ON(!pending->index_update_done); - bch_mark_key(c, bkey_i_to_s_c(&pending->key), + bch2_mark_key(c, bkey_i_to_s_c(&pending->key), -c->sb.btree_node_size, true, gc_phase(GC_PHASE_PENDING_DELETE), &stats, 0); @@ -222,13 +223,13 @@ static void bch_btree_node_free_ondisk(struct bch_fs *c, */ } -void btree_open_bucket_put(struct bch_fs *c, struct btree *b) +void 
bch2_btree_open_bucket_put(struct bch_fs *c, struct btree *b) { - bch_open_bucket_put(c, b->ob); + bch2_open_bucket_put(c, b->ob); b->ob = NULL; } -static struct btree *__bch_btree_node_alloc(struct bch_fs *c, +static struct btree *__bch2_btree_node_alloc(struct bch_fs *c, bool use_reserve, struct disk_reservation *res, struct closure *cl) @@ -255,7 +256,7 @@ retry: bkey_extent_init(&tmp.k); tmp.k.k.size = c->sb.btree_node_size, - ob = bch_alloc_sectors(c, &c->btree_write_point, + ob = bch2_alloc_sectors(c, &c->btree_write_point, bkey_i_to_extent(&tmp.k), res->nr_replicas, c->opts.metadata_replicas_required, @@ -265,11 +266,11 @@ retry: return ERR_CAST(ob); if (tmp.k.k.size < c->sb.btree_node_size) { - bch_open_bucket_put(c, ob); + bch2_open_bucket_put(c, ob); goto retry; } mem_alloc: - b = mca_alloc(c); + b = bch2_btree_node_mem_alloc(c); /* we hold cannibalize_lock: */ BUG_ON(IS_ERR(b)); @@ -282,7 +283,7 @@ mem_alloc: return b; } -static struct btree *bch_btree_node_alloc(struct bch_fs *c, +static struct btree *bch2_btree_node_alloc(struct bch_fs *c, unsigned level, enum btree_id id, struct btree_reserve *reserve) { @@ -292,12 +293,12 @@ static struct btree *bch_btree_node_alloc(struct bch_fs *c, b = reserve->b[--reserve->nr]; - BUG_ON(mca_hash_insert(c, b, level, id)); + BUG_ON(bch2_btree_node_hash_insert(c, b, level, id)); set_btree_node_accessed(b); set_btree_node_dirty(b); - bch_bset_init_first(b, &b->data->keys); + bch2_bset_init_first(b, &b->data->keys); memset(&b->nr, 0, sizeof(b->nr)); b->data->magic = cpu_to_le64(bset_magic(c)); b->data->flags = 0; @@ -305,22 +306,22 @@ static struct btree *bch_btree_node_alloc(struct bch_fs *c, SET_BTREE_NODE_LEVEL(b->data, level); b->data->ptr = bkey_i_to_extent(&b->key)->v.start->ptr; - bch_btree_build_aux_trees(b); + bch2_btree_build_aux_trees(b); - bch_check_mark_super(c, &b->key, true); + bch2_check_mark_super(c, &b->key, true); - trace_bcache_btree_node_alloc(c, b); + trace_btree_node_alloc(c, b); return b; } -struct btree *__btree_node_alloc_replacement(struct bch_fs *c, - struct btree *b, - struct bkey_format format, - struct btree_reserve *reserve) +struct btree *__bch2_btree_node_alloc_replacement(struct bch_fs *c, + struct btree *b, + struct bkey_format format, + struct btree_reserve *reserve) { struct btree *n; - n = bch_btree_node_alloc(c, b->level, b->btree_id, reserve); + n = bch2_btree_node_alloc(c, b->level, b->btree_id, reserve); n->data->min_key = b->data->min_key; n->data->max_key = b->data->max_key; @@ -328,33 +329,31 @@ struct btree *__btree_node_alloc_replacement(struct bch_fs *c, btree_node_set_format(n, format); - bch_btree_sort_into(c, n, b); + bch2_btree_sort_into(c, n, b); btree_node_reset_sib_u64s(n); n->key.k.p = b->key.k.p; - trace_bcache_btree_node_alloc_replacement(c, b, n); - return n; } -struct btree *btree_node_alloc_replacement(struct bch_fs *c, - struct btree *b, - struct btree_reserve *reserve) +static struct btree *bch2_btree_node_alloc_replacement(struct bch_fs *c, + struct btree *b, + struct btree_reserve *reserve) { - struct bkey_format new_f = bch_btree_calc_format(b); + struct bkey_format new_f = bch2_btree_calc_format(b); /* * The keys might expand with the new format - if they wouldn't fit in * the btree node anymore, use the old format for now: */ - if (!bch_btree_node_format_fits(c, b, &new_f)) + if (!bch2_btree_node_format_fits(c, b, &new_f)) new_f = b->format; - return __btree_node_alloc_replacement(c, b, new_f, reserve); + return __bch2_btree_node_alloc_replacement(c, b, new_f, reserve); } 
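
A small standalone sketch, with made-up types (toy_format, toy_node), of the fallback decision made in bch2_btree_node_alloc_replacement() above: compute a candidate packed-key format, estimate whether the repacked keys would still fit in the node, and keep the old format if they would not.

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        /* toy stand-ins for the real bkey_format / btree node bookkeeping */
        struct toy_format { unsigned key_u64s; };
        struct toy_node   { struct toy_format format; size_t nr_keys, capacity_u64s; };

        static size_t toy_u64s_with_format(const struct toy_node *b,
                                           const struct toy_format *f)
        {
                /* crude estimate: every key repacked to the candidate width */
                return b->nr_keys * f->key_u64s;
        }

        static bool toy_format_fits(const struct toy_node *b,
                                    const struct toy_format *f)
        {
                return toy_u64s_with_format(b, f) <= b->capacity_u64s;
        }

        /* prefer the freshly computed format, but fall back to the node's
         * existing format if repacking would overflow the node */
        static struct toy_format toy_pick_format(const struct toy_node *b,
                                                 struct toy_format candidate)
        {
                return toy_format_fits(b, &candidate) ? candidate : b->format;
        }

        int main(void)
        {
                struct toy_node b = {
                        .format        = { .key_u64s = 3 },
                        .nr_keys       = 100,
                        .capacity_u64s = 350,
                };
                struct toy_format wide = { .key_u64s = 4 };     /* keys would expand */

                printf("chosen key_u64s: %u\n", toy_pick_format(&b, wide).key_u64s);
                return 0;
        }
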
-static void bch_btree_set_root_inmem(struct bch_fs *c, struct btree *b, +static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b, struct btree_reserve *btree_reserve) { struct btree *old = btree_node_root(c, b); @@ -371,28 +370,28 @@ static void bch_btree_set_root_inmem(struct bch_fs *c, struct btree *b, if (btree_reserve) { /* * New allocation (we're not being called because we're in - * bch_btree_root_read()) - do marking while holding + * bch2_btree_root_read()) - do marking while holding * btree_root_lock: */ struct bch_fs_usage stats = { 0 }; - bch_mark_key(c, bkey_i_to_s_c(&b->key), + bch2_mark_key(c, bkey_i_to_s_c(&b->key), c->sb.btree_node_size, true, gc_pos_btree_root(b->btree_id), &stats, 0); if (old) - bch_btree_node_free_index(c, NULL, old->btree_id, + bch2_btree_node_free_index(c, NULL, old->btree_id, bkey_i_to_s_c(&old->key), &stats); - bch_fs_usage_apply(c, &stats, &btree_reserve->disk_res, + bch2_fs_usage_apply(c, &stats, &btree_reserve->disk_res, gc_pos_btree_root(b->btree_id)); } - bch_recalc_btree_reserve(c); + bch2_recalc_btree_reserve(c); } -static void bch_btree_set_root_ondisk(struct bch_fs *c, struct btree *b) +static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b) { struct btree_root *r = &c->btree_roots[b->btree_id]; @@ -410,13 +409,13 @@ static void bch_btree_set_root_ondisk(struct bch_fs *c, struct btree *b) * Only for filesystem bringup, when first reading the btree roots or allocating * btree roots when initializing a new filesystem: */ -void bch_btree_set_root_initial(struct bch_fs *c, struct btree *b, +void bch2_btree_set_root_initial(struct bch_fs *c, struct btree *b, struct btree_reserve *btree_reserve) { BUG_ON(btree_node_root(c, b)); - bch_btree_set_root_inmem(c, b, btree_reserve); - bch_btree_set_root_ondisk(c, b); + bch2_btree_set_root_inmem(c, b, btree_reserve); + bch2_btree_set_root_ondisk(c, b); } /** @@ -431,14 +430,14 @@ void bch_btree_set_root_initial(struct bch_fs *c, struct btree *b, * is nothing new to be done. This just guarantees that there is a * journal write. */ -static void bch_btree_set_root(struct btree_iter *iter, struct btree *b, +static void bch2_btree_set_root(struct btree_iter *iter, struct btree *b, struct btree_interior_update *as, struct btree_reserve *btree_reserve) { struct bch_fs *c = iter->c; struct btree *old; - trace_bcache_btree_set_root(c, b); + trace_btree_set_root(c, b); BUG_ON(!b->written); old = btree_node_root(c, b); @@ -447,9 +446,9 @@ static void bch_btree_set_root(struct btree_iter *iter, struct btree *b, * Ensure no one is using the old root while we switch to the * new root: */ - btree_node_lock_write(old, iter); + bch2_btree_node_lock_write(old, iter); - bch_btree_set_root_inmem(c, b, btree_reserve); + bch2_btree_set_root_inmem(c, b, btree_reserve); btree_interior_update_updated_root(c, as, iter->btree_id); @@ -460,31 +459,31 @@ static void bch_btree_set_root(struct btree_iter *iter, struct btree *b, * an intent lock on the new root, and any updates that would * depend on the new root would have to update the new root. 
*/ - btree_node_unlock_write(old, iter); + bch2_btree_node_unlock_write(old, iter); } static struct btree *__btree_root_alloc(struct bch_fs *c, unsigned level, enum btree_id id, struct btree_reserve *reserve) { - struct btree *b = bch_btree_node_alloc(c, level, id, reserve); + struct btree *b = bch2_btree_node_alloc(c, level, id, reserve); b->data->min_key = POS_MIN; b->data->max_key = POS_MAX; - b->data->format = bch_btree_calc_format(b); + b->data->format = bch2_btree_calc_format(b); b->key.k.p = POS_MAX; btree_node_set_format(b, b->data->format); - bch_btree_build_aux_trees(b); + bch2_btree_build_aux_trees(b); six_unlock_write(&b->lock); return b; } -void bch_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve) +void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve) { - bch_disk_reservation_put(c, &reserve->disk_res); + bch2_disk_reservation_put(c, &reserve->disk_res); mutex_lock(&c->btree_reserve_cache_lock); @@ -502,7 +501,7 @@ void bch_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve) b->ob = NULL; bkey_copy(&a->k, &b->key); } else { - bch_open_bucket_put(c, b->ob); + bch2_open_bucket_put(c, b->ob); b->ob = NULL; } @@ -516,7 +515,7 @@ void bch_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve) mempool_free(reserve, &c->btree_reserve_pool); } -static struct btree_reserve *__bch_btree_reserve_get(struct bch_fs *c, +static struct btree_reserve *__bch2_btree_reserve_get(struct bch_fs *c, unsigned nr_nodes, unsigned flags, struct closure *cl) @@ -535,11 +534,11 @@ static struct btree_reserve *__bch_btree_reserve_get(struct bch_fs *c, * This check isn't necessary for correctness - it's just to potentially * prevent us from doing a lot of work that'll end up being wasted: */ - ret = bch_journal_error(&c->journal); + ret = bch2_journal_error(&c->journal); if (ret) return ERR_PTR(ret); - if (bch_disk_reservation_get(c, &disk_res, sectors, disk_res_flags)) + if (bch2_disk_reservation_get(c, &disk_res, sectors, disk_res_flags)) return ERR_PTR(-ENOSPC); BUG_ON(nr_nodes > BTREE_RESERVE_MAX); @@ -548,9 +547,9 @@ static struct btree_reserve *__bch_btree_reserve_get(struct bch_fs *c, * Protects reaping from the btree node cache and using the btree node * open bucket reserve: */ - ret = mca_cannibalize_lock(c, cl); + ret = bch2_btree_node_cannibalize_lock(c, cl); if (ret) { - bch_disk_reservation_put(c, &disk_res); + bch2_disk_reservation_put(c, &disk_res); return ERR_PTR(ret); } @@ -560,7 +559,7 @@ static struct btree_reserve *__bch_btree_reserve_get(struct bch_fs *c, reserve->nr = 0; while (reserve->nr < nr_nodes) { - b = __bch_btree_node_alloc(c, flags & BTREE_INSERT_USE_RESERVE, + b = __bch2_btree_node_alloc(c, flags & BTREE_INSERT_USE_RESERVE, &disk_res, cl); if (IS_ERR(b)) { ret = PTR_ERR(b); @@ -570,16 +569,16 @@ static struct btree_reserve *__bch_btree_reserve_get(struct bch_fs *c, reserve->b[reserve->nr++] = b; } - mca_cannibalize_unlock(c); + bch2_btree_node_cannibalize_unlock(c); return reserve; err_free: - bch_btree_reserve_put(c, reserve); - mca_cannibalize_unlock(c); - trace_bcache_btree_reserve_get_fail(c, nr_nodes, cl); + bch2_btree_reserve_put(c, reserve); + bch2_btree_node_cannibalize_unlock(c); + trace_btree_reserve_get_fail(c, nr_nodes, cl); return ERR_PTR(ret); } -struct btree_reserve *bch_btree_reserve_get(struct bch_fs *c, +struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c, struct btree *b, unsigned extra_nodes, unsigned flags, @@ -588,11 +587,11 @@ struct btree_reserve 
*bch_btree_reserve_get(struct bch_fs *c, unsigned depth = btree_node_root(c, b)->level - b->level; unsigned nr_nodes = btree_reserve_required_nodes(depth) + extra_nodes; - return __bch_btree_reserve_get(c, nr_nodes, flags, cl); + return __bch2_btree_reserve_get(c, nr_nodes, flags, cl); } -int bch_btree_root_alloc(struct bch_fs *c, enum btree_id id, +int bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id, struct closure *writes) { struct closure cl; @@ -603,7 +602,7 @@ int bch_btree_root_alloc(struct bch_fs *c, enum btree_id id, while (1) { /* XXX haven't calculated capacity yet :/ */ - reserve = __bch_btree_reserve_get(c, 1, 0, &cl); + reserve = __bch2_btree_reserve_get(c, 1, 0, &cl); if (!IS_ERR(reserve)) break; @@ -615,18 +614,18 @@ int bch_btree_root_alloc(struct bch_fs *c, enum btree_id id, b = __btree_root_alloc(c, 0, id, reserve); - bch_btree_node_write(c, b, writes, SIX_LOCK_intent, -1); + bch2_btree_node_write(c, b, writes, SIX_LOCK_intent, -1); - bch_btree_set_root_initial(c, b, reserve); - btree_open_bucket_put(c, b); + bch2_btree_set_root_initial(c, b, reserve); + bch2_btree_open_bucket_put(c, b); six_unlock_intent(&b->lock); - bch_btree_reserve_put(c, reserve); + bch2_btree_reserve_put(c, reserve); return 0; } -static void bch_insert_fixup_btree_ptr(struct btree_iter *iter, +static void bch2_insert_fixup_btree_ptr(struct btree_iter *iter, struct btree *b, struct bkey_i *insert, struct btree_node_iter *node_iter, @@ -638,33 +637,33 @@ static void bch_insert_fixup_btree_ptr(struct btree_iter *iter, struct bkey tmp; if (bkey_extent_is_data(&insert->k)) - bch_mark_key(c, bkey_i_to_s_c(insert), + bch2_mark_key(c, bkey_i_to_s_c(insert), c->sb.btree_node_size, true, gc_pos_btree_node(b), &stats, 0); - while ((k = bch_btree_node_iter_peek_all(node_iter, b)) && + while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) && !btree_iter_pos_cmp_packed(b, &insert->k.p, k, false)) - bch_btree_node_iter_advance(node_iter, b); + bch2_btree_node_iter_advance(node_iter, b); /* * If we're overwriting, look up pending delete and mark so that gc * marks it on the pending delete list: */ if (k && !bkey_cmp_packed(b, k, &insert->k)) - bch_btree_node_free_index(c, b, iter->btree_id, + bch2_btree_node_free_index(c, b, iter->btree_id, bkey_disassemble(b, k, &tmp), &stats); - bch_fs_usage_apply(c, &stats, disk_res, gc_pos_btree_node(b)); + bch2_fs_usage_apply(c, &stats, disk_res, gc_pos_btree_node(b)); - bch_btree_bset_insert_key(iter, b, node_iter, insert); + bch2_btree_bset_insert_key(iter, b, node_iter, insert); set_btree_node_dirty(b); } /* Inserting into a given leaf node (last stage of insert): */ /* Handle overwrites and do insert, for non extents: */ -bool bch_btree_bset_insert_key(struct btree_iter *iter, +bool bch2_btree_bset_insert_key(struct btree_iter *iter, struct btree *b, struct btree_node_iter *node_iter, struct bkey_i *insert) @@ -681,11 +680,11 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter, bkey_cmp(insert->k.p, b->data->max_key) > 0); BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(iter->c, b)); - k = bch_btree_node_iter_peek_all(node_iter, b); + k = bch2_btree_node_iter_peek_all(node_iter, b); if (k && !bkey_cmp_packed(b, k, &insert->k)) { BUG_ON(bkey_whiteout(k)); - t = bch_bkey_to_bset(b, k); + t = bch2_bkey_to_bset(b, k); if (bset_unwritten(b, bset(b, t)) && bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k)) { @@ -710,8 +709,8 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter, * been written to disk) - just delete it: */ if 
(bkey_whiteout(&insert->k) && !k->needs_whiteout) { - bch_bset_delete(b, k, clobber_u64s); - bch_btree_node_iter_fix(iter, b, node_iter, t, + bch2_bset_delete(b, k, clobber_u64s); + bch2_btree_node_iter_fix(iter, b, node_iter, t, k, clobber_u64s, 0); return true; } @@ -720,7 +719,7 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter, } k->type = KEY_TYPE_DELETED; - bch_btree_node_iter_fix(iter, b, node_iter, t, k, + bch2_btree_node_iter_fix(iter, b, node_iter, t, k, k->u64s, k->u64s); if (bkey_whiteout(&insert->k)) { @@ -740,12 +739,12 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter, } t = bset_tree_last(b); - k = bch_btree_node_iter_bset_pos(node_iter, b, t); + k = bch2_btree_node_iter_bset_pos(node_iter, b, t); clobber_u64s = 0; overwrite: - bch_bset_insert(b, node_iter, k, insert, clobber_u64s); + bch2_bset_insert(b, node_iter, k, insert, clobber_u64s); if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k)) - bch_btree_node_iter_fix(iter, b, node_iter, t, k, + bch2_btree_node_iter_fix(iter, b, node_iter, t, k, clobber_u64s, k->u64s); return true; } @@ -772,7 +771,7 @@ static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin, * shouldn't: */ if (!b->level) - bch_btree_node_write(c, b, NULL, SIX_LOCK_read, i); + bch2_btree_node_write(c, b, NULL, SIX_LOCK_read, i); six_unlock_read(&b->lock); } @@ -786,7 +785,7 @@ static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin) return __btree_node_flush(j, pin, 1); } -void bch_btree_journal_key(struct btree_insert *trans, +void bch2_btree_journal_key(struct btree_insert *trans, struct btree_iter *iter, struct bkey_i *insert) { @@ -800,7 +799,7 @@ void bch_btree_journal_key(struct btree_insert *trans, test_bit(JOURNAL_REPLAY_DONE, &j->flags)); if (!journal_pin_active(&w->journal)) - bch_journal_pin_add(j, &w->journal, + bch2_journal_pin_add(j, &w->journal, btree_node_write_idx(b) == 0 ? 
btree_node_flush0 : btree_node_flush1); @@ -813,11 +812,11 @@ void bch_btree_journal_key(struct btree_insert *trans, * have a bug where we're seeing an extent with an invalid crc * entry in the journal, trying to track it down: */ - BUG_ON(bkey_invalid(c, b->btree_id, bkey_i_to_s_c(insert))); + BUG_ON(bch2_bkey_invalid(c, b->btree_id, bkey_i_to_s_c(insert))); /* ick */ insert->k.needs_whiteout = false; - bch_journal_add_keys(j, &trans->journal_res, + bch2_journal_add_keys(j, &trans->journal_res, b->btree_id, insert); insert->k.needs_whiteout = needs_whiteout; @@ -831,18 +830,18 @@ void bch_btree_journal_key(struct btree_insert *trans, } static enum btree_insert_ret -bch_insert_fixup_key(struct btree_insert *trans, +bch2_insert_fixup_key(struct btree_insert *trans, struct btree_insert_entry *insert) { struct btree_iter *iter = insert->iter; BUG_ON(iter->level); - if (bch_btree_bset_insert_key(iter, + if (bch2_btree_bset_insert_key(iter, iter->nodes[0], &iter->node_iters[0], insert->k)) - bch_btree_journal_key(trans, iter, insert->k); + bch2_btree_journal_key(trans, iter, insert->k); trans->did_work = true; return BTREE_INSERT_OK; @@ -850,7 +849,7 @@ bch_insert_fixup_key(struct btree_insert *trans, static void verify_keys_sorted(struct keylist *l) { -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG struct bkey_i *k; for_each_keylist_key(l, k) @@ -863,24 +862,24 @@ static void btree_node_lock_for_insert(struct btree *b, struct btree_iter *iter) { struct bch_fs *c = iter->c; - btree_node_lock_write(b, iter); + bch2_btree_node_lock_write(b, iter); if (btree_node_just_written(b) && - bch_btree_post_write_cleanup(c, b)) - bch_btree_iter_reinit_node(iter, b); + bch2_btree_post_write_cleanup(c, b)) + bch2_btree_iter_reinit_node(iter, b); /* * If the last bset has been written, or if it's gotten too big - start * a new bset to insert into: */ if (want_new_bset(c, b)) - bch_btree_init_next(c, b, iter); + bch2_btree_init_next(c, b, iter); } /* Asynchronous interior node update machinery */ struct btree_interior_update * -bch_btree_interior_update_alloc(struct bch_fs *c) +bch2_btree_interior_update_alloc(struct bch_fs *c) { struct btree_interior_update *as; @@ -890,7 +889,7 @@ bch_btree_interior_update_alloc(struct bch_fs *c) as->c = c; as->mode = BTREE_INTERIOR_NO_UPDATE; - bch_keylist_init(&as->parent_keys, as->inline_keys, + bch2_keylist_init(&as->parent_keys, as->inline_keys, ARRAY_SIZE(as->inline_keys)); mutex_lock(&c->btree_interior_update_lock); @@ -914,12 +913,12 @@ static void btree_interior_update_nodes_reachable(struct closure *cl) struct bch_fs *c = as->c; unsigned i; - bch_journal_pin_drop(&c->journal, &as->journal); + bch2_journal_pin_drop(&c->journal, &as->journal); mutex_lock(&c->btree_interior_update_lock); for (i = 0; i < as->nr_pending; i++) - bch_btree_node_free_ondisk(c, &as->pending[i]); + bch2_btree_node_free_ondisk(c, &as->pending[i]); as->nr_pending = 0; mutex_unlock(&c->btree_interior_update_lock); @@ -940,7 +939,7 @@ static void btree_interior_update_nodes_written(struct closure *cl) struct bch_fs *c = as->c; struct btree *b; - if (bch_journal_error(&c->journal)) { + if (bch2_journal_error(&c->journal)) { /* XXX what? 
*/ } @@ -975,7 +974,7 @@ retry: list_del(&as->write_blocked_list); if (list_empty(&b->write_blocked)) - bch_btree_node_write(c, b, NULL, SIX_LOCK_read, -1); + bch2_btree_node_write(c, b, NULL, SIX_LOCK_read, -1); six_unlock_read(&b->lock); break; @@ -1008,7 +1007,7 @@ retry: BUG_ON(c->btree_roots[b->btree_id].as != as); c->btree_roots[b->btree_id].as = NULL; - bch_btree_set_root_ondisk(c, b); + bch2_btree_set_root_ondisk(c, b); /* * We don't have to wait anything anything here (before @@ -1043,7 +1042,7 @@ static void btree_interior_update_updated_btree(struct bch_fs *c, mutex_unlock(&c->btree_interior_update_lock); - bch_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl); + bch2_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl); continue_at(&as->cl, btree_interior_update_nodes_written, system_freezable_wq); @@ -1078,7 +1077,7 @@ static void btree_interior_update_updated_root(struct bch_fs *c, mutex_unlock(&c->btree_interior_update_lock); - bch_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl); + bch2_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl); continue_at(&as->cl, btree_interior_update_nodes_written, system_freezable_wq); @@ -1089,7 +1088,7 @@ static void interior_update_flush(struct journal *j, struct journal_entry_pin *p struct btree_interior_update *as = container_of(pin, struct btree_interior_update, journal); - bch_journal_flush_seq_async(j, as->journal_seq, NULL); + bch2_journal_flush_seq_async(j, as->journal_seq, NULL); } /* @@ -1097,7 +1096,7 @@ static void interior_update_flush(struct journal *j, struct journal_entry_pin *p * nodes and thus outstanding btree_interior_updates - redirect @b's * btree_interior_updates to point to this btree_interior_update: */ -void bch_btree_interior_update_will_free_node(struct bch_fs *c, +void bch2_btree_interior_update_will_free_node(struct bch_fs *c, struct btree_interior_update *as, struct btree *b) { @@ -1124,10 +1123,10 @@ void bch_btree_interior_update_will_free_node(struct bch_fs *c, * oldest pin of any of the nodes we're freeing. 
We'll release the pin * when the new nodes are persistent and reachable on disk: */ - bch_journal_pin_add_if_older(&c->journal, + bch2_journal_pin_add_if_older(&c->journal, &b->writes[0].journal, &as->journal, interior_update_flush); - bch_journal_pin_add_if_older(&c->journal, + bch2_journal_pin_add_if_older(&c->journal, &b->writes[1].journal, &as->journal, interior_update_flush); @@ -1171,18 +1170,18 @@ static void btree_node_interior_verify(struct btree *b) BUG_ON(!b->level); - bch_btree_node_iter_init(&iter, b, b->key.k.p, false, false); + bch2_btree_node_iter_init(&iter, b, b->key.k.p, false, false); #if 1 - BUG_ON(!(k = bch_btree_node_iter_peek(&iter, b)) || + BUG_ON(!(k = bch2_btree_node_iter_peek(&iter, b)) || bkey_cmp_left_packed(b, k, &b->key.k.p)); - BUG_ON((bch_btree_node_iter_advance(&iter, b), - !bch_btree_node_iter_end(&iter))); + BUG_ON((bch2_btree_node_iter_advance(&iter, b), + !bch2_btree_node_iter_end(&iter))); #else const char *msg; msg = "not found"; - k = bch_btree_node_iter_peek(&iter, b); + k = bch2_btree_node_iter_peek(&iter, b); if (!k) goto err; @@ -1190,14 +1189,14 @@ static void btree_node_interior_verify(struct btree *b) if (bkey_cmp_left_packed(b, k, &b->key.k.p)) goto err; - bch_btree_node_iter_advance(&iter, b); + bch2_btree_node_iter_advance(&iter, b); msg = "isn't last key"; - if (!bch_btree_node_iter_end(&iter)) + if (!bch2_btree_node_iter_end(&iter)) goto err; return; err: - bch_dump_btree_node(b); + bch2_dump_btree_node(b); printk(KERN_ERR "last key %llu:%llu %s\n", b->key.k.p.inode, b->key.k.p.offset, msg); BUG(); @@ -1205,7 +1204,7 @@ err: } static enum btree_insert_ret -bch_btree_insert_keys_interior(struct btree *b, +bch2_btree_insert_keys_interior(struct btree *b, struct btree_iter *iter, struct keylist *insert_keys, struct btree_interior_update *as, @@ -1214,7 +1213,7 @@ bch_btree_insert_keys_interior(struct btree *b, struct bch_fs *c = iter->c; struct btree_iter *linked; struct btree_node_iter node_iter; - struct bkey_i *insert = bch_keylist_front(insert_keys); + struct bkey_i *insert = bch2_keylist_front(insert_keys); struct bkey_packed *k; BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level)); @@ -1226,7 +1225,7 @@ bch_btree_insert_keys_interior(struct btree *b, if (bch_keylist_u64s(insert_keys) > bch_btree_keys_u64s_remaining(c, b)) { - btree_node_unlock_write(b, iter); + bch2_btree_node_unlock_write(b, iter); return BTREE_INSERT_BTREE_NODE_FULL; } @@ -1238,31 +1237,31 @@ bch_btree_insert_keys_interior(struct btree *b, * the iterator's current position - they know the keys go in * the node the iterator points to: */ - while ((k = bch_btree_node_iter_prev_all(&node_iter, b)) && + while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) && (bkey_cmp_packed(b, k, &insert->k) >= 0)) ; - while (!bch_keylist_empty(insert_keys)) { - insert = bch_keylist_front(insert_keys); + while (!bch2_keylist_empty(insert_keys)) { + insert = bch2_keylist_front(insert_keys); - bch_insert_fixup_btree_ptr(iter, b, insert, + bch2_insert_fixup_btree_ptr(iter, b, insert, &node_iter, &res->disk_res); - bch_keylist_pop_front(insert_keys); + bch2_keylist_pop_front(insert_keys); } btree_interior_update_updated_btree(c, as, b); for_each_linked_btree_node(iter, b, linked) - bch_btree_node_iter_peek(&linked->node_iters[b->level], + bch2_btree_node_iter_peek(&linked->node_iters[b->level], b); - bch_btree_node_iter_peek(&iter->node_iters[b->level], b); + bch2_btree_node_iter_peek(&iter->node_iters[b->level], b); - bch_btree_iter_verify(iter, b); + 
bch2_btree_iter_verify(iter, b); - if (bch_maybe_compact_whiteouts(c, b)) - bch_btree_iter_reinit_node(iter, b); + if (bch2_maybe_compact_whiteouts(c, b)) + bch2_btree_iter_reinit_node(iter, b); - btree_node_unlock_write(b, iter); + bch2_btree_node_unlock_write(b, iter); btree_node_interior_verify(b); return BTREE_INSERT_OK; @@ -1280,7 +1279,7 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n struct bset *set1, *set2; struct bkey_packed *k, *prev = NULL; - n2 = bch_btree_node_alloc(iter->c, n1->level, iter->btree_id, reserve); + n2 = bch2_btree_node_alloc(iter->c, n1->level, iter->btree_id, reserve); n2->data->max_key = n1->data->max_key; n2->data->format = n1->format; n2->key.k.p = n1->key.k.p; @@ -1343,8 +1342,8 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n btree_node_reset_sib_u64s(n1); btree_node_reset_sib_u64s(n2); - bch_verify_btree_nr_keys(n1); - bch_verify_btree_nr_keys(n2); + bch2_verify_btree_nr_keys(n1); + bch2_verify_btree_nr_keys(n2); if (n1->level) { btree_node_interior_verify(n1); @@ -1370,24 +1369,24 @@ static void btree_split_insert_keys(struct btree_iter *iter, struct btree *b, struct btree_reserve *res) { struct btree_node_iter node_iter; - struct bkey_i *k = bch_keylist_front(keys); + struct bkey_i *k = bch2_keylist_front(keys); struct bkey_packed *p; struct bset *i; BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE); - bch_btree_node_iter_init(&node_iter, b, k->k.p, false, false); + bch2_btree_node_iter_init(&node_iter, b, k->k.p, false, false); - while (!bch_keylist_empty(keys)) { - k = bch_keylist_front(keys); + while (!bch2_keylist_empty(keys)) { + k = bch2_keylist_front(keys); BUG_ON(bch_keylist_u64s(keys) > bch_btree_keys_u64s_remaining(iter->c, b)); BUG_ON(bkey_cmp(k->k.p, b->data->min_key) < 0); BUG_ON(bkey_cmp(k->k.p, b->data->max_key) > 0); - bch_insert_fixup_btree_ptr(iter, b, k, &node_iter, &res->disk_res); - bch_keylist_pop_front(keys); + bch2_insert_fixup_btree_ptr(iter, b, k, &node_iter, &res->disk_res); + bch2_keylist_pop_front(keys); } /* @@ -1426,31 +1425,31 @@ static void btree_split(struct btree *b, struct btree_iter *iter, BUG_ON(!parent && (b != btree_node_root(c, b))); BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level)); - bch_btree_interior_update_will_free_node(c, as, b); + bch2_btree_interior_update_will_free_node(c, as, b); - n1 = btree_node_alloc_replacement(c, b, reserve); + n1 = bch2_btree_node_alloc_replacement(c, b, reserve); if (b->level) btree_split_insert_keys(iter, n1, insert_keys, reserve); if (vstruct_blocks(n1->data, c->block_bits) > BTREE_SPLIT_THRESHOLD(c)) { - trace_bcache_btree_node_split(c, b, b->nr.live_u64s); + trace_btree_node_split(c, b, b->nr.live_u64s); n2 = __btree_split_node(iter, n1, reserve); - bch_btree_build_aux_trees(n2); - bch_btree_build_aux_trees(n1); + bch2_btree_build_aux_trees(n2); + bch2_btree_build_aux_trees(n1); six_unlock_write(&n2->lock); six_unlock_write(&n1->lock); - bch_btree_node_write(c, n2, &as->cl, SIX_LOCK_intent, -1); + bch2_btree_node_write(c, n2, &as->cl, SIX_LOCK_intent, -1); /* * Note that on recursive parent_keys == insert_keys, so we * can't start adding new keys to parent_keys before emptying it * out (which we did with btree_split_insert_keys() above) */ - bch_keylist_add(&as->parent_keys, &n1->key); - bch_keylist_add(&as->parent_keys, &n2->key); + bch2_keylist_add(&as->parent_keys, &n1->key); + bch2_keylist_add(&as->parent_keys, &n2->key); if (!parent) { /* Depth increases, make a new root */ @@ 
-1462,58 +1461,58 @@ static void btree_split(struct btree *b, struct btree_iter *iter, btree_split_insert_keys(iter, n3, &as->parent_keys, reserve); - bch_btree_node_write(c, n3, &as->cl, SIX_LOCK_intent, -1); + bch2_btree_node_write(c, n3, &as->cl, SIX_LOCK_intent, -1); } } else { - trace_bcache_btree_node_compact(c, b, b->nr.live_u64s); + trace_btree_node_compact(c, b, b->nr.live_u64s); - bch_btree_build_aux_trees(n1); + bch2_btree_build_aux_trees(n1); six_unlock_write(&n1->lock); - bch_keylist_add(&as->parent_keys, &n1->key); + bch2_keylist_add(&as->parent_keys, &n1->key); } - bch_btree_node_write(c, n1, &as->cl, SIX_LOCK_intent, -1); + bch2_btree_node_write(c, n1, &as->cl, SIX_LOCK_intent, -1); /* New nodes all written, now make them visible: */ if (parent) { /* Split a non root node */ - bch_btree_insert_node(parent, iter, &as->parent_keys, + bch2_btree_insert_node(parent, iter, &as->parent_keys, reserve, as); } else if (n3) { - bch_btree_set_root(iter, n3, as, reserve); + bch2_btree_set_root(iter, n3, as, reserve); } else { /* Root filled up but didn't need to be split */ - bch_btree_set_root(iter, n1, as, reserve); + bch2_btree_set_root(iter, n1, as, reserve); } - btree_open_bucket_put(c, n1); + bch2_btree_open_bucket_put(c, n1); if (n2) - btree_open_bucket_put(c, n2); + bch2_btree_open_bucket_put(c, n2); if (n3) - btree_open_bucket_put(c, n3); + bch2_btree_open_bucket_put(c, n3); /* * Note - at this point other linked iterators could still have @b read - * locked; we're depending on the bch_btree_iter_node_replace() calls + * locked; we're depending on the bch2_btree_iter_node_replace() calls * below removing all references to @b so we don't return with other * iterators pointing to a node they have locked that's been freed. * - * We have to free the node first because the bch_iter_node_replace() + * We have to free the node first because the bch2_iter_node_replace() * calls will drop _our_ iterator's reference - and intent lock - to @b. */ - bch_btree_node_free_inmem(iter, b); + bch2_btree_node_free_inmem(iter, b); /* Successful split, update the iterator to point to the new nodes: */ if (n3) - bch_btree_iter_node_replace(iter, n3); + bch2_btree_iter_node_replace(iter, n3); if (n2) - bch_btree_iter_node_replace(iter, n2); - bch_btree_iter_node_replace(iter, n1); + bch2_btree_iter_node_replace(iter, n2); + bch2_btree_iter_node_replace(iter, n1); - bch_time_stats_update(&c->btree_split_time, start_time); + bch2_time_stats_update(&c->btree_split_time, start_time); } /** @@ -1528,7 +1527,7 @@ static void btree_split(struct btree *b, struct btree_iter *iter, * If a split occurred, this function will return early. This can only happen * for leaf nodes -- inserts into interior nodes have to be atomic. 
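/*
 * btree_split() above only creates a second node when the filled replacement
 * exceeds BTREE_SPLIT_THRESHOLD; __btree_split_node() then moves the keys past
 * the midpoint into n2 and fixes up both nodes' max_key before the parent (or
 * a new root) is updated.  The same shape over a plain sorted array, splitting
 * by key count rather than by u64s used, as a rough sketch:
 */
#include <stdio.h>

int main(void)
{
	unsigned keys[] = { 3, 8, 15, 21, 34, 55, 89 };
	unsigned nr  = sizeof(keys) / sizeof(keys[0]);
	unsigned mid = nr / 2;		/* keys [0, mid) stay in n1 */

	printf("n1: %u keys, max_key %u\n", mid,      keys[mid - 1]);
	printf("n2: %u keys, max_key %u\n", nr - mid, keys[nr - 1]);
	return 0;
}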
*/ -void bch_btree_insert_node(struct btree *b, +void bch2_btree_insert_node(struct btree *b, struct btree_iter *iter, struct keylist *insert_keys, struct btree_reserve *reserve, @@ -1537,7 +1536,7 @@ void bch_btree_insert_node(struct btree *b, BUG_ON(!b->level); BUG_ON(!reserve || !as); - switch (bch_btree_insert_keys_interior(b, iter, insert_keys, + switch (bch2_btree_insert_keys_interior(b, iter, insert_keys, as, reserve)) { case BTREE_INSERT_OK: break; @@ -1549,7 +1548,7 @@ void bch_btree_insert_node(struct btree *b, } } -static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags) +static int bch2_btree_split_leaf(struct btree_iter *iter, unsigned flags) { struct bch_fs *c = iter->c; struct btree *b = iter->nodes[0]; @@ -1562,7 +1561,7 @@ static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags) /* Hack, because gc and splitting nodes doesn't mix yet: */ if (!down_read_trylock(&c->gc_lock)) { - bch_btree_iter_unlock(iter); + bch2_btree_iter_unlock(iter); down_read(&c->gc_lock); } @@ -1570,16 +1569,16 @@ static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags) * XXX: figure out how far we might need to split, * instead of locking/reserving all the way to the root: */ - if (!bch_btree_iter_set_locks_want(iter, U8_MAX)) { + if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) { ret = -EINTR; goto out; } - reserve = bch_btree_reserve_get(c, b, 0, flags, &cl); + reserve = bch2_btree_reserve_get(c, b, 0, flags, &cl); if (IS_ERR(reserve)) { ret = PTR_ERR(reserve); if (ret == -EAGAIN) { - bch_btree_iter_unlock(iter); + bch2_btree_iter_unlock(iter); up_read(&c->gc_lock); closure_sync(&cl); return -EINTR; @@ -1587,12 +1586,12 @@ static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags) goto out; } - as = bch_btree_interior_update_alloc(c); + as = bch2_btree_interior_update_alloc(c); btree_split(b, iter, NULL, reserve, as); - bch_btree_reserve_put(c, reserve); + bch2_btree_reserve_put(c, reserve); - bch_btree_iter_set_locks_want(iter, 1); + bch2_btree_iter_set_locks_want(iter, 1); out: up_read(&c->gc_lock); return ret; @@ -1618,35 +1617,35 @@ static struct btree *btree_node_get_sibling(struct btree_iter *iter, if (!parent) return NULL; - if (!btree_node_relock(iter, level + 1)) { - bch_btree_iter_set_locks_want(iter, level + 2); + if (!bch2_btree_node_relock(iter, level + 1)) { + bch2_btree_iter_set_locks_want(iter, level + 2); return ERR_PTR(-EINTR); } node_iter = iter->node_iters[parent->level]; - k = bch_btree_node_iter_peek_all(&node_iter, parent); + k = bch2_btree_node_iter_peek_all(&node_iter, parent); BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p)); do { k = sib == btree_prev_sib - ? bch_btree_node_iter_prev_all(&node_iter, parent) - : (bch_btree_node_iter_advance(&node_iter, parent), - bch_btree_node_iter_peek_all(&node_iter, parent)); + ? 
bch2_btree_node_iter_prev_all(&node_iter, parent) + : (bch2_btree_node_iter_advance(&node_iter, parent), + bch2_btree_node_iter_peek_all(&node_iter, parent)); if (!k) return NULL; } while (bkey_deleted(k)); - bkey_unpack(parent, &tmp.k, k); + bch2_bkey_unpack(parent, &tmp.k, k); - ret = bch_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent); + ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent); if (IS_ERR(ret) && PTR_ERR(ret) == -EINTR) { btree_node_unlock(iter, level); - ret = bch_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent); + ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent); } - if (!IS_ERR(ret) && !btree_node_relock(iter, level)) { + if (!IS_ERR(ret) && !bch2_btree_node_relock(iter, level)) { six_unlock_intent(&ret->lock); ret = ERR_PTR(-EINTR); } @@ -1670,7 +1669,7 @@ static int __foreground_maybe_merge(struct btree_iter *iter, closure_init_stack(&cl); retry: - if (!btree_node_relock(iter, iter->level)) + if (!bch2_btree_node_relock(iter, iter->level)) return 0; b = iter->nodes[iter->level]; @@ -1703,10 +1702,10 @@ retry: next = m; } - bch_bkey_format_init(&new_s); - __bch_btree_calc_format(&new_s, b); - __bch_btree_calc_format(&new_s, m); - new_f = bch_bkey_format_done(&new_s); + bch2_bkey_format_init(&new_s); + __bch2_btree_calc_format(&new_s, b); + __bch2_btree_calc_format(&new_s, m); + new_f = bch2_bkey_format_done(&new_s); sib_u64s = btree_node_u64s_with_format(b, &new_f) + btree_node_u64s_with_format(m, &new_f); @@ -1728,7 +1727,7 @@ retry: /* We're changing btree topology, doesn't mix with gc: */ if (!down_read_trylock(&c->gc_lock)) { six_unlock_intent(&m->lock); - bch_btree_iter_unlock(iter); + bch2_btree_iter_unlock(iter); down_read(&c->gc_lock); up_read(&c->gc_lock); @@ -1736,12 +1735,12 @@ retry: goto out; } - if (!bch_btree_iter_set_locks_want(iter, U8_MAX)) { + if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) { ret = -EINTR; goto out_unlock; } - reserve = bch_btree_reserve_get(c, b, 0, + reserve = bch2_btree_reserve_get(c, b, 0, BTREE_INSERT_NOFAIL| BTREE_INSERT_USE_RESERVE, &cl); @@ -1750,12 +1749,12 @@ retry: goto out_unlock; } - as = bch_btree_interior_update_alloc(c); + as = bch2_btree_interior_update_alloc(c); - bch_btree_interior_update_will_free_node(c, as, b); - bch_btree_interior_update_will_free_node(c, as, m); + bch2_btree_interior_update_will_free_node(c, as, b); + bch2_btree_interior_update_will_free_node(c, as, m); - n = bch_btree_node_alloc(c, b->level, b->btree_id, reserve); + n = bch2_btree_node_alloc(c, b->level, b->btree_id, reserve); n->data->min_key = prev->data->min_key; n->data->max_key = next->data->max_key; n->data->format = new_f; @@ -1763,44 +1762,44 @@ retry: btree_node_set_format(n, new_f); - bch_btree_sort_into(c, n, prev); - bch_btree_sort_into(c, n, next); + bch2_btree_sort_into(c, n, prev); + bch2_btree_sort_into(c, n, next); - bch_btree_build_aux_trees(n); + bch2_btree_build_aux_trees(n); six_unlock_write(&n->lock); bkey_init(&delete.k); delete.k.p = prev->key.k.p; - bch_keylist_add(&as->parent_keys, &delete); - bch_keylist_add(&as->parent_keys, &n->key); + bch2_keylist_add(&as->parent_keys, &delete); + bch2_keylist_add(&as->parent_keys, &n->key); - bch_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1); + bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1); - bch_btree_insert_node(parent, iter, &as->parent_keys, reserve, as); + bch2_btree_insert_node(parent, iter, &as->parent_keys, reserve, as); - btree_open_bucket_put(c, n); - bch_btree_node_free_inmem(iter, b); - 
bch_btree_node_free_inmem(iter, m); - bch_btree_iter_node_replace(iter, n); + bch2_btree_open_bucket_put(c, n); + bch2_btree_node_free_inmem(iter, b); + bch2_btree_node_free_inmem(iter, m); + bch2_btree_iter_node_replace(iter, n); - bch_btree_iter_verify(iter, n); + bch2_btree_iter_verify(iter, n); - bch_btree_reserve_put(c, reserve); + bch2_btree_reserve_put(c, reserve); out_unlock: if (ret != -EINTR && ret != -EAGAIN) - bch_btree_iter_set_locks_want(iter, 1); + bch2_btree_iter_set_locks_want(iter, 1); six_unlock_intent(&m->lock); up_read(&c->gc_lock); out: if (ret == -EAGAIN || ret == -EINTR) { - bch_btree_iter_unlock(iter); + bch2_btree_iter_unlock(iter); ret = -EINTR; } closure_sync(&cl); if (ret == -EINTR) { - ret = bch_btree_iter_traverse(iter); + ret = bch2_btree_iter_traverse(iter); if (!ret) goto retry; } @@ -1840,8 +1839,8 @@ btree_insert_key(struct btree_insert *trans, int live_u64s_added, u64s_added; ret = !btree_node_is_extents(b) - ? bch_insert_fixup_key(trans, insert) - : bch_insert_fixup_extent(trans, insert); + ? bch2_insert_fixup_key(trans, insert) + : bch2_insert_fixup_extent(trans, insert); live_u64s_added = (int) b->nr.live_u64s - old_live_u64s; u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s; @@ -1852,10 +1851,10 @@ btree_insert_key(struct btree_insert *trans, b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added); if (u64s_added > live_u64s_added && - bch_maybe_compact_whiteouts(iter->c, b)) - bch_btree_iter_reinit_node(iter, b); + bch2_maybe_compact_whiteouts(iter->c, b)) + bch2_btree_iter_reinit_node(iter, b); - trace_bcache_btree_insert_key(c, b, insert->k); + trace_btree_insert_key(c, b, insert->k); return ret; } @@ -1888,7 +1887,7 @@ static void multi_unlock_write(struct btree_insert *trans) trans_for_each_entry(trans, i) if (!same_leaf_as_prev(trans, i)) - btree_node_unlock_write(i->iter->nodes[0], i->iter); + bch2_btree_node_unlock_write(i->iter->nodes[0], i->iter); } static int btree_trans_entry_cmp(const void *_l, const void *_r) @@ -1912,7 +1911,7 @@ static int btree_trans_entry_cmp(const void *_l, const void *_r) * -EROFS: filesystem read only * -EIO: journal or btree node IO error */ -int __bch_btree_insert_at(struct btree_insert *trans) +int __bch2_btree_insert_at(struct btree_insert *trans) { struct bch_fs *c = trans->c; struct btree_insert_entry *i; @@ -1934,7 +1933,7 @@ int __bch_btree_insert_at(struct btree_insert *trans) retry_locks: ret = -EINTR; trans_for_each_entry(trans, i) - if (!bch_btree_iter_set_locks_want(i->iter, 1)) + if (!bch2_btree_iter_set_locks_want(i->iter, 1)) goto err; retry: trans->did_work = false; @@ -1946,7 +1945,7 @@ retry: memset(&trans->journal_res, 0, sizeof(trans->journal_res)); ret = !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY) - ? bch_journal_res_get(&c->journal, + ? 
bch2_journal_res_get(&c->journal, &trans->journal_res, u64s, u64s) : 0; @@ -1962,14 +1961,14 @@ retry: u64s = 0; /* - * bch_btree_node_insert_fits() must be called under write lock: + * bch2_btree_node_insert_fits() must be called under write lock: * with only an intent lock, another thread can still call - * bch_btree_node_write(), converting an unwritten bset to a + * bch2_btree_node_write(), converting an unwritten bset to a * written one */ if (!i->done) { u64s += i->k->k.u64s + i->extra_res; - if (!bch_btree_node_insert_fits(c, + if (!bch2_btree_node_insert_fits(c, i->iter->nodes[0], u64s)) { split = i->iter; goto unlock; @@ -2015,7 +2014,7 @@ retry: } unlock: multi_unlock_write(trans); - bch_journal_res_put(&c->journal, &trans->journal_res); + bch2_journal_res_put(&c->journal, &trans->journal_res); if (split) goto split; @@ -2037,7 +2036,7 @@ unlock: } out: /* make sure we didn't lose an error: */ - if (!ret && IS_ENABLED(CONFIG_BCACHE_DEBUG)) + if (!ret && IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) trans_for_each_entry(trans, i) BUG_ON(!i->done); @@ -2049,7 +2048,7 @@ split: * allocating new btree nodes, and holding a journal reservation * potentially blocks the allocator: */ - ret = bch_btree_split_leaf(split, trans->flags); + ret = bch2_btree_split_leaf(split, trans->flags); if (ret) goto err; /* @@ -2066,7 +2065,7 @@ err: if (ret == -EINTR) { trans_for_each_entry(trans, i) { - int ret2 = bch_btree_iter_traverse(i->iter); + int ret2 = bch2_btree_iter_traverse(i->iter); if (ret2) { ret = ret2; goto out; @@ -2084,29 +2083,29 @@ err: goto out; } -int bch_btree_insert_list_at(struct btree_iter *iter, +int bch2_btree_insert_list_at(struct btree_iter *iter, struct keylist *keys, struct disk_reservation *disk_res, struct extent_insert_hook *hook, u64 *journal_seq, unsigned flags) { BUG_ON(flags & BTREE_INSERT_ATOMIC); - BUG_ON(bch_keylist_empty(keys)); + BUG_ON(bch2_keylist_empty(keys)); verify_keys_sorted(keys); - while (!bch_keylist_empty(keys)) { + while (!bch2_keylist_empty(keys)) { /* need to traverse between each insert */ - int ret = bch_btree_iter_traverse(iter); + int ret = bch2_btree_iter_traverse(iter); if (ret) return ret; - ret = bch_btree_insert_at(iter->c, disk_res, hook, + ret = bch2_btree_insert_at(iter->c, disk_res, hook, journal_seq, flags, - BTREE_INSERT_ENTRY(iter, bch_keylist_front(keys))); + BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys))); if (ret) return ret; - bch_keylist_pop_front(keys); + bch2_keylist_pop_front(keys); } return 0; @@ -2124,7 +2123,7 @@ int bch_btree_insert_list_at(struct btree_iter *iter, * -EAGAIN: @iter->cl was put on a waitlist waiting for btree node allocation * -EINTR: btree node was changed while upgrading to write lock */ -int bch_btree_insert_check_key(struct btree_iter *iter, +int bch2_btree_insert_check_key(struct btree_iter *iter, struct bkey_i *check_key) { struct bpos saved_pos = iter->pos; @@ -2142,11 +2141,11 @@ int bch_btree_insert_check_key(struct btree_iter *iter, bkey_copy(&tmp.key, check_key); - ret = bch_btree_insert_at(iter->c, NULL, NULL, NULL, + ret = bch2_btree_insert_at(iter->c, NULL, NULL, NULL, BTREE_INSERT_ATOMIC, BTREE_INSERT_ENTRY(iter, &tmp.key)); - bch_btree_iter_rewind(iter, saved_pos); + bch2_btree_iter_rewind(iter, saved_pos); return ret; } @@ -2158,7 +2157,7 @@ int bch_btree_insert_check_key(struct btree_iter *iter, * @insert_keys: list of keys to insert * @hook: insert callback */ -int bch_btree_insert(struct bch_fs *c, enum btree_id id, +int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i 
*k, struct disk_reservation *disk_res, struct extent_insert_hook *hook, @@ -2167,24 +2166,24 @@ int bch_btree_insert(struct bch_fs *c, enum btree_id id, struct btree_iter iter; int ret, ret2; - bch_btree_iter_init_intent(&iter, c, id, bkey_start_pos(&k->k)); + bch2_btree_iter_init_intent(&iter, c, id, bkey_start_pos(&k->k)); - ret = bch_btree_iter_traverse(&iter); + ret = bch2_btree_iter_traverse(&iter); if (unlikely(ret)) goto out; - ret = bch_btree_insert_at(c, disk_res, hook, journal_seq, flags, + ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags, BTREE_INSERT_ENTRY(&iter, k)); -out: ret2 = bch_btree_iter_unlock(&iter); +out: ret2 = bch2_btree_iter_unlock(&iter); return ret ?: ret2; } /** - * bch_btree_update - like bch_btree_insert(), but asserts that we're + * bch_btree_update - like bch2_btree_insert(), but asserts that we're * overwriting an existing key */ -int bch_btree_update(struct bch_fs *c, enum btree_id id, +int bch2_btree_update(struct bch_fs *c, enum btree_id id, struct bkey_i *k, u64 *journal_seq) { struct btree_iter iter; @@ -2193,21 +2192,21 @@ int bch_btree_update(struct bch_fs *c, enum btree_id id, EBUG_ON(id == BTREE_ID_EXTENTS); - bch_btree_iter_init_intent(&iter, c, id, k->k.p); + bch2_btree_iter_init_intent(&iter, c, id, k->k.p); - u = bch_btree_iter_peek_with_holes(&iter); + u = bch2_btree_iter_peek_with_holes(&iter); ret = btree_iter_err(u); if (ret) return ret; if (bkey_deleted(u.k)) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return -ENOENT; } - ret = bch_btree_insert_at(c, NULL, NULL, journal_seq, 0, + ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, 0, BTREE_INSERT_ENTRY(&iter, k)); - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret; } @@ -2216,7 +2215,7 @@ int bch_btree_update(struct bch_fs *c, enum btree_id id, * * Range is a half open interval - [start, end) */ -int bch_btree_delete_range(struct bch_fs *c, enum btree_id id, +int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id, struct bpos start, struct bpos end, struct bversion version, @@ -2228,9 +2227,9 @@ int bch_btree_delete_range(struct bch_fs *c, enum btree_id id, struct bkey_s_c k; int ret = 0; - bch_btree_iter_init_intent(&iter, c, id, start); + bch2_btree_iter_init_intent(&iter, c, id, start); - while ((k = bch_btree_iter_peek(&iter)).k && + while ((k = bch2_btree_iter_peek(&iter)).k && !(ret = btree_iter_err(k))) { unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits); /* really shouldn't be using a bare, unpadded bkey_i */ @@ -2248,7 +2247,7 @@ int bch_btree_delete_range(struct bch_fs *c, enum btree_id id, * because the range we want to delete could start in the middle * of k. * - * (bch_btree_iter_peek() does guarantee that iter.pos >= + * (bch2_btree_iter_peek() does guarantee that iter.pos >= * bkey_start_pos(k.k)). 
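/*
 * bch2_btree_delete_range() above deletes the half-open interval [start, end):
 * it repeatedly takes the next key at or after the iterator position, emits
 * the largest delete/discard it can (bounded by max_sectors for extents), and
 * trims it so it never crosses `end`.  The same clamp-and-advance loop over
 * plain sector numbers, as a sketch:
 */
#include <stdio.h>

#define MAX_DELETE_SECTORS	128	/* stand-in for KEY_SIZE_MAX & ~block mask */

int main(void)
{
	unsigned long start = 100, end = 420;	/* delete [100, 420) */
	unsigned long pos   = start;

	while (pos < end) {
		unsigned long sectors = end - pos;

		if (sectors > MAX_DELETE_SECTORS)
			sectors = MAX_DELETE_SECTORS;

		printf("discard [%lu, %lu)\n", pos, pos + sectors);
		pos += sectors;
	}
	return 0;
}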
*/ delete.k.p = iter.pos; @@ -2265,20 +2264,20 @@ int bch_btree_delete_range(struct bch_fs *c, enum btree_id id, delete.k.type = KEY_TYPE_DISCARD; /* create the biggest key we can */ - bch_key_resize(&delete.k, max_sectors); - bch_cut_back(end, &delete.k); + bch2_key_resize(&delete.k, max_sectors); + bch2_cut_back(end, &delete.k); } - ret = bch_btree_insert_at(c, disk_res, hook, journal_seq, + ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, BTREE_INSERT_NOFAIL, BTREE_INSERT_ENTRY(&iter, &delete)); if (ret) break; - bch_btree_iter_cond_resched(&iter); + bch2_btree_iter_cond_resched(&iter); } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret; } @@ -2288,7 +2287,7 @@ int bch_btree_delete_range(struct bch_fs *c, enum btree_id id, * Returns 0 on success, -EINTR or -EAGAIN on failure (i.e. * btree_check_reserve() has to wait) */ -int bch_btree_node_rewrite(struct btree_iter *iter, struct btree *b, +int bch2_btree_node_rewrite(struct btree_iter *iter, struct btree *b, struct closure *cl) { struct bch_fs *c = iter->c; @@ -2304,42 +2303,42 @@ int bch_btree_node_rewrite(struct btree_iter *iter, struct btree *b, if (cl) flags |= BTREE_INSERT_USE_RESERVE; - if (!bch_btree_iter_set_locks_want(iter, U8_MAX)) + if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) return -EINTR; - reserve = bch_btree_reserve_get(c, b, 0, flags, cl); + reserve = bch2_btree_reserve_get(c, b, 0, flags, cl); if (IS_ERR(reserve)) { - trace_bcache_btree_gc_rewrite_node_fail(c, b); + trace_btree_gc_rewrite_node_fail(c, b); return PTR_ERR(reserve); } - as = bch_btree_interior_update_alloc(c); + as = bch2_btree_interior_update_alloc(c); - bch_btree_interior_update_will_free_node(c, as, b); + bch2_btree_interior_update_will_free_node(c, as, b); - n = btree_node_alloc_replacement(c, b, reserve); + n = bch2_btree_node_alloc_replacement(c, b, reserve); - bch_btree_build_aux_trees(n); + bch2_btree_build_aux_trees(n); six_unlock_write(&n->lock); - trace_bcache_btree_gc_rewrite_node(c, b); + trace_btree_gc_rewrite_node(c, b); - bch_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1); + bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1); if (parent) { - bch_btree_insert_node(parent, iter, + bch2_btree_insert_node(parent, iter, &keylist_single(&n->key), reserve, as); } else { - bch_btree_set_root(iter, n, as, reserve); + bch2_btree_set_root(iter, n, as, reserve); } - btree_open_bucket_put(c, n); + bch2_btree_open_bucket_put(c, n); - bch_btree_node_free_inmem(iter, b); + bch2_btree_node_free_inmem(iter, b); - BUG_ON(!bch_btree_iter_node_replace(iter, n)); + BUG_ON(!bch2_btree_iter_node_replace(iter, n)); - bch_btree_reserve_put(c, reserve); + bch2_btree_reserve_put(c, reserve); return 0; } diff --git a/libbcache/btree_update.h b/libbcachefs/btree_update.h index 0be71862..b18c44c7 100644 --- a/libbcache/btree_update.h +++ b/libbcachefs/btree_update.h @@ -24,8 +24,8 @@ struct btree_reserve { struct btree *b[BTREE_RESERVE_MAX]; }; -void __bch_btree_calc_format(struct bkey_format_state *, struct btree *); -bool bch_btree_node_format_fits(struct bch_fs *c, struct btree *, +void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *); +bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *, struct bkey_format *); /* Btree node freeing/allocation: */ @@ -130,40 +130,37 @@ struct btree_interior_update { list_for_each_entry(as, &c->btree_interior_update_list, list) \ for (p = as->pending; p < as->pending + as->nr_pending; p++) -void bch_btree_node_free_inmem(struct btree_iter *, 
struct btree *); -void bch_btree_node_free_never_inserted(struct bch_fs *, struct btree *); +void bch2_btree_node_free_inmem(struct btree_iter *, struct btree *); +void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *); +void bch2_btree_open_bucket_put(struct bch_fs *c, struct btree *); -void btree_open_bucket_put(struct bch_fs *c, struct btree *); - -struct btree *__btree_node_alloc_replacement(struct bch_fs *, +struct btree *__bch2_btree_node_alloc_replacement(struct bch_fs *, struct btree *, struct bkey_format, struct btree_reserve *); -struct btree *btree_node_alloc_replacement(struct bch_fs *, struct btree *, - struct btree_reserve *); struct btree_interior_update * -bch_btree_interior_update_alloc(struct bch_fs *); +bch2_btree_interior_update_alloc(struct bch_fs *); -void bch_btree_interior_update_will_free_node(struct bch_fs *, +void bch2_btree_interior_update_will_free_node(struct bch_fs *, struct btree_interior_update *, struct btree *); -void bch_btree_set_root_initial(struct bch_fs *, struct btree *, +void bch2_btree_set_root_initial(struct bch_fs *, struct btree *, struct btree_reserve *); -void bch_btree_reserve_put(struct bch_fs *, struct btree_reserve *); -struct btree_reserve *bch_btree_reserve_get(struct bch_fs *, +void bch2_btree_reserve_put(struct bch_fs *, struct btree_reserve *); +struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *, struct btree *, unsigned, unsigned, struct closure *); -int bch_btree_root_alloc(struct bch_fs *, enum btree_id, struct closure *); +int bch2_btree_root_alloc(struct bch_fs *, enum btree_id, struct closure *); /* Inserting into a given leaf node (last stage of insert): */ -bool bch_btree_bset_insert_key(struct btree_iter *, struct btree *, +bool bch2_btree_bset_insert_key(struct btree_iter *, struct btree *, struct btree_node_iter *, struct bkey_i *); -void bch_btree_journal_key(struct btree_insert *trans, struct btree_iter *, +void bch2_btree_journal_key(struct btree_insert *trans, struct btree_iter *, struct bkey_i *); static inline void *btree_data_end(struct bch_fs *c, struct btree *b) @@ -256,12 +253,12 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c, * write lock must be held on @b (else the dirty bset that we were going to * insert into could be written out from under us) */ -static inline bool bch_btree_node_insert_fits(struct bch_fs *c, +static inline bool bch2_btree_node_insert_fits(struct bch_fs *c, struct btree *b, unsigned u64s) { if (btree_node_is_extents(b)) { /* The insert key might split an existing key - * (bch_insert_fixup_extent() -> BCH_EXTENT_OVERLAP_MIDDLE case: + * (bch2_insert_fixup_extent() -> BCH_EXTENT_OVERLAP_MIDDLE case: */ u64s += BKEY_EXTENT_U64s_MAX; } @@ -290,7 +287,7 @@ static inline void reserve_whiteout(struct btree *b, struct bset_tree *t, } } -void bch_btree_insert_node(struct btree *, struct btree_iter *, +void bch2_btree_insert_node(struct btree *, struct btree_iter *, struct keylist *, struct btree_reserve *, struct btree_interior_update *as); @@ -318,7 +315,7 @@ struct btree_insert { } *entries; }; -int __bch_btree_insert_at(struct btree_insert *); +int __bch2_btree_insert_at(struct btree_insert *); #define _TENTH_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9, N, ...) N @@ -352,9 +349,9 @@ int __bch_btree_insert_at(struct btree_insert *); * -EROFS: filesystem read only * -EIO: journal or btree node IO error */ -#define bch_btree_insert_at(_c, _disk_res, _hook, \ +#define bch2_btree_insert_at(_c, _disk_res, _hook, \ _journal_seq, _flags, ...) 
\ - __bch_btree_insert_at(&(struct btree_insert) { \ + __bch2_btree_insert_at(&(struct btree_insert) { \ .c = (_c), \ .disk_res = (_disk_res), \ .journal_seq = (_journal_seq), \ @@ -383,7 +380,7 @@ int __bch_btree_insert_at(struct btree_insert *); */ #define BTREE_INSERT_JOURNAL_REPLAY (1 << 3) -int bch_btree_insert_list_at(struct btree_iter *, struct keylist *, +int bch2_btree_insert_list_at(struct btree_iter *, struct keylist *, struct disk_reservation *, struct extent_insert_hook *, u64 *, unsigned); @@ -406,19 +403,19 @@ static inline bool journal_res_insert_fits(struct btree_insert *trans, return u64s <= trans->journal_res.u64s; } -int bch_btree_insert_check_key(struct btree_iter *, struct bkey_i *); -int bch_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *, +int bch2_btree_insert_check_key(struct btree_iter *, struct bkey_i *); +int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *, struct disk_reservation *, struct extent_insert_hook *, u64 *, int flags); -int bch_btree_update(struct bch_fs *, enum btree_id, +int bch2_btree_update(struct bch_fs *, enum btree_id, struct bkey_i *, u64 *); -int bch_btree_delete_range(struct bch_fs *, enum btree_id, +int bch2_btree_delete_range(struct bch_fs *, enum btree_id, struct bpos, struct bpos, struct bversion, struct disk_reservation *, struct extent_insert_hook *, u64 *); -int bch_btree_node_rewrite(struct btree_iter *, struct btree *, struct closure *); +int bch2_btree_node_rewrite(struct btree_iter *, struct btree *, struct closure *); #endif /* _BCACHE_BTREE_INSERT_H */ diff --git a/libbcache/buckets.c b/libbcachefs/buckets.c index 7be943d1..396251d5 100644 --- a/libbcache/buckets.c +++ b/libbcachefs/buckets.c @@ -62,24 +62,24 @@ * - free => metadata: cannot happen */ -#include "bcache.h" +#include "bcachefs.h" #include "alloc.h" #include "btree_gc.h" #include "buckets.h" #include "error.h" #include <linux/preempt.h> -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> #ifdef DEBUG_BUCKETS #define lg_local_lock lg_global_lock #define lg_local_unlock lg_global_unlock -static void bch_fs_stats_verify(struct bch_fs *c) +static void bch2_fs_stats_verify(struct bch_fs *c) { struct bch_fs_usage stats = - __bch_fs_usage_read(c); + __bch2_fs_usage_read(c); if ((s64) stats.sectors_dirty < 0) panic("sectors_dirty underflow: %lli\n", stats.sectors_dirty); @@ -99,7 +99,7 @@ static void bch_fs_stats_verify(struct bch_fs *c) #else -static void bch_fs_stats_verify(struct bch_fs *c) {} +static void bch2_fs_stats_verify(struct bch_fs *c) {} #endif @@ -107,7 +107,7 @@ static void bch_fs_stats_verify(struct bch_fs *c) {} * Clear journal_seq_valid for buckets for which it's not needed, to prevent * wraparound: */ -void bch_bucket_seq_cleanup(struct bch_fs *c) +void bch2_bucket_seq_cleanup(struct bch_fs *c) { u16 last_seq_ondisk = c->journal.last_seq_ondisk; struct bch_dev *ca; @@ -127,7 +127,7 @@ void bch_bucket_seq_cleanup(struct bch_fs *c) } } -#define bch_usage_add(_acc, _stats) \ +#define bch2_usage_add(_acc, _stats) \ do { \ typeof(_acc) _a = (_acc), _s = (_stats); \ unsigned i; \ @@ -136,18 +136,18 @@ do { \ ((u64 *) (_a))[i] += ((u64 *) (_s))[i]; \ } while (0) -#define bch_usage_read_raw(_stats) \ +#define bch2_usage_read_raw(_stats) \ ({ \ typeof(*this_cpu_ptr(_stats)) _acc = { 0 }; \ int cpu; \ \ for_each_possible_cpu(cpu) \ - bch_usage_add(&_acc, per_cpu_ptr((_stats), cpu)); \ + bch2_usage_add(&_acc, per_cpu_ptr((_stats), cpu)); \ \ _acc; \ }) -#define bch_usage_read_cached(_c, _cached, _uncached) \ 
+#define bch2_usage_read_cached(_c, _cached, _uncached) \ ({ \ typeof(_cached) _ret; \ unsigned _seq; \ @@ -155,35 +155,35 @@ do { \ do { \ _seq = read_seqcount_begin(&(_c)->gc_pos_lock); \ _ret = (_c)->gc_pos.phase == GC_PHASE_DONE \ - ? bch_usage_read_raw(_uncached) \ + ? bch2_usage_read_raw(_uncached) \ : (_cached); \ } while (read_seqcount_retry(&(_c)->gc_pos_lock, _seq)); \ \ _ret; \ }) -struct bch_dev_usage __bch_dev_usage_read(struct bch_dev *ca) +struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *ca) { - return bch_usage_read_raw(ca->usage_percpu); + return bch2_usage_read_raw(ca->usage_percpu); } -struct bch_dev_usage bch_dev_usage_read(struct bch_dev *ca) +struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca) { - return bch_usage_read_cached(ca->fs, + return bch2_usage_read_cached(ca->fs, ca->usage_cached, ca->usage_percpu); } struct bch_fs_usage -__bch_fs_usage_read(struct bch_fs *c) +__bch2_fs_usage_read(struct bch_fs *c) { - return bch_usage_read_raw(c->usage_percpu); + return bch2_usage_read_raw(c->usage_percpu); } struct bch_fs_usage -bch_fs_usage_read(struct bch_fs *c) +bch2_fs_usage_read(struct bch_fs *c) { - return bch_usage_read_cached(c, + return bch2_usage_read_cached(c, c->usage_cached, c->usage_percpu); } @@ -218,7 +218,7 @@ static bool bucket_became_unavailable(struct bch_fs *c, c && c->gc_pos.phase == GC_PHASE_DONE; } -void bch_fs_usage_apply(struct bch_fs *c, +void bch2_fs_usage_apply(struct bch_fs *c, struct bch_fs_usage *stats, struct disk_reservation *disk_res, struct gc_pos gc_pos) @@ -247,15 +247,15 @@ void bch_fs_usage_apply(struct bch_fs *c, stats->online_reserved = 0; if (!gc_will_visit(c, gc_pos)) - bch_usage_add(this_cpu_ptr(c->usage_percpu), stats); + bch2_usage_add(this_cpu_ptr(c->usage_percpu), stats); - bch_fs_stats_verify(c); + bch2_fs_stats_verify(c); lg_local_unlock(&c->usage_lock); memset(stats, 0, sizeof(*stats)); } -static void bch_fs_usage_update(struct bch_fs_usage *fs_usage, +static void bch2_fs_usage_update(struct bch_fs_usage *fs_usage, struct bucket_mark old, struct bucket_mark new) { fs_usage->s[S_COMPRESSED][S_CACHED] += @@ -266,13 +266,13 @@ static void bch_fs_usage_update(struct bch_fs_usage *fs_usage, new.dirty_sectors; } -static void bch_dev_usage_update(struct bch_dev *ca, +static void bch2_dev_usage_update(struct bch_dev *ca, struct bucket_mark old, struct bucket_mark new) { struct bch_fs *c = ca->fs; struct bch_dev_usage *dev_usage; - bch_fs_inconsistent_on(old.data_type && new.data_type && + bch2_fs_inconsistent_on(old.data_type && new.data_type && old.data_type != new.data_type, c, "different types of metadata in same bucket: %u, %u", old.data_type, new.data_type); @@ -295,18 +295,18 @@ static void bch_dev_usage_update(struct bch_dev *ca, preempt_enable(); if (!is_available_bucket(old) && is_available_bucket(new)) - bch_wake_allocator(ca); + bch2_wake_allocator(ca); } #define bucket_data_cmpxchg(ca, g, new, expr) \ ({ \ struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \ \ - bch_dev_usage_update(ca, _old, new); \ + bch2_dev_usage_update(ca, _old, new); \ _old; \ }) -void bch_invalidate_bucket(struct bch_dev *ca, struct bucket *g) +void bch2_invalidate_bucket(struct bch_dev *ca, struct bucket *g) { struct bch_fs_usage stats = { 0 }; struct bucket_mark old, new; @@ -322,14 +322,14 @@ void bch_invalidate_bucket(struct bch_dev *ca, struct bucket *g) })); /* XXX: we're not actually updating fs usage's cached sectors... 
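/*
 * bch2_usage_add() and bch2_usage_read_raw() above exploit the fact that the
 * usage structs are nothing but u64 counters: one copy is kept per CPU, and a
 * read sums the copies field by field by treating each struct as a flat u64
 * array.  A userspace sketch of that summation (the struct layout here is
 * illustrative, not the real bch_fs_usage):
 */
#include <stdint.h>
#include <stdio.h>

struct usage {
	uint64_t	sectors_dirty;
	uint64_t	sectors_cached;
	uint64_t	online_reserved;
};

static void usage_add(struct usage *acc, const struct usage *src)
{
	uint64_t *a = (uint64_t *) acc;
	const uint64_t *s = (const uint64_t *) src;

	for (unsigned i = 0; i < sizeof(*acc) / sizeof(uint64_t); i++)
		a[i] += s[i];
}

int main(void)
{
	struct usage percpu[4] = {
		{ 10, 1, 0 }, { 20, 2, 0 }, { 30, 3, 0 }, { 40, 4, 0 },
	};
	struct usage total = { 0 };

	for (unsigned cpu = 0; cpu < 4; cpu++)
		usage_add(&total, &percpu[cpu]);

	printf("dirty=%llu cached=%llu\n",
	       (unsigned long long) total.sectors_dirty,
	       (unsigned long long) total.sectors_cached);	/* 100 and 10 */
	return 0;
}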
*/ - bch_fs_usage_update(&stats, old, new); + bch2_fs_usage_update(&stats, old, new); if (!old.owned_by_allocator && old.cached_sectors) - trace_bcache_invalidate(ca, g - ca->buckets, + trace_invalidate(ca, g - ca->buckets, old.cached_sectors); } -void bch_mark_free_bucket(struct bch_dev *ca, struct bucket *g) +void bch2_mark_free_bucket(struct bch_dev *ca, struct bucket *g) { struct bucket_mark old, new; @@ -343,7 +343,7 @@ void bch_mark_free_bucket(struct bch_dev *ca, struct bucket *g) BUG_ON(bucket_became_unavailable(ca->fs, old, new)); } -void bch_mark_alloc_bucket(struct bch_dev *ca, struct bucket *g, +void bch2_mark_alloc_bucket(struct bch_dev *ca, struct bucket *g, bool owned_by_allocator) { struct bucket_mark new; @@ -353,7 +353,7 @@ void bch_mark_alloc_bucket(struct bch_dev *ca, struct bucket *g, })); } -void bch_mark_metadata_bucket(struct bch_dev *ca, struct bucket *g, +void bch2_mark_metadata_bucket(struct bch_dev *ca, struct bucket *g, enum bucket_data_type type, bool may_make_unavailable) { @@ -381,7 +381,7 @@ do { \ dst += (src); \ else { \ dst = (max); \ - trace_bcache_sectors_saturated(ca); \ + trace_sectors_saturated(ca); \ } \ } while (0) @@ -418,7 +418,7 @@ static unsigned __compressed_sectors(const union bch_extent_crc *crc, unsigned s * loop, to avoid racing with the start of gc clearing all the marks - GC does * that with the gc pos seqlock held. */ -static void bch_mark_pointer(struct bch_fs *c, +static void bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c_extent e, const union bch_extent_crc *crc, const struct bch_extent_ptr *ptr, @@ -509,7 +509,7 @@ static void bch_mark_pointer(struct bch_fs *c, &ca->saturated_count) >= ca->free_inc.size << ca->bucket_bits) { if (c->gc_thread) { - trace_bcache_gc_sectors_saturated(c); + trace_gc_sectors_saturated(c); wake_up_process(c->gc_thread); } } @@ -518,7 +518,7 @@ out: stats->s[S_UNCOMPRESSED][type] += sectors; } -static void bch_mark_extent(struct bch_fs *c, struct bkey_s_c_extent e, +static void bch2_mark_extent(struct bch_fs *c, struct bkey_s_c_extent e, s64 sectors, bool metadata, bool may_make_unavailable, struct bch_fs_usage *stats, @@ -532,13 +532,13 @@ static void bch_mark_extent(struct bch_fs *c, struct bkey_s_c_extent e, BUG_ON(!sectors); extent_for_each_ptr_crc(e, ptr, crc) - bch_mark_pointer(c, e, crc, ptr, sectors, + bch2_mark_pointer(c, e, crc, ptr, sectors, ptr->cached ? 
S_CACHED : type, may_make_unavailable, stats, gc_will_visit, journal_seq); } -static void __bch_mark_key(struct bch_fs *c, struct bkey_s_c k, +static void __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k, s64 sectors, bool metadata, bool may_make_unavailable, struct bch_fs_usage *stats, @@ -547,7 +547,7 @@ static void __bch_mark_key(struct bch_fs *c, struct bkey_s_c k, switch (k.k->type) { case BCH_EXTENT: case BCH_EXTENT_CACHED: - bch_mark_extent(c, bkey_s_c_to_extent(k), sectors, metadata, + bch2_mark_extent(c, bkey_s_c_to_extent(k), sectors, metadata, may_make_unavailable, stats, gc_will_visit, journal_seq); break; @@ -560,26 +560,26 @@ static void __bch_mark_key(struct bch_fs *c, struct bkey_s_c k, } } -void __bch_gc_mark_key(struct bch_fs *c, struct bkey_s_c k, +void __bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k, s64 sectors, bool metadata, struct bch_fs_usage *stats) { - __bch_mark_key(c, k, sectors, metadata, true, stats, false, 0); + __bch2_mark_key(c, k, sectors, metadata, true, stats, false, 0); } -void bch_gc_mark_key(struct bch_fs *c, struct bkey_s_c k, +void bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k, s64 sectors, bool metadata) { struct bch_fs_usage stats = { 0 }; - __bch_gc_mark_key(c, k, sectors, metadata, &stats); + __bch2_gc_mark_key(c, k, sectors, metadata, &stats); preempt_disable(); - bch_usage_add(this_cpu_ptr(c->usage_percpu), &stats); + bch2_usage_add(this_cpu_ptr(c->usage_percpu), &stats); preempt_enable(); } -void bch_mark_key(struct bch_fs *c, struct bkey_s_c k, +void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k, s64 sectors, bool metadata, struct gc_pos gc_pos, struct bch_fs_usage *stats, u64 journal_seq) { @@ -611,20 +611,20 @@ void bch_mark_key(struct bch_fs *c, struct bkey_s_c k, * (e.g. the btree node lock, or the relevant allocator lock). 
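/*
 * bch2_mark_key() above threads a gc_pos through to __bch2_mark_key() so that,
 * while mark-and-sweep GC is rebuilding the usage counters, updates at
 * positions the GC cursor has not reached yet can be left for GC to count
 * itself.  Roughly, with a scalar cursor standing in for struct gc_pos:
 */
#include <stdbool.h>
#include <stdio.h>

struct fs_state {
	bool		gc_running;
	unsigned long	gc_cursor;	/* highest position GC has finished */
};

static bool gc_will_visit(const struct fs_state *c, unsigned long pos)
{
	return c->gc_running && pos > c->gc_cursor;
}

int main(void)
{
	struct fs_state c = { .gc_running = true, .gc_cursor = 1000 };

	/* GC already passed 500, so account it now: */
	printf("pos  500: apply now? %d\n", !gc_will_visit(&c, 500));
	/* GC will reach 2000 later and count it then: */
	printf("pos 2000: apply now? %d\n", !gc_will_visit(&c, 2000));
	return 0;
}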
*/ lg_local_lock(&c->usage_lock); - __bch_mark_key(c, k, sectors, metadata, false, stats, + __bch2_mark_key(c, k, sectors, metadata, false, stats, gc_will_visit(c, gc_pos), journal_seq); - bch_fs_stats_verify(c); + bch2_fs_stats_verify(c); lg_local_unlock(&c->usage_lock); } static u64 __recalc_sectors_available(struct bch_fs *c) { - return c->capacity - bch_fs_sectors_used(c); + return c->capacity - bch2_fs_sectors_used(c); } /* Used by gc when it's starting: */ -void bch_recalc_sectors_available(struct bch_fs *c) +void bch2_recalc_sectors_available(struct bch_fs *c) { int cpu; @@ -639,7 +639,7 @@ void bch_recalc_sectors_available(struct bch_fs *c) lg_global_unlock(&c->usage_lock); } -void bch_disk_reservation_put(struct bch_fs *c, +void bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res) { if (res->sectors) { @@ -647,7 +647,7 @@ void bch_disk_reservation_put(struct bch_fs *c, this_cpu_sub(c->usage_percpu->online_reserved, res->sectors); - bch_fs_stats_verify(c); + bch2_fs_stats_verify(c); lg_local_unlock(&c->usage_lock); res->sectors = 0; @@ -656,7 +656,7 @@ void bch_disk_reservation_put(struct bch_fs *c, #define SECTORS_CACHE 1024 -int bch_disk_reservation_add(struct bch_fs *c, +int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res, unsigned sectors, int flags) { @@ -691,7 +691,7 @@ out: stats->online_reserved += sectors; res->sectors += sectors; - bch_fs_stats_verify(c); + bch2_fs_stats_verify(c); lg_local_unlock(&c->usage_lock); return 0; @@ -728,7 +728,7 @@ recalculate: ret = -ENOSPC; } - bch_fs_stats_verify(c); + bch2_fs_stats_verify(c); lg_global_unlock(&c->usage_lock); if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD)) up_read(&c->gc_lock); @@ -736,7 +736,7 @@ recalculate: return ret; } -int bch_disk_reservation_get(struct bch_fs *c, +int bch2_disk_reservation_get(struct bch_fs *c, struct disk_reservation *res, unsigned sectors, int flags) { @@ -746,5 +746,5 @@ int bch_disk_reservation_get(struct bch_fs *c, ? 
c->opts.metadata_replicas : c->opts.data_replicas; - return bch_disk_reservation_add(c, res, sectors, flags); + return bch2_disk_reservation_add(c, res, sectors, flags); } diff --git a/libbcache/buckets.h b/libbcachefs/buckets.h index 81355576..9c77304f 100644 --- a/libbcache/buckets.h +++ b/libbcachefs/buckets.h @@ -145,8 +145,8 @@ static inline unsigned bucket_sectors_used(struct bucket *g) /* Per device stats: */ -struct bch_dev_usage __bch_dev_usage_read(struct bch_dev *); -struct bch_dev_usage bch_dev_usage_read(struct bch_dev *); +struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *); +struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *); static inline u64 __dev_buckets_available(struct bch_dev *ca, struct bch_dev_usage stats) @@ -163,7 +163,7 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca, */ static inline u64 dev_buckets_available(struct bch_dev *ca) { - return __dev_buckets_available(ca, bch_dev_usage_read(ca)); + return __dev_buckets_available(ca, bch2_dev_usage_read(ca)); } static inline u64 __dev_buckets_free(struct bch_dev *ca, @@ -176,19 +176,19 @@ static inline u64 __dev_buckets_free(struct bch_dev *ca, static inline u64 dev_buckets_free(struct bch_dev *ca) { - return __dev_buckets_free(ca, bch_dev_usage_read(ca)); + return __dev_buckets_free(ca, bch2_dev_usage_read(ca)); } /* Cache set stats: */ -struct bch_fs_usage __bch_fs_usage_read(struct bch_fs *); -struct bch_fs_usage bch_fs_usage_read(struct bch_fs *); -void bch_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *, +struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *); +struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *); +void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *, struct disk_reservation *, struct gc_pos); -static inline u64 __bch_fs_sectors_used(struct bch_fs *c) +static inline u64 __bch2_fs_sectors_used(struct bch_fs *c) { - struct bch_fs_usage stats = __bch_fs_usage_read(c); + struct bch_fs_usage stats = __bch2_fs_usage_read(c); u64 reserved = stats.persistent_reserved + stats.online_reserved; @@ -198,9 +198,9 @@ static inline u64 __bch_fs_sectors_used(struct bch_fs *c) (reserved >> 7); } -static inline u64 bch_fs_sectors_used(struct bch_fs *c) +static inline u64 bch2_fs_sectors_used(struct bch_fs *c) { - return min(c->capacity, __bch_fs_sectors_used(c)); + return min(c->capacity, __bch2_fs_sectors_used(c)); } /* XXX: kill? 
*/ @@ -233,23 +233,23 @@ static inline bool bucket_needs_journal_commit(struct bucket_mark m, ((s16) m.journal_seq - (s16) last_seq_ondisk > 0); } -void bch_bucket_seq_cleanup(struct bch_fs *); +void bch2_bucket_seq_cleanup(struct bch_fs *); -void bch_invalidate_bucket(struct bch_dev *, struct bucket *); -void bch_mark_free_bucket(struct bch_dev *, struct bucket *); -void bch_mark_alloc_bucket(struct bch_dev *, struct bucket *, bool); -void bch_mark_metadata_bucket(struct bch_dev *, struct bucket *, +void bch2_invalidate_bucket(struct bch_dev *, struct bucket *); +void bch2_mark_free_bucket(struct bch_dev *, struct bucket *); +void bch2_mark_alloc_bucket(struct bch_dev *, struct bucket *, bool); +void bch2_mark_metadata_bucket(struct bch_dev *, struct bucket *, enum bucket_data_type, bool); -void __bch_gc_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool, +void __bch2_gc_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool, struct bch_fs_usage *); -void bch_gc_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool); -void bch_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool, +void bch2_gc_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool); +void bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool, struct gc_pos, struct bch_fs_usage *, u64); -void bch_recalc_sectors_available(struct bch_fs *); +void bch2_recalc_sectors_available(struct bch_fs *); -void bch_disk_reservation_put(struct bch_fs *, +void bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *); #define BCH_DISK_RESERVATION_NOFAIL (1 << 0) @@ -257,10 +257,10 @@ void bch_disk_reservation_put(struct bch_fs *, #define BCH_DISK_RESERVATION_GC_LOCK_HELD (1 << 2) #define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD (1 << 3) -int bch_disk_reservation_add(struct bch_fs *, +int bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *, unsigned, int); -int bch_disk_reservation_get(struct bch_fs *, +int bch2_disk_reservation_get(struct bch_fs *, struct disk_reservation *, unsigned, int); diff --git a/libbcache/buckets_types.h b/libbcachefs/buckets_types.h index ca187099..ca187099 100644 --- a/libbcache/buckets_types.h +++ b/libbcachefs/buckets_types.h diff --git a/libbcache/chardev.c b/libbcachefs/chardev.c index da6d827f..24b92a29 100644 --- a/libbcache/chardev.c +++ b/libbcachefs/chardev.c @@ -1,4 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" +#include "bcachefs_ioctl.h" #include "super.h" #include "super-io.h" @@ -10,9 +11,8 @@ #include <linux/ioctl.h> #include <linux/uaccess.h> #include <linux/slab.h> -#include <linux/bcache-ioctl.h> -static long bch_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg) +static long bch2_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg) { struct bch_ioctl_assemble arg; const char *err; @@ -47,7 +47,7 @@ static long bch_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg) } } - err = bch_fs_open(devs, arg.nr_devs, bch_opts_empty(), NULL); + err = bch2_fs_open(devs, arg.nr_devs, bch2_opts_empty(), NULL); if (err) { pr_err("Could not open filesystem: %s", err); ret = -EINVAL; @@ -63,7 +63,7 @@ err: return ret; } -static long bch_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg) +static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg) { struct bch_ioctl_incremental arg; const char *err; @@ -79,30 +79,30 @@ static long bch_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg) if (!path) return -ENOMEM; - err = bch_fs_open_incremental(path); + err = bch2_fs_open_incremental(path); kfree(path); if 
(err) { - pr_err("Could not register bcache devices: %s", err); + pr_err("Could not register bcachefs devices: %s", err); return -EINVAL; } return 0; } -static long bch_global_ioctl(unsigned cmd, void __user *arg) +static long bch2_global_ioctl(unsigned cmd, void __user *arg) { switch (cmd) { case BCH_IOCTL_ASSEMBLE: - return bch_ioctl_assemble(arg); + return bch2_ioctl_assemble(arg); case BCH_IOCTL_INCREMENTAL: - return bch_ioctl_incremental(arg); + return bch2_ioctl_incremental(arg); default: return -ENOTTY; } } -static long bch_ioctl_query_uuid(struct bch_fs *c, +static long bch2_ioctl_query_uuid(struct bch_fs *c, struct bch_ioctl_query_uuid __user *user_arg) { return copy_to_user(&user_arg->uuid, @@ -110,7 +110,7 @@ static long bch_ioctl_query_uuid(struct bch_fs *c, sizeof(c->sb.user_uuid)); } -static long bch_ioctl_start(struct bch_fs *c, struct bch_ioctl_start __user *user_arg) +static long bch2_ioctl_start(struct bch_fs *c, struct bch_ioctl_start __user *user_arg) { struct bch_ioctl_start arg; @@ -120,17 +120,17 @@ static long bch_ioctl_start(struct bch_fs *c, struct bch_ioctl_start __user *use if (arg.flags || arg.pad) return -EINVAL; - return bch_fs_start(c) ? -EIO : 0; + return bch2_fs_start(c) ? -EIO : 0; } -static long bch_ioctl_stop(struct bch_fs *c) +static long bch2_ioctl_stop(struct bch_fs *c) { - bch_fs_stop(c); + bch2_fs_stop(c); return 0; } /* returns with ref on ca->ref */ -static struct bch_dev *bch_device_lookup(struct bch_fs *c, +static struct bch_dev *bch2_device_lookup(struct bch_fs *c, const char __user *dev) { struct block_device *bdev; @@ -158,9 +158,9 @@ found: } #if 0 -static struct bch_member *bch_uuid_lookup(struct bch_fs *c, uuid_le uuid) +static struct bch_member *bch2_uuid_lookup(struct bch_fs *c, uuid_le uuid) { - struct bch_sb_field_members *mi = bch_sb_get_members(c->disk_sb); + struct bch_sb_field_members *mi = bch2_sb_get_members(c->disk_sb); unsigned i; lockdep_assert_held(&c->sb_lock); @@ -173,8 +173,8 @@ static struct bch_member *bch_uuid_lookup(struct bch_fs *c, uuid_le uuid) } #endif -static long bch_ioctl_disk_add(struct bch_fs *c, - struct bch_ioctl_disk __user *user_arg) +static long bch2_ioctl_disk_add(struct bch_fs *c, + struct bch_ioctl_disk __user *user_arg) { struct bch_ioctl_disk arg; char *path; @@ -190,14 +190,14 @@ static long bch_ioctl_disk_add(struct bch_fs *c, if (!path) return -ENOMEM; - ret = bch_dev_add(c, path); + ret = bch2_dev_add(c, path); kfree(path); return ret; } -static long bch_ioctl_disk_remove(struct bch_fs *c, - struct bch_ioctl_disk __user *user_arg) +static long bch2_ioctl_disk_remove(struct bch_fs *c, + struct bch_ioctl_disk __user *user_arg) { struct bch_ioctl_disk arg; struct bch_dev *ca; @@ -205,15 +205,15 @@ static long bch_ioctl_disk_remove(struct bch_fs *c, if (copy_from_user(&arg, user_arg, sizeof(arg))) return -EFAULT; - ca = bch_device_lookup(c, (const char __user *)(unsigned long) arg.dev); + ca = bch2_device_lookup(c, (const char __user *)(unsigned long) arg.dev); if (IS_ERR(ca)) return PTR_ERR(ca); - return bch_dev_remove(c, ca, arg.flags); + return bch2_dev_remove(c, ca, arg.flags); } -static long bch_ioctl_disk_online(struct bch_fs *c, - struct bch_ioctl_disk __user *user_arg) +static long bch2_ioctl_disk_online(struct bch_fs *c, + struct bch_ioctl_disk __user *user_arg) { struct bch_ioctl_disk arg; char *path; @@ -229,13 +229,13 @@ static long bch_ioctl_disk_online(struct bch_fs *c, if (!path) return -ENOMEM; - ret = bch_dev_online(c, path); + ret = bch2_dev_online(c, path); kfree(path); return 
ret; } -static long bch_ioctl_disk_offline(struct bch_fs *c, - struct bch_ioctl_disk __user *user_arg) +static long bch2_ioctl_disk_offline(struct bch_fs *c, + struct bch_ioctl_disk __user *user_arg) { struct bch_ioctl_disk arg; struct bch_dev *ca; @@ -247,17 +247,17 @@ static long bch_ioctl_disk_offline(struct bch_fs *c, if (arg.pad) return -EINVAL; - ca = bch_device_lookup(c, (const char __user *)(unsigned long) arg.dev); + ca = bch2_device_lookup(c, (const char __user *)(unsigned long) arg.dev); if (IS_ERR(ca)) return PTR_ERR(ca); - ret = bch_dev_offline(c, ca, arg.flags); + ret = bch2_dev_offline(c, ca, arg.flags); percpu_ref_put(&ca->ref); return ret; } -static long bch_ioctl_disk_set_state(struct bch_fs *c, - struct bch_ioctl_disk_set_state __user *user_arg) +static long bch2_ioctl_disk_set_state(struct bch_fs *c, + struct bch_ioctl_disk_set_state __user *user_arg) { struct bch_ioctl_disk_set_state arg; struct bch_dev *ca; @@ -266,18 +266,18 @@ static long bch_ioctl_disk_set_state(struct bch_fs *c, if (copy_from_user(&arg, user_arg, sizeof(arg))) return -EFAULT; - ca = bch_device_lookup(c, (const char __user *)(unsigned long) arg.dev); + ca = bch2_device_lookup(c, (const char __user *)(unsigned long) arg.dev); if (IS_ERR(ca)) return PTR_ERR(ca); - ret = bch_dev_set_state(c, ca, arg.new_state, arg.flags); + ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags); percpu_ref_put(&ca->ref); return ret; } -static long bch_ioctl_disk_evacuate(struct bch_fs *c, - struct bch_ioctl_disk __user *user_arg) +static long bch2_ioctl_disk_evacuate(struct bch_fs *c, + struct bch_ioctl_disk __user *user_arg) { struct bch_ioctl_disk arg; struct bch_dev *ca; @@ -286,22 +286,22 @@ static long bch_ioctl_disk_evacuate(struct bch_fs *c, if (copy_from_user(&arg, user_arg, sizeof(arg))) return -EFAULT; - ca = bch_device_lookup(c, (const char __user *)(unsigned long) arg.dev); + ca = bch2_device_lookup(c, (const char __user *)(unsigned long) arg.dev); if (IS_ERR(ca)) return PTR_ERR(ca); - ret = bch_dev_evacuate(c, ca); + ret = bch2_dev_evacuate(c, ca); percpu_ref_put(&ca->ref); return ret; } -long bch_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg) +long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg) { /* ioctls that don't require admin cap: */ switch (cmd) { case BCH_IOCTL_QUERY_UUID: - return bch_ioctl_query_uuid(c, arg); + return bch2_ioctl_query_uuid(c, arg); } if (!capable(CAP_SYS_ADMIN)) @@ -310,41 +310,41 @@ long bch_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg) /* ioctls that do require admin cap: */ switch (cmd) { case BCH_IOCTL_START: - return bch_ioctl_start(c, arg); + return bch2_ioctl_start(c, arg); case BCH_IOCTL_STOP: - return bch_ioctl_stop(c); + return bch2_ioctl_stop(c); case BCH_IOCTL_DISK_ADD: - return bch_ioctl_disk_add(c, arg); + return bch2_ioctl_disk_add(c, arg); case BCH_IOCTL_DISK_REMOVE: - return bch_ioctl_disk_remove(c, arg); + return bch2_ioctl_disk_remove(c, arg); case BCH_IOCTL_DISK_ONLINE: - return bch_ioctl_disk_online(c, arg); + return bch2_ioctl_disk_online(c, arg); case BCH_IOCTL_DISK_OFFLINE: - return bch_ioctl_disk_offline(c, arg); + return bch2_ioctl_disk_offline(c, arg); case BCH_IOCTL_DISK_SET_STATE: - return bch_ioctl_disk_set_state(c, arg); + return bch2_ioctl_disk_set_state(c, arg); case BCH_IOCTL_DISK_EVACUATE: - return bch_ioctl_disk_evacuate(c, arg); + return bch2_ioctl_disk_evacuate(c, arg); default: return -ENOTTY; } } -static long bch_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v) +static long 
bch2_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v) { struct bch_fs *c = filp->private_data; void __user *arg = (void __user *) v; return c - ? bch_fs_ioctl(c, cmd, arg) - : bch_global_ioctl(cmd, arg); + ? bch2_fs_ioctl(c, cmd, arg) + : bch2_global_ioctl(cmd, arg); } static const struct file_operations bch_chardev_fops = { .owner = THIS_MODULE, - .unlocked_ioctl = bch_chardev_ioctl, + .unlocked_ioctl = bch2_chardev_ioctl, .open = nonseekable_open, }; @@ -353,7 +353,7 @@ static struct class *bch_chardev_class; static struct device *bch_chardev; static DEFINE_IDR(bch_chardev_minor); -void bch_fs_chardev_exit(struct bch_fs *c) +void bch2_fs_chardev_exit(struct bch_fs *c) { if (!IS_ERR_OR_NULL(c->chardev)) device_unregister(c->chardev); @@ -361,7 +361,7 @@ void bch_fs_chardev_exit(struct bch_fs *c) idr_remove(&bch_chardev_minor, c->minor); } -int bch_fs_chardev_init(struct bch_fs *c) +int bch2_fs_chardev_init(struct bch_fs *c) { c->minor = idr_alloc(&bch_chardev_minor, c, 0, 0, GFP_KERNEL); if (c->minor < 0) @@ -369,14 +369,14 @@ int bch_fs_chardev_init(struct bch_fs *c) c->chardev = device_create(bch_chardev_class, NULL, MKDEV(bch_chardev_major, c->minor), NULL, - "bcache%u-ctl", c->minor); + "bcachefs%u-ctl", c->minor); if (IS_ERR(c->chardev)) return PTR_ERR(c->chardev); return 0; } -void bch_chardev_exit(void) +void bch2_chardev_exit(void) { if (!IS_ERR_OR_NULL(bch_chardev_class)) device_destroy(bch_chardev_class, @@ -384,22 +384,22 @@ void bch_chardev_exit(void) if (!IS_ERR_OR_NULL(bch_chardev_class)) class_destroy(bch_chardev_class); if (bch_chardev_major > 0) - unregister_chrdev(bch_chardev_major, "bcache"); + unregister_chrdev(bch_chardev_major, "bcachefs"); } -int __init bch_chardev_init(void) +int __init bch2_chardev_init(void) { - bch_chardev_major = register_chrdev(0, "bcache-ctl", &bch_chardev_fops); + bch_chardev_major = register_chrdev(0, "bcachefs-ctl", &bch_chardev_fops); if (bch_chardev_major < 0) return bch_chardev_major; - bch_chardev_class = class_create(THIS_MODULE, "bcache"); + bch_chardev_class = class_create(THIS_MODULE, "bcachefs"); if (IS_ERR(bch_chardev_class)) return PTR_ERR(bch_chardev_class); bch_chardev = device_create(bch_chardev_class, NULL, MKDEV(bch_chardev_major, 255), - NULL, "bcache-ctl"); + NULL, "bcachefs-ctl"); if (IS_ERR(bch_chardev)) return PTR_ERR(bch_chardev); diff --git a/libbcachefs/chardev.h b/libbcachefs/chardev.h new file mode 100644 index 00000000..e0e34e24 --- /dev/null +++ b/libbcachefs/chardev.h @@ -0,0 +1,30 @@ +#ifndef _BCACHE_CHARDEV_H +#define _BCACHE_CHARDEV_H + +#ifndef NO_BCACHE_CHARDEV + +long bch2_fs_ioctl(struct bch_fs *, unsigned, void __user *); + +void bch2_fs_chardev_exit(struct bch_fs *); +int bch2_fs_chardev_init(struct bch_fs *); + +void bch2_chardev_exit(void); +int __init bch2_chardev_init(void); + +#else + +static inline long bch2_fs_ioctl(struct bch_fs *c, + unsigned cmd, void __user * arg) +{ + return -ENOSYS; +} + +static inline void bch2_fs_chardev_exit(struct bch_fs *c) {} +static inline int bch2_fs_chardev_init(struct bch_fs *c) { return 0; } + +static inline void bch2_chardev_exit(void) {} +static inline int __init bch2_chardev_init(void) { return 0; } + +#endif + +#endif /* _BCACHE_CHARDEV_H */ diff --git a/libbcache/checksum.c b/libbcachefs/checksum.c index b96050db..4545a499 100644 --- a/libbcache/checksum.c +++ b/libbcachefs/checksum.c @@ -1,5 +1,4 @@ - -#include "bcache.h" +#include "bcachefs.h" #include "checksum.h" #include "super.h" #include "super-io.h" @@ -125,7 +124,7 @@ static const 
u64 crc_table[256] = { 0x9AFCE626CE85B507ULL, }; -u64 bch_crc64_update(u64 crc, const void *_data, size_t len) +u64 bch2_crc64_update(u64 crc, const void *_data, size_t len) { const unsigned char *data = _data; @@ -137,7 +136,7 @@ u64 bch_crc64_update(u64 crc, const void *_data, size_t len) return crc; } -static u64 bch_checksum_init(unsigned type) +static u64 bch2_checksum_init(unsigned type) { switch (type) { case BCH_CSUM_NONE: @@ -151,7 +150,7 @@ static u64 bch_checksum_init(unsigned type) } } -static u64 bch_checksum_final(unsigned type, u64 crc) +static u64 bch2_checksum_final(unsigned type, u64 crc) { switch (type) { case BCH_CSUM_NONE: @@ -165,7 +164,7 @@ static u64 bch_checksum_final(unsigned type, u64 crc) } } -static u64 bch_checksum_update(unsigned type, u64 crc, const void *data, size_t len) +static u64 bch2_checksum_update(unsigned type, u64 crc, const void *data, size_t len) { switch (type) { case BCH_CSUM_NONE: @@ -173,7 +172,7 @@ static u64 bch_checksum_update(unsigned type, u64 crc, const void *data, size_t case BCH_CSUM_CRC32C: return crc32c(crc, data, len); case BCH_CSUM_CRC64: - return bch_crc64_update(crc, data, len); + return bch2_crc64_update(crc, data, len); default: BUG(); } @@ -200,7 +199,7 @@ static inline void do_encrypt(struct crypto_blkcipher *tfm, do_encrypt_sg(tfm, nonce, &sg, len); } -int bch_chacha_encrypt_key(struct bch_key *key, struct nonce nonce, +int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce, void *buf, size_t len) { struct crypto_blkcipher *chacha20 = @@ -236,17 +235,17 @@ static void gen_poly_key(struct bch_fs *c, struct shash_desc *desc, crypto_shash_update(desc, key, sizeof(key)); } -struct bch_csum bch_checksum(struct bch_fs *c, unsigned type, - struct nonce nonce, const void *data, size_t len) +struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type, + struct nonce nonce, const void *data, size_t len) { switch (type) { case BCH_CSUM_NONE: case BCH_CSUM_CRC32C: case BCH_CSUM_CRC64: { - u64 crc = bch_checksum_init(type); + u64 crc = bch2_checksum_init(type); - crc = bch_checksum_update(type, crc, data, len); - crc = bch_checksum_final(type, crc); + crc = bch2_checksum_update(type, crc, data, len); + crc = bch2_checksum_final(type, crc); return (struct bch_csum) { .lo = crc }; } @@ -270,17 +269,17 @@ struct bch_csum bch_checksum(struct bch_fs *c, unsigned type, } } -void bch_encrypt(struct bch_fs *c, unsigned type, - struct nonce nonce, void *data, size_t len) +void bch2_encrypt(struct bch_fs *c, unsigned type, + struct nonce nonce, void *data, size_t len) { - if (!bch_csum_type_is_encryption(type)) + if (!bch2_csum_type_is_encryption(type)) return; do_encrypt(c->chacha20, nonce, data, len); } -struct bch_csum bch_checksum_bio(struct bch_fs *c, unsigned type, - struct nonce nonce, struct bio *bio) +struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type, + struct nonce nonce, struct bio *bio) { struct bio_vec bv; struct bvec_iter iter; @@ -290,16 +289,16 @@ struct bch_csum bch_checksum_bio(struct bch_fs *c, unsigned type, return (struct bch_csum) { 0 }; case BCH_CSUM_CRC32C: case BCH_CSUM_CRC64: { - u64 crc = bch_checksum_init(type); + u64 crc = bch2_checksum_init(type); bio_for_each_contig_segment(bv, bio, iter) { void *p = kmap_atomic(bv.bv_page) + bv.bv_offset; - crc = bch_checksum_update(type, + crc = bch2_checksum_update(type, crc, p, bv.bv_len); kunmap_atomic(p); } - crc = bch_checksum_final(type, crc); + crc = bch2_checksum_final(type, crc); return (struct bch_csum) { .lo = crc }; } @@ -328,15 +327,15 
@@ struct bch_csum bch_checksum_bio(struct bch_fs *c, unsigned type, } } -void bch_encrypt_bio(struct bch_fs *c, unsigned type, - struct nonce nonce, struct bio *bio) +void bch2_encrypt_bio(struct bch_fs *c, unsigned type, + struct nonce nonce, struct bio *bio) { struct bio_vec bv; struct bvec_iter iter; struct scatterlist sgl[16], *sg = sgl; size_t bytes = 0; - if (!bch_csum_type_is_encryption(type)) + if (!bch2_csum_type_is_encryption(type)) return; sg_init_table(sgl, ARRAY_SIZE(sgl)); @@ -363,7 +362,7 @@ void bch_encrypt_bio(struct bch_fs *c, unsigned type, } #ifdef __KERNEL__ -int bch_request_key(struct bch_sb *sb, struct bch_key *key) +int bch2_request_key(struct bch_sb *sb, struct bch_key *key) { char key_description[60]; struct key *keyring_key; @@ -371,7 +370,7 @@ int bch_request_key(struct bch_sb *sb, struct bch_key *key) int ret; snprintf(key_description, sizeof(key_description), - "bcache:%pUb", &sb->user_uuid); + "bcachefs:%pUb", &sb->user_uuid); keyring_key = request_key(&key_type_logon, key_description, NULL); if (IS_ERR(keyring_key)) @@ -394,14 +393,14 @@ int bch_request_key(struct bch_sb *sb, struct bch_key *key) #include <keyutils.h> #include <uuid/uuid.h> -int bch_request_key(struct bch_sb *sb, struct bch_key *key) +int bch2_request_key(struct bch_sb *sb, struct bch_key *key) { key_serial_t key_id; char key_description[60]; char uuid[40]; uuid_unparse_lower(sb->user_uuid.b, uuid); - sprintf(key_description, "bcache:%s", uuid); + sprintf(key_description, "bcachefs:%s", uuid); key_id = request_key("user", key_description, NULL, KEY_SPEC_USER_KEYRING); @@ -415,7 +414,7 @@ int bch_request_key(struct bch_sb *sb, struct bch_key *key) } #endif -static int bch_decrypt_sb_key(struct bch_fs *c, +static int bch2_decrypt_sb_key(struct bch_fs *c, struct bch_sb_field_crypt *crypt, struct bch_key *key) { @@ -424,22 +423,22 @@ static int bch_decrypt_sb_key(struct bch_fs *c, int ret = 0; /* is key encrypted? */ - if (!bch_key_is_encrypted(&sb_key)) + if (!bch2_key_is_encrypted(&sb_key)) goto out; - ret = bch_request_key(c->disk_sb, &user_key); + ret = bch2_request_key(c->disk_sb, &user_key); if (ret) { bch_err(c, "error requesting encryption key"); goto err; } /* decrypt real key: */ - ret = bch_chacha_encrypt_key(&user_key, bch_sb_key_nonce(c), + ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c), &sb_key, sizeof(sb_key)); if (ret) goto err; - if (bch_key_is_encrypted(&sb_key)) { + if (bch2_key_is_encrypted(&sb_key)) { bch_err(c, "incorrect encryption key"); ret = -EINVAL; goto err; @@ -452,7 +451,7 @@ err: return ret; } -static int bch_alloc_ciphers(struct bch_fs *c) +static int bch2_alloc_ciphers(struct bch_fs *c) { if (!c->chacha20) c->chacha20 = crypto_alloc_blkcipher("chacha20", 0, @@ -468,7 +467,7 @@ static int bch_alloc_ciphers(struct bch_fs *c) return 0; } -int bch_disable_encryption(struct bch_fs *c) +int bch2_disable_encryption(struct bch_fs *c) { struct bch_sb_field_crypt *crypt; struct bch_key key; @@ -476,16 +475,16 @@ int bch_disable_encryption(struct bch_fs *c) mutex_lock(&c->sb_lock); - crypt = bch_sb_get_crypt(c->disk_sb); + crypt = bch2_sb_get_crypt(c->disk_sb); if (!crypt) goto out; /* is key encrypted? 
*/ ret = 0; - if (bch_key_is_encrypted(&crypt->key)) + if (bch2_key_is_encrypted(&crypt->key)) goto out; - ret = bch_decrypt_sb_key(c, crypt, &key); + ret = bch2_decrypt_sb_key(c, crypt, &key); if (ret) goto out; @@ -493,14 +492,14 @@ int bch_disable_encryption(struct bch_fs *c) crypt->key.key = key; SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb, 0); - bch_write_super(c); + bch2_write_super(c); out: mutex_unlock(&c->sb_lock); return ret; } -int bch_enable_encryption(struct bch_fs *c, bool keyed) +int bch2_enable_encryption(struct bch_fs *c, bool keyed) { struct bch_encrypted_key key; struct bch_key user_key; @@ -510,10 +509,10 @@ int bch_enable_encryption(struct bch_fs *c, bool keyed) mutex_lock(&c->sb_lock); /* Do we already have an encryption key? */ - if (bch_sb_get_crypt(c->disk_sb)) + if (bch2_sb_get_crypt(c->disk_sb)) goto err; - ret = bch_alloc_ciphers(c); + ret = bch2_alloc_ciphers(c); if (ret) goto err; @@ -521,14 +520,14 @@ int bch_enable_encryption(struct bch_fs *c, bool keyed) get_random_bytes(&key.key, sizeof(key.key)); if (keyed) { - ret = bch_request_key(c->disk_sb, &user_key); + ret = bch2_request_key(c->disk_sb, &user_key); if (ret) { bch_err(c, "error requesting encryption key"); goto err; } - ret = bch_chacha_encrypt_key(&user_key, bch_sb_key_nonce(c), - &key, sizeof(key)); + ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c), + &key, sizeof(key)); if (ret) goto err; } @@ -538,7 +537,7 @@ int bch_enable_encryption(struct bch_fs *c, bool keyed) if (ret) goto err; - crypt = bch_fs_sb_resize_crypt(c, sizeof(*crypt) / sizeof(u64)); + crypt = bch2_fs_sb_resize_crypt(c, sizeof(*crypt) / sizeof(u64)); if (!crypt) { ret = -ENOMEM; /* XXX this technically could be -ENOSPC */ goto err; @@ -548,7 +547,7 @@ int bch_enable_encryption(struct bch_fs *c, bool keyed) /* write superblock */ SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb, 1); - bch_write_super(c); + bch2_write_super(c); err: mutex_unlock(&c->sb_lock); memzero_explicit(&user_key, sizeof(user_key)); @@ -556,29 +555,35 @@ err: return ret; } -void bch_fs_encryption_exit(struct bch_fs *c) +void bch2_fs_encryption_exit(struct bch_fs *c) { if (!IS_ERR_OR_NULL(c->poly1305)) crypto_free_shash(c->poly1305); if (!IS_ERR_OR_NULL(c->chacha20)) crypto_free_blkcipher(c->chacha20); + if (!IS_ERR_OR_NULL(c->sha256)) + crypto_free_shash(c->sha256); } -int bch_fs_encryption_init(struct bch_fs *c) +int bch2_fs_encryption_init(struct bch_fs *c) { struct bch_sb_field_crypt *crypt; struct bch_key key; int ret; - crypt = bch_sb_get_crypt(c->disk_sb); + c->sha256 = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(c->sha256)) + return PTR_ERR(c->sha256); + + crypt = bch2_sb_get_crypt(c->disk_sb); if (!crypt) return 0; - ret = bch_alloc_ciphers(c); + ret = bch2_alloc_ciphers(c); if (ret) return ret; - ret = bch_decrypt_sb_key(c, crypt, &key); + ret = bch2_decrypt_sb_key(c, crypt, &key); if (ret) goto err; diff --git a/libbcache/checksum.h b/libbcachefs/checksum.h index 10f62e5b..f540e305 100644 --- a/libbcache/checksum.h +++ b/libbcachefs/checksum.h @@ -1,12 +1,12 @@ #ifndef _BCACHE_CHECKSUM_H #define _BCACHE_CHECKSUM_H -#include "bcache.h" +#include "bcachefs.h" #include "super-io.h" #include <crypto/chacha20.h> -u64 bch_crc64_update(u64, const void *, size_t); +u64 bch2_crc64_update(u64, const void *, size_t); #define BCH_NONCE_EXTENT cpu_to_le32(1 << 28) #define BCH_NONCE_BTREE cpu_to_le32(2 << 28) @@ -14,7 +14,7 @@ u64 bch_crc64_update(u64, const void *, size_t); #define BCH_NONCE_PRIO cpu_to_le32(4 << 28) #define BCH_NONCE_POLY cpu_to_le32(1 << 31) 
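Illustrative sketch, not part of this patch: one way the renamed checksum entry point declared just below might be called. The helper name example_checksum_buf, the fixed BCH_NONCE_EXTENT nonce and the BCH_CSUM_CRC32C type are assumptions chosen purely for illustration; real callers derive the nonce from the object being checksummed (extent, btree node, superblock, ...) and pick the type from the fs options.

static inline struct bch_csum example_checksum_buf(struct bch_fs *c,
						   const void *buf, size_t len)
{
	/* nonce layout mirrors the (struct nonce) {{ ... }} initializers used in this header */
	struct nonce nonce = (struct nonce) {{ 0, 0, 0, BCH_NONCE_EXTENT }};

	/* for the plain CRC types only .lo of the returned bch_csum is populated */
	return bch2_checksum(c, BCH_CSUM_CRC32C, nonce, buf, len);
}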
-struct bch_csum bch_checksum(struct bch_fs *, unsigned, struct nonce, +struct bch_csum bch2_checksum(struct bch_fs *, unsigned, struct nonce, const void *, size_t); /* @@ -26,27 +26,27 @@ struct bch_csum bch_checksum(struct bch_fs *, unsigned, struct nonce, const void *start = ((const void *) (_i)) + sizeof((_i)->csum); \ const void *end = vstruct_end(_i); \ \ - bch_checksum(_c, _type, _nonce, start, end - start); \ + bch2_checksum(_c, _type, _nonce, start, end - start); \ }) -int bch_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t); -int bch_request_key(struct bch_sb *, struct bch_key *); +int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t); +int bch2_request_key(struct bch_sb *, struct bch_key *); -void bch_encrypt(struct bch_fs *, unsigned, struct nonce, +void bch2_encrypt(struct bch_fs *, unsigned, struct nonce, void *data, size_t); -struct bch_csum bch_checksum_bio(struct bch_fs *, unsigned, +struct bch_csum bch2_checksum_bio(struct bch_fs *, unsigned, struct nonce, struct bio *); -void bch_encrypt_bio(struct bch_fs *, unsigned, +void bch2_encrypt_bio(struct bch_fs *, unsigned, struct nonce, struct bio *); -int bch_disable_encryption(struct bch_fs *); -int bch_enable_encryption(struct bch_fs *, bool); +int bch2_disable_encryption(struct bch_fs *); +int bch2_enable_encryption(struct bch_fs *, bool); -void bch_fs_encryption_exit(struct bch_fs *); -int bch_fs_encryption_init(struct bch_fs *); +void bch2_fs_encryption_exit(struct bch_fs *); +int bch2_fs_encryption_init(struct bch_fs *); -static inline unsigned bch_data_checksum_type(struct bch_fs *c) +static inline unsigned bch2_data_checksum_type(struct bch_fs *c) { if (c->sb.encryption_type) return c->opts.wide_macs @@ -56,20 +56,20 @@ static inline unsigned bch_data_checksum_type(struct bch_fs *c) return c->opts.data_checksum; } -static inline unsigned bch_meta_checksum_type(struct bch_fs *c) +static inline unsigned bch2_meta_checksum_type(struct bch_fs *c) { return c->sb.encryption_type ? 
BCH_CSUM_CHACHA20_POLY1305_128 : c->opts.metadata_checksum; } -static inline bool bch_checksum_type_valid(const struct bch_fs *c, +static inline bool bch2_checksum_type_valid(const struct bch_fs *c, unsigned type) { if (type >= BCH_CSUM_NR) return false; - if (bch_csum_type_is_encryption(type) && !c->chacha20) + if (bch2_csum_type_is_encryption(type) && !c->chacha20) return false; return true; @@ -83,7 +83,7 @@ static const unsigned bch_crc_bytes[] = { [BCH_CSUM_CHACHA20_POLY1305_128] = 16, }; -static inline bool bch_crc_cmp(struct bch_csum l, struct bch_csum r) +static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r) { /* * XXX: need some way of preventing the compiler from optimizing this @@ -101,14 +101,14 @@ static inline struct nonce nonce_add(struct nonce nonce, unsigned offset) return nonce; } -static inline bool bch_key_is_encrypted(struct bch_encrypted_key *key) +static inline bool bch2_key_is_encrypted(struct bch_encrypted_key *key) { return le64_to_cpu(key->magic) != BCH_KEY_MAGIC; } -static inline struct nonce __bch_sb_key_nonce(struct bch_sb *sb) +static inline struct nonce __bch2_sb_key_nonce(struct bch_sb *sb) { - __le64 magic = __bch_sb_magic(sb); + __le64 magic = __bch2_sb_magic(sb); return (struct nonce) {{ [0] = 0, @@ -118,9 +118,9 @@ static inline struct nonce __bch_sb_key_nonce(struct bch_sb *sb) }}; } -static inline struct nonce bch_sb_key_nonce(struct bch_fs *c) +static inline struct nonce bch2_sb_key_nonce(struct bch_fs *c) { - __le64 magic = bch_sb_magic(c); + __le64 magic = bch2_sb_magic(c); return (struct nonce) {{ [0] = 0, diff --git a/libbcache/clock.c b/libbcachefs/clock.c index 85891a03..3c3649f0 100644 --- a/libbcache/clock.c +++ b/libbcachefs/clock.c @@ -1,4 +1,4 @@ -#include "bcache.h" +#include "bcachefs.h" #include "clock.h" #include <linux/freezer.h> @@ -9,7 +9,7 @@ static inline bool io_timer_cmp(struct io_timer *l, struct io_timer *r) return time_after(l->expire, r->expire); } -void bch_io_timer_add(struct io_clock *clock, struct io_timer *timer) +void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer) { size_t i; @@ -23,7 +23,7 @@ out: spin_unlock(&clock->timer_lock); } -void bch_io_timer_del(struct io_clock *clock, struct io_timer *timer) +void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer) { size_t i; @@ -53,7 +53,7 @@ static void io_clock_wait_fn(struct io_timer *timer) wake_up_process(wait->task); } -void bch_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until) +void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until) { struct io_clock_wait wait; @@ -62,17 +62,17 @@ void bch_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until) wait.timer.fn = io_clock_wait_fn; wait.task = current; wait.expired = 0; - bch_io_timer_add(clock, &wait.timer); + bch2_io_timer_add(clock, &wait.timer); schedule(); - bch_io_timer_del(clock, &wait.timer); + bch2_io_timer_del(clock, &wait.timer); } /* * _only_ to be used from a kthread */ -void bch_kthread_io_clock_wait(struct io_clock *clock, +void bch2_kthread_io_clock_wait(struct io_clock *clock, unsigned long until) { struct io_clock_wait wait; @@ -82,7 +82,7 @@ void bch_kthread_io_clock_wait(struct io_clock *clock, wait.timer.fn = io_clock_wait_fn; wait.task = current; wait.expired = 0; - bch_io_timer_add(clock, &wait.timer); + bch2_io_timer_add(clock, &wait.timer); while (1) { set_current_state(TASK_INTERRUPTIBLE); @@ -97,7 +97,7 @@ void bch_kthread_io_clock_wait(struct io_clock *clock, } 
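	/*
	 * Wait loop finished: restore the task state and remove the on-stack
	 * timer from the clock, since it may not have fired.
	 */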
__set_current_state(TASK_RUNNING); - bch_io_timer_del(clock, &wait.timer); + bch2_io_timer_del(clock, &wait.timer); } static struct io_timer *get_expired_timer(struct io_clock *clock, @@ -116,7 +116,7 @@ static struct io_timer *get_expired_timer(struct io_clock *clock, return ret; } -void bch_increment_clock(struct bch_fs *c, unsigned sectors, int rw) +void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw) { struct io_clock *clock = &c->io_clock[rw]; struct io_timer *timer; @@ -139,13 +139,13 @@ void bch_increment_clock(struct bch_fs *c, unsigned sectors, int rw) timer->fn(timer); } -void bch_io_clock_exit(struct io_clock *clock) +void bch2_io_clock_exit(struct io_clock *clock) { free_heap(&clock->timers); free_percpu(clock->pcpu_buf); } -int bch_io_clock_init(struct io_clock *clock) +int bch2_io_clock_init(struct io_clock *clock) { atomic_long_set(&clock->now, 0); spin_lock_init(&clock->timer_lock); diff --git a/libbcachefs/clock.h b/libbcachefs/clock.h new file mode 100644 index 00000000..061bf04a --- /dev/null +++ b/libbcachefs/clock.h @@ -0,0 +1,23 @@ +#ifndef _BCACHE_CLOCK_H +#define _BCACHE_CLOCK_H + +void bch2_io_timer_add(struct io_clock *, struct io_timer *); +void bch2_io_timer_del(struct io_clock *, struct io_timer *); +void bch2_kthread_io_clock_wait(struct io_clock *, unsigned long); +void bch2_increment_clock(struct bch_fs *, unsigned, int); + +void bch2_io_clock_schedule_timeout(struct io_clock *, unsigned long); + +#define bch2_kthread_wait_event_ioclock_timeout(condition, clock, timeout)\ +({ \ + long __ret = timeout; \ + might_sleep(); \ + if (!___wait_cond_timeout(condition)) \ + __ret = __wait_event_timeout(wq, condition, timeout); \ + __ret; \ +}) + +void bch2_io_clock_exit(struct io_clock *); +int bch2_io_clock_init(struct io_clock *); + +#endif /* _BCACHE_CLOCK_H */ diff --git a/libbcache/clock_types.h b/libbcachefs/clock_types.h index 4a02f467..4a02f467 100644 --- a/libbcache/clock_types.h +++ b/libbcachefs/clock_types.h diff --git a/libbcache/compress.c b/libbcachefs/compress.c index d9a64c38..547ea732 100644 --- a/libbcache/compress.c +++ b/libbcachefs/compress.c @@ -1,4 +1,4 @@ -#include "bcache.h" +#include "bcachefs.h" #include "compress.h" #include "extents.h" #include "io.h" @@ -195,7 +195,7 @@ err: return ret; } -int bch_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio, +int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio, unsigned live_data_sectors, struct bch_extent_crc128 crc) { @@ -242,12 +242,12 @@ use_mempool: * deadlock: */ - bch_bio_free_pages_pool(c, bio); - bch_bio_alloc_pages_pool(c, bio, live_data_sectors << 9); + bch2_bio_free_pages_pool(c, bio); + bch2_bio_alloc_pages_pool(c, bio, live_data_sectors << 9); goto copy_data; } -int bch_bio_uncompress(struct bch_fs *c, struct bio *src, +int bch2_bio_uncompress(struct bch_fs *c, struct bio *src, struct bio *dst, struct bvec_iter dst_iter, struct bch_extent_crc128 crc) { @@ -391,7 +391,7 @@ err: return ret; } -void bch_bio_compress(struct bch_fs *c, +void bch2_bio_compress(struct bch_fs *c, struct bio *dst, size_t *dst_len, struct bio *src, size_t *src_len, unsigned *compression_type) @@ -423,30 +423,30 @@ out: } /* doesn't write superblock: */ -int bch_check_set_has_compressed_data(struct bch_fs *c, +int bch2_check_set_has_compressed_data(struct bch_fs *c, unsigned compression_type) { switch (compression_type) { case BCH_COMPRESSION_NONE: return 0; case BCH_COMPRESSION_LZ4: - if (bch_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4)) + if 
(bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4)) return 0; - bch_sb_set_feature(c->disk_sb, BCH_FEATURE_LZ4); + bch2_sb_set_feature(c->disk_sb, BCH_FEATURE_LZ4); break; case BCH_COMPRESSION_GZIP: - if (bch_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP)) + if (bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP)) return 0; - bch_sb_set_feature(c->disk_sb, BCH_FEATURE_GZIP); + bch2_sb_set_feature(c->disk_sb, BCH_FEATURE_GZIP); break; } - return bch_fs_compress_init(c); + return bch2_fs_compress_init(c); } -void bch_fs_compress_exit(struct bch_fs *c) +void bch2_fs_compress_exit(struct bch_fs *c) { vfree(c->zlib_workspace); mempool_exit(&c->lz4_workspace_pool); @@ -458,13 +458,13 @@ void bch_fs_compress_exit(struct bch_fs *c) max_t(size_t, zlib_inflate_workspacesize(), \ zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL)) -int bch_fs_compress_init(struct bch_fs *c) +int bch2_fs_compress_init(struct bch_fs *c) { unsigned order = get_order(BCH_ENCODED_EXTENT_MAX << 9); int ret; - if (!bch_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4) && - !bch_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP)) + if (!bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4) && + !bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP)) return 0; if (!mempool_initialized(&c->compression_bounce[READ])) { @@ -482,7 +482,7 @@ int bch_fs_compress_init(struct bch_fs *c) } if (!mempool_initialized(&c->lz4_workspace_pool) && - bch_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4)) { + bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4)) { ret = mempool_init_kmalloc_pool(&c->lz4_workspace_pool, 1, LZ4_MEM_COMPRESS); if (ret) @@ -490,7 +490,7 @@ int bch_fs_compress_init(struct bch_fs *c) } if (!c->zlib_workspace && - bch_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP)) { + bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP)) { c->zlib_workspace = vmalloc(COMPRESSION_WORKSPACE_SIZE); if (!c->zlib_workspace) return -ENOMEM; diff --git a/libbcachefs/compress.h b/libbcachefs/compress.h new file mode 100644 index 00000000..05804f55 --- /dev/null +++ b/libbcachefs/compress.h @@ -0,0 +1,15 @@ +#ifndef _BCACHE_COMPRESS_H +#define _BCACHE_COMPRESS_H + +int bch2_bio_uncompress_inplace(struct bch_fs *, struct bio *, + unsigned, struct bch_extent_crc128); +int bch2_bio_uncompress(struct bch_fs *, struct bio *, struct bio *, + struct bvec_iter, struct bch_extent_crc128); +void bch2_bio_compress(struct bch_fs *, struct bio *, size_t *, + struct bio *, size_t *, unsigned *); + +int bch2_check_set_has_compressed_data(struct bch_fs *, unsigned); +void bch2_fs_compress_exit(struct bch_fs *); +int bch2_fs_compress_init(struct bch_fs *); + +#endif /* _BCACHE_COMPRESS_H */ diff --git a/libbcache/debug.c b/libbcachefs/debug.c index bddff979..248bc7a1 100644 --- a/libbcache/debug.c +++ b/libbcachefs/debug.c @@ -1,11 +1,11 @@ /* - * Assorted bcache debug code + * Assorted bcachefs debug code * * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> * Copyright 2012 Google, Inc. 
*/ -#include "bcache.h" +#include "bcachefs.h" #include "bkey_methods.h" #include "btree_cache.h" #include "btree_io.h" @@ -28,29 +28,19 @@ static struct dentry *bch_debug; -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG -static void btree_verify_endio(struct bio *bio) -{ - struct closure *cl = bio->bi_private; - - closure_put(cl); -} - -void __bch_btree_verify(struct bch_fs *c, struct btree *b) +void __bch2_btree_verify(struct bch_fs *c, struct btree *b) { struct btree *v = c->verify_data; struct btree_node *n_ondisk, *n_sorted, *n_inmemory; struct bset *sorted, *inmemory; struct extent_pick_ptr pick; struct bio *bio; - struct closure cl; if (c->opts.nochanges) return; - closure_init_stack(&cl); - btree_node_io_lock(b); mutex_lock(&c->verify_lock); @@ -62,9 +52,9 @@ void __bch_btree_verify(struct bch_fs *c, struct btree *b) v->written = 0; v->level = b->level; v->btree_id = b->btree_id; - bch_btree_keys_init(v, &c->expensive_debug_checks); + bch2_btree_keys_init(v, &c->expensive_debug_checks); - pick = bch_btree_pick_ptr(c, b); + pick = bch2_btree_pick_ptr(c, b); if (IS_ERR_OR_NULL(pick.ca)) return; @@ -73,19 +63,15 @@ void __bch_btree_verify(struct bch_fs *c, struct btree *b) bio->bi_iter.bi_sector = pick.ptr.offset; bio->bi_iter.bi_size = btree_bytes(c); bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC); - bio->bi_private = &cl; - bio->bi_end_io = btree_verify_endio; - bch_bio_map(bio, n_sorted); + bch2_bio_map(bio, n_sorted); - closure_get(&cl); - bch_generic_make_request(bio, c); - closure_sync(&cl); + submit_bio_wait(bio); bio_put(bio); memcpy(n_ondisk, n_sorted, btree_bytes(c)); - bch_btree_node_read_done(c, v, pick.ca, &pick.ptr); + bch2_btree_node_read_done(c, v, pick.ca, &pick.ptr); n_sorted = c->verify_data->data; percpu_ref_put(&pick.ca->io_ref); @@ -104,10 +90,10 @@ void __bch_btree_verify(struct bch_fs *c, struct btree *b) console_lock(); printk(KERN_ERR "*** in memory:\n"); - bch_dump_bset(b, inmemory, 0); + bch2_dump_bset(b, inmemory, 0); printk(KERN_ERR "*** read back in:\n"); - bch_dump_bset(v, sorted, 0); + bch2_dump_bset(v, sorted, 0); while (offset < b->written) { if (!offset ) { @@ -124,7 +110,7 @@ void __bch_btree_verify(struct bch_fs *c, struct btree *b) } printk(KERN_ERR "*** on disk block %u:\n", offset); - bch_dump_bset(b, i, offset); + bch2_dump_bset(b, i, offset); offset += sectors; } @@ -146,42 +132,6 @@ void __bch_btree_verify(struct bch_fs *c, struct btree *b) btree_node_io_unlock(b); } -void bch_data_verify(struct cached_dev *dc, struct bio *bio) -{ - char name[BDEVNAME_SIZE]; - struct bio *check; - struct bio_vec bv; - struct bvec_iter iter; - - check = bio_clone(bio, GFP_NOIO); - if (!check) - return; - bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC); - - if (bio_alloc_pages(check, GFP_NOIO)) - goto out_put; - - submit_bio_wait(check); - - bio_for_each_segment(bv, bio, iter) { - void *p1 = kmap_atomic(bv.bv_page); - void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page); - - if (memcmp(p1 + bv.bv_offset, - p2 + bv.bv_offset, - bv.bv_len)) - panic("verify failed at dev %s sector %llu\n", - bdevname(dc->disk_sb.bdev, name), - (uint64_t) bio->bi_iter.bi_sector); - - kunmap_atomic(p1); - } - - bio_free_pages(check); -out_put: - bio_put(check); -} - #endif #ifdef CONFIG_DEBUG_FS @@ -220,7 +170,7 @@ static int flush_buf(struct dump_iter *i) return 0; } -static int bch_dump_open(struct inode *inode, struct file *file) +static int bch2_dump_open(struct inode *inode, struct file *file) { struct btree_debug *bd = inode->i_private; struct 
dump_iter *i; @@ -237,14 +187,14 @@ static int bch_dump_open(struct inode *inode, struct file *file) return 0; } -static int bch_dump_release(struct inode *inode, struct file *file) +static int bch2_dump_release(struct inode *inode, struct file *file) { kfree(file->private_data); return 0; } -static ssize_t bch_read_btree(struct file *file, char __user *buf, - size_t size, loff_t *ppos) +static ssize_t bch2_read_btree(struct file *file, char __user *buf, + size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; struct btree_iter iter; @@ -262,18 +212,18 @@ static ssize_t bch_read_btree(struct file *file, char __user *buf, if (!i->size) return i->ret; - bch_btree_iter_init(&iter, i->c, i->id, i->from); + bch2_btree_iter_init(&iter, i->c, i->id, i->from); - while ((k = bch_btree_iter_peek(&iter)).k && + while ((k = bch2_btree_iter_peek(&iter)).k && !(err = btree_iter_err(k))) { - bch_bkey_val_to_text(i->c, bkey_type(0, i->id), + bch2_bkey_val_to_text(i->c, bkey_type(0, i->id), i->buf, sizeof(i->buf), k); i->bytes = strlen(i->buf); BUG_ON(i->bytes >= PAGE_SIZE); i->buf[i->bytes] = '\n'; i->bytes++; - bch_btree_iter_advance_pos(&iter); + bch2_btree_iter_advance_pos(&iter); i->from = iter.pos; err = flush_buf(i); @@ -283,20 +233,20 @@ static ssize_t bch_read_btree(struct file *file, char __user *buf, if (!i->size) break; } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return err < 0 ? err : i->ret; } static const struct file_operations btree_debug_ops = { .owner = THIS_MODULE, - .open = bch_dump_open, - .release = bch_dump_release, - .read = bch_read_btree, + .open = bch2_dump_open, + .release = bch2_dump_release, + .read = bch2_read_btree, }; -static ssize_t bch_read_btree_formats(struct file *file, char __user *buf, - size_t size, loff_t *ppos) +static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf, + size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; struct btree_iter iter; @@ -315,7 +265,7 @@ static ssize_t bch_read_btree_formats(struct file *file, char __user *buf, return i->ret; for_each_btree_node(&iter, i->c, i->id, i->from, 0, b) { - i->bytes = bch_print_btree_node(i->c, b, i->buf, + i->bytes = bch2_print_btree_node(i->c, b, i->buf, sizeof(i->buf)); err = flush_buf(i); if (err) @@ -332,20 +282,20 @@ static ssize_t bch_read_btree_formats(struct file *file, char __user *buf, if (!i->size) break; } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return err < 0 ? 
err : i->ret; } static const struct file_operations btree_format_debug_ops = { .owner = THIS_MODULE, - .open = bch_dump_open, - .release = bch_dump_release, - .read = bch_read_btree_formats, + .open = bch2_dump_open, + .release = bch2_dump_release, + .read = bch2_read_btree_formats, }; -static ssize_t bch_read_bfloat_failed(struct file *file, char __user *buf, - size_t size, loff_t *ppos) +static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf, + size_t size, loff_t *ppos) { struct dump_iter *i = file->private_data; struct btree_iter iter; @@ -364,16 +314,16 @@ static ssize_t bch_read_bfloat_failed(struct file *file, char __user *buf, if (!i->size) return i->ret; - bch_btree_iter_init(&iter, i->c, i->id, i->from); + bch2_btree_iter_init(&iter, i->c, i->id, i->from); - while ((k = bch_btree_iter_peek(&iter)).k && + while ((k = bch2_btree_iter_peek(&iter)).k && !(err = btree_iter_err(k))) { struct btree *b = iter.nodes[0]; struct btree_node_iter *node_iter = &iter.node_iters[0]; - struct bkey_packed *_k = bch_btree_node_iter_peek(node_iter, b); + struct bkey_packed *_k = bch2_btree_node_iter_peek(node_iter, b); if (iter.nodes[0] != prev_node) { - i->bytes = bch_print_btree_node(i->c, b, i->buf, + i->bytes = bch2_print_btree_node(i->c, b, i->buf, sizeof(i->buf)); err = flush_buf(i); if (err) @@ -381,13 +331,13 @@ static ssize_t bch_read_bfloat_failed(struct file *file, char __user *buf, } prev_node = iter.nodes[0]; - i->bytes = bch_bkey_print_bfloat(b, _k, i->buf, sizeof(i->buf)); + i->bytes = bch2_bkey_print_bfloat(b, _k, i->buf, sizeof(i->buf)); err = flush_buf(i); if (err) break; - bch_btree_iter_advance_pos(&iter); + bch2_btree_iter_advance_pos(&iter); i->from = iter.pos; err = flush_buf(i); @@ -397,25 +347,25 @@ static ssize_t bch_read_bfloat_failed(struct file *file, char __user *buf, if (!i->size) break; } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return err < 0 ? 
err : i->ret; } static const struct file_operations bfloat_failed_debug_ops = { .owner = THIS_MODULE, - .open = bch_dump_open, - .release = bch_dump_release, - .read = bch_read_bfloat_failed, + .open = bch2_dump_open, + .release = bch2_dump_release, + .read = bch2_read_bfloat_failed, }; -void bch_fs_debug_exit(struct bch_fs *c) +void bch2_fs_debug_exit(struct bch_fs *c) { if (!IS_ERR_OR_NULL(c->debug)) debugfs_remove_recursive(c->debug); } -void bch_fs_debug_init(struct bch_fs *c) +void bch2_fs_debug_init(struct bch_fs *c) { struct btree_debug *bd; char name[100]; @@ -432,18 +382,18 @@ void bch_fs_debug_init(struct bch_fs *c) bd < c->btree_debug + ARRAY_SIZE(c->btree_debug); bd++) { bd->id = bd - c->btree_debug; - bd->btree = debugfs_create_file(bch_btree_ids[bd->id], + bd->btree = debugfs_create_file(bch2_btree_ids[bd->id], 0400, c->debug, bd, &btree_debug_ops); snprintf(name, sizeof(name), "%s-formats", - bch_btree_ids[bd->id]); + bch2_btree_ids[bd->id]); bd->btree_format = debugfs_create_file(name, 0400, c->debug, bd, &btree_format_debug_ops); snprintf(name, sizeof(name), "%s-bfloat-failed", - bch_btree_ids[bd->id]); + bch2_btree_ids[bd->id]); bd->failed = debugfs_create_file(name, 0400, c->debug, bd, &bfloat_failed_debug_ops); @@ -452,16 +402,16 @@ void bch_fs_debug_init(struct bch_fs *c) #endif -void bch_debug_exit(void) +void bch2_debug_exit(void) { if (!IS_ERR_OR_NULL(bch_debug)) debugfs_remove_recursive(bch_debug); } -int __init bch_debug_init(void) +int __init bch2_debug_init(void) { int ret = 0; - bch_debug = debugfs_create_dir("bcache", NULL); + bch_debug = debugfs_create_dir("bcachefs", NULL); return ret; } diff --git a/libbcachefs/debug.h b/libbcachefs/debug.h new file mode 100644 index 00000000..77245045 --- /dev/null +++ b/libbcachefs/debug.h @@ -0,0 +1,62 @@ +#ifndef _BCACHE_DEBUG_H +#define _BCACHE_DEBUG_H + +#include "bcachefs.h" + +struct bio; +struct btree; +struct bch_fs; + +#define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name; +BCH_DEBUG_PARAMS() +#undef BCH_DEBUG_PARAM + +#define BCH_DEBUG_PARAM(name, description) \ + static inline bool name(struct bch_fs *c) \ + { return bch2_##name || c->name; } +BCH_DEBUG_PARAMS_ALWAYS() +#undef BCH_DEBUG_PARAM + +#ifdef CONFIG_BCACHEFS_DEBUG + +#define BCH_DEBUG_PARAM(name, description) \ + static inline bool name(struct bch_fs *c) \ + { return bch2_##name || c->name; } +BCH_DEBUG_PARAMS_DEBUG() +#undef BCH_DEBUG_PARAM + +void __bch2_btree_verify(struct bch_fs *, struct btree *); + +#define bypass_torture_test(d) ((d)->bypass_torture_test) + +#else /* DEBUG */ + +#define BCH_DEBUG_PARAM(name, description) \ + static inline bool name(struct bch_fs *c) { return false; } +BCH_DEBUG_PARAMS_DEBUG() +#undef BCH_DEBUG_PARAM + +static inline void __bch2_btree_verify(struct bch_fs *c, struct btree *b) {} + +#define bypass_torture_test(d) 0 + +#endif + +static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b) +{ + if (verify_btree_ondisk(c)) + __bch2_btree_verify(c, b); +} + +#ifdef CONFIG_DEBUG_FS +void bch2_fs_debug_exit(struct bch_fs *); +void bch2_fs_debug_init(struct bch_fs *); +#else +static inline void bch2_fs_debug_exit(struct bch_fs *c) {} +static inline void bch2_fs_debug_init(struct bch_fs *c) {} +#endif + +void bch2_debug_exit(void); +int bch2_debug_init(void); + +#endif diff --git a/libbcache/dirent.c b/libbcachefs/dirent.c index f961e881..503f0dc4 100644 --- a/libbcache/dirent.c +++ b/libbcachefs/dirent.c @@ -1,5 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" #include "bkey_methods.h" 
#include "btree_update.h" #include "extents.h" @@ -10,7 +10,7 @@ #include <linux/dcache.h> -unsigned bch_dirent_name_bytes(struct bkey_s_c_dirent d) +unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d) { unsigned len = bkey_val_bytes(d.k) - sizeof(struct bch_dirent); @@ -20,35 +20,35 @@ unsigned bch_dirent_name_bytes(struct bkey_s_c_dirent d) return len; } -static u64 bch_dirent_hash(const struct bch_hash_info *info, - const struct qstr *name) +static u64 bch2_dirent_hash(const struct bch_hash_info *info, + const struct qstr *name) { struct bch_str_hash_ctx ctx; - bch_str_hash_init(&ctx, info); - bch_str_hash_update(&ctx, info, name->name, name->len); + bch2_str_hash_init(&ctx, info); + bch2_str_hash_update(&ctx, info, name->name, name->len); /* [0,2) reserved for dots */ - return max_t(u64, bch_str_hash_end(&ctx, info), 2); + return max_t(u64, bch2_str_hash_end(&ctx, info), 2); } static u64 dirent_hash_key(const struct bch_hash_info *info, const void *key) { - return bch_dirent_hash(info, key); + return bch2_dirent_hash(info, key); } static u64 dirent_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k) { struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); - struct qstr name = QSTR_INIT(d.v->d_name, bch_dirent_name_bytes(d)); + struct qstr name = QSTR_INIT(d.v->d_name, bch2_dirent_name_bytes(d)); - return bch_dirent_hash(info, &name); + return bch2_dirent_hash(info, &name); } static bool dirent_cmp_key(struct bkey_s_c _l, const void *_r) { struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l); - int len = bch_dirent_name_bytes(l); + int len = bch2_dirent_name_bytes(l); const struct qstr *r = _r; return len - r->len ?: memcmp(l.v->d_name, r->name, len); @@ -58,8 +58,8 @@ static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r) { struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l); struct bkey_s_c_dirent r = bkey_s_c_to_dirent(_r); - int l_len = bch_dirent_name_bytes(l); - int r_len = bch_dirent_name_bytes(r); + int l_len = bch2_dirent_name_bytes(l); + int r_len = bch2_dirent_name_bytes(r); return l_len - r_len ?: memcmp(l.v->d_name, r.v->d_name, l_len); } @@ -74,8 +74,8 @@ static const struct bch_hash_desc dirent_hash_desc = { .cmp_bkey = dirent_cmp_bkey, }; -static const char *bch_dirent_invalid(const struct bch_fs *c, - struct bkey_s_c k) +static const char *bch2_dirent_invalid(const struct bch_fs *c, + struct bkey_s_c k) { switch (k.k->type) { case BCH_DIRENT: @@ -93,8 +93,8 @@ static const char *bch_dirent_invalid(const struct bch_fs *c, } } -static void bch_dirent_to_text(struct bch_fs *c, char *buf, - size_t size, struct bkey_s_c k) +static void bch2_dirent_to_text(struct bch_fs *c, char *buf, + size_t size, struct bkey_s_c k) { struct bkey_s_c_dirent d; @@ -104,7 +104,7 @@ static void bch_dirent_to_text(struct bch_fs *c, char *buf, if (size) { unsigned n = min_t(unsigned, size, - bch_dirent_name_bytes(d)); + bch2_dirent_name_bytes(d)); memcpy(buf, d.v->d_name, n); buf[size - 1] = '\0'; buf += n; @@ -119,9 +119,9 @@ static void bch_dirent_to_text(struct bch_fs *c, char *buf, } } -const struct bkey_ops bch_bkey_dirent_ops = { - .key_invalid = bch_dirent_invalid, - .val_to_text = bch_dirent_to_text, +const struct bkey_ops bch2_bkey_dirent_ops = { + .key_invalid = bch2_dirent_invalid, + .val_to_text = bch2_dirent_to_text, }; static struct bkey_i_dirent *dirent_create_key(u8 type, @@ -146,15 +146,15 @@ static struct bkey_i_dirent *dirent_create_key(u8 type, bkey_val_bytes(&dirent->k) - (sizeof(struct bch_dirent) + name->len)); - 
EBUG_ON(bch_dirent_name_bytes(dirent_i_to_s_c(dirent)) != name->len); + EBUG_ON(bch2_dirent_name_bytes(dirent_i_to_s_c(dirent)) != name->len); return dirent; } -int bch_dirent_create(struct bch_fs *c, u64 dir_inum, - const struct bch_hash_info *hash_info, - u8 type, const struct qstr *name, u64 dst_inum, - u64 *journal_seq, int flags) +int bch2_dirent_create(struct bch_fs *c, u64 dir_inum, + const struct bch_hash_info *hash_info, + u8 type, const struct qstr *name, u64 dst_inum, + u64 *journal_seq, int flags) { struct bkey_i_dirent *dirent; int ret; @@ -163,7 +163,7 @@ int bch_dirent_create(struct bch_fs *c, u64 dir_inum, if (!dirent) return -ENOMEM; - ret = bch_hash_set(dirent_hash_desc, hash_info, c, dir_inum, + ret = bch2_hash_set(dirent_hash_desc, hash_info, c, dir_inum, journal_seq, &dirent->k_i, flags); kfree(dirent); @@ -177,16 +177,16 @@ static void dirent_copy_target(struct bkey_i_dirent *dst, dst->v.d_type = src.v->d_type; } -static struct bpos bch_dirent_pos(struct bch_inode_info *ei, - const struct qstr *name) +static struct bpos bch2_dirent_pos(struct bch_inode_info *ei, + const struct qstr *name) { - return POS(ei->vfs_inode.i_ino, bch_dirent_hash(&ei->str_hash, name)); + return POS(ei->vfs_inode.i_ino, bch2_dirent_hash(&ei->str_hash, name)); } -int bch_dirent_rename(struct bch_fs *c, - struct inode *src_dir, const struct qstr *src_name, - struct inode *dst_dir, const struct qstr *dst_name, - u64 *journal_seq, enum bch_rename_mode mode) +int bch2_dirent_rename(struct bch_fs *c, + struct inode *src_dir, const struct qstr *src_name, + struct inode *dst_dir, const struct qstr *dst_name, + u64 *journal_seq, enum bch_rename_mode mode) { struct bch_inode_info *src_ei = to_bch_ei(src_dir); struct bch_inode_info *dst_ei = to_bch_ei(dst_dir); @@ -194,17 +194,17 @@ int bch_dirent_rename(struct bch_fs *c, struct bkey_s_c old_src, old_dst; struct bkey delete; struct bkey_i_dirent *new_src = NULL, *new_dst = NULL; - struct bpos src_pos = bch_dirent_pos(src_ei, src_name); - struct bpos dst_pos = bch_dirent_pos(dst_ei, dst_name); + struct bpos src_pos = bch2_dirent_pos(src_ei, src_name); + struct bpos dst_pos = bch2_dirent_pos(dst_ei, dst_name); bool need_whiteout; int ret = -ENOMEM; - bch_btree_iter_init_intent(&src_iter, c, BTREE_ID_DIRENTS, src_pos); - bch_btree_iter_init_intent(&dst_iter, c, BTREE_ID_DIRENTS, dst_pos); - bch_btree_iter_link(&src_iter, &dst_iter); + bch2_btree_iter_init_intent(&src_iter, c, BTREE_ID_DIRENTS, src_pos); + bch2_btree_iter_init_intent(&dst_iter, c, BTREE_ID_DIRENTS, dst_pos); + bch2_btree_iter_link(&src_iter, &dst_iter); - bch_btree_iter_init(&whiteout_iter, c, BTREE_ID_DIRENTS, src_pos); - bch_btree_iter_link(&src_iter, &whiteout_iter); + bch2_btree_iter_init(&whiteout_iter, c, BTREE_ID_DIRENTS, src_pos); + bch2_btree_iter_link(&src_iter, &whiteout_iter); if (mode == BCH_RENAME_EXCHANGE) { new_src = dirent_create_key(0, src_name, 0); @@ -223,13 +223,13 @@ retry: * from the original hashed position (like we do when creating dirents, * in bch_hash_set) - we never move existing dirents to different slot: */ - old_src = bch_hash_lookup_at(dirent_hash_desc, + old_src = bch2_hash_lookup_at(dirent_hash_desc, &src_ei->str_hash, &src_iter, src_name); if ((ret = btree_iter_err(old_src))) goto err; - ret = bch_hash_needs_whiteout(dirent_hash_desc, + ret = bch2_hash_needs_whiteout(dirent_hash_desc, &src_ei->str_hash, &whiteout_iter, &src_iter); if (ret < 0) @@ -242,8 +242,8 @@ retry: * to do that check for us for correctness: */ old_dst = mode == BCH_RENAME - ? 
bch_hash_hole_at(dirent_hash_desc, &dst_iter) - : bch_hash_lookup_at(dirent_hash_desc, + ? bch2_hash_hole_at(dirent_hash_desc, &dst_iter) + : bch2_hash_lookup_at(dirent_hash_desc, &dst_ei->str_hash, &dst_iter, dst_name); if ((ret = btree_iter_err(old_dst))) @@ -265,13 +265,13 @@ retry: * were going to delete: * * Note: this is a correctness issue, in this - * situation bch_hash_needs_whiteout() could + * situation bch2_hash_needs_whiteout() could * return false when the whiteout would have * been needed if we inserted at the pos * __dirent_find_hole() found */ new_dst->k.p = src_iter.pos; - ret = bch_btree_insert_at(c, NULL, NULL, + ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, BTREE_INSERT_ATOMIC, BTREE_INSERT_ENTRY(&src_iter, @@ -307,7 +307,7 @@ retry: new_src->k.p = src_iter.pos; new_dst->k.p = dst_iter.pos; - ret = bch_btree_insert_at(c, NULL, NULL, journal_seq, + ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, BTREE_INSERT_ATOMIC, BTREE_INSERT_ENTRY(&src_iter, &new_src->k_i), BTREE_INSERT_ENTRY(&dst_iter, &new_dst->k_i)); @@ -315,9 +315,9 @@ err: if (ret == -EINTR) goto retry; - bch_btree_iter_unlock(&whiteout_iter); - bch_btree_iter_unlock(&dst_iter); - bch_btree_iter_unlock(&src_iter); + bch2_btree_iter_unlock(&whiteout_iter); + bch2_btree_iter_unlock(&dst_iter); + bch2_btree_iter_unlock(&src_iter); if (new_src != (void *) &delete) kfree(new_src); @@ -325,37 +325,37 @@ err: return ret; } -int bch_dirent_delete(struct bch_fs *c, u64 dir_inum, - const struct bch_hash_info *hash_info, - const struct qstr *name, - u64 *journal_seq) +int bch2_dirent_delete(struct bch_fs *c, u64 dir_inum, + const struct bch_hash_info *hash_info, + const struct qstr *name, + u64 *journal_seq) { - return bch_hash_delete(dirent_hash_desc, hash_info, + return bch2_hash_delete(dirent_hash_desc, hash_info, c, dir_inum, journal_seq, name); } -u64 bch_dirent_lookup(struct bch_fs *c, u64 dir_inum, - const struct bch_hash_info *hash_info, - const struct qstr *name) +u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum, + const struct bch_hash_info *hash_info, + const struct qstr *name) { struct btree_iter iter; struct bkey_s_c k; u64 inum; - k = bch_hash_lookup(dirent_hash_desc, hash_info, c, + k = bch2_hash_lookup(dirent_hash_desc, hash_info, c, dir_inum, &iter, name); if (IS_ERR(k.k)) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return 0; } inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum); - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return inum; } -int bch_empty_dir(struct bch_fs *c, u64 dir_inum) +int bch2_empty_dir(struct bch_fs *c, u64 dir_inum) { struct btree_iter iter; struct bkey_s_c k; @@ -370,13 +370,13 @@ int bch_empty_dir(struct bch_fs *c, u64 dir_inum) break; } } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret; } -int bch_readdir(struct bch_fs *c, struct file *file, - struct dir_context *ctx) +int bch2_readdir(struct bch_fs *c, struct file *file, + struct dir_context *ctx) { struct inode *inode = file_inode(file); struct btree_iter iter; @@ -406,7 +406,7 @@ int bch_readdir(struct bch_fs *c, struct file *file, if (k.k->p.inode > inode->i_ino) break; - len = bch_dirent_name_bytes(dirent); + len = bch2_dirent_name_bytes(dirent); pr_debug("emitting %s", dirent.v->d_name); @@ -421,7 +421,7 @@ int bch_readdir(struct bch_fs *c, struct file *file, ctx->pos = k.k->p.offset + 1; } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return 0; } diff --git a/libbcachefs/dirent.h b/libbcachefs/dirent.h new file 
mode 100644 index 00000000..b1a30bda --- /dev/null +++ b/libbcachefs/dirent.h @@ -0,0 +1,36 @@ +#ifndef _BCACHE_DIRENT_H +#define _BCACHE_DIRENT_H + +extern const struct bkey_ops bch2_bkey_dirent_ops; + +struct qstr; +struct file; +struct dir_context; +struct bch_fs; +struct bch_hash_info; + +unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent); +int bch2_dirent_create(struct bch_fs *c, u64, const struct bch_hash_info *, + u8, const struct qstr *, u64, u64 *, int); +int bch2_dirent_delete(struct bch_fs *, u64, const struct bch_hash_info *, + const struct qstr *, u64 *); + +enum bch_rename_mode { + BCH_RENAME, + BCH_RENAME_OVERWRITE, + BCH_RENAME_EXCHANGE, +}; + +int bch2_dirent_rename(struct bch_fs *, + struct inode *, const struct qstr *, + struct inode *, const struct qstr *, + u64 *, enum bch_rename_mode); + +u64 bch2_dirent_lookup(struct bch_fs *, u64, const struct bch_hash_info *, + const struct qstr *); + +int bch2_empty_dir(struct bch_fs *, u64); +int bch2_readdir(struct bch_fs *, struct file *, struct dir_context *); + +#endif /* _BCACHE_DIRENT_H */ + diff --git a/libbcachefs/error.c b/libbcachefs/error.c new file mode 100644 index 00000000..8babf196 --- /dev/null +++ b/libbcachefs/error.c @@ -0,0 +1,51 @@ +#include "bcachefs.h" +#include "error.h" +#include "io.h" +#include "super.h" + +void bch2_inconsistent_error(struct bch_fs *c) +{ + set_bit(BCH_FS_ERROR, &c->flags); + + switch (c->opts.errors) { + case BCH_ON_ERROR_CONTINUE: + break; + case BCH_ON_ERROR_RO: + if (bch2_fs_emergency_read_only(c)) + bch_err(c, "emergency read only"); + break; + case BCH_ON_ERROR_PANIC: + panic(bch2_fmt(c, "panic after error")); + break; + } +} + +void bch2_fatal_error(struct bch_fs *c) +{ + if (bch2_fs_emergency_read_only(c)) + bch_err(c, "emergency read only"); +} + +void bch2_nonfatal_io_error_work(struct work_struct *work) +{ + struct bch_dev *ca = container_of(work, struct bch_dev, io_error_work); + struct bch_fs *c = ca->fs; + bool dev; + + mutex_lock(&c->state_lock); + dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_RO, + BCH_FORCE_IF_DEGRADED); + if (dev + ? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_RO, + BCH_FORCE_IF_DEGRADED) + : bch2_fs_emergency_read_only(c)) + bch_err(ca, + "too many IO errors, setting %s RO", + dev ? "device" : "filesystem"); + mutex_unlock(&c->state_lock); +} + +void bch2_nonfatal_io_error(struct bch_dev *ca) +{ + queue_work(system_long_wq, &ca->io_error_work); +} diff --git a/libbcache/error.h b/libbcachefs/error.h index 726b20d4..83d3a627 100644 --- a/libbcache/error.h +++ b/libbcachefs/error.h @@ -21,16 +21,16 @@ struct bch_fs; * XXX: audit and convert to inconsistent() checks */ -#define bch_fs_bug(c, ...) \ +#define bch2_fs_bug(c, ...) \ do { \ bch_err(c, __VA_ARGS__); \ BUG(); \ } while (0) -#define bch_fs_bug_on(cond, c, ...) \ +#define bch2_fs_bug_on(cond, c, ...) \ do { \ if (cond) \ - bch_fs_bug(c, __VA_ARGS__); \ + bch2_fs_bug(c, __VA_ARGS__); \ } while (0) /* @@ -44,20 +44,20 @@ do { \ * BCH_ON_ERROR_CONTINUE mode */ -void bch_inconsistent_error(struct bch_fs *); +void bch2_inconsistent_error(struct bch_fs *); -#define bch_fs_inconsistent(c, ...) \ +#define bch2_fs_inconsistent(c, ...) \ do { \ bch_err(c, __VA_ARGS__); \ - bch_inconsistent_error(c); \ + bch2_inconsistent_error(c); \ } while (0) -#define bch_fs_inconsistent_on(cond, c, ...) \ +#define bch2_fs_inconsistent_on(cond, c, ...) 
\ ({ \ int _ret = !!(cond); \ \ if (_ret) \ - bch_fs_inconsistent(c, __VA_ARGS__); \ + bch2_fs_inconsistent(c, __VA_ARGS__); \ _ret; \ }) @@ -66,18 +66,18 @@ do { \ * entire filesystem: */ -#define bch_dev_inconsistent(ca, ...) \ +#define bch2_dev_inconsistent(ca, ...) \ do { \ bch_err(ca, __VA_ARGS__); \ - bch_inconsistent_error((ca)->fs); \ + bch2_inconsistent_error((ca)->fs); \ } while (0) -#define bch_dev_inconsistent_on(cond, ca, ...) \ +#define bch2_dev_inconsistent_on(cond, ca, ...) \ ({ \ int _ret = !!(cond); \ \ if (_ret) \ - bch_dev_inconsistent(ca, __VA_ARGS__); \ + bch2_dev_inconsistent(ca, __VA_ARGS__); \ _ret; \ }) @@ -145,43 +145,43 @@ enum { * mode - pretty much just due to metadata IO errors: */ -void bch_fatal_error(struct bch_fs *); +void bch2_fatal_error(struct bch_fs *); -#define bch_fs_fatal_error(c, ...) \ +#define bch2_fs_fatal_error(c, ...) \ do { \ bch_err(c, __VA_ARGS__); \ - bch_fatal_error(c); \ + bch2_fatal_error(c); \ } while (0) -#define bch_fs_fatal_err_on(cond, c, ...) \ +#define bch2_fs_fatal_err_on(cond, c, ...) \ ({ \ int _ret = !!(cond); \ \ if (_ret) \ - bch_fs_fatal_error(c, __VA_ARGS__); \ + bch2_fs_fatal_error(c, __VA_ARGS__); \ _ret; \ }) -#define bch_dev_fatal_error(ca, ...) \ +#define bch2_dev_fatal_error(ca, ...) \ do { \ bch_err(ca, __VA_ARGS__); \ - bch_fatal_error(c); \ + bch2_fatal_error(c); \ } while (0) -#define bch_dev_fatal_io_error(ca, fmt, ...) \ +#define bch2_dev_fatal_io_error(ca, fmt, ...) \ do { \ - printk_ratelimited(KERN_ERR bch_fmt((ca)->fs, \ + printk_ratelimited(KERN_ERR bch2_fmt((ca)->fs, \ "fatal IO error on %s for " fmt), \ (ca)->name, ##__VA_ARGS__); \ - bch_fatal_error((ca)->fs); \ + bch2_fatal_error((ca)->fs); \ } while (0) -#define bch_dev_fatal_io_err_on(cond, ca, ...) \ +#define bch2_dev_fatal_io_err_on(cond, ca, ...) \ ({ \ int _ret = !!(cond); \ \ if (_ret) \ - bch_dev_fatal_io_error(ca, __VA_ARGS__); \ + bch2_dev_fatal_io_error(ca, __VA_ARGS__); \ _ret; \ }) @@ -191,44 +191,41 @@ do { \ * don't (necessarily) want to shut down the fs: */ -void bch_account_io_completion(struct bch_dev *); -void bch_account_io_completion_time(struct bch_dev *, unsigned, int); - -void bch_nonfatal_io_error_work(struct work_struct *); +void bch2_nonfatal_io_error_work(struct work_struct *); /* Does the error handling without logging a message */ -void bch_nonfatal_io_error(struct bch_dev *); +void bch2_nonfatal_io_error(struct bch_dev *); #if 0 -#define bch_fs_nonfatal_io_error(c, ...) \ +#define bch2_fs_nonfatal_io_error(c, ...) \ do { \ bch_err(c, __VA_ARGS__); \ - bch_nonfatal_io_error(c); \ + bch2_nonfatal_io_error(c); \ } while (0) #endif /* Logs message and handles the error: */ -#define bch_dev_nonfatal_io_error(ca, fmt, ...) \ +#define bch2_dev_nonfatal_io_error(ca, fmt, ...) \ do { \ - printk_ratelimited(KERN_ERR bch_fmt((ca)->fs, \ + printk_ratelimited(KERN_ERR bch2_fmt((ca)->fs, \ "IO error on %s for " fmt), \ (ca)->name, ##__VA_ARGS__); \ - bch_nonfatal_io_error(ca); \ + bch2_nonfatal_io_error(ca); \ } while (0) -#define bch_dev_nonfatal_io_err_on(cond, ca, ...) \ +#define bch2_dev_nonfatal_io_err_on(cond, ca, ...) \ ({ \ bool _ret = (cond); \ \ if (_ret) \ - bch_dev_nonfatal_io_error(ca, __VA_ARGS__); \ + bch2_dev_nonfatal_io_error(ca, __VA_ARGS__); \ _ret; \ }) /* kill? */ #define __bcache_io_error(c, fmt, ...) \ - printk_ratelimited(KERN_ERR bch_fmt(c, \ + printk_ratelimited(KERN_ERR bch2_fmt(c, \ "IO error: " fmt), ##__VA_ARGS__) #define bcache_io_error(c, bio, fmt, ...) 
\ diff --git a/libbcache/extents.c b/libbcachefs/extents.c index 4b422fb1..26f9352a 100644 --- a/libbcache/extents.c +++ b/libbcachefs/extents.c @@ -5,7 +5,7 @@ * dirty sector count. */ -#include "bcache.h" +#include "bcachefs.h" #include "bkey_methods.h" #include "btree_gc.h" #include "btree_update.h" @@ -17,13 +17,12 @@ #include "inode.h" #include "journal.h" #include "super-io.h" -#include "writeback.h" #include "xattr.h" -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> -static enum merge_result bch_extent_merge(struct bch_fs *, struct btree *, - struct bkey_i *, struct bkey_i *); +static enum merge_result bch2_extent_merge(struct bch_fs *, struct btree *, + struct bkey_i *, struct bkey_i *); static void sort_key_next(struct btree_node_iter *iter, struct btree *b, @@ -77,7 +76,7 @@ static inline bool should_drop_next_key(struct btree_node_iter *iter, __btree_node_offset_to_key(b, r->k)); } -struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *dst, +struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst, struct btree *b, struct btree_node_iter *iter) { @@ -88,7 +87,7 @@ struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *dst, heap_resort(iter, key_sort_cmp); - while (!bch_btree_node_iter_end(iter)) { + while (!bch2_btree_node_iter_end(iter)) { if (!should_drop_next_key(iter, b)) { struct bkey_packed *k = __btree_node_offset_to_key(b, iter->data->k); @@ -109,7 +108,7 @@ struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *dst, /* Common among btree and extent ptrs */ const struct bch_extent_ptr * -bch_extent_has_device(struct bkey_s_c_extent e, unsigned dev) +bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev) { const struct bch_extent_ptr *ptr; @@ -120,7 +119,7 @@ bch_extent_has_device(struct bkey_s_c_extent e, unsigned dev) return NULL; } -unsigned bch_extent_nr_ptrs(struct bkey_s_c_extent e) +unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent e) { const struct bch_extent_ptr *ptr; unsigned nr_ptrs = 0; @@ -131,7 +130,7 @@ unsigned bch_extent_nr_ptrs(struct bkey_s_c_extent e) return nr_ptrs; } -unsigned bch_extent_nr_dirty_ptrs(struct bkey_s_c k) +unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c k) { struct bkey_s_c_extent e; const struct bch_extent_ptr *ptr; @@ -162,7 +161,7 @@ static bool crc_cmp(union bch_extent_crc *l, union bch_extent_crc *r) } /* Increment pointers after @crc by crc's offset until the next crc entry: */ -void bch_extent_crc_narrow_pointers(struct bkey_s_extent e, union bch_extent_crc *crc) +void bch2_extent_crc_narrow_pointers(struct bkey_s_extent e, union bch_extent_crc *crc) { union bch_extent_entry *entry; @@ -197,7 +196,7 @@ void bch_extent_crc_narrow_pointers(struct bkey_s_extent e, union bch_extent_crc * * note: doesn't work with encryption */ -void bch_extent_narrow_crcs(struct bkey_s_extent e) +void bch2_extent_narrow_crcs(struct bkey_s_extent e) { union bch_extent_crc *crc; bool have_wide = false, have_narrow = false; @@ -206,7 +205,7 @@ void bch_extent_narrow_crcs(struct bkey_s_extent e) extent_for_each_crc(e, crc) { if (crc_compression_type(crc) || - bch_csum_type_is_encryption(crc_csum_type(crc))) + bch2_csum_type_is_encryption(crc_csum_type(crc))) continue; if (crc_uncompressed_size(e.k, crc) != e.k->size) { @@ -233,7 +232,7 @@ void bch_extent_narrow_crcs(struct bkey_s_extent e) if (bch_crc_bytes[csum_type] > 4) continue; - bch_extent_crc_narrow_pointers(e, crc); + bch2_extent_crc_narrow_pointers(e, crc); crc->crc32._compressed_size = e.k->size - 1; 
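	/* note: these packed size fields appear to hold (size - 1); the crc64 and crc128 cases below follow the same convention */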
crc->crc32._uncompressed_size = e.k->size - 1; crc->crc32.offset = 0; @@ -244,7 +243,7 @@ void bch_extent_narrow_crcs(struct bkey_s_extent e) if (bch_crc_bytes[csum_type] > 10) continue; - bch_extent_crc_narrow_pointers(e, crc); + bch2_extent_crc_narrow_pointers(e, crc); crc->crc64._compressed_size = e.k->size - 1; crc->crc64._uncompressed_size = e.k->size - 1; crc->crc64.offset = 0; @@ -256,7 +255,7 @@ void bch_extent_narrow_crcs(struct bkey_s_extent e) if (bch_crc_bytes[csum_type] > 16) continue; - bch_extent_crc_narrow_pointers(e, crc); + bch2_extent_crc_narrow_pointers(e, crc); crc->crc128._compressed_size = e.k->size - 1; crc->crc128._uncompressed_size = e.k->size - 1; crc->crc128.offset = 0; @@ -268,7 +267,7 @@ void bch_extent_narrow_crcs(struct bkey_s_extent e) } } -void bch_extent_drop_redundant_crcs(struct bkey_s_extent e) +void bch2_extent_drop_redundant_crcs(struct bkey_s_extent e) { union bch_extent_entry *entry = e.v->start; union bch_extent_crc *crc, *prev = NULL; @@ -301,7 +300,7 @@ void bch_extent_drop_redundant_crcs(struct bkey_s_extent e) !crc_csum_type(crc) && !crc_compression_type(crc)) { /* null crc entry: */ - bch_extent_crc_narrow_pointers(e, crc); + bch2_extent_crc_narrow_pointers(e, crc); goto drop; } @@ -315,7 +314,7 @@ drop: e.k->u64s -= crc_u64s; } - EBUG_ON(bkey_val_u64s(e.k) && !bch_extent_nr_ptrs(e.c)); + EBUG_ON(bkey_val_u64s(e.k) && !bch2_extent_nr_ptrs(e.c)); } static bool should_drop_ptr(const struct bch_fs *c, @@ -325,29 +324,29 @@ static bool should_drop_ptr(const struct bch_fs *c, return ptr->cached && ptr_stale(c->devs[ptr->dev], ptr); } -static void bch_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e) +static void bch2_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e) { struct bch_extent_ptr *ptr = &e.v->start->ptr; bool dropped = false; while ((ptr = extent_ptr_next(e, ptr))) if (should_drop_ptr(c, e.c, ptr)) { - __bch_extent_drop_ptr(e, ptr); + __bch2_extent_drop_ptr(e, ptr); dropped = true; } else ptr++; if (dropped) - bch_extent_drop_redundant_crcs(e); + bch2_extent_drop_redundant_crcs(e); } -static bool bch_ptr_normalize(struct bch_fs *c, struct btree *bk, +static bool bch2_ptr_normalize(struct bch_fs *c, struct btree *bk, struct bkey_s k) { - return bch_extent_normalize(c, k); + return bch2_extent_normalize(c, k); } -static void bch_ptr_swab(const struct bkey_format *f, struct bkey_packed *k) +static void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k) { switch (k->type) { case BCH_EXTENT: @@ -472,7 +471,7 @@ out: /* Btree ptrs */ -static const char *bch_btree_ptr_invalid(const struct bch_fs *c, +static const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k) { if (bkey_extent_is_cached(k.k)) @@ -549,9 +548,9 @@ static void btree_ptr_debugcheck(struct bch_fs *c, struct btree *b, } if (replicas < c->sb.meta_replicas_have) { - bch_bkey_val_to_text(c, btree_node_type(b), + bch2_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k); - bch_fs_bug(c, + bch2_fs_bug(c, "btree key bad (too few replicas, %u < %u): %s", replicas, c->sb.meta_replicas_have, buf); return; @@ -559,8 +558,8 @@ static void btree_ptr_debugcheck(struct bch_fs *c, struct btree *b, return; err: - bch_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k); - bch_fs_bug(c, "%s btree pointer %s: bucket %zi prio %i " + bch2_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k); + bch2_fs_bug(c, "%s btree pointer %s: bucket %zi prio %i " "gen %i last_gc %i mark %08x", err, buf, PTR_BUCKET_NR(ca, ptr), 
g->read_prio, PTR_BUCKET(ca, ptr)->mark.gen, @@ -568,7 +567,7 @@ err: (unsigned) g->mark.counter); } -static void bch_btree_ptr_to_text(struct bch_fs *c, char *buf, +static void bch2_btree_ptr_to_text(struct bch_fs *c, char *buf, size_t size, struct bkey_s_c k) { char *out = buf, *end = buf + size; @@ -579,14 +578,14 @@ static void bch_btree_ptr_to_text(struct bch_fs *c, char *buf, if (bkey_extent_is_data(k.k)) out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k)); - invalid = bch_btree_ptr_invalid(c, k); + invalid = bch2_btree_ptr_invalid(c, k); if (invalid) p(" invalid: %s", invalid); #undef p } struct extent_pick_ptr -bch_btree_pick_ptr(struct bch_fs *c, const struct btree *b) +bch2_btree_pick_ptr(struct bch_fs *c, const struct btree *b) { struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key); const union bch_extent_crc *crc; @@ -597,13 +596,13 @@ bch_btree_pick_ptr(struct bch_fs *c, const struct btree *b) struct bch_dev *ca = c->devs[ptr->dev]; struct btree *root = btree_node_root(c, b); - if (bch_fs_inconsistent_on(crc, c, + if (bch2_fs_inconsistent_on(crc, c, "btree node pointer with crc at btree %u level %u/%u bucket %zu", b->btree_id, b->level, root ? root->level : -1, PTR_BUCKET_NR(ca, ptr))) break; - if (bch_dev_inconsistent_on(ptr_stale(ca, ptr), ca, + if (bch2_dev_inconsistent_on(ptr_stale(ca, ptr), ca, "stale btree node pointer at btree %u level %u/%u bucket %zu", b->btree_id, b->level, root ? root->level : -1, PTR_BUCKET_NR(ca, ptr))) @@ -628,16 +627,16 @@ bch_btree_pick_ptr(struct bch_fs *c, const struct btree *b) return pick; } -const struct bkey_ops bch_bkey_btree_ops = { - .key_invalid = bch_btree_ptr_invalid, +const struct bkey_ops bch2_bkey_btree_ops = { + .key_invalid = bch2_btree_ptr_invalid, .key_debugcheck = btree_ptr_debugcheck, - .val_to_text = bch_btree_ptr_to_text, - .swab = bch_ptr_swab, + .val_to_text = bch2_btree_ptr_to_text, + .swab = bch2_ptr_swab, }; /* Extents */ -static bool __bch_cut_front(struct bpos where, struct bkey_s k) +static bool __bch2_cut_front(struct bpos where, struct bkey_s k) { u64 len = 0; @@ -688,12 +687,12 @@ static bool __bch_cut_front(struct bpos where, struct bkey_s k) return true; } -bool bch_cut_front(struct bpos where, struct bkey_i *k) +bool bch2_cut_front(struct bpos where, struct bkey_i *k) { - return __bch_cut_front(where, bkey_i_to_s(k)); + return __bch2_cut_front(where, bkey_i_to_s(k)); } -bool bch_cut_back(struct bpos where, struct bkey *k) +bool bch2_cut_back(struct bpos where, struct bkey *k) { u64 len = 0; @@ -720,7 +719,7 @@ bool bch_cut_back(struct bpos where, struct bkey *k) * * bkey_start_offset(k) will be preserved, modifies where the extent ends */ -void bch_key_resize(struct bkey *k, +void bch2_key_resize(struct bkey *k, unsigned new_size) { k->p.offset -= k->size; @@ -745,11 +744,11 @@ static bool __extent_save(struct btree *b, struct btree_node_iter *iter, dst_unpacked->k = *src; ret = true; } else { - ret = bkey_pack_key(dst, src, f); + ret = bch2_bkey_pack_key(dst, src, f); } if (ret && iter) - bch_verify_key_order(b, iter, dst); + bch2_verify_key_order(b, iter, dst); return ret; } @@ -805,14 +804,14 @@ static void extent_sort_append(struct bch_fs *c, if (bkey_whiteout(k)) return; - bkey_unpack(b, &tmp.k, k); + bch2_bkey_unpack(b, &tmp.k, k); if (*prev && - bch_extent_merge(c, b, (void *) *prev, &tmp.k)) + bch2_extent_merge(c, b, (void *) *prev, &tmp.k)) return; if (*prev) { - bkey_pack(*prev, (void *) *prev, f); + bch2_bkey_pack(*prev, (void *) *prev, f); btree_keys_account_key_add(nr, 0, *prev); 
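The cut/resize helpers being renamed here all rely on a bkey storing its end offset plus a size, so the start position is implied. A self-contained sketch of that arithmetic follows, using a toy key type rather than struct bkey; per the comment in the hunk, resizing preserves the start and only moves the end, and the cut helpers here are simplified illustrations of the same idea, not the real pointer-adjusting code.

#include <assert.h>
#include <stdio.h>

/* Toy key: end offset + size; the start is end - size, as in the hunk above. */
struct toy_key { unsigned long long end; unsigned size; };

static unsigned long long key_start(const struct toy_key *k)
{
	return k->end - k->size;
}

/* Same idea as bch2_key_resize(): keep the start, move the end. */
static void key_resize(struct toy_key *k, unsigned new_size)
{
	k->end -= k->size;
	k->size = new_size;
	k->end += new_size;
}

/* Trim everything before 'where' (front cut) or after it (back cut). */
static void cut_front(struct toy_key *k, unsigned long long where)
{
	k->size = k->end - where;
}

static void cut_back(struct toy_key *k, unsigned long long where)
{
	k->size -= k->end - where;
	k->end = where;
}

int main(void)
{
	struct toy_key k = { .end = 100, .size = 40 };	/* covers [60, 100) */

	key_resize(&k, 20);				/* now [60, 80) */
	assert(key_start(&k) == 60 && k.end == 80);

	cut_front(&k, 70);				/* now [70, 80) */
	cut_back(&k, 75);				/* now [70, 75) */
	printf("[%llu, %llu)\n", key_start(&k), k.end);
	return 0;
}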
*prev = bkey_next(*prev); @@ -823,7 +822,7 @@ static void extent_sort_append(struct bch_fs *c, bkey_copy(*prev, &tmp.k); } -struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c, +struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst, struct btree *b, struct btree_node_iter *iter) @@ -839,7 +838,7 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c, heap_resort(iter, extent_sort_cmp); - while (!bch_btree_node_iter_end(iter)) { + while (!bch2_btree_node_iter_end(iter)) { lk = __btree_node_offset_to_key(b, _l->k); if (iter->used == 1) { @@ -886,7 +885,7 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c, if (bkey_cmp(l.k->p, r.k->p) >= 0) { sort_key_next(iter, b, _r); } else { - __bch_cut_front(l.k->p, r); + __bch2_cut_front(l.k->p, r); extent_save(b, NULL, rk, r.k); } @@ -898,9 +897,9 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c, * r wins, but it overlaps in the middle of l - split l: */ bkey_reassemble(&tmp.k, l.s_c); - bch_cut_back(bkey_start_pos(r.k), &tmp.k.k); + bch2_cut_back(bkey_start_pos(r.k), &tmp.k.k); - __bch_cut_front(r.k->p, l); + __bch2_cut_front(r.k->p, l); extent_save(b, NULL, lk, l.k); extent_sort_sift(iter, b, 0); @@ -908,13 +907,13 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c, extent_sort_append(c, b, &nr, dst->start, &prev, bkey_to_packed(&tmp.k)); } else { - bch_cut_back(bkey_start_pos(r.k), l.k); + bch2_cut_back(bkey_start_pos(r.k), l.k); extent_save(b, NULL, lk, l.k); } } if (prev) { - bkey_pack(prev, (void *) prev, f); + bch2_bkey_pack(prev, (void *) prev, f); btree_keys_account_key_add(&nr, 0, prev); out = bkey_next(prev); } else { @@ -937,7 +936,7 @@ struct extent_insert_state { bool deleting; }; -static void bch_add_sectors(struct extent_insert_state *s, +static void bch2_add_sectors(struct extent_insert_state *s, struct bkey_s_c k, u64 offset, s64 sectors) { struct bch_fs *c = s->trans->c; @@ -948,163 +947,47 @@ static void bch_add_sectors(struct extent_insert_state *s, if (!sectors) return; - bch_mark_key(c, k, sectors, false, gc_pos_btree_node(b), + bch2_mark_key(c, k, sectors, false, gc_pos_btree_node(b), &s->stats, s->trans->journal_res.seq); - - if (bkey_extent_is_data(k.k) && - !bkey_extent_is_cached(k.k)) - bcache_dev_sectors_dirty_add(c, k.k->p.inode, offset, sectors); } -static void bch_subtract_sectors(struct extent_insert_state *s, +static void bch2_subtract_sectors(struct extent_insert_state *s, struct bkey_s_c k, u64 offset, s64 sectors) { - bch_add_sectors(s, k, offset, -sectors); + bch2_add_sectors(s, k, offset, -sectors); } /* These wrappers subtract exactly the sectors that we're removing from @k */ -static void bch_cut_subtract_back(struct extent_insert_state *s, +static void bch2_cut_subtract_back(struct extent_insert_state *s, struct bpos where, struct bkey_s k) { - bch_subtract_sectors(s, k.s_c, where.offset, + bch2_subtract_sectors(s, k.s_c, where.offset, k.k->p.offset - where.offset); - bch_cut_back(where, k.k); + bch2_cut_back(where, k.k); } -static void bch_cut_subtract_front(struct extent_insert_state *s, +static void bch2_cut_subtract_front(struct extent_insert_state *s, struct bpos where, struct bkey_s k) { - bch_subtract_sectors(s, k.s_c, bkey_start_offset(k.k), + bch2_subtract_sectors(s, k.s_c, bkey_start_offset(k.k), where.offset - bkey_start_offset(k.k)); - __bch_cut_front(where, k); + __bch2_cut_front(where, k); } -static void bch_drop_subtract(struct extent_insert_state *s, struct 
bkey_s k) +static void bch2_drop_subtract(struct extent_insert_state *s, struct bkey_s k) { if (k.k->size) - bch_subtract_sectors(s, k.s_c, + bch2_subtract_sectors(s, k.s_c, bkey_start_offset(k.k), k.k->size); k.k->size = 0; __set_bkey_deleted(k.k); } -/* - * Note: If this returns true because only some pointers matched, - * we can lose some caching that had happened in the interim. - * Because cache promotion only promotes the part of the extent - * actually read, and not the whole extent, and due to the key - * splitting done in bch_extent_insert_fixup, preserving such - * caching is difficult. - */ -static bool bch_extent_cmpxchg_cmp(struct bkey_s_c l, struct bkey_s_c r) -{ - struct bkey_s_c_extent le, re; - const struct bch_extent_ptr *lp, *rp; - s64 offset; - - BUG_ON(!l.k->size || !r.k->size); - - if (l.k->type != r.k->type || - bversion_cmp(l.k->version, r.k->version)) - return false; - - switch (l.k->type) { - case KEY_TYPE_COOKIE: - return !memcmp(bkey_s_c_to_cookie(l).v, - bkey_s_c_to_cookie(r).v, - sizeof(struct bch_cookie)); - - case BCH_EXTENT: - case BCH_EXTENT_CACHED: - le = bkey_s_c_to_extent(l); - re = bkey_s_c_to_extent(r); - - /* - * bkey_cmpxchg() handles partial matches - when either l or r - * has been trimmed - so we need just to handle l or r not - * starting at the same place when checking for a match here. - * - * If the starts of the keys are different, we just apply that - * offset to the device pointer offsets when checking those - - * matching how bch_cut_front() adjusts device pointer offsets - * when adjusting the start of a key: - */ - offset = bkey_start_offset(l.k) - bkey_start_offset(r.k); - - /* - * XXX: perhaps we only raced with copygc or tiering replacing - * one of the pointers: it should suffice to find _any_ matching - * pointer - */ - - if (bkey_val_u64s(le.k) != bkey_val_u64s(re.k)) - return false; - - extent_for_each_ptr(le, lp) { - const union bch_extent_entry *entry = - vstruct_idx(re.v, (u64 *) lp - le.v->_data); - - if (!extent_entry_is_ptr(entry)) - return false; - - rp = &entry->ptr; - - if (lp->offset != rp->offset + offset || - lp->dev != rp->dev || - lp->gen != rp->gen) - return false; - } - - return true; - default: - return false; - } - -} - -/* - * Returns true on success, false on failure (and false means @new no longer - * overlaps with @k) - * - * If returned true, we may have inserted up to one key in @b. - * If returned false, we may have inserted up to two keys in @b. - * - * On return, there is room in @res for at least one more key of the same size - * as @new. 
- */ -enum extent_insert_hook_ret bch_extent_cmpxchg(struct extent_insert_hook *hook, - struct bpos committed_pos, - struct bpos next_pos, - struct bkey_s_c k, - const struct bkey_i *new) -{ - struct bch_replace_info *replace = container_of(hook, - struct bch_replace_info, hook); - struct bkey_i *old = &replace->key; - - EBUG_ON(bkey_cmp(committed_pos, bkey_start_pos(&new->k)) < 0); - - /* must have something to compare against */ - EBUG_ON(!bkey_val_u64s(&old->k)); - - /* new must be a subset of old */ - EBUG_ON(bkey_cmp(new->k.p, old->k.p) > 0 || - bkey_cmp(bkey_start_pos(&new->k), bkey_start_pos(&old->k)) < 0); - - if (k.k && bch_extent_cmpxchg_cmp(k, bkey_i_to_s_c(old))) { - replace->successes++; - return BTREE_HOOK_DO_INSERT; - } else { - replace->failures++; - return BTREE_HOOK_NO_INSERT; - } -} - -static bool bch_extent_merge_inline(struct bch_fs *, - struct btree_iter *, - struct bkey_packed *, - struct bkey_packed *, - bool); +static bool bch2_extent_merge_inline(struct bch_fs *, + struct btree_iter *, + struct bkey_packed *, + struct bkey_packed *, + bool); #define MAX_LOCK_HOLD_TIME (5 * NSEC_PER_MSEC) @@ -1122,7 +1005,7 @@ extent_insert_should_stop(struct extent_insert_state *s) * will insert two keys, and one iteration of this room will insert one * key, so we need room for three keys. */ - if (!bch_btree_node_insert_fits(s->trans->c, b, s->insert->k->k.u64s)) + if (!bch2_btree_node_insert_fits(s->trans->c, b, s->insert->k->k.u64s)) return BTREE_INSERT_BTREE_NODE_FULL; else if (!journal_res_insert_fits(s->trans, s->insert)) return BTREE_INSERT_JOURNAL_RES_FULL; /* XXX worth tracing */ @@ -1137,8 +1020,8 @@ static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter, struct btree_node_iter *node_iter = &iter->node_iters[0]; struct bset_tree *t = bset_tree_last(b); struct bkey_packed *where = - bch_btree_node_iter_bset_pos(node_iter, b, t); - struct bkey_packed *prev = bkey_prev(b, t, where); + bch2_btree_node_iter_bset_pos(node_iter, b, t); + struct bkey_packed *prev = bch2_bkey_prev(b, t, where); struct bkey_packed *next_live_key = where; unsigned clobber_u64s; @@ -1156,21 +1039,21 @@ static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter, clobber_u64s = (u64 *) next_live_key - (u64 *) where; if (prev && - bch_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true)) + bch2_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true)) goto drop_deleted_keys; if (next_live_key != btree_bkey_last(b, t) && - bch_extent_merge_inline(c, iter, bkey_to_packed(insert), + bch2_extent_merge_inline(c, iter, bkey_to_packed(insert), next_live_key, false)) goto drop_deleted_keys; - bch_bset_insert(b, node_iter, where, insert, clobber_u64s); - bch_btree_node_iter_fix(iter, b, node_iter, t, where, + bch2_bset_insert(b, node_iter, where, insert, clobber_u64s); + bch2_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, where->u64s); return; drop_deleted_keys: - bch_bset_delete(b, where, clobber_u64s); - bch_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, 0); + bch2_bset_delete(b, where, clobber_u64s); + bch2_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, 0); } static void extent_insert_committed(struct extent_insert_state *s) @@ -1189,7 +1072,7 @@ static void extent_insert_committed(struct extent_insert_state *s) return; if (s->deleting && !s->do_journal) { - bch_cut_front(s->committed, insert); + bch2_cut_front(s->committed, insert); goto done; } @@ -1201,27 +1084,27 @@ static void 
extent_insert_committed(struct extent_insert_state *s) bkey_cmp(s->committed, insert->k.p) && bkey_extent_is_compressed(bkey_i_to_s_c(insert))) { /* XXX: possibly need to increase our reservation? */ - bch_cut_subtract_back(s, s->committed, + bch2_cut_subtract_back(s, s->committed, bkey_i_to_s(&split.k)); - bch_cut_front(s->committed, insert); - bch_add_sectors(s, bkey_i_to_s_c(insert), + bch2_cut_front(s->committed, insert); + bch2_add_sectors(s, bkey_i_to_s_c(insert), bkey_start_offset(&insert->k), insert->k.size); } else { - bch_cut_back(s->committed, &split.k.k); - bch_cut_front(s->committed, insert); + bch2_cut_back(s->committed, &split.k.k); + bch2_cut_front(s->committed, insert); } if (debug_check_bkeys(c)) - bkey_debugcheck(c, iter->nodes[iter->level], + bch2_bkey_debugcheck(c, iter->nodes[iter->level], bkey_i_to_s_c(&split.k)); - bch_btree_journal_key(s->trans, iter, &split.k); + bch2_btree_journal_key(s->trans, iter, &split.k); if (!s->deleting) extent_bset_insert(c, iter, &split.k); done: - bch_btree_iter_set_pos_same_leaf(iter, s->committed); + bch2_btree_iter_set_pos_same_leaf(iter, s->committed); insert->k.needs_whiteout = false; s->do_journal = false; @@ -1259,9 +1142,9 @@ __extent_insert_advance_pos(struct extent_insert_state *s, break; case BTREE_HOOK_NO_INSERT: extent_insert_committed(s); - bch_cut_subtract_front(s, next_pos, bkey_i_to_s(s->insert->k)); + bch2_cut_subtract_front(s, next_pos, bkey_i_to_s(s->insert->k)); - bch_btree_iter_set_pos_same_leaf(s->insert->iter, next_pos); + bch2_btree_iter_set_pos_same_leaf(s->insert->iter, next_pos); break; case BTREE_HOOK_RESTART_TRANS: return ret; @@ -1327,7 +1210,7 @@ extent_insert_check_split_compressed(struct extent_insert_state *s, if (s->trans->flags & BTREE_INSERT_NOFAIL) flags |= BCH_DISK_RESERVATION_NOFAIL; - switch (bch_disk_reservation_add(c, + switch (bch2_disk_reservation_add(c, s->trans->disk_res, sectors, flags)) { case 0: @@ -1357,14 +1240,14 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, switch (overlap) { case BCH_EXTENT_OVERLAP_FRONT: /* insert overlaps with start of k: */ - bch_cut_subtract_front(s, insert->k.p, k); + bch2_cut_subtract_front(s, insert->k.p, k); BUG_ON(bkey_deleted(k.k)); extent_save(b, node_iter, _k, k.k); break; case BCH_EXTENT_OVERLAP_BACK: /* insert overlaps with end of k: */ - bch_cut_subtract_back(s, bkey_start_pos(&insert->k), k); + bch2_cut_subtract_back(s, bkey_start_pos(&insert->k), k); BUG_ON(bkey_deleted(k.k)); extent_save(b, node_iter, _k, k.k); @@ -1373,8 +1256,8 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, * key and we've just changed the end, update the * auxiliary tree. 
*/ - bch_bset_fix_invalidated_key(b, t, _k); - bch_btree_node_iter_fix(iter, b, node_iter, t, + bch2_bset_fix_invalidated_key(b, t, _k); + bch2_btree_node_iter_fix(iter, b, node_iter, t, _k, _k->u64s, _k->u64s); break; @@ -1386,7 +1269,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, btree_keys_account_key_drop(&b->nr, t - b->set, _k); - bch_drop_subtract(s, k); + bch2_drop_subtract(s, k); k.k->p = bkey_start_pos(&insert->k); if (!__extent_save(b, node_iter, _k, k.k)) { /* @@ -1411,8 +1294,8 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, */ EBUG_ON(bkey_cmp(s->committed, k.k->p)); } else { - bch_bset_fix_invalidated_key(b, t, _k); - bch_btree_node_iter_fix(iter, b, node_iter, t, + bch2_bset_fix_invalidated_key(b, t, _k); + bch2_btree_node_iter_fix(iter, b, node_iter, t, _k, _k->u64s, _k->u64s); } @@ -1437,14 +1320,14 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, bkey_reassemble(&split.k, k.s_c); split.k.k.needs_whiteout |= bset_written(b, bset(b, t)); - bch_cut_back(bkey_start_pos(&insert->k), &split.k.k); + bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k); BUG_ON(bkey_deleted(&split.k.k)); - bch_cut_subtract_front(s, insert->k.p, k); + bch2_cut_subtract_front(s, insert->k.p, k); BUG_ON(bkey_deleted(k.k)); extent_save(b, node_iter, _k, k.k); - bch_add_sectors(s, bkey_i_to_s_c(&split.k), + bch2_add_sectors(s, bkey_i_to_s_c(&split.k), bkey_start_offset(&split.k.k), split.k.k.size); extent_bset_insert(c, iter, &split.k); @@ -1456,7 +1339,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert, } static enum btree_insert_ret -bch_delete_fixup_extent(struct extent_insert_state *s) +bch2_delete_fixup_extent(struct extent_insert_state *s) { struct bch_fs *c = s->trans->c; struct btree_iter *iter = s->insert->iter; @@ -1474,8 +1357,8 @@ bch_delete_fixup_extent(struct extent_insert_state *s) while (bkey_cmp(s->committed, insert->k.p) < 0 && (ret = extent_insert_should_stop(s)) == BTREE_INSERT_OK && - (_k = bch_btree_node_iter_peek_all(node_iter, b))) { - struct bset_tree *t = bch_bkey_to_bset(b, _k); + (_k = bch2_btree_node_iter_peek_all(node_iter, b))) { + struct bset_tree *t = bch2_bkey_to_bset(b, _k); struct bkey_s k = __bkey_disassemble(b, _k, &unpacked); enum bch_extent_overlap overlap; @@ -1490,7 +1373,7 @@ bch_delete_fixup_extent(struct extent_insert_state *s) goto next; } - overlap = bch_extent_overlap(&insert->k, k.k); + overlap = bch2_extent_overlap(&insert->k, k.k); ret = extent_insert_check_split_compressed(s, k.s_c, overlap); if (ret != BTREE_INSERT_OK) @@ -1511,7 +1394,7 @@ bch_delete_fixup_extent(struct extent_insert_state *s) if (overlap == BCH_EXTENT_OVERLAP_ALL) { btree_keys_account_key_drop(&b->nr, t - b->set, _k); - bch_subtract_sectors(s, k.s_c, + bch2_subtract_sectors(s, k.s_c, bkey_start_offset(k.k), k.k->size); _k->type = KEY_TYPE_DISCARD; reserve_whiteout(b, t, _k); @@ -1521,10 +1404,10 @@ bch_delete_fixup_extent(struct extent_insert_state *s) switch (overlap) { case BCH_EXTENT_OVERLAP_FRONT: - bch_cut_front(bkey_start_pos(k.k), &discard); + bch2_cut_front(bkey_start_pos(k.k), &discard); break; case BCH_EXTENT_OVERLAP_BACK: - bch_cut_back(k.k->p, &discard.k); + bch2_cut_back(k.k->p, &discard.k); break; default: break; @@ -1541,8 +1424,8 @@ bch_delete_fixup_extent(struct extent_insert_state *s) BUG_ON(ret != BTREE_INSERT_OK); } next: - bch_cut_front(s->committed, insert); - bch_btree_iter_set_pos_same_leaf(iter, s->committed); + bch2_cut_front(s->committed, insert); + 
bch2_btree_iter_set_pos_same_leaf(iter, s->committed); } if (bkey_cmp(s->committed, insert->k.p) < 0 && @@ -1552,13 +1435,13 @@ next: stop: extent_insert_committed(s); - bch_fs_usage_apply(c, &s->stats, s->trans->disk_res, + bch2_fs_usage_apply(c, &s->stats, s->trans->disk_res, gc_pos_btree_node(b)); EBUG_ON(bkey_cmp(iter->pos, s->committed)); EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) != iter->at_end_of_leaf); - bch_cut_front(iter->pos, insert); + bch2_cut_front(iter->pos, insert); if (insert->k.size && iter->at_end_of_leaf) ret = BTREE_INSERT_NEED_TRAVERSE; @@ -1575,7 +1458,7 @@ stop: * of the insert key. For cmpxchg operations this is where that logic lives. * * All subsets of @insert that need to be inserted are inserted using - * bch_btree_insert_and_journal(). If @b or @res fills up, this function + * bch2_btree_insert_and_journal(). If @b or @res fills up, this function * returns false, setting @iter->pos for the prefix of @insert that actually got * inserted. * @@ -1599,7 +1482,7 @@ stop: * i.e. no two overlapping keys _of nonzero size_ * * We can't realistically maintain this invariant for zero size keys because of - * the key merging done in bch_btree_insert_key() - for two mergeable keys k, j + * the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j * there may be another 0 size key between them in another bset, and it will * thus overlap with the merged key. * @@ -1608,8 +1491,8 @@ stop: * key insertion needs to continue/be retried. */ enum btree_insert_ret -bch_insert_fixup_extent(struct btree_insert *trans, - struct btree_insert_entry *insert) +bch2_insert_fixup_extent(struct btree_insert *trans, + struct btree_insert_entry *insert) { struct bch_fs *c = trans->c; struct btree_iter *iter = insert->iter; @@ -1630,7 +1513,7 @@ bch_insert_fixup_extent(struct btree_insert *trans, EBUG_ON(bkey_deleted(&insert->k->k) || !insert->k->k.size); if (s.deleting) - return bch_delete_fixup_extent(&s); + return bch2_delete_fixup_extent(&s); /* * As we process overlapping extents, we advance @iter->pos both to @@ -1641,14 +1524,14 @@ bch_insert_fixup_extent(struct btree_insert *trans, EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k))); if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) - bch_add_sectors(&s, bkey_i_to_s_c(insert->k), + bch2_add_sectors(&s, bkey_i_to_s_c(insert->k), bkey_start_offset(&insert->k->k), insert->k->k.size); while (bkey_cmp(s.committed, insert->k->k.p) < 0 && (ret = extent_insert_should_stop(&s)) == BTREE_INSERT_OK && - (_k = bch_btree_node_iter_peek_all(node_iter, b))) { - struct bset_tree *t = bch_bkey_to_bset(b, _k); + (_k = bch2_btree_node_iter_peek_all(node_iter, b))) { + struct bset_tree *t = bch2_bkey_to_bset(b, _k); struct bkey_s k = __bkey_disassemble(b, _k, &unpacked); enum bch_extent_overlap overlap; @@ -1658,7 +1541,7 @@ bch_insert_fixup_extent(struct btree_insert *trans, if (bkey_cmp(bkey_start_pos(k.k), insert->k->k.p) >= 0) break; - overlap = bch_extent_overlap(&insert->k->k, k.k); + overlap = bch2_extent_overlap(&insert->k->k, k.k); ret = extent_insert_check_split_compressed(&s, k.s_c, overlap); if (ret != BTREE_INSERT_OK) @@ -1710,11 +1593,11 @@ stop: */ if (insert->k->k.size && !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)) - bch_subtract_sectors(&s, bkey_i_to_s_c(insert->k), + bch2_subtract_sectors(&s, bkey_i_to_s_c(insert->k), bkey_start_offset(&insert->k->k), insert->k->k.size); - bch_fs_usage_apply(c, &s.stats, trans->disk_res, + bch2_fs_usage_apply(c, &s.stats, trans->disk_res, gc_pos_btree_node(b)); 
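The insert-fixup code above repeatedly branches on how the incoming extent overlaps an existing key: front, back, all, or a middle overlap that forces the old key to be split in two. Below is a standalone sketch of that classification on plain integer ranges; it is toy code standing in for the bkey-based bch2_extent_overlap(), not a copy of it.

#include <stdio.h>

enum overlap { OVERLAP_FRONT, OVERLAP_BACK, OVERLAP_ALL, OVERLAP_MIDDLE };

struct range { unsigned long long start, end; };

/*
 * Classify how 'new' overlaps 'old', mirroring the four cases the
 * extent insert path handles: the new key covers the old key's front,
 * its back, all of it, or punches a hole in the middle.
 */
static enum overlap classify(struct range new, struct range old)
{
	int covers_front = new.start <= old.start;
	int covers_back  = new.end   >= old.end;

	if (covers_front && covers_back) return OVERLAP_ALL;
	if (covers_front)                return OVERLAP_FRONT;
	if (covers_back)                 return OVERLAP_BACK;
	return OVERLAP_MIDDLE;		/* old must be split in two */
}

int main(void)
{
	struct range old = { 100, 200 };

	printf("%d %d %d %d\n",
	       classify((struct range){  50, 150 }, old),	/* FRONT  */
	       classify((struct range){ 150, 250 }, old),	/* BACK   */
	       classify((struct range){  50, 250 }, old),	/* ALL    */
	       classify((struct range){ 120, 180 }, old));	/* MIDDLE */
	return 0;
}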
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k))); @@ -1729,8 +1612,8 @@ stop: return ret; } -static const char *bch_extent_invalid(const struct bch_fs *c, - struct bkey_s_c k) +static const char *bch2_extent_invalid(const struct bch_fs *c, + struct bkey_s_c k) { if (bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX) return "value too big"; @@ -1761,7 +1644,7 @@ static const char *bch_extent_invalid(const struct bch_fs *c, size_ondisk = crc_compressed_size(e.k, crc); - if (!bch_checksum_type_valid(c, crc_csum_type(crc))) + if (!bch2_checksum_type_valid(c, crc_csum_type(crc))) return "invalid checksum type"; if (crc_compression_type(crc) >= BCH_COMPRESSION_NR) @@ -1796,8 +1679,8 @@ static const char *bch_extent_invalid(const struct bch_fs *c, } } -static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b, - struct bkey_s_c_extent e) +static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b, + struct bkey_s_c_extent e) { const struct bch_extent_ptr *ptr; struct bch_dev *ca; @@ -1810,7 +1693,7 @@ static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b, /* * XXX: we should be doing most/all of these checks at startup time, - * where we check bkey_invalid() in btree_node_read_done() + * where we check bch2_bkey_invalid() in btree_node_read_done() * * But note that we can't check for stale pointers or incorrect gc marks * until after journal replay is done (it might be an extent that's @@ -1845,10 +1728,10 @@ static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b, stale = ptr_stale(ca, ptr); - bch_fs_bug_on(stale && !ptr->cached, c, + bch2_fs_bug_on(stale && !ptr->cached, c, "stale dirty pointer"); - bch_fs_bug_on(stale > 96, c, + bch2_fs_bug_on(stale > 96, c, "key too stale: %i", stale); @@ -1868,9 +1751,9 @@ static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b, } if (replicas > BCH_REPLICAS_MAX) { - bch_bkey_val_to_text(c, btree_node_type(b), buf, + bch2_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), e.s_c); - bch_fs_bug(c, + bch2_fs_bug(c, "extent key bad (too many replicas: %u): %s", replicas, buf); return; @@ -1878,9 +1761,9 @@ static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b, if (!bkey_extent_is_cached(e.k) && replicas < c->sb.data_replicas_have) { - bch_bkey_val_to_text(c, btree_node_type(b), buf, + bch2_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), e.s_c); - bch_fs_bug(c, + bch2_fs_bug(c, "extent key bad (too few replicas, %u < %u): %s", replicas, c->sb.data_replicas_have, buf); return; @@ -1889,9 +1772,9 @@ static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b, return; bad_ptr: - bch_bkey_val_to_text(c, btree_node_type(b), buf, + bch2_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), e.s_c); - bch_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu prio %i " + bch2_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu prio %i " "gen %i last_gc %i mark 0x%08x", buf, PTR_BUCKET_NR(ca, ptr), g->read_prio, PTR_BUCKET(ca, ptr)->mark.gen, @@ -1900,13 +1783,13 @@ bad_ptr: return; } -static void bch_extent_debugcheck(struct bch_fs *c, struct btree *b, - struct bkey_s_c k) +static void bch2_extent_debugcheck(struct bch_fs *c, struct btree *b, + struct bkey_s_c k) { switch (k.k->type) { case BCH_EXTENT: case BCH_EXTENT_CACHED: - bch_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k)); + bch2_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k)); break; case BCH_RESERVATION: break; @@ -1915,8 +1798,8 @@ static 
void bch_extent_debugcheck(struct bch_fs *c, struct btree *b, } } -static void bch_extent_to_text(struct bch_fs *c, char *buf, - size_t size, struct bkey_s_c k) +static void bch2_extent_to_text(struct bch_fs *c, char *buf, + size_t size, struct bkey_s_c k) { char *out = buf, *end = buf + size; const char *invalid; @@ -1926,7 +1809,7 @@ static void bch_extent_to_text(struct bch_fs *c, char *buf, if (bkey_extent_is_data(k.k)) out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k)); - invalid = bch_extent_invalid(c, k); + invalid = bch2_extent_invalid(c, k); if (invalid) p(" invalid: %s", invalid); #undef p @@ -1938,12 +1821,12 @@ static unsigned PTR_TIER(struct bch_fs *c, return c->devs[ptr->dev]->mi.tier; } -static void bch_extent_crc_init(union bch_extent_crc *crc, - unsigned compressed_size, - unsigned uncompressed_size, - unsigned compression_type, - unsigned nonce, - struct bch_csum csum, unsigned csum_type) +static void bch2_extent_crc_init(union bch_extent_crc *crc, + unsigned compressed_size, + unsigned uncompressed_size, + unsigned compression_type, + unsigned nonce, + struct bch_csum csum, unsigned csum_type) { if (bch_crc_bytes[csum_type] <= 4 && uncompressed_size <= CRC32_SIZE_MAX && @@ -1996,12 +1879,12 @@ static void bch_extent_crc_init(union bch_extent_crc *crc, BUG(); } -void bch_extent_crc_append(struct bkey_i_extent *e, - unsigned compressed_size, - unsigned uncompressed_size, - unsigned compression_type, - unsigned nonce, - struct bch_csum csum, unsigned csum_type) +void bch2_extent_crc_append(struct bkey_i_extent *e, + unsigned compressed_size, + unsigned uncompressed_size, + unsigned compression_type, + unsigned nonce, + struct bch_csum csum, unsigned csum_type) { union bch_extent_crc *crc; @@ -2030,7 +1913,7 @@ void bch_extent_crc_append(struct bkey_i_extent *e, crc_csum(crc).hi == csum.hi) return; - bch_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)), + bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)), compressed_size, uncompressed_size, compression_type, @@ -2046,7 +1929,7 @@ void bch_extent_crc_append(struct bkey_i_extent *e, * For existing keys, only called when btree nodes are being rewritten, not when * they're merely being compacted/resorted in memory. */ -bool bch_extent_normalize(struct bch_fs *c, struct bkey_s k) +bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k) { struct bkey_s_extent e; @@ -2065,7 +1948,7 @@ bool bch_extent_normalize(struct bch_fs *c, struct bkey_s k) case BCH_EXTENT_CACHED: e = bkey_s_to_extent(k); - bch_extent_drop_stale(c, e); + bch2_extent_drop_stale(c, e); if (!bkey_val_u64s(e.k)) { if (bkey_extent_is_cached(e.k)) { @@ -2085,9 +1968,9 @@ bool bch_extent_normalize(struct bch_fs *c, struct bkey_s k) } } -void bch_extent_mark_replicas_cached(struct bch_fs *c, - struct bkey_s_extent e, - unsigned nr_cached) +void bch2_extent_mark_replicas_cached(struct bch_fs *c, + struct bkey_s_extent e, + unsigned nr_cached) { struct bch_extent_ptr *ptr; bool have_higher_tier; @@ -2124,9 +2007,9 @@ void bch_extent_mark_replicas_cached(struct bch_fs *c, * as the pointers are sorted by tier, hence preferring pointers to tier 0 * rather than pointers to tier 1. 
*/ -void bch_extent_pick_ptr_avoiding(struct bch_fs *c, struct bkey_s_c k, - struct bch_dev *avoid, - struct extent_pick_ptr *ret) +void bch2_extent_pick_ptr_avoiding(struct bch_fs *c, struct bkey_s_c k, + struct bch_dev *avoid, + struct extent_pick_ptr *ret) { struct bkey_s_c_extent e; const union bch_extent_crc *crc; @@ -2188,9 +2071,9 @@ void bch_extent_pick_ptr_avoiding(struct bch_fs *c, struct bkey_s_c k, } } -static enum merge_result bch_extent_merge(struct bch_fs *c, - struct btree *bk, - struct bkey_i *l, struct bkey_i *r) +static enum merge_result bch2_extent_merge(struct bch_fs *c, + struct btree *bk, + struct bkey_i *l, struct bkey_i *r) { struct bkey_s_extent el, er; union bch_extent_entry *en_l, *en_r; @@ -2269,12 +2152,12 @@ static enum merge_result bch_extent_merge(struct bch_fs *c, * overflow KEY_SIZE */ if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) { - bch_key_resize(&l->k, KEY_SIZE_MAX); - bch_cut_front(l->k.p, r); + bch2_key_resize(&l->k, KEY_SIZE_MAX); + bch2_cut_front(l->k.p, r); return BCH_MERGE_PARTIAL; } - bch_key_resize(&l->k, l->k.size + r->k.size); + bch2_key_resize(&l->k, l->k.size + r->k.size); return BCH_MERGE_MERGE; } @@ -2288,7 +2171,7 @@ static void extent_i_save(struct btree *b, struct bkey_packed *dst, BUG_ON(bkeyp_val_u64s(f, dst) != bkey_val_u64s(&src->k)); /* - * We don't want the bch_verify_key_order() call in extent_save(), + * We don't want the bch2_verify_key_order() call in extent_save(), * because we may be out of order with deleted keys that are about to be * removed by extent_bset_insert() */ @@ -2296,7 +2179,7 @@ static void extent_i_save(struct btree *b, struct bkey_packed *dst, if ((dst_unpacked = packed_to_bkey(dst))) bkey_copy(dst_unpacked, src); else - BUG_ON(!bkey_pack(dst, src, f)); + BUG_ON(!bch2_bkey_pack(dst, src, f)); } static bool extent_merge_one_overlapping(struct btree_iter *iter, @@ -2315,8 +2198,8 @@ static bool extent_merge_one_overlapping(struct btree_iter *iter, } else { uk.p = new_pos; extent_save(b, node_iter, k, &uk); - bch_bset_fix_invalidated_key(b, t, k); - bch_btree_node_iter_fix(iter, b, node_iter, t, + bch2_bset_fix_invalidated_key(b, t, k); + bch2_btree_node_iter_fix(iter, b, node_iter, t, k, k->u64s, k->u64s); return true; } @@ -2352,10 +2235,10 @@ do_fixup: * if we don't find this bset in the iterator we already got to * the end of that bset, so start searching from the end. */ - k = bch_btree_node_iter_bset_pos(node_iter, b, t); + k = bch2_btree_node_iter_bset_pos(node_iter, b, t); if (k == btree_bkey_last(b, t)) - k = bkey_prev_all(b, t, k); + k = bch2_bkey_prev_all(b, t, k); if (!k) continue; @@ -2369,7 +2252,7 @@ do_fixup: k && (uk = bkey_unpack_key(b, k), bkey_cmp(uk.p, bkey_start_pos(m)) > 0); - k = bkey_prev_all(b, t, k)) { + k = bch2_bkey_prev_all(b, t, k)) { if (bkey_cmp(uk.p, m->p) >= 0) continue; @@ -2411,11 +2294,11 @@ do_fixup: * * Also unpacks and repacks. 
*/ -static bool bch_extent_merge_inline(struct bch_fs *c, - struct btree_iter *iter, - struct bkey_packed *l, - struct bkey_packed *r, - bool back_merge) +static bool bch2_extent_merge_inline(struct bch_fs *c, + struct btree_iter *iter, + struct bkey_packed *l, + struct bkey_packed *r, + bool back_merge) { struct btree *b = iter->nodes[0]; struct btree_node_iter *node_iter = &iter->node_iters[0]; @@ -2431,27 +2314,27 @@ static bool bch_extent_merge_inline(struct bch_fs *c, * We need to save copies of both l and r, because we might get a * partial merge (which modifies both) and then fails to repack */ - bkey_unpack(b, &li.k, l); - bkey_unpack(b, &ri.k, r); + bch2_bkey_unpack(b, &li.k, l); + bch2_bkey_unpack(b, &ri.k, r); m = back_merge ? l : r; mi = back_merge ? &li.k : &ri.k; /* l & r should be in last bset: */ - EBUG_ON(bch_bkey_to_bset(b, m) != t); + EBUG_ON(bch2_bkey_to_bset(b, m) != t); - switch (bch_extent_merge(c, b, &li.k, &ri.k)) { + switch (bch2_extent_merge(c, b, &li.k, &ri.k)) { case BCH_MERGE_NOMERGE: return false; case BCH_MERGE_PARTIAL: - if (bkey_packed(m) && !bkey_pack_key((void *) &tmp, &mi->k, f)) + if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &mi->k, f)) return false; if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge)) return false; extent_i_save(b, m, mi); - bch_bset_fix_invalidated_key(b, t, m); + bch2_bset_fix_invalidated_key(b, t, m); /* * Update iterator to reflect what we just inserted - otherwise, @@ -2459,9 +2342,9 @@ static bool bch_extent_merge_inline(struct bch_fs *c, * just partially merged with: */ if (back_merge) - bch_btree_iter_set_pos_same_leaf(iter, li.k.k.p); + bch2_btree_iter_set_pos_same_leaf(iter, li.k.k.p); - bch_btree_node_iter_fix(iter, iter->nodes[0], node_iter, + bch2_btree_node_iter_fix(iter, iter->nodes[0], node_iter, t, m, m->u64s, m->u64s); if (!back_merge) @@ -2470,16 +2353,16 @@ static bool bch_extent_merge_inline(struct bch_fs *c, bkey_copy(packed_to_bkey(r), &ri.k); return false; case BCH_MERGE_MERGE: - if (bkey_packed(m) && !bkey_pack_key((void *) &tmp, &li.k.k, f)) + if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &li.k.k, f)) return false; if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge)) return false; extent_i_save(b, m, &li.k); - bch_bset_fix_invalidated_key(b, t, m); + bch2_bset_fix_invalidated_key(b, t, m); - bch_btree_node_iter_fix(iter, iter->nodes[0], node_iter, + bch2_btree_node_iter_fix(iter, iter->nodes[0], node_iter, t, m, m->u64s, m->u64s); return true; default: @@ -2487,12 +2370,12 @@ static bool bch_extent_merge_inline(struct bch_fs *c, } } -const struct bkey_ops bch_bkey_extent_ops = { - .key_invalid = bch_extent_invalid, - .key_debugcheck = bch_extent_debugcheck, - .val_to_text = bch_extent_to_text, - .swab = bch_ptr_swab, - .key_normalize = bch_ptr_normalize, - .key_merge = bch_extent_merge, +const struct bkey_ops bch2_bkey_extent_ops = { + .key_invalid = bch2_extent_invalid, + .key_debugcheck = bch2_extent_debugcheck, + .val_to_text = bch2_extent_to_text, + .swab = bch2_ptr_swab, + .key_normalize = bch2_ptr_normalize, + .key_merge = bch2_extent_merge, .is_extents = true, }; diff --git a/libbcache/extents.h b/libbcachefs/extents.h index 1d63b79d..3a952484 100644 --- a/libbcache/extents.h +++ b/libbcachefs/extents.h @@ -1,26 +1,24 @@ #ifndef _BCACHE_EXTENTS_H #define _BCACHE_EXTENTS_H -#include "bcache.h" +#include "bcachefs.h" #include "bkey.h" -#include <linux/bcache.h> - struct btree_node_iter; struct btree_insert; struct btree_insert_entry; struct extent_insert_hook; 
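The inline-merge path just above handles three possible results from the merge helper: no merge, a full merge, or a partial merge when the combined size would overflow the key's size field, in which case the left key is grown to the limit and the right key's front is cut by the amount consumed. The following minimal sketch shows that decision on toy extents, with an invented TOY_SIZE_MAX standing in for KEY_SIZE_MAX.

#include <stdint.h>
#include <stdio.h>

enum merge_result { MERGE_NOMERGE, MERGE_PARTIAL, MERGE_MERGE };

#define TOY_SIZE_MAX 1000u	/* stand-in for KEY_SIZE_MAX */

struct toy_extent { uint64_t end; uint32_t size; };

/*
 * l and r are adjacent (l ends where r starts). Grow l by swallowing r;
 * if the result would overflow the size field, grow l to the limit,
 * trim the front of r by the amount consumed, and report PARTIAL.
 */
static enum merge_result merge(struct toy_extent *l, struct toy_extent *r)
{
	if (l->end != r->end - r->size)
		return MERGE_NOMERGE;

	if ((uint64_t) l->size + r->size > TOY_SIZE_MAX) {
		uint32_t take = TOY_SIZE_MAX - l->size;

		l->end  += take;
		l->size  = TOY_SIZE_MAX;
		r->size -= take;	/* front of r cut away */
		return MERGE_PARTIAL;
	}

	l->end  += r->size;
	l->size += r->size;
	return MERGE_MERGE;
}

int main(void)
{
	struct toy_extent l = { .end = 900,  .size = 900 };
	struct toy_extent r = { .end = 1100, .size = 200 };

	printf("result=%d l=[%llu,%llu) r.size=%u\n", merge(&l, &r),
	       (unsigned long long) (l.end - l.size),
	       (unsigned long long) l.end, r.size);
	return 0;
}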
-struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *, +struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *, struct btree *, struct btree_node_iter *); -struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c, +struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *, struct btree *, struct btree_node_iter *); -extern const struct bkey_ops bch_bkey_btree_ops; -extern const struct bkey_ops bch_bkey_extent_ops; +extern const struct bkey_ops bch2_bkey_btree_ops; +extern const struct bkey_ops bch2_bkey_extent_ops; struct bch_fs; struct journal_res; @@ -32,32 +30,28 @@ struct extent_pick_ptr { }; struct extent_pick_ptr -bch_btree_pick_ptr(struct bch_fs *, const struct btree *); +bch2_btree_pick_ptr(struct bch_fs *, const struct btree *); -void bch_extent_pick_ptr_avoiding(struct bch_fs *, struct bkey_s_c, +void bch2_extent_pick_ptr_avoiding(struct bch_fs *, struct bkey_s_c, struct bch_dev *, struct extent_pick_ptr *); static inline void -bch_extent_pick_ptr(struct bch_fs *c, struct bkey_s_c k, +bch2_extent_pick_ptr(struct bch_fs *c, struct bkey_s_c k, struct extent_pick_ptr *ret) { - bch_extent_pick_ptr_avoiding(c, k, NULL, ret); + bch2_extent_pick_ptr_avoiding(c, k, NULL, ret); } -enum extent_insert_hook_ret -bch_extent_cmpxchg(struct extent_insert_hook *, struct bpos, struct bpos, - struct bkey_s_c, const struct bkey_i *); - enum btree_insert_ret -bch_insert_fixup_extent(struct btree_insert *, +bch2_insert_fixup_extent(struct btree_insert *, struct btree_insert_entry *); -bool bch_extent_normalize(struct bch_fs *, struct bkey_s); -void bch_extent_mark_replicas_cached(struct bch_fs *, +bool bch2_extent_normalize(struct bch_fs *, struct bkey_s); +void bch2_extent_mark_replicas_cached(struct bch_fs *, struct bkey_s_extent, unsigned); -unsigned bch_extent_nr_ptrs(struct bkey_s_c_extent); -unsigned bch_extent_nr_dirty_ptrs(struct bkey_s_c); +unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent); +unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c); static inline bool bkey_extent_is_data(const struct bkey *k) { @@ -330,7 +324,7 @@ out: \ (_ptr); \ (_ptr) = extent_ptr_prev(_e, _ptr)) -void bch_extent_crc_append(struct bkey_i_extent *, unsigned, unsigned, +void bch2_extent_crc_append(struct bkey_i_extent *, unsigned, unsigned, unsigned, unsigned, struct bch_csum, unsigned); static inline void __extent_entry_push(struct bkey_i_extent *e) @@ -549,17 +543,17 @@ static inline unsigned extent_current_nonce(struct bkey_s_c_extent e) const union bch_extent_crc *crc; extent_for_each_crc(e, crc) - if (bch_csum_type_is_encryption(crc_csum_type(crc))) + if (bch2_csum_type_is_encryption(crc_csum_type(crc))) return crc_offset(crc) + crc_nonce(crc); return 0; } -void bch_extent_narrow_crcs(struct bkey_s_extent); -void bch_extent_drop_redundant_crcs(struct bkey_s_extent); +void bch2_extent_narrow_crcs(struct bkey_s_extent); +void bch2_extent_drop_redundant_crcs(struct bkey_s_extent); /* Doesn't cleanup redundant crcs */ -static inline void __bch_extent_drop_ptr(struct bkey_s_extent e, +static inline void __bch2_extent_drop_ptr(struct bkey_s_extent e, struct bch_extent_ptr *ptr) { EBUG_ON(ptr < &e.v->start->ptr || @@ -570,18 +564,18 @@ static inline void __bch_extent_drop_ptr(struct bkey_s_extent e, e.k->u64s -= sizeof(*ptr) / sizeof(u64); } -static inline void bch_extent_drop_ptr(struct bkey_s_extent e, +static inline void bch2_extent_drop_ptr(struct bkey_s_extent e, struct bch_extent_ptr *ptr) { - __bch_extent_drop_ptr(e, ptr); - 
bch_extent_drop_redundant_crcs(e); + __bch2_extent_drop_ptr(e, ptr); + bch2_extent_drop_redundant_crcs(e); } const struct bch_extent_ptr * -bch_extent_has_device(struct bkey_s_c_extent, unsigned); +bch2_extent_has_device(struct bkey_s_c_extent, unsigned); -bool bch_cut_front(struct bpos, struct bkey_i *); -bool bch_cut_back(struct bpos, struct bkey *); -void bch_key_resize(struct bkey *, unsigned); +bool bch2_cut_front(struct bpos, struct bkey_i *); +bool bch2_cut_back(struct bpos, struct bkey *); +void bch2_key_resize(struct bkey *, unsigned); #endif /* _BCACHE_EXTENTS_H */ diff --git a/libbcache/eytzinger.h b/libbcachefs/eytzinger.h index 13d54e5e..13d54e5e 100644 --- a/libbcache/eytzinger.h +++ b/libbcachefs/eytzinger.h diff --git a/libbcache/fifo.h b/libbcachefs/fifo.h index 2908ca23..2908ca23 100644 --- a/libbcache/fifo.h +++ b/libbcachefs/fifo.h diff --git a/libbcache/fs-gc.c b/libbcachefs/fs-gc.c index 1f6a65ec..20f552d2 100644 --- a/libbcache/fs-gc.c +++ b/libbcachefs/fs-gc.c @@ -1,5 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" #include "btree_update.h" #include "dirent.h" #include "error.h" @@ -23,7 +23,7 @@ static int remove_dirent(struct bch_fs *c, struct btree_iter *iter, int ret; char *buf; - name.len = bch_dirent_name_bytes(dirent); + name.len = bch2_dirent_name_bytes(dirent); buf = kmalloc(name.len + 1, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -33,15 +33,15 @@ static int remove_dirent(struct bch_fs *c, struct btree_iter *iter, name.name = buf; /* Unlock iter so we don't deadlock, after copying name: */ - bch_btree_iter_unlock(iter); + bch2_btree_iter_unlock(iter); - ret = bch_inode_find_by_inum(c, dir_inum, &dir_inode); + ret = bch2_inode_find_by_inum(c, dir_inum, &dir_inode); if (ret) goto err; - dir_hash_info = bch_hash_info_init(&dir_inode); + dir_hash_info = bch2_hash_info_init(c, &dir_inode); - ret = bch_dirent_delete(c, dir_inum, &dir_hash_info, &name, NULL); + ret = bch2_dirent_delete(c, dir_inum, &dir_hash_info, &name, NULL); err: kfree(buf); return ret; @@ -52,7 +52,7 @@ static int reattach_inode(struct bch_fs *c, u64 inum) { struct bch_hash_info lostfound_hash_info = - bch_hash_info_init(lostfound_inode); + bch2_hash_info_init(c, lostfound_inode); struct bkey_inode_buf packed; char name_buf[20]; struct qstr name; @@ -63,14 +63,14 @@ static int reattach_inode(struct bch_fs *c, lostfound_inode->i_nlink++; - bch_inode_pack(&packed, lostfound_inode); + bch2_inode_pack(&packed, lostfound_inode); - ret = bch_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i, + ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i, NULL, NULL, NULL, 0); if (ret) return ret; - return bch_dirent_create(c, lostfound_inode->inum, + return bch2_dirent_create(c, lostfound_inode->inum, &lostfound_hash_info, DT_DIR, &name, inum, NULL, 0); } @@ -96,7 +96,7 @@ static int walk_inode(struct bch_fs *c, struct inode_walker *w, u64 inum) w->cur_inum = inum; if (w->first_this_inode) { - int ret = bch_inode_find_by_inum(c, inum, &w->inode); + int ret = bch2_inode_find_by_inum(c, inum, &w->inode); if (ret && ret != -ENOENT) return ret; @@ -135,7 +135,7 @@ static int check_extents(struct bch_fs *c) unfixable_fsck_err_on(w.first_this_inode && w.have_inode && w.inode.i_sectors != - (i_sectors = bch_count_inode_sectors(c, w.cur_inum)), + (i_sectors = bch2_count_inode_sectors(c, w.cur_inum)), c, "i_sectors wrong: got %llu, should be %llu", w.inode.i_sectors, i_sectors); @@ -150,7 +150,7 @@ static int check_extents(struct bch_fs *c) k.k->type, k.k->p.offset, k.k->p.inode, w.inode.i_size); } 
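One of the fsck checks above recomputes an inode's i_sectors by walking its extents and summing their sizes, then compares the result against what the inode claims. Here is a toy, self-contained version of that recount, using a flat array instead of a btree iterator; the helper name and data layout are invented for illustration only.

#include <stdio.h>

/* Toy extent list: (inode number, extent size in 512-byte sectors). */
struct toy_extent { unsigned long long inum; unsigned size; };

/*
 * The spirit of bch2_count_inode_sectors(): walk the extents belonging
 * to one inode and add up the key sizes.
 */
static long long count_inode_sectors(const struct toy_extent *e, int nr,
				     unsigned long long inum)
{
	long long sectors = 0;

	for (int i = 0; i < nr; i++)
		if (e[i].inum == inum)
			sectors += e[i].size;
	return sectors;
}

int main(void)
{
	struct toy_extent extents[] = { { 7, 8 }, { 7, 120 }, { 9, 16 } };
	unsigned long long claimed_i_sectors = 100;	/* what the inode says */
	long long counted = count_inode_sectors(extents, 3, 7);

	if (counted != (long long) claimed_i_sectors)
		printf("i_sectors wrong: got %llu, should be %lld\n",
		       claimed_i_sectors, counted);
	return 0;
}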
fsck_err: - return bch_btree_iter_unlock(&iter) ?: ret; + return bch2_btree_iter_unlock(&iter) ?: ret; } /* @@ -198,7 +198,7 @@ static int check_dirents(struct bch_fs *c) continue; } - ret = bch_inode_find_by_inum(c, d_inum, &target); + ret = bch2_inode_find_by_inum(c, d_inum, &target); if (ret && ret != -ENOENT) break; @@ -232,7 +232,7 @@ static int check_dirents(struct bch_fs *c) bkey_reassemble(&n->k_i, d.s_c); n->v.d_type = mode_to_type(le16_to_cpu(target.i_mode)); - ret = bch_btree_insert_at(c, NULL, NULL, NULL, + ret = bch2_btree_insert_at(c, NULL, NULL, NULL, BTREE_INSERT_NOFAIL, BTREE_INSERT_ENTRY(&iter, &n->k_i)); kfree(n); @@ -243,7 +243,7 @@ static int check_dirents(struct bch_fs *c) } err: fsck_err: - return bch_btree_iter_unlock(&iter) ?: ret; + return bch2_btree_iter_unlock(&iter) ?: ret; } /* @@ -268,7 +268,7 @@ static int check_xattrs(struct bch_fs *c) k.k->p.inode); } fsck_err: - return bch_btree_iter_unlock(&iter) ?: ret; + return bch2_btree_iter_unlock(&iter) ?: ret; } /* Get root directory, create if it doesn't exist: */ @@ -277,7 +277,7 @@ static int check_root(struct bch_fs *c, struct bch_inode_unpacked *root_inode) struct bkey_inode_buf packed; int ret; - ret = bch_inode_find_by_inum(c, BCACHE_ROOT_INO, root_inode); + ret = bch2_inode_find_by_inum(c, BCACHE_ROOT_INO, root_inode); if (ret && ret != -ENOENT) return ret; @@ -292,12 +292,12 @@ static int check_root(struct bch_fs *c, struct bch_inode_unpacked *root_inode) fsck_err: return ret; create_root: - bch_inode_init(c, root_inode, 0, 0, S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0); + bch2_inode_init(c, root_inode, 0, 0, S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0); root_inode->inum = BCACHE_ROOT_INO; - bch_inode_pack(&packed, root_inode); + bch2_inode_pack(&packed, root_inode); - return bch_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i, + return bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i, NULL, NULL, NULL, 0); } @@ -308,19 +308,19 @@ static int check_lostfound(struct bch_fs *c, { struct qstr lostfound = QSTR("lost+found"); struct bch_hash_info root_hash_info = - bch_hash_info_init(root_inode); + bch2_hash_info_init(c, root_inode); struct bkey_inode_buf packed; u64 inum; int ret; - inum = bch_dirent_lookup(c, BCACHE_ROOT_INO, &root_hash_info, + inum = bch2_dirent_lookup(c, BCACHE_ROOT_INO, &root_hash_info, &lostfound); if (!inum) { bch_notice(c, "creating lost+found"); goto create_lostfound; } - ret = bch_inode_find_by_inum(c, inum, lostfound_inode); + ret = bch2_inode_find_by_inum(c, inum, lostfound_inode); if (ret && ret != -ENOENT) return ret; @@ -337,24 +337,24 @@ fsck_err: create_lostfound: root_inode->i_nlink++; - bch_inode_pack(&packed, root_inode); + bch2_inode_pack(&packed, root_inode); - ret = bch_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i, + ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i, NULL, NULL, NULL, 0); if (ret) return ret; - bch_inode_init(c, lostfound_inode, 0, 0, S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0); - bch_inode_pack(&packed, lostfound_inode); + bch2_inode_init(c, lostfound_inode, 0, 0, S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0); + bch2_inode_pack(&packed, lostfound_inode); - ret = bch_inode_create(c, &packed.inode.k_i, BLOCKDEV_INODE_MAX, 0, + ret = bch2_inode_create(c, &packed.inode.k_i, BLOCKDEV_INODE_MAX, 0, &c->unused_inode_hint); if (ret) return ret; lostfound_inode->inum = packed.inode.k.p.inode; - ret = bch_dirent_create(c, BCACHE_ROOT_INO, &root_hash_info, DT_DIR, + ret = bch2_dirent_create(c, BCACHE_ROOT_INO, &root_hash_info, DT_DIR, &lostfound, lostfound_inode->inum, 
NULL, 0); if (ret) return ret; @@ -488,10 +488,10 @@ next: if (ret) goto err; - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); goto next; } - ret = bch_btree_iter_unlock(&iter); + ret = bch2_btree_iter_unlock(&iter); if (ret) goto err; up: @@ -508,7 +508,7 @@ up: if (fsck_err_on(!inode_bitmap_test(&dirs_done, k.k->p.inode), c, "unreachable directory found (inum %llu)", k.k->p.inode)) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); ret = reattach_inode(c, lostfound_inode, k.k->p.inode); if (ret) @@ -517,7 +517,7 @@ up: had_unreachable = true; } } - ret = bch_btree_iter_unlock(&iter); + ret = bch2_btree_iter_unlock(&iter); if (ret) goto err; @@ -536,7 +536,7 @@ out: return ret; err: fsck_err: - ret = bch_btree_iter_unlock(&iter) ?: ret; + ret = bch2_btree_iter_unlock(&iter) ?: ret; goto out; } @@ -570,7 +570,7 @@ static void inc_link(struct bch_fs *c, nlink_table *links, } noinline_for_stack -static int bch_gc_walk_dirents(struct bch_fs *c, nlink_table *links, +static int bch2_gc_walk_dirents(struct bch_fs *c, nlink_table *links, u64 range_start, u64 *range_end) { struct btree_iter iter; @@ -597,16 +597,16 @@ static int bch_gc_walk_dirents(struct bch_fs *c, nlink_table *links, break; } - bch_btree_iter_cond_resched(&iter); + bch2_btree_iter_cond_resched(&iter); } - ret = bch_btree_iter_unlock(&iter); + ret = bch2_btree_iter_unlock(&iter); if (ret) bch_err(c, "error in fs gc: btree error %i while walking dirents", ret); return ret; } -s64 bch_count_inode_sectors(struct bch_fs *c, u64 inum) +s64 bch2_count_inode_sectors(struct bch_fs *c, u64 inum) { struct btree_iter iter; struct bkey_s_c k; @@ -620,10 +620,10 @@ s64 bch_count_inode_sectors(struct bch_fs *c, u64 inum) sectors += k.k->size; } - return bch_btree_iter_unlock(&iter) ?: sectors; + return bch2_btree_iter_unlock(&iter) ?: sectors; } -static int bch_gc_do_inode(struct bch_fs *c, +static int bch2_gc_do_inode(struct bch_fs *c, struct bch_inode_unpacked *lostfound_inode, struct btree_iter *iter, struct bkey_s_c_inode inode, struct nlink link) @@ -633,8 +633,8 @@ static int bch_gc_do_inode(struct bch_fs *c, u32 i_nlink, real_i_nlink; bool do_update = false; - ret = bch_inode_unpack(inode, &u); - if (bch_fs_inconsistent_on(ret, c, + ret = bch2_inode_unpack(inode, &u); + if (bch2_fs_inconsistent_on(ret, c, "error unpacking inode %llu in fs-gc", inode.k->p.inode)) return ret; @@ -668,7 +668,7 @@ static int bch_gc_do_inode(struct bch_fs *c, inode.k->p.inode); if (fsck_err_on(S_ISDIR(u.i_mode) && - bch_empty_dir(c, inode.k->p.inode), c, + bch2_empty_dir(c, inode.k->p.inode), c, "non empty directory with link count 0, " "inode nlink %u, dir links found %u", i_nlink, link.dir_count)) { @@ -680,7 +680,7 @@ static int bch_gc_do_inode(struct bch_fs *c, bch_verbose(c, "deleting inode %llu", inode.k->p.inode); - ret = bch_inode_rm(c, inode.k->p.inode); + ret = bch2_inode_rm(c, inode.k->p.inode); if (ret) bch_err(c, "error in fs gc: error %i " "while deleting inode", ret); @@ -700,7 +700,7 @@ static int bch_gc_do_inode(struct bch_fs *c, * just switch units to bytes and that issue goes away */ - ret = bch_inode_truncate(c, inode.k->p.inode, + ret = bch2_inode_truncate(c, inode.k->p.inode, round_up(u.i_size, PAGE_SIZE) >> 9, NULL, NULL); if (ret) { @@ -730,7 +730,7 @@ static int bch_gc_do_inode(struct bch_fs *c, bch_verbose(c, "recounting sectors for inode %llu", inode.k->p.inode); - sectors = bch_count_inode_sectors(c, inode.k->p.inode); + sectors = bch2_count_inode_sectors(c, inode.k->p.inode); if (sectors < 0) { 
bch_err(c, "error in fs gc: error %i " "recounting inode sectors", @@ -760,9 +760,9 @@ static int bch_gc_do_inode(struct bch_fs *c, if (do_update) { struct bkey_inode_buf p; - bch_inode_pack(&p, &u); + bch2_inode_pack(&p, &u); - ret = bch_btree_insert_at(c, NULL, NULL, NULL, + ret = bch2_btree_insert_at(c, NULL, NULL, NULL, BTREE_INSERT_NOFAIL, BTREE_INSERT_ENTRY(iter, &p.inode.k_i)); if (ret && ret != -EINTR) @@ -774,7 +774,7 @@ fsck_err: } noinline_for_stack -static int bch_gc_walk_inodes(struct bch_fs *c, +static int bch2_gc_walk_inodes(struct bch_fs *c, struct bch_inode_unpacked *lostfound_inode, nlink_table *links, u64 range_start, u64 range_end) @@ -786,10 +786,10 @@ static int bch_gc_walk_inodes(struct bch_fs *c, int ret = 0, ret2 = 0; u64 nlinks_pos; - bch_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(range_start, 0)); + bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(range_start, 0)); genradix_iter_init(&nlinks_iter); - while ((k = bch_btree_iter_peek(&iter)).k && + while ((k = bch2_btree_iter_peek(&iter)).k && !btree_iter_err(k)) { peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links); @@ -814,9 +814,9 @@ peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links); * Avoid potential deadlocks with iter for * truncate/rm/etc.: */ - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); - ret = bch_gc_do_inode(c, lostfound_inode, &iter, + ret = bch2_gc_do_inode(c, lostfound_inode, &iter, bkey_s_c_to_inode(k), *link); if (ret == -EINTR) continue; @@ -835,11 +835,11 @@ peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links); if (nlinks_pos == iter.pos.inode) genradix_iter_advance(&nlinks_iter, links); - bch_btree_iter_advance_pos(&iter); - bch_btree_iter_cond_resched(&iter); + bch2_btree_iter_advance_pos(&iter); + bch2_btree_iter_cond_resched(&iter); } fsck_err: - ret2 = bch_btree_iter_unlock(&iter); + ret2 = bch2_btree_iter_unlock(&iter); if (ret2) bch_err(c, "error in fs gc: btree error %i while walking inodes", ret2); @@ -860,13 +860,13 @@ static int check_inode_nlinks(struct bch_fs *c, this_iter_range_start = next_iter_range_start; next_iter_range_start = U64_MAX; - ret = bch_gc_walk_dirents(c, &links, + ret = bch2_gc_walk_dirents(c, &links, this_iter_range_start, &next_iter_range_start); if (ret) break; - ret = bch_gc_walk_inodes(c, lostfound_inode, &links, + ret = bch2_gc_walk_inodes(c, lostfound_inode, &links, this_iter_range_start, next_iter_range_start); if (ret) @@ -884,7 +884,7 @@ static int check_inode_nlinks(struct bch_fs *c, * Checks for inconsistencies that shouldn't happen, unless we have a bug. 
* Doesn't fix them yet, mainly because they haven't yet been observed: */ -int bch_fsck(struct bch_fs *c, bool full_fsck) +int bch2_fsck(struct bch_fs *c, bool full_fsck) { struct bch_inode_unpacked root_inode, lostfound_inode; int ret; diff --git a/libbcachefs/fs-gc.h b/libbcachefs/fs-gc.h new file mode 100644 index 00000000..4bde1bda --- /dev/null +++ b/libbcachefs/fs-gc.h @@ -0,0 +1,7 @@ +#ifndef _BCACHE_FS_GC_H +#define _BCACHE_FS_GC_H + +s64 bch2_count_inode_sectors(struct bch_fs *, u64); +int bch2_fsck(struct bch_fs *, bool); + +#endif /* _BCACHE_FS_GC_H */ diff --git a/libbcache/fs-io.c b/libbcachefs/fs-io.c index afc8c208..8ad192c4 100644 --- a/libbcache/fs-io.c +++ b/libbcachefs/fs-io.c @@ -1,5 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" #include "btree_update.h" #include "buckets.h" #include "clock.h" @@ -23,9 +23,9 @@ #include <linux/writeback.h> #include <trace/events/writeback.h> -struct bio_set *bch_writepage_bioset; -struct bio_set *bch_dio_read_bioset; -struct bio_set *bch_dio_write_bioset; +struct bio_set *bch2_writepage_bioset; +struct bio_set *bch2_dio_read_bioset; +struct bio_set *bch2_dio_write_bioset; /* pagecache_block must be held */ static int write_invalidate_inode_pages_range(struct address_space *mapping, @@ -77,11 +77,11 @@ static int inode_set_size(struct bch_inode_info *ei, return 0; } -static int __must_check bch_write_inode_size(struct bch_fs *c, +static int __must_check bch2_write_inode_size(struct bch_fs *c, struct bch_inode_info *ei, loff_t new_size) { - return __bch_write_inode(c, ei, inode_set_size, &new_size); + return __bch2_write_inode(c, ei, inode_set_size, &new_size); } static inline void i_size_dirty_put(struct bch_inode_info *ei) @@ -159,7 +159,7 @@ static void i_sectors_dirty_put(struct bch_inode_info *ei, if (atomic_long_dec_and_test(&ei->i_sectors_dirty_count)) { struct bch_fs *c = ei->vfs_inode.i_sb->s_fs_info; - int ret = __bch_write_inode(c, ei, inode_clear_i_sectors_dirty, NULL); + int ret = __bch2_write_inode(c, ei, inode_clear_i_sectors_dirty, NULL); ret = ret; } @@ -174,7 +174,7 @@ static int __must_check i_sectors_dirty_get(struct bch_inode_info *ei, h->hook.fn = i_sectors_hook_fn; h->sectors = 0; -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG h->ei = ei; #endif @@ -186,7 +186,7 @@ static int __must_check i_sectors_dirty_get(struct bch_inode_info *ei, if (!(ei->i_flags & BCH_INODE_I_SECTORS_DIRTY)) { struct bch_fs *c = ei->vfs_inode.i_sb->s_fs_info; - ret = __bch_write_inode(c, ei, inode_set_i_sectors_dirty, NULL); + ret = __bch2_write_inode(c, ei, inode_set_i_sectors_dirty, NULL); } if (!ret) @@ -265,7 +265,7 @@ bchfs_extent_update_hook(struct extent_insert_hook *hook, } if (do_pack) - bch_inode_pack(&h->inode_p, &h->inode_u); + bch2_inode_pack(&h->inode_p, &h->inode_u); return BTREE_HOOK_DO_INSERT; } @@ -277,14 +277,14 @@ static int bchfs_write_index_update(struct bch_write_op *wop) struct keylist *keys = &op->op.insert_keys; struct btree_iter extent_iter, inode_iter; struct bchfs_extent_trans_hook hook; - struct bkey_i *k = bch_keylist_front(keys); + struct bkey_i *k = bch2_keylist_front(keys); int ret; BUG_ON(k->k.p.inode != op->ei->vfs_inode.i_ino); - bch_btree_iter_init_intent(&extent_iter, wop->c, BTREE_ID_EXTENTS, - bkey_start_pos(&bch_keylist_front(keys)->k)); - bch_btree_iter_init_intent(&inode_iter, wop->c, BTREE_ID_INODES, + bch2_btree_iter_init_intent(&extent_iter, wop->c, BTREE_ID_EXTENTS, + bkey_start_pos(&bch2_keylist_front(keys)->k)); + bch2_btree_iter_init_intent(&inode_iter, wop->c, 
BTREE_ID_INODES, POS(extent_iter.pos.inode, 0)); hook.op = op; @@ -292,12 +292,12 @@ static int bchfs_write_index_update(struct bch_write_op *wop) hook.need_inode_update = false; do { - ret = bch_btree_iter_traverse(&extent_iter); + ret = bch2_btree_iter_traverse(&extent_iter); if (ret) goto err; /* XXX: ei->i_size locking */ - k = bch_keylist_front(keys); + k = bch2_keylist_front(keys); if (min(k->k.p.offset << 9, op->new_i_size) > op->ei->i_size) hook.need_inode_update = true; @@ -305,9 +305,9 @@ static int bchfs_write_index_update(struct bch_write_op *wop) struct bkey_s_c inode; if (!btree_iter_linked(&inode_iter)) - bch_btree_iter_link(&extent_iter, &inode_iter); + bch2_btree_iter_link(&extent_iter, &inode_iter); - inode = bch_btree_iter_peek_with_holes(&inode_iter); + inode = bch2_btree_iter_peek_with_holes(&inode_iter); if ((ret = btree_iter_err(inode))) goto err; @@ -329,7 +329,7 @@ static int bchfs_write_index_update(struct bch_write_op *wop) } bkey_reassemble(&hook.inode_p.inode.k_i, inode); - ret = bch_inode_unpack(bkey_s_c_to_inode(inode), + ret = bch2_inode_unpack(bkey_s_c_to_inode(inode), &hook.inode_u); if (WARN_ONCE(ret, "error %i unpacking inode %llu", @@ -338,14 +338,14 @@ static int bchfs_write_index_update(struct bch_write_op *wop) break; } - ret = bch_btree_insert_at(wop->c, &wop->res, + ret = bch2_btree_insert_at(wop->c, &wop->res, &hook.hook, op_journal_seq(wop), BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC, BTREE_INSERT_ENTRY(&extent_iter, k), BTREE_INSERT_ENTRY_EXTRA_RES(&inode_iter, &hook.inode_p.inode.k_i, 2)); } else { - ret = bch_btree_insert_at(wop->c, &wop->res, + ret = bch2_btree_insert_at(wop->c, &wop->res, &hook.hook, op_journal_seq(wop), BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC, BTREE_INSERT_ENTRY(&extent_iter, k)); @@ -356,11 +356,11 @@ err: if (ret) break; - bch_keylist_pop_front(keys); - } while (!bch_keylist_empty(keys)); + bch2_keylist_pop_front(keys); + } while (!bch2_keylist_empty(keys)); - bch_btree_iter_unlock(&extent_iter); - bch_btree_iter_unlock(&inode_iter); + bch2_btree_iter_unlock(&extent_iter); + bch2_btree_iter_unlock(&inode_iter); return ret; } @@ -371,7 +371,7 @@ err: /* * bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could - * almost protected it with the page lock, except that bch_writepage_io_done has + * almost protected it with the page lock, except that bch2_writepage_io_done has * to update the sector counts (and from interrupt/bottom half context). */ struct bch_page_state { @@ -427,7 +427,7 @@ static inline struct bch_page_state *page_state(struct page *page) return s; } -static void bch_put_page_reservation(struct bch_fs *c, struct page *page) +static void bch2_put_page_reservation(struct bch_fs *c, struct page *page) { struct disk_reservation res = { .sectors = PAGE_SECTORS }; struct bch_page_state s; @@ -438,10 +438,10 @@ static void bch_put_page_reservation(struct bch_fs *c, struct page *page) s.reserved = 0; }); - bch_disk_reservation_put(c, &res); + bch2_disk_reservation_put(c, &res); } -static int bch_get_page_reservation(struct bch_fs *c, struct page *page, +static int bch2_get_page_reservation(struct bch_fs *c, struct page *page, bool check_enospc) { struct bch_page_state *s = page_state(page), new; @@ -453,14 +453,14 @@ static int bch_get_page_reservation(struct bch_fs *c, struct page *page, if (s->allocated || s->reserved) return 0; - ret = bch_disk_reservation_get(c, &res, PAGE_SECTORS, !check_enospc + ret = bch2_disk_reservation_get(c, &res, PAGE_SECTORS, !check_enospc ? 
BCH_DISK_RESERVATION_NOFAIL : 0); if (ret) return ret; page_state_cmpxchg(s, new, { if (new.reserved) { - bch_disk_reservation_put(c, &res); + bch2_disk_reservation_put(c, &res); return 0; } new.reserved = 1; @@ -470,7 +470,7 @@ static int bch_get_page_reservation(struct bch_fs *c, struct page *page, return 0; } -static void bch_clear_page_bits(struct page *page) +static void bch2_clear_page_bits(struct page *page) { struct inode *inode = page->mapping->host; struct bch_fs *c = inode->i_sb->s_fs_info; @@ -490,10 +490,10 @@ static void bch_clear_page_bits(struct page *page) } if (s.reserved) - bch_disk_reservation_put(c, &res); + bch2_disk_reservation_put(c, &res); } -int bch_set_page_dirty(struct page *page) +int bch2_set_page_dirty(struct page *page) { struct bch_page_state old, new; @@ -522,6 +522,17 @@ static bool bio_can_add_page_contig(struct bio *bio, struct page *page) bio_end_sector(bio) == offset; } +static void __bio_add_page(struct bio *bio, struct page *page) +{ + bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) { + .bv_page = page, + .bv_len = PAGE_SIZE, + .bv_offset = 0, + }; + + bio->bi_iter.bi_size += PAGE_SIZE; +} + static int bio_add_page_contig(struct bio *bio, struct page *page) { sector_t offset = (sector_t) page->index << (PAGE_SHIFT - 9); @@ -533,18 +544,11 @@ static int bio_add_page_contig(struct bio *bio, struct page *page) else if (!bio_can_add_page_contig(bio, page)) return -1; - bio->bi_io_vec[bio->bi_vcnt++] = (struct bio_vec) { - .bv_page = page, - .bv_len = PAGE_SIZE, - .bv_offset = 0, - }; - - bio->bi_iter.bi_size += PAGE_SIZE; - + __bio_add_page(bio, page); return 0; } -static void bch_readpages_end_io(struct bio *bio) +static void bch2_readpages_end_io(struct bio *bio) { struct bio_vec *bv; int i; @@ -564,38 +568,50 @@ static void bch_readpages_end_io(struct bio *bio) bio_put(bio); } -static inline struct page *__readpage_next_page(struct address_space *mapping, - struct list_head *pages, - unsigned *nr_pages) +struct readpages_iter { + struct address_space *mapping; + struct list_head pages; + unsigned nr_pages; +}; + +static int readpage_add_page(struct readpages_iter *iter, struct page *page) { - struct page *page; + struct bch_page_state *s = page_state(page); int ret; - while (*nr_pages) { - page = list_entry(pages->prev, struct page, lru); - prefetchw(&page->flags); - list_del(&page->lru); + BUG_ON(s->reserved); + s->allocated = 1; + s->sectors = 0; - ret = add_to_page_cache_lru(page, mapping, page->index, GFP_NOFS); + prefetchw(&page->flags); + ret = add_to_page_cache_lru(page, iter->mapping, + page->index, GFP_NOFS); + put_page(page); + return ret; +} - /* if add_to_page_cache_lru() succeeded, page is locked: */ - put_page(page); +static inline struct page *readpage_iter_next(struct readpages_iter *iter) +{ + while (iter->nr_pages) { + struct page *page = + list_last_entry(&iter->pages, struct page, lru); - if (!ret) - return page; + prefetchw(&page->flags); + list_del(&page->lru); + iter->nr_pages--; - (*nr_pages)--; + if (!readpage_add_page(iter, page)) + return page; } return NULL; } -#define for_each_readpage_page(_mapping, _pages, _nr_pages, _page) \ +#define for_each_readpage_page(_iter, _page) \ for (; \ - ((_page) = __readpage_next_page(_mapping, _pages, &(_nr_pages)));\ - (_nr_pages)--) + ((_page) = __readpage_next_page(&(_iter)));) \ -static void bch_mark_pages_unalloc(struct bio *bio) +static void bch2_mark_pages_unalloc(struct bio *bio) { struct bvec_iter iter; struct bio_vec bv; @@ -604,7 +620,7 @@ static void 
bch_mark_pages_unalloc(struct bio *bio) page_state(bv.bv_page)->allocated = 0; } -static void bch_add_page_sectors(struct bio *bio, struct bkey_s_c k) +static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k) { struct bvec_iter iter; struct bio_vec bv; @@ -618,88 +634,134 @@ static void bch_add_page_sectors(struct bio *bio, struct bkey_s_c k) unsigned page_sectors = min(bv.bv_len >> 9, k_sectors); if (!s->sectors) - s->nr_replicas = bch_extent_nr_dirty_ptrs(k); + s->nr_replicas = bch2_extent_nr_dirty_ptrs(k); else s->nr_replicas = min_t(unsigned, s->nr_replicas, - bch_extent_nr_dirty_ptrs(k)); + bch2_extent_nr_dirty_ptrs(k)); BUG_ON(s->sectors + page_sectors > PAGE_SECTORS); s->sectors += page_sectors; } } -static void bchfs_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode) +static void readpage_bio_extend(struct readpages_iter *iter, + struct bio *bio, u64 offset, + bool get_more) { - struct bio *bio = &rbio->bio; - struct btree_iter iter; - struct bkey_s_c k; - struct bio_vec *bv; - unsigned i; + struct page *page; + pgoff_t page_offset; int ret; - bch_increment_clock(c, bio_sectors(bio), READ); + while (bio_end_sector(bio) < offset && + bio->bi_vcnt < bio->bi_max_vecs) { + page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT; - /* - * Initialize page state: - * If a page is partly allocated and partly a hole, we want it to be - * marked BCH_PAGE_UNALLOCATED - so we initially mark all pages - * allocated and then mark them unallocated as we find holes: - * - * Note that the bio hasn't been split yet - it's the only bio that - * points to these pages. As we walk extents and split @bio, that - * necessarily be true, the splits won't necessarily be on page - * boundaries: - */ - bio_for_each_segment_all(bv, bio, i) { - struct bch_page_state *s = page_state(bv->bv_page); + if (iter->nr_pages) { + page = list_last_entry(&iter->pages, struct page, lru); + if (page->index != page_offset) + break; + + list_del(&page->lru); + iter->nr_pages--; + } else if (get_more) { + rcu_read_lock(); + page = radix_tree_lookup(&iter->mapping->page_tree, page_offset); + rcu_read_unlock(); - EBUG_ON(s->reserved); + if (page && !radix_tree_exceptional_entry(page)) + break; + + page = __page_cache_alloc(readahead_gfp_mask(iter->mapping)); + if (!page) + break; - s->allocated = 1; - s->sectors = 0; + page->index = page_offset; + ClearPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page); + } else { + break; + } + + ret = readpage_add_page(iter, page); + if (ret) + break; + + __bio_add_page(bio, page); } - for_each_btree_key_with_holes(&iter, c, BTREE_ID_EXTENTS, - POS(inode, bio->bi_iter.bi_sector), k) { - BKEY_PADDED(k) tmp; + if (!iter->nr_pages) + SetPageReadahead(bio->bi_io_vec[bio->bi_vcnt - 1].bv_page); +} + +static void bchfs_read(struct bch_fs *c, struct btree_iter *iter, + struct bch_read_bio *rbio, u64 inode, + struct readpages_iter *readpages_iter) +{ + struct bio *bio = &rbio->bio; + int flags = BCH_READ_RETRY_IF_STALE| + BCH_READ_PROMOTE| + BCH_READ_MAY_REUSE_BIO; + + while (1) { struct extent_pick_ptr pick; - unsigned bytes, sectors; + BKEY_PADDED(k) tmp; + struct bkey_s_c k; + unsigned bytes; bool is_last; + bch2_btree_iter_set_pos(iter, POS(inode, bio->bi_iter.bi_sector)); + + k = bch2_btree_iter_peek_with_holes(iter); + BUG_ON(!k.k); + + if (IS_ERR(k.k)) { + int ret = bch2_btree_iter_unlock(iter); + BUG_ON(!ret); + bcache_io_error(c, bio, "btree IO error %i", ret); + bio_endio(bio); + return; + } + bkey_reassemble(&tmp.k, k); - bch_btree_iter_unlock(&iter); + 
bch2_btree_iter_unlock(iter); k = bkey_i_to_s_c(&tmp.k); - if (!bkey_extent_is_allocation(k.k) || - bkey_extent_is_compressed(k)) - bch_mark_pages_unalloc(bio); - - bch_extent_pick_ptr(c, k, &pick); + bch2_extent_pick_ptr(c, k, &pick); if (IS_ERR(pick.ca)) { bcache_io_error(c, bio, "no device to read from"); bio_endio(bio); return; } - sectors = min_t(u64, k.k->p.offset, bio_end_sector(bio)) - - bio->bi_iter.bi_sector; - bytes = sectors << 9; + if (readpages_iter) + readpage_bio_extend(readpages_iter, + bio, k.k->p.offset, + pick.ca && + (pick.crc.csum_type || + pick.crc.compression_type)); + + bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) - + bio->bi_iter.bi_sector) << 9; is_last = bytes == bio->bi_iter.bi_size; swap(bio->bi_iter.bi_size, bytes); if (bkey_extent_is_allocation(k.k)) - bch_add_page_sectors(bio, k); + bch2_add_page_sectors(bio, k); + + if (!bkey_extent_is_allocation(k.k) || + bkey_extent_is_compressed(k)) + bch2_mark_pages_unalloc(bio); + + if (is_last) + flags |= BCH_READ_IS_LAST; if (pick.ca) { PTR_BUCKET(pick.ca, &pick.ptr)->read_prio = c->prio_clock[READ].hand; - bch_read_extent(c, rbio, k, &pick, - BCH_READ_RETRY_IF_STALE| - BCH_READ_PROMOTE| - (is_last ? BCH_READ_IS_LAST : 0)); + bch2_read_extent(c, rbio, k, &pick, flags); + flags &= ~BCH_READ_MAY_REUSE_BIO; } else { - zero_fill_bio_iter(bio, bio->bi_iter); + zero_fill_bio(bio); if (is_last) bio_endio(bio); @@ -711,60 +773,79 @@ static void bchfs_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode) swap(bio->bi_iter.bi_size, bytes); bio_advance(bio, bytes); } - - /* - * If we get here, it better have been because there was an error - * reading a btree node - */ - ret = bch_btree_iter_unlock(&iter); - BUG_ON(!ret); - bcache_io_error(c, bio, "btree IO error %i", ret); - bio_endio(bio); } -int bch_readpages(struct file *file, struct address_space *mapping, +int bch2_readpages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { struct inode *inode = mapping->host; struct bch_fs *c = inode->i_sb->s_fs_info; - struct bch_read_bio *rbio = NULL; + struct btree_iter iter; struct page *page; + struct readpages_iter readpages_iter = { + .mapping = mapping, .nr_pages = nr_pages + }; - pr_debug("reading %u pages", nr_pages); + bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN); + + INIT_LIST_HEAD(&readpages_iter.pages); + list_add(&readpages_iter.pages, pages); + list_del_init(pages); if (current->pagecache_lock != &mapping->add_lock) pagecache_add_get(&mapping->add_lock); - for_each_readpage_page(mapping, pages, nr_pages, page) { -again: - if (!rbio) { - rbio = container_of(bio_alloc_bioset(GFP_NOFS, - min_t(unsigned, nr_pages, - BIO_MAX_PAGES), - &c->bio_read), - struct bch_read_bio, bio); + while ((page = readpage_iter_next(&readpages_iter))) { + unsigned n = max(min_t(unsigned, readpages_iter.nr_pages + 1, + BIO_MAX_PAGES), + BCH_ENCODED_EXTENT_MAX >> PAGE_SECTOR_SHIFT); - rbio->bio.bi_end_io = bch_readpages_end_io; - } + struct bch_read_bio *rbio = + container_of(bio_alloc_bioset(GFP_NOFS, n, + &c->bio_read), + struct bch_read_bio, bio); - if (bio_add_page_contig(&rbio->bio, page)) { - bchfs_read(c, rbio, inode->i_ino); - rbio = NULL; - goto again; - } + rbio->bio.bi_end_io = bch2_readpages_end_io; + bio_add_page_contig(&rbio->bio, page); + bchfs_read(c, &iter, rbio, inode->i_ino, &readpages_iter); } - if (rbio) - bchfs_read(c, rbio, inode->i_ino); - if (current->pagecache_lock != &mapping->add_lock) pagecache_add_put(&mapping->add_lock); - 
pr_debug("success"); return 0; } -int bch_readpage(struct file *file, struct page *page) +static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio, + u64 inode, struct page *page) +{ + struct btree_iter iter; + + /* + * Initialize page state: + * If a page is partly allocated and partly a hole, we want it to be + * marked BCH_PAGE_UNALLOCATED - so we initially mark all pages + * allocated and then mark them unallocated as we find holes: + * + * Note that the bio hasn't been split yet - it's the only bio that + * points to these pages. As we walk extents and split @bio, that + * necessarily be true, the splits won't necessarily be on page + * boundaries: + */ + struct bch_page_state *s = page_state(page); + + EBUG_ON(s->reserved); + s->allocated = 1; + s->sectors = 0; + + bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC); + bio_add_page_contig(&rbio->bio, page); + + bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN); + bchfs_read(c, &iter, rbio, inode, NULL); +} + +int bch2_readpage(struct file *file, struct page *page) { struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; @@ -774,12 +855,9 @@ int bch_readpage(struct file *file, struct page *page) rbio = container_of(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), struct bch_read_bio, bio); - bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC); - rbio->bio.bi_end_io = bch_readpages_end_io; - - bio_add_page_contig(&rbio->bio, page); - bchfs_read(c, rbio, inode->i_ino); + rbio->bio.bi_end_io = bch2_readpages_end_io; + __bchfs_readpage(c, rbio, inode->i_ino, page); return 0; } @@ -787,7 +865,7 @@ struct bch_writepage_state { struct bch_writepage_io *io; }; -static void bch_writepage_io_free(struct closure *cl) +static void bch2_writepage_io_free(struct closure *cl) { struct bch_writepage_io *io = container_of(cl, struct bch_writepage_io, cl); @@ -796,7 +874,7 @@ static void bch_writepage_io_free(struct closure *cl) bio_put(bio); } -static void bch_writepage_io_done(struct closure *cl) +static void bch2_writepage_io_done(struct closure *cl) { struct bch_writepage_io *io = container_of(cl, struct bch_writepage_io, cl); @@ -854,10 +932,10 @@ static void bch_writepage_io_done(struct closure *cl) bio_for_each_segment_all(bvec, bio, i) end_page_writeback(bvec->bv_page); - closure_return_with_destructor(&io->cl, bch_writepage_io_free); + closure_return_with_destructor(&io->cl, bch2_writepage_io_free); } -static void bch_writepage_do_io(struct bch_writepage_state *w) +static void bch2_writepage_do_io(struct bch_writepage_state *w) { struct bch_writepage_io *io = w->io; @@ -866,18 +944,18 @@ static void bch_writepage_do_io(struct bch_writepage_state *w) io->op.op.pos.offset = io->bio.bio.bi_iter.bi_sector; - closure_call(&io->op.op.cl, bch_write, NULL, &io->cl); - continue_at(&io->cl, bch_writepage_io_done, NULL); + closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl); + continue_at(&io->cl, bch2_writepage_io_done, NULL); } /* * Get a bch_writepage_io and add @page to it - appending to an existing one if * possible, else allocating a new one: */ -static void bch_writepage_io_alloc(struct bch_fs *c, - struct bch_writepage_state *w, - struct bch_inode_info *ei, - struct page *page) +static void bch2_writepage_io_alloc(struct bch_fs *c, + struct bch_writepage_state *w, + struct bch_inode_info *ei, + struct page *page) { u64 inum = ei->vfs_inode.i_ino; unsigned nr_replicas = page_state(page)->nr_replicas; @@ -889,14 +967,14 @@ static void bch_writepage_io_alloc(struct bch_fs *c, alloc_io: w->io = 
container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, - bch_writepage_bioset), + bch2_writepage_bioset), struct bch_writepage_io, bio.bio); closure_init(&w->io->cl, NULL); w->io->op.ei = ei; w->io->op.sectors_added = 0; w->io->op.is_dio = false; - bch_write_op_init(&w->io->op.op, c, &w->io->bio, + bch2_write_op_init(&w->io->op.op, c, &w->io->bio, (struct disk_reservation) { .nr_replicas = c->opts.data_replicas, }, @@ -908,7 +986,7 @@ alloc_io: if (w->io->op.op.res.nr_replicas != nr_replicas || bio_add_page_contig(&w->io->bio.bio, page)) { - bch_writepage_do_io(w); + bch2_writepage_do_io(w); goto alloc_io; } @@ -919,9 +997,9 @@ alloc_io: BUG_ON(ei != w->io->op.ei); } -static int __bch_writepage(struct bch_fs *c, struct page *page, - struct writeback_control *wbc, - struct bch_writepage_state *w) +static int __bch2_writepage(struct bch_fs *c, struct page *page, + struct writeback_control *wbc, + struct bch_writepage_state *w) { struct inode *inode = page->mapping->host; struct bch_inode_info *ei = to_bch_ei(inode); @@ -952,7 +1030,7 @@ static int __bch_writepage(struct bch_fs *c, struct page *page, */ zero_user_segment(page, offset, PAGE_SIZE); do_io: - bch_writepage_io_alloc(c, w, ei, page); + bch2_writepage_io_alloc(c, w, ei, page); /* while page is locked: */ w->io->op.new_i_size = i_size; @@ -985,7 +1063,7 @@ out: return 0; } -int bch_writepages(struct address_space *mapping, struct writeback_control *wbc) +int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct bch_fs *c = mapping->host->i_sb->s_fs_info; struct bch_writepage_state w = { NULL }; @@ -1031,7 +1109,7 @@ get_pages: if (w.io && !bio_can_add_page_contig(&w.io->bio.bio, page)) - bch_writepage_do_io(&w); + bch2_writepage_do_io(&w); if (!w.io && atomic_read(&c->writeback_pages) >= @@ -1078,7 +1156,7 @@ continue_unlock: goto continue_unlock; trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); - ret = __bch_writepage(c, page, wbc, &w); + ret = __bch2_writepage(c, page, wbc, &w); if (unlikely(ret)) { if (ret == AOP_WRITEPAGE_ACTIVATE) { unlock_page(page); @@ -1114,7 +1192,7 @@ continue_unlock: pagecache_iter_release(&iter); if (w.io) - bch_writepage_do_io(&w); + bch2_writepage_do_io(&w); if (!cycled && !done) { /* @@ -1133,26 +1211,26 @@ continue_unlock: return ret; } -int bch_writepage(struct page *page, struct writeback_control *wbc) +int bch2_writepage(struct page *page, struct writeback_control *wbc) { struct bch_fs *c = page->mapping->host->i_sb->s_fs_info; struct bch_writepage_state w = { NULL }; int ret; - ret = __bch_writepage(c, page, wbc, &w); + ret = __bch2_writepage(c, page, wbc, &w); if (w.io) - bch_writepage_do_io(&w); + bch2_writepage_do_io(&w); return ret; } -static void bch_read_single_page_end_io(struct bio *bio) +static void bch2_read_single_page_end_io(struct bio *bio) { complete(bio->bi_private); } -static int bch_read_single_page(struct page *page, - struct address_space *mapping) +static int bch2_read_single_page(struct page *page, + struct address_space *mapping) { struct inode *inode = mapping->host; struct bch_fs *c = inode->i_sb->s_fs_info; @@ -1163,12 +1241,10 @@ static int bch_read_single_page(struct page *page, rbio = container_of(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), struct bch_read_bio, bio); - bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC); rbio->bio.bi_private = &done; - rbio->bio.bi_end_io = bch_read_single_page_end_io; - bio_add_page_contig(&rbio->bio, page); + rbio->bio.bi_end_io = bch2_read_single_page_end_io; - bchfs_read(c, rbio, 
inode->i_ino); + __bchfs_readpage(c, rbio, inode->i_ino, page); wait_for_completion(&done); ret = rbio->bio.bi_error; @@ -1181,9 +1257,9 @@ static int bch_read_single_page(struct page *page, return 0; } -int bch_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) +int bch2_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) { struct inode *inode = mapping->host; struct bch_fs *c = inode->i_sb->s_fs_info; @@ -1220,11 +1296,11 @@ int bch_write_begin(struct file *file, struct address_space *mapping, goto out; } readpage: - ret = bch_read_single_page(page, mapping); + ret = bch2_read_single_page(page, mapping); if (ret) goto err; out: - ret = bch_get_page_reservation(c, page, true); + ret = bch2_get_page_reservation(c, page, true); if (ret) { if (!PageUptodate(page)) { /* @@ -1250,9 +1326,9 @@ err_unlock: return ret; } -int bch_write_end(struct file *filp, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata) +int bch2_write_end(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) { struct inode *inode = page->mapping->host; struct bch_fs *c = inode->i_sb->s_fs_info; @@ -1279,7 +1355,7 @@ int bch_write_end(struct file *filp, struct address_space *mapping, if (!PageDirty(page)) set_page_dirty(page); } else { - bch_put_page_reservation(c, page); + bch2_put_page_reservation(c, page); } unlock_page(page); @@ -1291,7 +1367,7 @@ int bch_write_end(struct file *filp, struct address_space *mapping, /* O_DIRECT */ -static void bch_dio_read_complete(struct closure *cl) +static void bch2_dio_read_complete(struct closure *cl) { struct dio_read *dio = container_of(cl, struct dio_read, cl); @@ -1299,7 +1375,7 @@ static void bch_dio_read_complete(struct closure *cl) bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */ } -static void bch_direct_IO_read_endio(struct bio *bio) +static void bch2_direct_IO_read_endio(struct bio *bio) { struct dio_read *dio = bio->bi_private; @@ -1309,15 +1385,15 @@ static void bch_direct_IO_read_endio(struct bio *bio) closure_put(&dio->cl); } -static void bch_direct_IO_read_split_endio(struct bio *bio) +static void bch2_direct_IO_read_split_endio(struct bio *bio) { - bch_direct_IO_read_endio(bio); + bch2_direct_IO_read_endio(bio); bio_check_pages_dirty(bio); /* transfers ownership */ } -static int bch_direct_IO_read(struct bch_fs *c, struct kiocb *req, - struct file *file, struct inode *inode, - struct iov_iter *iter, loff_t offset) +static int bch2_direct_IO_read(struct bch_fs *c, struct kiocb *req, + struct file *file, struct inode *inode, + struct iov_iter *iter, loff_t offset) { struct dio_read *dio; struct bio *bio; @@ -1336,9 +1412,9 @@ static int bch_direct_IO_read(struct bch_fs *c, struct kiocb *req, bio = bio_alloc_bioset(GFP_KERNEL, iov_iter_npages(iter, BIO_MAX_PAGES), - bch_dio_read_bioset); + bch2_dio_read_bioset); - bio->bi_end_io = bch_direct_IO_read_endio; + bio->bi_end_io = bch2_direct_IO_read_endio; dio = container_of(bio, struct dio_read, rbio.bio); closure_init(&dio->cl, NULL); @@ -1348,7 +1424,7 @@ static int bch_direct_IO_read(struct bch_fs *c, struct kiocb *req, * end: */ if (!sync) { - set_closure_fn(&dio->cl, bch_dio_read_complete, NULL); + set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL); atomic_set(&dio->cl.remaining, 
CLOSURE_REMAINING_INITIALIZER - CLOSURE_RUNNING + @@ -1366,7 +1442,7 @@ static int bch_direct_IO_read(struct bch_fs *c, struct kiocb *req, bio = bio_alloc_bioset(GFP_KERNEL, iov_iter_npages(iter, BIO_MAX_PAGES), &c->bio_read); - bio->bi_end_io = bch_direct_IO_read_split_endio; + bio->bi_end_io = bch2_direct_IO_read_split_endio; start: bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC); bio->bi_iter.bi_sector = offset >> 9; @@ -1386,7 +1462,7 @@ start: if (iter->count) closure_get(&dio->cl); - bch_read(c, container_of(bio, + bch2_read(c, container_of(bio, struct bch_read_bio, bio), inode->i_ino); } @@ -1402,14 +1478,14 @@ start: } } -static long __bch_dio_write_complete(struct dio_write *dio) +static long __bch2_dio_write_complete(struct dio_write *dio) { struct file *file = dio->req->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = file->f_inode; long ret = dio->error ?: dio->written; - bch_disk_reservation_put(dio->c, &dio->res); + bch2_disk_reservation_put(dio->c, &dio->res); __pagecache_block_put(&mapping->add_lock); inode_dio_end(inode); @@ -1421,15 +1497,15 @@ static long __bch_dio_write_complete(struct dio_write *dio) return ret; } -static void bch_dio_write_complete(struct closure *cl) +static void bch2_dio_write_complete(struct closure *cl) { struct dio_write *dio = container_of(cl, struct dio_write, cl); struct kiocb *req = dio->req; - req->ki_complete(req, __bch_dio_write_complete(dio), 0); + req->ki_complete(req, __bch2_dio_write_complete(dio), 0); } -static void bch_dio_write_done(struct dio_write *dio) +static void bch2_dio_write_done(struct dio_write *dio) { struct bio_vec *bv; int i; @@ -1446,7 +1522,7 @@ static void bch_dio_write_done(struct dio_write *dio) bio_reset(&dio->bio.bio); } -static void bch_do_direct_IO_write(struct dio_write *dio) +static void bch2_do_direct_IO_write(struct dio_write *dio) { struct file *file = dio->req->ki_filp; struct inode *inode = file->f_inode; @@ -1464,7 +1540,7 @@ static void bch_do_direct_IO_write(struct dio_write *dio) ret = bio_get_user_pages(bio, &dio->iter, 0); if (ret < 0) { /* - * these didn't get initialized, but bch_dio_write_done() will + * these didn't get initialized, but bch2_dio_write_done() will * look at them: */ dio->iop.op.error = 0; @@ -1477,7 +1553,7 @@ static void bch_do_direct_IO_write(struct dio_write *dio) dio->iop.sectors_added = 0; dio->iop.is_dio = true; dio->iop.new_i_size = U64_MAX; - bch_write_op_init(&dio->iop.op, dio->c, &dio->bio, + bch2_write_op_init(&dio->iop.op, dio->c, &dio->bio, dio->res, foreground_write_point(dio->c, inode->i_ino), POS(inode->i_ino, bio->bi_iter.bi_sector), @@ -1489,40 +1565,40 @@ static void bch_do_direct_IO_write(struct dio_write *dio) task_io_account_write(bio->bi_iter.bi_size); - closure_call(&dio->iop.op.cl, bch_write, NULL, &dio->cl); + closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl); } -static void bch_dio_write_loop_async(struct closure *cl) +static void bch2_dio_write_loop_async(struct closure *cl) { struct dio_write *dio = container_of(cl, struct dio_write, cl); struct address_space *mapping = dio->req->ki_filp->f_mapping; - bch_dio_write_done(dio); + bch2_dio_write_done(dio); if (dio->iter.count && !dio->error) { use_mm(dio->mm); pagecache_block_get(&mapping->add_lock); - bch_do_direct_IO_write(dio); + bch2_do_direct_IO_write(dio); pagecache_block_put(&mapping->add_lock); unuse_mm(dio->mm); - continue_at(&dio->cl, bch_dio_write_loop_async, NULL); + continue_at(&dio->cl, bch2_dio_write_loop_async, NULL); } else { #if 0 - 
closure_return_with_destructor(cl, bch_dio_write_complete); + closure_return_with_destructor(cl, bch2_dio_write_complete); #else closure_debug_destroy(cl); - bch_dio_write_complete(cl); + bch2_dio_write_complete(cl); #endif } } -static int bch_direct_IO_write(struct bch_fs *c, struct kiocb *req, - struct file *file, struct inode *inode, - struct iov_iter *iter, loff_t offset) +static int bch2_direct_IO_write(struct bch_fs *c, struct kiocb *req, + struct file *file, struct inode *inode, + struct iov_iter *iter, loff_t offset) { struct address_space *mapping = file->f_mapping; struct dio_write *dio; @@ -1540,7 +1616,7 @@ static int bch_direct_IO_write(struct bch_fs *c, struct kiocb *req, bio = bio_alloc_bioset(GFP_KERNEL, iov_iter_npages(iter, BIO_MAX_PAGES), - bch_dio_write_bioset); + bch2_dio_write_bioset); dio = container_of(bio, struct dio_write, bio.bio); dio->req = req; dio->c = c; @@ -1563,7 +1639,7 @@ static int bch_direct_IO_write(struct bch_fs *c, struct kiocb *req, * Have to then guard against racing with truncate (deleting data that * we would have been overwriting) */ - ret = bch_disk_reservation_get(c, &dio->res, iter->count >> 9, 0); + ret = bch2_disk_reservation_get(c, &dio->res, iter->count >> 9, 0); if (unlikely(ret)) { closure_debug_destroy(&dio->cl); bio_put(bio); @@ -1575,16 +1651,16 @@ static int bch_direct_IO_write(struct bch_fs *c, struct kiocb *req, if (sync) { do { - bch_do_direct_IO_write(dio); + bch2_do_direct_IO_write(dio); closure_sync(&dio->cl); - bch_dio_write_done(dio); + bch2_dio_write_done(dio); } while (dio->iter.count && !dio->error); closure_debug_destroy(&dio->cl); - return __bch_dio_write_complete(dio); + return __bch2_dio_write_complete(dio); } else { - bch_do_direct_IO_write(dio); + bch2_do_direct_IO_write(dio); if (dio->iter.count && !dio->error) { if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) { @@ -1603,12 +1679,12 @@ static int bch_direct_IO_write(struct bch_fs *c, struct kiocb *req, dio->iter.iov = dio->iovec; } - continue_at_noreturn(&dio->cl, bch_dio_write_loop_async, NULL); + continue_at_noreturn(&dio->cl, bch2_dio_write_loop_async, NULL); return -EIOCBQUEUED; } } -ssize_t bch_direct_IO(struct kiocb *req, struct iov_iter *iter) +ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter) { struct file *file = req->ki_filp; struct inode *inode = file->f_inode; @@ -1618,15 +1694,15 @@ ssize_t bch_direct_IO(struct kiocb *req, struct iov_iter *iter) blk_start_plug(&plug); ret = ((iov_iter_rw(iter) == WRITE) - ? bch_direct_IO_write - : bch_direct_IO_read)(c, req, file, inode, iter, req->ki_pos); + ? 
bch2_direct_IO_write + : bch2_direct_IO_read)(c, req, file, inode, iter, req->ki_pos); blk_finish_plug(&plug); return ret; } static ssize_t -bch_direct_write(struct kiocb *iocb, struct iov_iter *iter) +bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_inode; @@ -1643,14 +1719,14 @@ bch_direct_write(struct kiocb *iocb, struct iov_iter *iter) if (unlikely(ret)) goto err; - ret = bch_direct_IO_write(c, iocb, file, inode, iter, pos); + ret = bch2_direct_IO_write(c, iocb, file, inode, iter, pos); err: pagecache_block_put(&mapping->add_lock); return ret; } -static ssize_t __bch_write_iter(struct kiocb *iocb, struct iov_iter *from) +static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -1668,7 +1744,7 @@ static ssize_t __bch_write_iter(struct kiocb *iocb, struct iov_iter *from) goto out; ret = iocb->ki_flags & IOCB_DIRECT - ? bch_direct_write(iocb, from) + ? bch2_direct_write(iocb, from) : generic_perform_write(file, from, iocb->ki_pos); if (likely(ret > 0)) @@ -1678,7 +1754,7 @@ out: return ret; } -ssize_t bch_write_iter(struct kiocb *iocb, struct iov_iter *from) +ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; @@ -1688,7 +1764,7 @@ ssize_t bch_write_iter(struct kiocb *iocb, struct iov_iter *from) inode_lock(inode); ret = generic_write_checks(iocb, from); if (ret > 0) - ret = __bch_write_iter(iocb, from); + ret = __bch2_write_iter(iocb, from); inode_unlock(inode); if (ret > 0 && !direct) @@ -1697,7 +1773,7 @@ ssize_t bch_write_iter(struct kiocb *iocb, struct iov_iter *from) return ret; } -int bch_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +int bch2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page = vmf->page; struct inode *inode = file_inode(vma->vm_file); @@ -1725,7 +1801,7 @@ int bch_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) goto out; } - if (bch_get_page_reservation(c, page, true)) { + if (bch2_get_page_reservation(c, page, true)) { unlock_page(page); ret = VM_FAULT_SIGBUS; goto out; @@ -1741,8 +1817,8 @@ out: return ret; } -void bch_invalidatepage(struct page *page, unsigned int offset, - unsigned int length) +void bch2_invalidatepage(struct page *page, unsigned int offset, + unsigned int length) { EBUG_ON(!PageLocked(page)); EBUG_ON(PageWriteback(page)); @@ -1750,10 +1826,10 @@ void bch_invalidatepage(struct page *page, unsigned int offset, if (offset || length < PAGE_SIZE) return; - bch_clear_page_bits(page); + bch2_clear_page_bits(page); } -int bch_releasepage(struct page *page, gfp_t gfp_mask) +int bch2_releasepage(struct page *page, gfp_t gfp_mask) { EBUG_ON(!PageLocked(page)); EBUG_ON(PageWriteback(page)); @@ -1761,13 +1837,13 @@ int bch_releasepage(struct page *page, gfp_t gfp_mask) if (PageDirty(page)) return 0; - bch_clear_page_bits(page); + bch2_clear_page_bits(page); return 1; } #ifdef CONFIG_MIGRATION -int bch_migrate_page(struct address_space *mapping, struct page *newpage, - struct page *page, enum migrate_mode mode) +int bch2_migrate_page(struct address_space *mapping, struct page *newpage, + struct page *page, enum migrate_mode mode) { int ret; @@ -1785,7 +1861,7 @@ int bch_migrate_page(struct address_space *mapping, struct page *newpage, } #endif -int bch_fsync(struct file *file, loff_t start, loff_t end, int 
datasync) +int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; struct bch_inode_info *ei = to_bch_ei(inode); @@ -1799,11 +1875,11 @@ int bch_fsync(struct file *file, loff_t start, loff_t end, int datasync) if (c->opts.journal_flush_disabled) return 0; - return bch_journal_flush_seq(&c->journal, ei->journal_seq); + return bch2_journal_flush_seq(&c->journal, ei->journal_seq); } -static int __bch_truncate_page(struct address_space *mapping, - pgoff_t index, loff_t start, loff_t end) +static int __bch2_truncate_page(struct address_space *mapping, + pgoff_t index, loff_t start, loff_t end) { struct inode *inode = mapping->host; struct bch_fs *c = inode->i_sb->s_fs_info; @@ -1840,11 +1916,11 @@ static int __bch_truncate_page(struct address_space *mapping, if (k.k->type != KEY_TYPE_DISCARD && k.k->type != BCH_RESERVATION) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); goto create; } } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return 0; create: page = find_or_create_page(mapping, index, GFP_KERNEL); @@ -1855,7 +1931,7 @@ create: } if (!PageUptodate(page)) { - ret = bch_read_single_page(page, mapping); + ret = bch2_read_single_page(page, mapping); if (ret) goto unlock; } @@ -1866,7 +1942,7 @@ create: * XXX: because we aren't currently tracking whether the page has actual * data in it (vs. just 0s, or only partially written) this wrong. ick. */ - ret = bch_get_page_reservation(c, page, false); + ret = bch2_get_page_reservation(c, page, false); BUG_ON(ret); if (index == start >> PAGE_SHIFT && @@ -1886,13 +1962,13 @@ out: return ret; } -static int bch_truncate_page(struct address_space *mapping, loff_t from) +static int bch2_truncate_page(struct address_space *mapping, loff_t from) { - return __bch_truncate_page(mapping, from >> PAGE_SHIFT, + return __bch2_truncate_page(mapping, from >> PAGE_SHIFT, from, from + PAGE_SIZE); } -int bch_truncate(struct inode *inode, struct iattr *iattr) +int bch2_truncate(struct inode *inode, struct iattr *iattr) { struct address_space *mapping = inode->i_mapping; struct bch_inode_info *ei = to_bch_ei(inode); @@ -1914,7 +1990,7 @@ int bch_truncate(struct inode *inode, struct iattr *iattr) mutex_lock(&ei->update_lock); i_size_dirty_get(ei); - ret = bch_write_inode_size(c, ei, inode->i_size); + ret = bch2_write_inode_size(c, ei, inode->i_size); mutex_unlock(&ei->update_lock); if (unlikely(ret)) @@ -1922,7 +1998,7 @@ int bch_truncate(struct inode *inode, struct iattr *iattr) /* * There might be persistent reservations (from fallocate()) - * above i_size, which bch_inode_truncate() will discard - we're + * above i_size, which bch2_inode_truncate() will discard - we're * only supposed to discard them if we're doing a real truncate * here (new i_size < current i_size): */ @@ -1934,13 +2010,13 @@ int bch_truncate(struct inode *inode, struct iattr *iattr) if (unlikely(ret)) goto err; - ret = bch_truncate_page(inode->i_mapping, iattr->ia_size); + ret = bch2_truncate_page(inode->i_mapping, iattr->ia_size); if (unlikely(ret)) { i_sectors_dirty_put(ei, &i_sectors_hook); goto err; } - ret = bch_inode_truncate(c, inode->i_ino, + ret = bch2_inode_truncate(c, inode->i_ino, round_up(iattr->ia_size, PAGE_SIZE) >> 9, &i_sectors_hook.hook, &ei->journal_seq); @@ -1957,7 +2033,7 @@ int bch_truncate(struct inode *inode, struct iattr *iattr) /* clear I_SIZE_DIRTY: */ i_size_dirty_put(ei); - ret = bch_write_inode_size(c, ei, inode->i_size); + ret = bch2_write_inode_size(c, ei, 
inode->i_size); mutex_unlock(&ei->update_lock); pagecache_block_put(&mapping->add_lock); @@ -1970,7 +2046,7 @@ err_put_pagecache: return ret; } -static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len) +static long bch2_fpunch(struct inode *inode, loff_t offset, loff_t len) { struct address_space *mapping = inode->i_mapping; struct bch_inode_info *ei = to_bch_ei(inode); @@ -1984,7 +2060,7 @@ static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len) inode_dio_wait(inode); pagecache_block_get(&mapping->add_lock); - ret = __bch_truncate_page(inode->i_mapping, + ret = __bch2_truncate_page(inode->i_mapping, offset >> PAGE_SHIFT, offset, offset + len); if (unlikely(ret)) @@ -1992,7 +2068,7 @@ static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len) if (offset >> PAGE_SHIFT != (offset + len) >> PAGE_SHIFT) { - ret = __bch_truncate_page(inode->i_mapping, + ret = __bch2_truncate_page(inode->i_mapping, (offset + len) >> PAGE_SHIFT, offset, offset + len); if (unlikely(ret)) @@ -2006,13 +2082,13 @@ static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len) struct i_sectors_hook i_sectors_hook; int ret; - BUG_ON(bch_disk_reservation_get(c, &disk_res, 0, 0)); + BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0)); ret = i_sectors_dirty_get(ei, &i_sectors_hook); if (unlikely(ret)) goto out; - ret = bch_discard(c, + ret = bch2_discard(c, POS(ino, discard_start), POS(ino, discard_end), ZERO_VERSION, @@ -2021,7 +2097,7 @@ static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len) &ei->journal_seq); i_sectors_dirty_put(ei, &i_sectors_hook); - bch_disk_reservation_put(c, &disk_res); + bch2_disk_reservation_put(c, &disk_res); } out: pagecache_block_put(&mapping->add_lock); @@ -2030,7 +2106,7 @@ out: return ret; } -static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len) +static long bch2_fcollapse(struct inode *inode, loff_t offset, loff_t len) { struct address_space *mapping = inode->i_mapping; struct bch_inode_info *ei = to_bch_ei(inode); @@ -2046,11 +2122,11 @@ static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len) if ((offset | len) & (PAGE_SIZE - 1)) return -EINVAL; - bch_btree_iter_init_intent(&dst, c, BTREE_ID_EXTENTS, + bch2_btree_iter_init_intent(&dst, c, BTREE_ID_EXTENTS, POS(inode->i_ino, offset >> 9)); /* position will be set from dst iter's position: */ - bch_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN); - bch_btree_iter_link(&src, &dst); + bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN); + bch2_btree_iter_link(&src, &dst); /* * We need i_mutex to keep the page cache consistent with the extents @@ -2085,14 +2161,14 @@ static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len) round_up(new_size, PAGE_SIZE) >> 9)) < 0) { struct disk_reservation disk_res; - bch_btree_iter_set_pos(&src, + bch2_btree_iter_set_pos(&src, POS(dst.pos.inode, dst.pos.offset + (len >> 9))); - ret = bch_btree_iter_traverse(&dst); + ret = bch2_btree_iter_traverse(&dst); if (ret) goto btree_iter_err; - k = bch_btree_iter_peek_with_holes(&src); + k = bch2_btree_iter_peek_with_holes(&src); if ((ret = btree_iter_err(k))) goto btree_iter_err; @@ -2101,32 +2177,32 @@ static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len) if (bkey_deleted(©.k.k)) copy.k.k.type = KEY_TYPE_DISCARD; - bch_cut_front(src.pos, ©.k); + bch2_cut_front(src.pos, ©.k); copy.k.k.p.offset -= len >> 9; BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(©.k.k))); - ret = bch_disk_reservation_get(c, &disk_res, copy.k.k.size, + 
ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size, BCH_DISK_RESERVATION_NOFAIL); BUG_ON(ret); - ret = bch_btree_insert_at(c, &disk_res, &i_sectors_hook.hook, + ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook, &ei->journal_seq, BTREE_INSERT_ATOMIC| BTREE_INSERT_NOFAIL, BTREE_INSERT_ENTRY(&dst, ©.k)); - bch_disk_reservation_put(c, &disk_res); + bch2_disk_reservation_put(c, &disk_res); btree_iter_err: if (ret < 0 && ret != -EINTR) goto err_unwind; - bch_btree_iter_cond_resched(&src); + bch2_btree_iter_cond_resched(&src); } - bch_btree_iter_unlock(&src); - bch_btree_iter_unlock(&dst); + bch2_btree_iter_unlock(&src); + bch2_btree_iter_unlock(&dst); - ret = bch_inode_truncate(c, inode->i_ino, + ret = bch2_inode_truncate(c, inode->i_ino, round_up(new_size, PAGE_SIZE) >> 9, &i_sectors_hook.hook, &ei->journal_seq); @@ -2137,7 +2213,7 @@ btree_iter_err: mutex_lock(&ei->update_lock); i_size_write(inode, new_size); - ret = bch_write_inode_size(c, ei, inode->i_size); + ret = bch2_write_inode_size(c, ei, inode->i_size); mutex_unlock(&ei->update_lock); pagecache_block_put(&mapping->add_lock); @@ -2151,15 +2227,15 @@ err_unwind: */ i_sectors_dirty_put(ei, &i_sectors_hook); err: - bch_btree_iter_unlock(&src); - bch_btree_iter_unlock(&dst); + bch2_btree_iter_unlock(&src); + bch2_btree_iter_unlock(&dst); pagecache_block_put(&mapping->add_lock); inode_unlock(inode); return ret; } -static long bch_fallocate(struct inode *inode, int mode, - loff_t offset, loff_t len) +static long bch2_fallocate(struct inode *inode, int mode, + loff_t offset, loff_t len) { struct address_space *mapping = inode->i_mapping; struct bch_inode_info *ei = to_bch_ei(inode); @@ -2173,7 +2249,7 @@ static long bch_fallocate(struct inode *inode, int mode, unsigned replicas = READ_ONCE(c->opts.data_replicas); int ret; - bch_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS, POS_MIN); + bch2_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS, POS_MIN); inode_lock(inode); inode_dio_wait(inode); @@ -2187,14 +2263,14 @@ static long bch_fallocate(struct inode *inode, int mode, } if (mode & FALLOC_FL_ZERO_RANGE) { - ret = __bch_truncate_page(inode->i_mapping, + ret = __bch2_truncate_page(inode->i_mapping, offset >> PAGE_SHIFT, offset, offset + len); if (!ret && offset >> PAGE_SHIFT != (offset + len) >> PAGE_SHIFT) - ret = __bch_truncate_page(inode->i_mapping, + ret = __bch2_truncate_page(inode->i_mapping, (offset + len) >> PAGE_SHIFT, offset, offset + len); @@ -2210,7 +2286,7 @@ static long bch_fallocate(struct inode *inode, int mode, block_end = round_up(offset + len, PAGE_SIZE); } - bch_btree_iter_set_pos(&iter, POS(inode->i_ino, block_start >> 9)); + bch2_btree_iter_set_pos(&iter, POS(inode->i_ino, block_start >> 9)); end = POS(inode->i_ino, block_end >> 9); ret = i_sectors_dirty_get(ei, &i_sectors_hook); @@ -2222,20 +2298,20 @@ static long bch_fallocate(struct inode *inode, int mode, struct bkey_i_reservation reservation; struct bkey_s_c k; - k = bch_btree_iter_peek_with_holes(&iter); + k = bch2_btree_iter_peek_with_holes(&iter); if ((ret = btree_iter_err(k))) goto btree_iter_err; /* already reserved */ if (k.k->type == BCH_RESERVATION && bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) { - bch_btree_iter_advance_pos(&iter); + bch2_btree_iter_advance_pos(&iter); continue; } if (bkey_extent_is_data(k.k)) { if (!(mode & FALLOC_FL_ZERO_RANGE)) { - bch_btree_iter_advance_pos(&iter); + bch2_btree_iter_advance_pos(&iter); continue; } } @@ -2245,15 +2321,15 @@ static long bch_fallocate(struct inode *inode, int mode, 
reservation.k.p = k.k->p; reservation.k.size = k.k->size; - bch_cut_front(iter.pos, &reservation.k_i); - bch_cut_back(end, &reservation.k); + bch2_cut_front(iter.pos, &reservation.k_i); + bch2_cut_back(end, &reservation.k); sectors = reservation.k.size; - reservation.v.nr_replicas = bch_extent_nr_dirty_ptrs(k); + reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k); if (reservation.v.nr_replicas < replicas || bkey_extent_is_compressed(k)) { - ret = bch_disk_reservation_get(c, &disk_res, + ret = bch2_disk_reservation_get(c, &disk_res, sectors, 0); if (ret) goto err_put_sectors_dirty; @@ -2261,18 +2337,18 @@ static long bch_fallocate(struct inode *inode, int mode, reservation.v.nr_replicas = disk_res.nr_replicas; } - ret = bch_btree_insert_at(c, &disk_res, &i_sectors_hook.hook, + ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook, &ei->journal_seq, BTREE_INSERT_ATOMIC| BTREE_INSERT_NOFAIL, BTREE_INSERT_ENTRY(&iter, &reservation.k_i)); - bch_disk_reservation_put(c, &disk_res); + bch2_disk_reservation_put(c, &disk_res); btree_iter_err: if (ret < 0 && ret != -EINTR) goto err_put_sectors_dirty; } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); i_sectors_dirty_put(ei, &i_sectors_hook); @@ -2281,7 +2357,7 @@ btree_iter_err: i_size_write(inode, new_size); mutex_lock(&ei->update_lock); - ret = bch_write_inode_size(c, ei, inode->i_size); + ret = bch2_write_inode_size(c, ei, inode->i_size); mutex_unlock(&ei->update_lock); } @@ -2296,7 +2372,7 @@ btree_iter_err: if (ei->i_size != inode->i_size) { mutex_lock(&ei->update_lock); - ret = bch_write_inode_size(c, ei, inode->i_size); + ret = bch2_write_inode_size(c, ei, inode->i_size); mutex_unlock(&ei->update_lock); } } @@ -2308,25 +2384,25 @@ btree_iter_err: err_put_sectors_dirty: i_sectors_dirty_put(ei, &i_sectors_hook); err: - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); pagecache_block_put(&mapping->add_lock); inode_unlock(inode); return ret; } -long bch_fallocate_dispatch(struct file *file, int mode, - loff_t offset, loff_t len) +long bch2_fallocate_dispatch(struct file *file, int mode, + loff_t offset, loff_t len) { struct inode *inode = file_inode(file); if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE))) - return bch_fallocate(inode, mode, offset, len); + return bch2_fallocate(inode, mode, offset, len); if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE)) - return bch_fpunch(inode, offset, len); + return bch2_fpunch(inode, offset, len); if (mode == FALLOC_FL_COLLAPSE_RANGE) - return bch_fcollapse(inode, offset, len); + return bch2_fcollapse(inode, offset, len); return -EOPNOTSUPP; } @@ -2339,7 +2415,7 @@ static bool page_is_data(struct page *page) page_state(page)->dirty_sectors); } -static loff_t bch_next_pagecache_data(struct inode *inode, +static loff_t bch2_next_pagecache_data(struct inode *inode, loff_t start_offset, loff_t end_offset) { @@ -2369,7 +2445,7 @@ static loff_t bch_next_pagecache_data(struct inode *inode, return end_offset; } -static loff_t bch_seek_data(struct file *file, u64 offset) +static loff_t bch2_seek_data(struct file *file, u64 offset) { struct inode *inode = file->f_mapping->host; struct bch_fs *c = inode->i_sb->s_fs_info; @@ -2393,12 +2469,12 @@ static loff_t bch_seek_data(struct file *file, u64 offset) break; } - ret = bch_btree_iter_unlock(&iter); + ret = bch2_btree_iter_unlock(&iter); if (ret) return ret; if (next_data > offset) - next_data = bch_next_pagecache_data(inode, offset, next_data); + next_data = bch2_next_pagecache_data(inode, offset, next_data); if 
(next_data > isize) return -ENXIO; @@ -2421,7 +2497,7 @@ static bool page_slot_is_data(struct address_space *mapping, pgoff_t index) return ret; } -static loff_t bch_next_pagecache_hole(struct inode *inode, +static loff_t bch2_next_pagecache_hole(struct inode *inode, loff_t start_offset, loff_t end_offset) { @@ -2438,7 +2514,7 @@ static loff_t bch_next_pagecache_hole(struct inode *inode, return end_offset; } -static loff_t bch_seek_hole(struct file *file, u64 offset) +static loff_t bch2_seek_hole(struct file *file, u64 offset) { struct inode *inode = file->f_mapping->host; struct bch_fs *c = inode->i_sb->s_fs_info; @@ -2454,11 +2530,11 @@ static loff_t bch_seek_hole(struct file *file, u64 offset) for_each_btree_key_with_holes(&iter, c, BTREE_ID_EXTENTS, POS(inode->i_ino, offset >> 9), k) { if (k.k->p.inode != inode->i_ino) { - next_hole = bch_next_pagecache_hole(inode, + next_hole = bch2_next_pagecache_hole(inode, offset, MAX_LFS_FILESIZE); break; } else if (!bkey_extent_is_data(k.k)) { - next_hole = bch_next_pagecache_hole(inode, + next_hole = bch2_next_pagecache_hole(inode, max(offset, bkey_start_offset(k.k) << 9), k.k->p.offset << 9); @@ -2469,7 +2545,7 @@ static loff_t bch_seek_hole(struct file *file, u64 offset) } } - ret = bch_btree_iter_unlock(&iter); + ret = bch2_btree_iter_unlock(&iter); if (ret) return ret; @@ -2479,7 +2555,7 @@ static loff_t bch_seek_hole(struct file *file, u64 offset) return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE); } -loff_t bch_llseek(struct file *file, loff_t offset, int whence) +loff_t bch2_llseek(struct file *file, loff_t offset, int whence) { switch (whence) { case SEEK_SET: @@ -2487,9 +2563,9 @@ loff_t bch_llseek(struct file *file, loff_t offset, int whence) case SEEK_END: return generic_file_llseek(file, offset, whence); case SEEK_DATA: - return bch_seek_data(file, offset); + return bch2_seek_data(file, offset); case SEEK_HOLE: - return bch_seek_hole(file, offset); + return bch2_seek_hole(file, offset); } return -EINVAL; diff --git a/libbcachefs/fs-io.h b/libbcachefs/fs-io.h new file mode 100644 index 00000000..f3fcf947 --- /dev/null +++ b/libbcachefs/fs-io.h @@ -0,0 +1,96 @@ +#ifndef _BCACHE_FS_IO_H +#define _BCACHE_FS_IO_H + +#include "buckets.h" +#include <linux/uio.h> + +int bch2_set_page_dirty(struct page *); + +int bch2_writepage(struct page *, struct writeback_control *); +int bch2_readpage(struct file *, struct page *); + +int bch2_writepages(struct address_space *, struct writeback_control *); +int bch2_readpages(struct file *, struct address_space *, + struct list_head *, unsigned); + +int bch2_write_begin(struct file *, struct address_space *, loff_t, + unsigned, unsigned, struct page **, void **); +int bch2_write_end(struct file *, struct address_space *, loff_t, + unsigned, unsigned, struct page *, void *); + +ssize_t bch2_direct_IO(struct kiocb *, struct iov_iter *); + +ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *); + +int bch2_fsync(struct file *, loff_t, loff_t, int); + +int bch2_truncate(struct inode *, struct iattr *); +long bch2_fallocate_dispatch(struct file *, int, loff_t, loff_t); + +loff_t bch2_llseek(struct file *, loff_t, int); + +int bch2_page_mkwrite(struct vm_area_struct *, struct vm_fault *); +void bch2_invalidatepage(struct page *, unsigned int, unsigned int); +int bch2_releasepage(struct page *, gfp_t); +int bch2_migrate_page(struct address_space *, struct page *, + struct page *, enum migrate_mode); + +struct i_sectors_hook { + struct extent_insert_hook hook; + s64 sectors; + struct bch_inode_info *ei; 
+}; + +struct bchfs_write_op { + struct bch_inode_info *ei; + s64 sectors_added; + bool is_dio; + u64 new_i_size; + struct bch_write_op op; +}; + +struct bch_writepage_io { + struct closure cl; + + struct bchfs_write_op op; + + /* must come last: */ + struct bch_write_bio bio; +}; + +extern struct bio_set *bch2_writepage_bioset; + +struct dio_write { + struct closure cl; + struct kiocb *req; + struct bch_fs *c; + long written; + long error; + loff_t offset; + + struct disk_reservation res; + + struct iovec *iovec; + struct iovec inline_vecs[UIO_FASTIOV]; + struct iov_iter iter; + + struct mm_struct *mm; + + struct bchfs_write_op iop; + + /* must be last: */ + struct bch_write_bio bio; +}; + +extern struct bio_set *bch2_dio_write_bioset; + +struct dio_read { + struct closure cl; + struct kiocb *req; + long ret; + struct bch_read_bio rbio; +}; + +extern struct bio_set *bch2_dio_read_bioset; + +#endif /* _BCACHE_FS_IO_H */ diff --git a/libbcache/fs.c b/libbcachefs/fs.c index f1125a32..94c5a9e6 100644 --- a/libbcache/fs.c +++ b/libbcachefs/fs.c @@ -1,5 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" #include "acl.h" #include "btree_update.h" #include "buckets.h" @@ -24,11 +24,11 @@ #include <linux/statfs.h> #include <linux/xattr.h> -static struct kmem_cache *bch_inode_cache; +static struct kmem_cache *bch2_inode_cache; -static void bch_vfs_inode_init(struct bch_fs *, - struct bch_inode_info *, - struct bch_inode_unpacked *); +static void bch2_vfs_inode_init(struct bch_fs *, + struct bch_inode_info *, + struct bch_inode_unpacked *); /* * I_SIZE_DIRTY requires special handling: @@ -58,10 +58,10 @@ static void bch_vfs_inode_init(struct bch_fs *, * be set explicitly. */ -int __must_check __bch_write_inode(struct bch_fs *c, - struct bch_inode_info *ei, - inode_set_fn set, - void *p) +int __must_check __bch2_write_inode(struct bch_fs *c, + struct bch_inode_info *ei, + inode_set_fn set, + void *p) { struct btree_iter iter; struct inode *inode = &ei->vfs_inode; @@ -81,21 +81,21 @@ int __must_check __bch_write_inode(struct bch_fs *c, lockdep_assert_held(&ei->update_lock); - bch_btree_iter_init_intent(&iter, c, BTREE_ID_INODES, POS(inum, 0)); + bch2_btree_iter_init_intent(&iter, c, BTREE_ID_INODES, POS(inum, 0)); do { - struct bkey_s_c k = bch_btree_iter_peek_with_holes(&iter); + struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter); if ((ret = btree_iter_err(k))) goto out; if (WARN_ONCE(k.k->type != BCH_INODE_FS, "inode %llu not found when updating", inum)) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return -ENOENT; } - ret = bch_inode_unpack(bkey_s_c_to_inode(k), &inode_u); + ret = bch2_inode_unpack(bkey_s_c_to_inode(k), &inode_u); if (WARN_ONCE(ret, "error %i unpacking inode %llu", ret, inum)) { ret = -ENOENT; @@ -115,13 +115,13 @@ int __must_check __bch_write_inode(struct bch_fs *c, inode_u.i_gid = i_gid_read(inode); inode_u.i_nlink = i_nlink - nlink_bias(inode->i_mode); inode_u.i_dev = inode->i_rdev; - inode_u.i_atime = timespec_to_bch_time(c, inode->i_atime); - inode_u.i_mtime = timespec_to_bch_time(c, inode->i_mtime); - inode_u.i_ctime = timespec_to_bch_time(c, inode->i_ctime); + inode_u.i_atime = timespec_to_bch2_time(c, inode->i_atime); + inode_u.i_mtime = timespec_to_bch2_time(c, inode->i_mtime); + inode_u.i_ctime = timespec_to_bch2_time(c, inode->i_ctime); - bch_inode_pack(&inode_p, &inode_u); + bch2_inode_pack(&inode_p, &inode_u); - ret = bch_btree_insert_at(c, NULL, NULL, &ei->journal_seq, + ret = bch2_btree_insert_at(c, NULL, NULL, &ei->journal_seq, 
BTREE_INSERT_ATOMIC| BTREE_INSERT_NOFAIL, BTREE_INSERT_ENTRY(&iter, &inode_p.inode.k_i)); @@ -132,42 +132,42 @@ int __must_check __bch_write_inode(struct bch_fs *c, ei->i_flags = inode_u.i_flags; } out: - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret < 0 ? ret : 0; } -int __must_check bch_write_inode(struct bch_fs *c, - struct bch_inode_info *ei) +int __must_check bch2_write_inode(struct bch_fs *c, + struct bch_inode_info *ei) { - return __bch_write_inode(c, ei, NULL, NULL); + return __bch2_write_inode(c, ei, NULL, NULL); } -int bch_inc_nlink(struct bch_fs *c, struct bch_inode_info *ei) +int bch2_inc_nlink(struct bch_fs *c, struct bch_inode_info *ei) { int ret; mutex_lock(&ei->update_lock); inc_nlink(&ei->vfs_inode); - ret = bch_write_inode(c, ei); + ret = bch2_write_inode(c, ei); mutex_unlock(&ei->update_lock); return ret; } -int bch_dec_nlink(struct bch_fs *c, struct bch_inode_info *ei) +int bch2_dec_nlink(struct bch_fs *c, struct bch_inode_info *ei) { int ret = 0; mutex_lock(&ei->update_lock); drop_nlink(&ei->vfs_inode); - ret = bch_write_inode(c, ei); + ret = bch2_write_inode(c, ei); mutex_unlock(&ei->update_lock); return ret; } -static struct inode *bch_vfs_inode_get(struct super_block *sb, u64 inum) +static struct inode *bch2_vfs_inode_get(struct super_block *sb, u64 inum) { struct bch_fs *c = sb->s_fs_info; struct inode *inode; @@ -183,25 +183,25 @@ static struct inode *bch_vfs_inode_get(struct super_block *sb, u64 inum) if (!(inode->i_state & I_NEW)) return inode; - ret = bch_inode_find_by_inum(c, inum, &inode_u); + ret = bch2_inode_find_by_inum(c, inum, &inode_u); if (ret) { iget_failed(inode); return ERR_PTR(ret); } ei = to_bch_ei(inode); - bch_vfs_inode_init(c, ei, &inode_u); + bch2_vfs_inode_init(c, ei, &inode_u); - ei->journal_seq = bch_inode_journal_seq(&c->journal, inum); + ei->journal_seq = bch2_inode_journal_seq(&c->journal, inum); unlock_new_inode(inode); return inode; } -static struct inode *bch_vfs_inode_create(struct bch_fs *c, - struct inode *parent, - umode_t mode, dev_t rdev) +static struct inode *bch2_vfs_inode_create(struct bch_fs *c, + struct inode *parent, + umode_t mode, dev_t rdev) { struct inode *inode; struct posix_acl *default_acl = NULL, *acl = NULL; @@ -224,11 +224,11 @@ static struct inode *bch_vfs_inode_create(struct bch_fs *c, ei = to_bch_ei(inode); - bch_inode_init(c, &inode_u, i_uid_read(inode), + bch2_inode_init(c, &inode_u, i_uid_read(inode), i_gid_read(inode), inode->i_mode, rdev); - bch_inode_pack(&inode_p, &inode_u); + bch2_inode_pack(&inode_p, &inode_u); - ret = bch_inode_create(c, &inode_p.inode.k_i, + ret = bch2_inode_create(c, &inode_p.inode.k_i, BLOCKDEV_INODE_MAX, 0, &c->unused_inode_hint); if (unlikely(ret)) { @@ -241,16 +241,16 @@ static struct inode *bch_vfs_inode_create(struct bch_fs *c, } inode_u.inum = inode_p.inode.k.p.inode; - bch_vfs_inode_init(c, ei, &inode_u); + bch2_vfs_inode_init(c, ei, &inode_u); if (default_acl) { - ret = bch_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); + ret = bch2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); if (unlikely(ret)) goto err; } if (acl) { - ret = bch_set_acl(inode, acl, ACL_TYPE_ACCESS); + ret = bch2_set_acl(inode, acl, ACL_TYPE_ACCESS); if (unlikely(ret)) goto err; } @@ -268,14 +268,14 @@ err: goto out; } -static int bch_vfs_dirent_create(struct bch_fs *c, struct inode *dir, - u8 type, const struct qstr *name, - struct inode *dst) +static int bch2_vfs_dirent_create(struct bch_fs *c, struct inode *dir, + u8 type, const struct qstr *name, + struct inode *dst) { 
struct bch_inode_info *dir_ei = to_bch_ei(dir); int ret; - ret = bch_dirent_create(c, dir->i_ino, &dir_ei->str_hash, + ret = bch2_dirent_create(c, dir->i_ino, &dir_ei->str_hash, type, name, dst->i_ino, &dir_ei->journal_seq, BCH_HASH_SET_MUST_CREATE); @@ -287,8 +287,8 @@ static int bch_vfs_dirent_create(struct bch_fs *c, struct inode *dir, return 0; } -static int __bch_create(struct inode *dir, struct dentry *dentry, - umode_t mode, dev_t rdev) +static int __bch2_create(struct inode *dir, struct dentry *dentry, + umode_t mode, dev_t rdev) { struct bch_inode_info *dir_ei = to_bch_ei(dir); struct bch_fs *c = dir->i_sb->s_fs_info; @@ -296,13 +296,13 @@ static int __bch_create(struct inode *dir, struct dentry *dentry, struct bch_inode_info *ei; int ret; - inode = bch_vfs_inode_create(c, dir, mode, rdev); + inode = bch2_vfs_inode_create(c, dir, mode, rdev); if (unlikely(IS_ERR(inode))) return PTR_ERR(inode); ei = to_bch_ei(inode); - ret = bch_vfs_dirent_create(c, dir, mode_to_type(mode), + ret = bch2_vfs_dirent_create(c, dir, mode_to_type(mode), &dentry->d_name, inode); if (unlikely(ret)) { clear_nlink(inode); @@ -319,32 +319,32 @@ static int __bch_create(struct inode *dir, struct dentry *dentry, /* methods */ -static struct dentry *bch_lookup(struct inode *dir, struct dentry *dentry, - unsigned int flags) +static struct dentry *bch2_lookup(struct inode *dir, struct dentry *dentry, + unsigned int flags) { struct bch_fs *c = dir->i_sb->s_fs_info; struct bch_inode_info *dir_ei = to_bch_ei(dir); struct inode *inode = NULL; u64 inum; - inum = bch_dirent_lookup(c, dir->i_ino, + inum = bch2_dirent_lookup(c, dir->i_ino, &dir_ei->str_hash, &dentry->d_name); if (inum) - inode = bch_vfs_inode_get(dir->i_sb, inum); + inode = bch2_vfs_inode_get(dir->i_sb, inum); return d_splice_alias(inode, dentry); } -static int bch_create(struct inode *dir, struct dentry *dentry, - umode_t mode, bool excl) +static int bch2_create(struct inode *dir, struct dentry *dentry, + umode_t mode, bool excl) { - return __bch_create(dir, dentry, mode|S_IFREG, 0); + return __bch2_create(dir, dentry, mode|S_IFREG, 0); } -static int bch_link(struct dentry *old_dentry, struct inode *dir, - struct dentry *dentry) +static int bch2_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry) { struct bch_fs *c = dir->i_sb->s_fs_info; struct inode *inode = old_dentry->d_inode; @@ -355,16 +355,16 @@ static int bch_link(struct dentry *old_dentry, struct inode *dir, inode->i_ctime = current_fs_time(dir->i_sb); - ret = bch_inc_nlink(c, ei); + ret = bch2_inc_nlink(c, ei); if (ret) return ret; ihold(inode); - ret = bch_vfs_dirent_create(c, dir, mode_to_type(inode->i_mode), + ret = bch2_vfs_dirent_create(c, dir, mode_to_type(inode->i_mode), &dentry->d_name, inode); if (unlikely(ret)) { - bch_dec_nlink(c, ei); + bch2_dec_nlink(c, ei); iput(inode); return ret; } @@ -373,7 +373,7 @@ static int bch_link(struct dentry *old_dentry, struct inode *dir, return 0; } -static int bch_unlink(struct inode *dir, struct dentry *dentry) +static int bch2_unlink(struct inode *dir, struct dentry *dentry) { struct bch_fs *c = dir->i_sb->s_fs_info; struct bch_inode_info *dir_ei = to_bch_ei(dir); @@ -383,7 +383,7 @@ static int bch_unlink(struct inode *dir, struct dentry *dentry) lockdep_assert_held(&inode->i_rwsem); - ret = bch_dirent_delete(c, dir->i_ino, &dir_ei->str_hash, + ret = bch2_dirent_delete(c, dir->i_ino, &dir_ei->str_hash, &dentry->d_name, &dir_ei->journal_seq); if (ret) return ret; @@ -394,24 +394,24 @@ static int bch_unlink(struct inode *dir, 
struct dentry *dentry) inode->i_ctime = dir->i_ctime; if (S_ISDIR(inode->i_mode)) { - bch_dec_nlink(c, dir_ei); + bch2_dec_nlink(c, dir_ei); drop_nlink(inode); } - bch_dec_nlink(c, ei); + bch2_dec_nlink(c, ei); return 0; } -static int bch_symlink(struct inode *dir, struct dentry *dentry, - const char *symname) +static int bch2_symlink(struct inode *dir, struct dentry *dentry, + const char *symname) { struct bch_fs *c = dir->i_sb->s_fs_info; struct inode *inode; struct bch_inode_info *ei, *dir_ei = to_bch_ei(dir); int ret; - inode = bch_vfs_inode_create(c, dir, S_IFLNK|S_IRWXUGO, 0); + inode = bch2_vfs_inode_create(c, dir, S_IFLNK|S_IRWXUGO, 0); if (unlikely(IS_ERR(inode))) return PTR_ERR(inode); @@ -432,7 +432,7 @@ static int bch_symlink(struct inode *dir, struct dentry *dentry, if (dir_ei->journal_seq < ei->journal_seq) dir_ei->journal_seq = ei->journal_seq; - ret = bch_vfs_dirent_create(c, dir, DT_LNK, &dentry->d_name, inode); + ret = bch2_vfs_dirent_create(c, dir, DT_LNK, &dentry->d_name, inode); if (unlikely(ret)) goto err; @@ -444,41 +444,41 @@ err: return ret; } -static int bch_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +static int bch2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct bch_fs *c = dir->i_sb->s_fs_info; int ret; lockdep_assert_held(&dir->i_rwsem); - ret = __bch_create(dir, dentry, mode|S_IFDIR, 0); + ret = __bch2_create(dir, dentry, mode|S_IFDIR, 0); if (unlikely(ret)) return ret; - bch_inc_nlink(c, to_bch_ei(dir)); + bch2_inc_nlink(c, to_bch_ei(dir)); return 0; } -static int bch_rmdir(struct inode *dir, struct dentry *dentry) +static int bch2_rmdir(struct inode *dir, struct dentry *dentry) { struct bch_fs *c = dir->i_sb->s_fs_info; struct inode *inode = dentry->d_inode; - if (bch_empty_dir(c, inode->i_ino)) + if (bch2_empty_dir(c, inode->i_ino)) return -ENOTEMPTY; - return bch_unlink(dir, dentry); + return bch2_unlink(dir, dentry); } -static int bch_mknod(struct inode *dir, struct dentry *dentry, - umode_t mode, dev_t rdev) +static int bch2_mknod(struct inode *dir, struct dentry *dentry, + umode_t mode, dev_t rdev) { - return __bch_create(dir, dentry, mode, rdev); + return __bch2_create(dir, dentry, mode, rdev); } -static int bch_rename(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry) +static int bch2_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) { struct bch_fs *c = old_dir->i_sb->s_fs_info; struct inode *old_inode = old_dentry->d_inode; @@ -500,10 +500,10 @@ static int bch_rename(struct inode *old_dir, struct dentry *old_dentry, if (!S_ISDIR(new_inode->i_mode)) return -ENOTDIR; - if (bch_empty_dir(c, new_inode->i_ino)) + if (bch2_empty_dir(c, new_inode->i_ino)) return -ENOTEMPTY; - ret = bch_dirent_rename(c, + ret = bch2_dirent_rename(c, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name, &ei->journal_seq, BCH_RENAME_OVERWRITE); @@ -511,11 +511,11 @@ static int bch_rename(struct inode *old_dir, struct dentry *old_dentry, return ret; clear_nlink(new_inode); - bch_dec_nlink(c, to_bch_ei(old_dir)); + bch2_dec_nlink(c, to_bch_ei(old_dir)); } else if (new_inode) { lockdep_assert_held(&new_inode->i_rwsem); - ret = bch_dirent_rename(c, + ret = bch2_dirent_rename(c, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name, &ei->journal_seq, BCH_RENAME_OVERWRITE); @@ -523,19 +523,19 @@ static int bch_rename(struct inode *old_dir, struct dentry *old_dentry, return ret; new_inode->i_ctime = now; - 
bch_dec_nlink(c, to_bch_ei(new_inode)); + bch2_dec_nlink(c, to_bch_ei(new_inode)); } else if (S_ISDIR(old_inode->i_mode)) { - ret = bch_dirent_rename(c, + ret = bch2_dirent_rename(c, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name, &ei->journal_seq, BCH_RENAME); if (unlikely(ret)) return ret; - bch_inc_nlink(c, to_bch_ei(new_dir)); - bch_dec_nlink(c, to_bch_ei(old_dir)); + bch2_inc_nlink(c, to_bch_ei(new_dir)); + bch2_dec_nlink(c, to_bch_ei(old_dir)); } else { - ret = bch_dirent_rename(c, + ret = bch2_dirent_rename(c, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name, &ei->journal_seq, BCH_RENAME); @@ -554,8 +554,8 @@ static int bch_rename(struct inode *old_dir, struct dentry *old_dentry, return 0; } -static int bch_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry) +static int bch2_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) { struct bch_fs *c = old_dir->i_sb->s_fs_info; struct inode *old_inode = old_dentry->d_inode; @@ -564,7 +564,7 @@ static int bch_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, struct timespec now = current_fs_time(old_dir->i_sb); int ret; - ret = bch_dirent_rename(c, + ret = bch2_dirent_rename(c, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name, &ei->journal_seq, BCH_RENAME_EXCHANGE); @@ -574,11 +574,11 @@ static int bch_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, if (S_ISDIR(old_inode->i_mode) != S_ISDIR(new_inode->i_mode)) { if (S_ISDIR(old_inode->i_mode)) { - bch_inc_nlink(c, to_bch_ei(new_dir)); - bch_dec_nlink(c, to_bch_ei(old_dir)); + bch2_inc_nlink(c, to_bch_ei(new_dir)); + bch2_dec_nlink(c, to_bch_ei(old_dir)); } else { - bch_dec_nlink(c, to_bch_ei(new_dir)); - bch_inc_nlink(c, to_bch_ei(old_dir)); + bch2_dec_nlink(c, to_bch_ei(new_dir)); + bch2_inc_nlink(c, to_bch_ei(old_dir)); } } @@ -595,21 +595,21 @@ static int bch_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, return 0; } -static int bch_rename2(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry, - unsigned flags) +static int bch2_rename2(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned flags) { if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE)) return -EINVAL; if (flags & RENAME_EXCHANGE) - return bch_rename_exchange(old_dir, old_dentry, + return bch2_rename_exchange(old_dir, old_dentry, new_dir, new_dentry); - return bch_rename(old_dir, old_dentry, new_dir, new_dentry); + return bch2_rename(old_dir, old_dentry, new_dir, new_dentry); } -static int bch_setattr(struct dentry *dentry, struct iattr *iattr) +static int bch2_setattr(struct dentry *dentry, struct iattr *iattr) { struct inode *inode = dentry->d_inode; struct bch_inode_info *ei = to_bch_ei(inode); @@ -626,11 +626,11 @@ static int bch_setattr(struct dentry *dentry, struct iattr *iattr) return ret; if (iattr->ia_valid & ATTR_SIZE) { - ret = bch_truncate(inode, iattr); + ret = bch2_truncate(inode, iattr); } else { mutex_lock(&ei->update_lock); setattr_copy(inode, iattr); - ret = bch_write_inode(c, ei); + ret = bch2_write_inode(c, ei); mutex_unlock(&ei->update_lock); } @@ -643,13 +643,13 @@ static int bch_setattr(struct dentry *dentry, struct iattr *iattr) return ret; } -static int bch_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) +static int bch2_tmpfile(struct inode *dir, struct dentry *dentry, 
umode_t mode) { struct bch_fs *c = dir->i_sb->s_fs_info; struct inode *inode; /* XXX: i_nlink should be 0? */ - inode = bch_vfs_inode_create(c, dir, mode, 0); + inode = bch2_vfs_inode_create(c, dir, mode, 0); if (unlikely(IS_ERR(inode))) return PTR_ERR(inode); @@ -657,8 +657,8 @@ static int bch_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) return 0; } -static int bch_fill_extent(struct fiemap_extent_info *info, - const struct bkey_i *k, unsigned flags) +static int bch2_fill_extent(struct fiemap_extent_info *info, + const struct bkey_i *k, unsigned flags) { if (bkey_extent_is_data(&k->k)) { struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k); @@ -700,8 +700,8 @@ static int bch_fill_extent(struct fiemap_extent_info *info, } } -static int bch_fiemap(struct inode *inode, struct fiemap_extent_info *info, - u64 start, u64 len) +static int bch2_fiemap(struct inode *inode, struct fiemap_extent_info *info, + u64 start, u64 len) { struct bch_fs *c = inode->i_sb->s_fs_info; struct btree_iter iter; @@ -722,7 +722,7 @@ static int bch_fiemap(struct inode *inode, struct fiemap_extent_info *info, break; if (have_extent) { - ret = bch_fill_extent(info, &tmp.k, 0); + ret = bch2_fill_extent(info, &tmp.k, 0); if (ret) goto out; } @@ -732,19 +732,19 @@ static int bch_fiemap(struct inode *inode, struct fiemap_extent_info *info, } if (have_extent) - ret = bch_fill_extent(info, &tmp.k, FIEMAP_EXTENT_LAST); + ret = bch2_fill_extent(info, &tmp.k, FIEMAP_EXTENT_LAST); out: - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret < 0 ? ret : 0; } static const struct vm_operations_struct bch_vm_ops = { .fault = filemap_fault, .map_pages = filemap_map_pages, - .page_mkwrite = bch_page_mkwrite, + .page_mkwrite = bch2_page_mkwrite, }; -static int bch_mmap(struct file *file, struct vm_area_struct *vma) +static int bch2_mmap(struct file *file, struct vm_area_struct *vma) { file_accessed(file); @@ -769,8 +769,8 @@ static const unsigned bch_inode_flags_to_user_flags_map[] = { [__BCH_INODE_NOATIME] = FS_NOATIME_FL, }; -/* Set VFS inode flags from bcache inode: */ -static void bch_inode_flags_to_vfs(struct inode *inode) +/* Set VFS inode flags from bcachefs inode: */ +static void bch2_inode_flags_to_vfs(struct inode *inode) { unsigned i, flags = to_bch_ei(inode)->i_flags; @@ -781,8 +781,8 @@ static void bch_inode_flags_to_vfs(struct inode *inode) inode->i_flags &= ~bch_inode_flags_to_vfs_flags_map[i]; } -/* Get FS_IOC_GETFLAGS flags from bcache inode: */ -static unsigned bch_inode_flags_to_user_flags(unsigned flags) +/* Get FS_IOC_GETFLAGS flags from bcachefs inode: */ +static unsigned bch2_inode_flags_to_user_flags(unsigned flags) { unsigned i, ret = 0; @@ -793,16 +793,16 @@ static unsigned bch_inode_flags_to_user_flags(unsigned flags) return ret; } -static int bch_inode_user_flags_set(struct bch_inode_info *ei, - struct bch_inode_unpacked *bi, - void *p) +static int bch2_inode_user_flags_set(struct bch_inode_info *ei, + struct bch_inode_unpacked *bi, + void *p) { /* * We're relying on btree locking here for exclusion with other ioctl * calls - use the flags in the btree (@bi), not ei->i_flags: */ unsigned bch_flags = bi->i_flags; - unsigned oldflags = bch_inode_flags_to_user_flags(bch_flags); + unsigned oldflags = bch2_inode_flags_to_user_flags(bch_flags); unsigned newflags = *((unsigned *) p); unsigned i; @@ -831,8 +831,8 @@ static int bch_inode_user_flags_set(struct bch_inode_info *ei, #define FS_IOC_GOINGDOWN _IOR ('X', 125, __u32) -static long bch_fs_file_ioctl(struct file *filp, 
unsigned int cmd, - unsigned long arg) +static long bch2_fs_file_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) { struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; @@ -843,7 +843,7 @@ static long bch_fs_file_ioctl(struct file *filp, unsigned int cmd, switch (cmd) { case FS_IOC_GETFLAGS: - return put_user(bch_inode_flags_to_user_flags(ei->i_flags), + return put_user(bch2_inode_flags_to_user_flags(ei->i_flags), (int __user *) arg); case FS_IOC_SETFLAGS: { @@ -871,11 +871,11 @@ static long bch_fs_file_ioctl(struct file *filp, unsigned int cmd, inode_lock(inode); mutex_lock(&ei->update_lock); - ret = __bch_write_inode(c, ei, bch_inode_user_flags_set, &flags); + ret = __bch2_write_inode(c, ei, bch2_inode_user_flags_set, &flags); mutex_unlock(&ei->update_lock); if (!ret) - bch_inode_flags_to_vfs(inode); + bch2_inode_flags_to_vfs(inode); inode_unlock(inode); setflags_out: @@ -894,17 +894,17 @@ setflags_out: down_write(&sb->s_umount); sb->s_flags |= MS_RDONLY; - bch_fs_emergency_read_only(c); + bch2_fs_emergency_read_only(c); up_write(&sb->s_umount); return 0; default: - return bch_fs_ioctl(c, cmd, (void __user *) arg); + return bch2_fs_ioctl(c, cmd, (void __user *) arg); } } #ifdef CONFIG_COMPAT -static long bch_compat_fs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static long bch2_compat_fs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { /* These are just misnamed, they actually get/put from/to user an int */ switch (cmd) { @@ -917,114 +917,114 @@ static long bch_compat_fs_ioctl(struct file *file, unsigned int cmd, unsigned lo default: return -ENOIOCTLCMD; } - return bch_fs_file_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); + return bch2_fs_file_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); } #endif /* Directories: */ -static loff_t bch_dir_llseek(struct file *file, loff_t offset, int whence) +static loff_t bch2_dir_llseek(struct file *file, loff_t offset, int whence) { return generic_file_llseek_size(file, offset, whence, S64_MAX, S64_MAX); } -static int bch_vfs_readdir(struct file *file, struct dir_context *ctx) +static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct bch_fs *c = inode->i_sb->s_fs_info; - return bch_readdir(c, file, ctx); + return bch2_readdir(c, file, ctx); } static const struct file_operations bch_file_operations = { - .llseek = bch_llseek, + .llseek = bch2_llseek, .read_iter = generic_file_read_iter, - .write_iter = bch_write_iter, - .mmap = bch_mmap, + .write_iter = bch2_write_iter, + .mmap = bch2_mmap, .open = generic_file_open, - .fsync = bch_fsync, + .fsync = bch2_fsync, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, - .fallocate = bch_fallocate_dispatch, - .unlocked_ioctl = bch_fs_file_ioctl, + .fallocate = bch2_fallocate_dispatch, + .unlocked_ioctl = bch2_fs_file_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = bch_compat_fs_ioctl, + .compat_ioctl = bch2_compat_fs_ioctl, #endif }; static const struct inode_operations bch_file_inode_operations = { - .setattr = bch_setattr, - .fiemap = bch_fiemap, - .listxattr = bch_xattr_list, - .get_acl = bch_get_acl, - .set_acl = bch_set_acl, + .setattr = bch2_setattr, + .fiemap = bch2_fiemap, + .listxattr = bch2_xattr_list, + .get_acl = bch2_get_acl, + .set_acl = bch2_set_acl, }; static const struct inode_operations bch_dir_inode_operations = { - .lookup = bch_lookup, - .create = bch_create, - .link = bch_link, - .unlink = bch_unlink, - .symlink = 
bch_symlink, - .mkdir = bch_mkdir, - .rmdir = bch_rmdir, - .mknod = bch_mknod, - .rename = bch_rename2, - .setattr = bch_setattr, - .tmpfile = bch_tmpfile, - .listxattr = bch_xattr_list, - .get_acl = bch_get_acl, - .set_acl = bch_set_acl, + .lookup = bch2_lookup, + .create = bch2_create, + .link = bch2_link, + .unlink = bch2_unlink, + .symlink = bch2_symlink, + .mkdir = bch2_mkdir, + .rmdir = bch2_rmdir, + .mknod = bch2_mknod, + .rename = bch2_rename2, + .setattr = bch2_setattr, + .tmpfile = bch2_tmpfile, + .listxattr = bch2_xattr_list, + .get_acl = bch2_get_acl, + .set_acl = bch2_set_acl, }; static const struct file_operations bch_dir_file_operations = { - .llseek = bch_dir_llseek, + .llseek = bch2_dir_llseek, .read = generic_read_dir, - .iterate = bch_vfs_readdir, - .fsync = bch_fsync, - .unlocked_ioctl = bch_fs_file_ioctl, + .iterate = bch2_vfs_readdir, + .fsync = bch2_fsync, + .unlocked_ioctl = bch2_fs_file_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = bch_compat_fs_ioctl, + .compat_ioctl = bch2_compat_fs_ioctl, #endif }; static const struct inode_operations bch_symlink_inode_operations = { .readlink = generic_readlink, .get_link = page_get_link, - .setattr = bch_setattr, - .listxattr = bch_xattr_list, - .get_acl = bch_get_acl, - .set_acl = bch_set_acl, + .setattr = bch2_setattr, + .listxattr = bch2_xattr_list, + .get_acl = bch2_get_acl, + .set_acl = bch2_set_acl, }; static const struct inode_operations bch_special_inode_operations = { - .setattr = bch_setattr, - .listxattr = bch_xattr_list, - .get_acl = bch_get_acl, - .set_acl = bch_set_acl, + .setattr = bch2_setattr, + .listxattr = bch2_xattr_list, + .get_acl = bch2_get_acl, + .set_acl = bch2_set_acl, }; static const struct address_space_operations bch_address_space_operations = { - .writepage = bch_writepage, - .readpage = bch_readpage, - .writepages = bch_writepages, - .readpages = bch_readpages, - .set_page_dirty = bch_set_page_dirty, - .write_begin = bch_write_begin, - .write_end = bch_write_end, - .invalidatepage = bch_invalidatepage, - .releasepage = bch_releasepage, - .direct_IO = bch_direct_IO, + .writepage = bch2_writepage, + .readpage = bch2_readpage, + .writepages = bch2_writepages, + .readpages = bch2_readpages, + .set_page_dirty = bch2_set_page_dirty, + .write_begin = bch2_write_begin, + .write_end = bch2_write_end, + .invalidatepage = bch2_invalidatepage, + .releasepage = bch2_releasepage, + .direct_IO = bch2_direct_IO, #ifdef CONFIG_MIGRATION - .migratepage = bch_migrate_page, + .migratepage = bch2_migrate_page, #endif .error_remove_page = generic_error_remove_page, }; -static void bch_vfs_inode_init(struct bch_fs *c, - struct bch_inode_info *ei, - struct bch_inode_unpacked *bi) +static void bch2_vfs_inode_init(struct bch_fs *c, + struct bch_inode_info *ei, + struct bch_inode_unpacked *bi) { struct inode *inode = &ei->vfs_inode; @@ -1046,12 +1046,12 @@ static void bch_vfs_inode_init(struct bch_fs *c, inode->i_rdev = bi->i_dev; inode->i_generation = bi->i_generation; inode->i_size = bi->i_size; - inode->i_atime = bch_time_to_timespec(c, bi->i_atime); - inode->i_mtime = bch_time_to_timespec(c, bi->i_mtime); - inode->i_ctime = bch_time_to_timespec(c, bi->i_ctime); - bch_inode_flags_to_vfs(inode); + inode->i_atime = bch2_time_to_timespec(c, bi->i_atime); + inode->i_mtime = bch2_time_to_timespec(c, bi->i_mtime); + inode->i_ctime = bch2_time_to_timespec(c, bi->i_ctime); + bch2_inode_flags_to_vfs(inode); - ei->str_hash = bch_hash_info_init(bi); + ei->str_hash = bch2_hash_info_init(c, bi); inode->i_mapping->a_ops = 
&bch_address_space_operations; @@ -1075,11 +1075,11 @@ static void bch_vfs_inode_init(struct bch_fs *c, } } -static struct inode *bch_alloc_inode(struct super_block *sb) +static struct inode *bch2_alloc_inode(struct super_block *sb) { struct bch_inode_info *ei; - ei = kmem_cache_alloc(bch_inode_cache, GFP_NOFS); + ei = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS); if (!ei) return NULL; @@ -1094,45 +1094,45 @@ static struct inode *bch_alloc_inode(struct super_block *sb) return &ei->vfs_inode; } -static void bch_i_callback(struct rcu_head *head) +static void bch2_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); - kmem_cache_free(bch_inode_cache, to_bch_ei(inode)); + kmem_cache_free(bch2_inode_cache, to_bch_ei(inode)); } -static void bch_destroy_inode(struct inode *inode) +static void bch2_destroy_inode(struct inode *inode) { - call_rcu(&inode->i_rcu, bch_i_callback); + call_rcu(&inode->i_rcu, bch2_i_callback); } -static int bch_vfs_write_inode(struct inode *inode, - struct writeback_control *wbc) +static int bch2_vfs_write_inode(struct inode *inode, + struct writeback_control *wbc) { struct bch_fs *c = inode->i_sb->s_fs_info; struct bch_inode_info *ei = to_bch_ei(inode); int ret; mutex_lock(&ei->update_lock); - ret = bch_write_inode(c, ei); + ret = bch2_write_inode(c, ei); mutex_unlock(&ei->update_lock); if (c->opts.journal_flush_disabled) return ret; if (!ret && wbc->sync_mode == WB_SYNC_ALL) - ret = bch_journal_flush_seq(&c->journal, ei->journal_seq); + ret = bch2_journal_flush_seq(&c->journal, ei->journal_seq); return ret; } -static void bch_evict_inode(struct inode *inode) +static void bch2_evict_inode(struct inode *inode) { struct bch_fs *c = inode->i_sb->s_fs_info; truncate_inode_pages_final(&inode->i_data); - if (!bch_journal_error(&c->journal) && !is_bad_inode(inode)) { + if (!bch2_journal_error(&c->journal) && !is_bad_inode(inode)) { struct bch_inode_info *ei = to_bch_ei(inode); /* XXX - we want to check this stuff iff there weren't IO errors: */ @@ -1143,12 +1143,12 @@ static void bch_evict_inode(struct inode *inode) clear_inode(inode); if (!inode->i_nlink && !is_bad_inode(inode)) { - bch_inode_rm(c, inode->i_ino); + bch2_inode_rm(c, inode->i_ino); atomic_long_dec(&c->nr_inodes); } } -static int bch_statfs(struct dentry *dentry, struct kstatfs *buf) +static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct bch_fs *c = sb->s_fs_info; @@ -1157,7 +1157,7 @@ static int bch_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_type = BCACHE_STATFS_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = c->capacity >> PAGE_SECTOR_SHIFT; - buf->f_bfree = (c->capacity - bch_fs_sectors_used(c)) >> PAGE_SECTOR_SHIFT; + buf->f_bfree = (c->capacity - bch2_fs_sectors_used(c)) >> PAGE_SECTOR_SHIFT; buf->f_bavail = buf->f_bfree; buf->f_files = atomic_long_read(&c->nr_inodes); buf->f_ffree = U64_MAX; @@ -1171,20 +1171,20 @@ static int bch_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } -static int bch_sync_fs(struct super_block *sb, int wait) +static int bch2_sync_fs(struct super_block *sb, int wait) { struct bch_fs *c = sb->s_fs_info; if (!wait) { - bch_journal_flush_async(&c->journal, NULL); + bch2_journal_flush_async(&c->journal, NULL); return 0; } - return bch_journal_flush(&c->journal); + return bch2_journal_flush(&c->journal); } -static struct bch_fs *bch_open_as_blockdevs(const char *_dev_name, - struct bch_opts opts) +static struct bch_fs 
*bch2_open_as_blockdevs(const char *_dev_name, + struct bch_opts opts) { size_t nr_devs = 0, i = 0; char *dev_name, *s, **devs; @@ -1207,7 +1207,7 @@ static struct bch_fs *bch_open_as_blockdevs(const char *_dev_name, (s = strchr(s, ':')) && (*s++ = '\0')) devs[i++] = s; - err = bch_fs_open(devs, nr_devs, opts, &c); + err = bch2_fs_open(devs, nr_devs, opts, &c); if (err) { /* * Already open? @@ -1222,7 +1222,7 @@ static struct bch_fs *bch_open_as_blockdevs(const char *_dev_name, if (IS_ERR(bdev)) goto err; - c2 = bch_bdev_to_fs(bdev); + c2 = bch2_bdev_to_fs(bdev); bdput(bdev); if (!c) @@ -1240,7 +1240,7 @@ static struct bch_fs *bch_open_as_blockdevs(const char *_dev_name, mutex_lock(&c->state_lock); - if (!bch_fs_running(c)) { + if (!bch2_fs_running(c)) { mutex_unlock(&c->state_lock); closure_put(&c->cl); err = "incomplete filesystem"; @@ -1261,15 +1261,15 @@ err: return c; } -static int bch_remount(struct super_block *sb, int *flags, char *data) +static int bch2_remount(struct super_block *sb, int *flags, char *data) { struct bch_fs *c = sb->s_fs_info; - struct bch_opts opts = bch_opts_empty(); + struct bch_opts opts = bch2_opts_empty(); int ret; opts.read_only = (*flags & MS_RDONLY) != 0; - ret = bch_parse_mount_opts(&opts, data); + ret = bch2_parse_mount_opts(&opts, data); if (ret) return ret; @@ -1278,11 +1278,11 @@ static int bch_remount(struct super_block *sb, int *flags, char *data) const char *err = NULL; if (opts.read_only) { - bch_fs_read_only(c); + bch2_fs_read_only(c); sb->s_flags |= MS_RDONLY; } else { - err = bch_fs_read_write(c); + err = bch2_fs_read_write(c); if (err) { bch_err(c, "error going rw: %s", err); return -EINVAL; @@ -1301,54 +1301,54 @@ static int bch_remount(struct super_block *sb, int *flags, char *data) } static const struct super_operations bch_super_operations = { - .alloc_inode = bch_alloc_inode, - .destroy_inode = bch_destroy_inode, - .write_inode = bch_vfs_write_inode, - .evict_inode = bch_evict_inode, - .sync_fs = bch_sync_fs, - .statfs = bch_statfs, + .alloc_inode = bch2_alloc_inode, + .destroy_inode = bch2_destroy_inode, + .write_inode = bch2_vfs_write_inode, + .evict_inode = bch2_evict_inode, + .sync_fs = bch2_sync_fs, + .statfs = bch2_statfs, .show_options = generic_show_options, - .remount_fs = bch_remount, + .remount_fs = bch2_remount, #if 0 - .put_super = bch_put_super, - .freeze_fs = bch_freeze, - .unfreeze_fs = bch_unfreeze, + .put_super = bch2_put_super, + .freeze_fs = bch2_freeze, + .unfreeze_fs = bch2_unfreeze, #endif }; -static int bch_test_super(struct super_block *s, void *data) +static int bch2_test_super(struct super_block *s, void *data) { return s->s_fs_info == data; } -static int bch_set_super(struct super_block *s, void *data) +static int bch2_set_super(struct super_block *s, void *data) { s->s_fs_info = data; return 0; } -static struct dentry *bch_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) +static struct dentry *bch2_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) { struct bch_fs *c; struct bch_dev *ca; struct super_block *sb; struct inode *inode; - struct bch_opts opts = bch_opts_empty(); + struct bch_opts opts = bch2_opts_empty(); unsigned i; int ret; opts.read_only = (flags & MS_RDONLY) != 0; - ret = bch_parse_mount_opts(&opts, data); + ret = bch2_parse_mount_opts(&opts, data); if (ret) return ERR_PTR(ret); - c = bch_open_as_blockdevs(dev_name, opts); + c = bch2_open_as_blockdevs(dev_name, opts); if (!c) return ERR_PTR(-ENOENT); - sb = sget(fs_type, 
bch_test_super, bch_set_super, flags|MS_NOSEC, c); + sb = sget(fs_type, bch2_test_super, bch2_set_super, flags|MS_NOSEC, c); if (IS_ERR(sb)) { closure_put(&c->cl); return ERR_CAST(sb); @@ -1371,7 +1371,7 @@ static struct dentry *bch_mount(struct file_system_type *fs_type, sb->s_blocksize_bits = PAGE_SHIFT; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_op = &bch_super_operations; - sb->s_xattr = bch_xattr_handlers; + sb->s_xattr = bch2_xattr_handlers; sb->s_magic = BCACHE_STATFS_MAGIC; sb->s_time_gran = c->sb.time_precision; c->vfs_sb = sb; @@ -1393,7 +1393,7 @@ static struct dentry *bch_mount(struct file_system_type *fs_type, else sb->s_flags |= opts.posix_acl ? MS_POSIXACL : 0; - inode = bch_vfs_inode_get(sb, BCACHE_ROOT_INO); + inode = bch2_vfs_inode_get(sb, BCACHE_ROOT_INO); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto err_put_super; @@ -1414,60 +1414,60 @@ err_put_super: return ERR_PTR(ret); } -static void bch_kill_sb(struct super_block *sb) +static void bch2_kill_sb(struct super_block *sb) { struct bch_fs *c = sb->s_fs_info; generic_shutdown_super(sb); if (test_bit(BCH_FS_BDEV_MOUNTED, &c->flags)) - bch_fs_stop(c); + bch2_fs_stop(c); else closure_put(&c->cl); } static struct file_system_type bcache_fs_type = { .owner = THIS_MODULE, - .name = "bcache", - .mount = bch_mount, - .kill_sb = bch_kill_sb, + .name = "bcachefs", + .mount = bch2_mount, + .kill_sb = bch2_kill_sb, .fs_flags = FS_REQUIRES_DEV, }; -MODULE_ALIAS_FS("bcache"); +MODULE_ALIAS_FS("bcachefs"); -void bch_vfs_exit(void) +void bch2_vfs_exit(void) { unregister_filesystem(&bcache_fs_type); - if (bch_dio_write_bioset) - bioset_free(bch_dio_write_bioset); - if (bch_dio_read_bioset) - bioset_free(bch_dio_read_bioset); - if (bch_writepage_bioset) - bioset_free(bch_writepage_bioset); - if (bch_inode_cache) - kmem_cache_destroy(bch_inode_cache); + if (bch2_dio_write_bioset) + bioset_free(bch2_dio_write_bioset); + if (bch2_dio_read_bioset) + bioset_free(bch2_dio_read_bioset); + if (bch2_writepage_bioset) + bioset_free(bch2_writepage_bioset); + if (bch2_inode_cache) + kmem_cache_destroy(bch2_inode_cache); } -int __init bch_vfs_init(void) +int __init bch2_vfs_init(void) { int ret = -ENOMEM; - bch_inode_cache = KMEM_CACHE(bch_inode_info, 0); - if (!bch_inode_cache) + bch2_inode_cache = KMEM_CACHE(bch_inode_info, 0); + if (!bch2_inode_cache) goto err; - bch_writepage_bioset = + bch2_writepage_bioset = bioset_create(4, offsetof(struct bch_writepage_io, bio.bio)); - if (!bch_writepage_bioset) + if (!bch2_writepage_bioset) goto err; - bch_dio_read_bioset = bioset_create(4, offsetof(struct dio_read, rbio.bio)); - if (!bch_dio_read_bioset) + bch2_dio_read_bioset = bioset_create(4, offsetof(struct dio_read, rbio.bio)); + if (!bch2_dio_read_bioset) goto err; - bch_dio_write_bioset = bioset_create(4, offsetof(struct dio_write, bio.bio)); - if (!bch_dio_write_bioset) + bch2_dio_write_bioset = bioset_create(4, offsetof(struct dio_write, bio.bio)); + if (!bch2_dio_write_bioset) goto err; ret = register_filesystem(&bcache_fs_type); @@ -1476,6 +1476,6 @@ int __init bch_vfs_init(void) return 0; err: - bch_vfs_exit(); + bch2_vfs_exit(); return ret; } diff --git a/libbcache/fs.h b/libbcachefs/fs.h index 1c0a2b15..f7cad296 100644 --- a/libbcache/fs.h +++ b/libbcachefs/fs.h @@ -47,18 +47,18 @@ struct bch_inode_unpacked; typedef int (*inode_set_fn)(struct bch_inode_info *, struct bch_inode_unpacked *, void *); -int __must_check __bch_write_inode(struct bch_fs *, struct bch_inode_info *, - inode_set_fn, void *); -int __must_check bch_write_inode(struct 
bch_fs *, - struct bch_inode_info *); +int __must_check __bch2_write_inode(struct bch_fs *, struct bch_inode_info *, + inode_set_fn, void *); +int __must_check bch2_write_inode(struct bch_fs *, + struct bch_inode_info *); -void bch_vfs_exit(void); -int bch_vfs_init(void); +void bch2_vfs_exit(void); +int bch2_vfs_init(void); #else -static inline void bch_vfs_exit(void) {} -static inline int bch_vfs_init(void) { return 0; } +static inline void bch2_vfs_exit(void) {} +static inline int bch2_vfs_init(void) { return 0; } #endif diff --git a/libbcache/inode.c b/libbcachefs/inode.c index 2e15497f..7a8467c4 100644 --- a/libbcache/inode.c +++ b/libbcachefs/inode.c @@ -1,5 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" #include "bkey_methods.h" #include "btree_update.h" #include "extents.h" @@ -103,8 +103,8 @@ static int inode_decode_field(const u8 *in, const u8 *end, return bytes; } -void bch_inode_pack(struct bkey_inode_buf *packed, - const struct bch_inode_unpacked *inode) +void bch2_inode_pack(struct bkey_inode_buf *packed, + const struct bch_inode_unpacked *inode) { u8 *out = packed->inode.v.fields; u8 *end = (void *) &packed[1]; @@ -142,10 +142,10 @@ void bch_inode_pack(struct bkey_inode_buf *packed, SET_INODE_NR_FIELDS(&packed->inode.v, nr_fields); - if (IS_ENABLED(CONFIG_BCACHE_DEBUG)) { + if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) { struct bch_inode_unpacked unpacked; - int ret = bch_inode_unpack(inode_i_to_s_c(&packed->inode), + int ret = bch2_inode_unpack(inode_i_to_s_c(&packed->inode), &unpacked); BUG_ON(ret); BUG_ON(unpacked.inum != inode->inum); @@ -158,8 +158,8 @@ void bch_inode_pack(struct bkey_inode_buf *packed, } } -int bch_inode_unpack(struct bkey_s_c_inode inode, - struct bch_inode_unpacked *unpacked) +int bch2_inode_unpack(struct bkey_s_c_inode inode, + struct bch_inode_unpacked *unpacked) { const u8 *in = inode.v->fields; const u8 *end = (void *) inode.v + bkey_val_bytes(inode.k); @@ -198,8 +198,8 @@ int bch_inode_unpack(struct bkey_s_c_inode inode, return 0; } -static const char *bch_inode_invalid(const struct bch_fs *c, - struct bkey_s_c k) +static const char *bch2_inode_invalid(const struct bch_fs *c, + struct bkey_s_c k) { if (k.k->p.offset) return "nonzero offset"; @@ -218,7 +218,7 @@ static const char *bch_inode_invalid(const struct bch_fs *c, if (INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR) return "invalid str hash type"; - if (bch_inode_unpack(inode, &unpacked)) + if (bch2_inode_unpack(inode, &unpacked)) return "invalid variable length fields"; return NULL; @@ -236,8 +236,8 @@ static const char *bch_inode_invalid(const struct bch_fs *c, } } -static void bch_inode_to_text(struct bch_fs *c, char *buf, - size_t size, struct bkey_s_c k) +static void bch2_inode_to_text(struct bch_fs *c, char *buf, + size_t size, struct bkey_s_c k) { struct bkey_s_c_inode inode; struct bch_inode_unpacked unpacked; @@ -245,7 +245,7 @@ static void bch_inode_to_text(struct bch_fs *c, char *buf, switch (k.k->type) { case BCH_INODE_FS: inode = bkey_s_c_to_inode(k); - if (bch_inode_unpack(inode, &unpacked)) { + if (bch2_inode_unpack(inode, &unpacked)) { scnprintf(buf, size, "(unpack error)"); break; } @@ -255,15 +255,15 @@ static void bch_inode_to_text(struct bch_fs *c, char *buf, } } -const struct bkey_ops bch_bkey_inode_ops = { - .key_invalid = bch_inode_invalid, - .val_to_text = bch_inode_to_text, +const struct bkey_ops bch2_bkey_inode_ops = { + .key_invalid = bch2_inode_invalid, + .val_to_text = bch2_inode_to_text, }; -void bch_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u, - 
uid_t uid, gid_t gid, umode_t mode, dev_t rdev) +void bch2_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u, + uid_t uid, gid_t gid, umode_t mode, dev_t rdev) { - s64 now = timespec_to_bch_time(c, CURRENT_TIME); + s64 now = timespec_to_bch2_time(c, CURRENT_TIME); memset(inode_u, 0, sizeof(*inode_u)); @@ -281,8 +281,8 @@ void bch_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u, inode_u->i_otime = now; } -int bch_inode_create(struct bch_fs *c, struct bkey_i *inode, - u64 min, u64 max, u64 *hint) +int bch2_inode_create(struct bch_fs *c, struct bkey_i *inode, + u64 min, u64 max, u64 *hint) { struct btree_iter iter; bool searched_from_start = false; @@ -300,14 +300,14 @@ int bch_inode_create(struct bch_fs *c, struct bkey_i *inode, if (*hint == min) searched_from_start = true; again: - bch_btree_iter_init_intent(&iter, c, BTREE_ID_INODES, POS(*hint, 0)); + bch2_btree_iter_init_intent(&iter, c, BTREE_ID_INODES, POS(*hint, 0)); while (1) { - struct bkey_s_c k = bch_btree_iter_peek_with_holes(&iter); + struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter); ret = btree_iter_err(k); if (ret) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret; } @@ -317,14 +317,14 @@ again: pr_debug("inserting inode %llu (size %u)", inode->k.p.inode, inode->k.u64s); - ret = bch_btree_insert_at(c, NULL, NULL, NULL, + ret = bch2_btree_insert_at(c, NULL, NULL, NULL, BTREE_INSERT_ATOMIC, BTREE_INSERT_ENTRY(&iter, inode)); if (ret == -EINTR) continue; - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); if (!ret) *hint = k.k->p.inode + 1; @@ -333,10 +333,10 @@ again: if (iter.pos.inode == max) break; /* slot used */ - bch_btree_iter_advance_pos(&iter); + bch2_btree_iter_advance_pos(&iter); } } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); if (!searched_from_start) { /* Retry from start */ @@ -348,23 +348,23 @@ again: return -ENOSPC; } -int bch_inode_truncate(struct bch_fs *c, u64 inode_nr, u64 new_size, - struct extent_insert_hook *hook, u64 *journal_seq) +int bch2_inode_truncate(struct bch_fs *c, u64 inode_nr, u64 new_size, + struct extent_insert_hook *hook, u64 *journal_seq) { - return bch_discard(c, POS(inode_nr, new_size), POS(inode_nr + 1, 0), + return bch2_discard(c, POS(inode_nr, new_size), POS(inode_nr + 1, 0), ZERO_VERSION, NULL, hook, journal_seq); } -int bch_inode_rm(struct bch_fs *c, u64 inode_nr) +int bch2_inode_rm(struct bch_fs *c, u64 inode_nr) { struct bkey_i delete; int ret; - ret = bch_inode_truncate(c, inode_nr, 0, NULL, NULL); + ret = bch2_inode_truncate(c, inode_nr, 0, NULL, NULL); if (ret < 0) return ret; - ret = bch_btree_delete_range(c, BTREE_ID_XATTRS, + ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS, POS(inode_nr, 0), POS(inode_nr + 1, 0), ZERO_VERSION, NULL, NULL, NULL); @@ -379,7 +379,7 @@ int bch_inode_rm(struct bch_fs *c, u64 inode_nr) * XXX: the dirent could ideally would delete whitouts when they're no * longer needed */ - ret = bch_btree_delete_range(c, BTREE_ID_DIRENTS, + ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS, POS(inode_nr, 0), POS(inode_nr + 1, 0), ZERO_VERSION, NULL, NULL, NULL); @@ -389,12 +389,12 @@ int bch_inode_rm(struct bch_fs *c, u64 inode_nr) bkey_init(&delete.k); delete.k.p.inode = inode_nr; - return bch_btree_insert(c, BTREE_ID_INODES, &delete, NULL, + return bch2_btree_insert(c, BTREE_ID_INODES, &delete, NULL, NULL, NULL, BTREE_INSERT_NOFAIL); } -int bch_inode_find_by_inum(struct bch_fs *c, u64 inode_nr, - struct bch_inode_unpacked *inode) +int 
bch2_inode_find_by_inum(struct bch_fs *c, u64 inode_nr, + struct bch_inode_unpacked *inode) { struct btree_iter iter; struct bkey_s_c k; @@ -404,7 +404,7 @@ int bch_inode_find_by_inum(struct bch_fs *c, u64 inode_nr, POS(inode_nr, 0), k) { switch (k.k->type) { case BCH_INODE_FS: - ret = bch_inode_unpack(bkey_s_c_to_inode(k), inode); + ret = bch2_inode_unpack(bkey_s_c_to_inode(k), inode); break; default: /* hole, not found */ @@ -415,11 +415,11 @@ int bch_inode_find_by_inum(struct bch_fs *c, u64 inode_nr, } - return bch_btree_iter_unlock(&iter) ?: ret; + return bch2_btree_iter_unlock(&iter) ?: ret; } -int bch_cached_dev_inode_find_by_uuid(struct bch_fs *c, uuid_le *uuid, - struct bkey_i_inode_blockdev *ret) +int bch2_cached_dev_inode_find_by_uuid(struct bch_fs *c, uuid_le *uuid, + struct bkey_i_inode_blockdev *ret) { struct btree_iter iter; struct bkey_s_c k; @@ -439,13 +439,13 @@ int bch_cached_dev_inode_find_by_uuid(struct bch_fs *c, uuid_le *uuid, if (CACHED_DEV(inode.v) && !memcmp(uuid, &inode.v->i_uuid, 16)) { bkey_reassemble(&ret->k_i, k); - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return 0; } } - bch_btree_iter_cond_resched(&iter); + bch2_btree_iter_cond_resched(&iter); } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return -ENOENT; } diff --git a/libbcache/inode.h b/libbcachefs/inode.h index 41e344d5..277d4e42 100644 --- a/libbcache/inode.h +++ b/libbcachefs/inode.h @@ -3,7 +3,7 @@ #include <linux/math64.h> -extern const struct bkey_ops bch_bkey_inode_ops; +extern const struct bkey_ops bch2_bkey_inode_ops; struct bch_inode_unpacked { u64 inum; @@ -24,27 +24,27 @@ struct bkey_inode_buf { #undef BCH_INODE_FIELD } __packed; -void bch_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *); -int bch_inode_unpack(struct bkey_s_c_inode, struct bch_inode_unpacked *); +void bch2_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *); +int bch2_inode_unpack(struct bkey_s_c_inode, struct bch_inode_unpacked *); -void bch_inode_init(struct bch_fs *, struct bch_inode_unpacked *, +void bch2_inode_init(struct bch_fs *, struct bch_inode_unpacked *, uid_t, gid_t, umode_t, dev_t); -int bch_inode_create(struct bch_fs *, struct bkey_i *, u64, u64, u64 *); -int bch_inode_truncate(struct bch_fs *, u64, u64, +int bch2_inode_create(struct bch_fs *, struct bkey_i *, u64, u64, u64 *); +int bch2_inode_truncate(struct bch_fs *, u64, u64, struct extent_insert_hook *, u64 *); -int bch_inode_rm(struct bch_fs *, u64); +int bch2_inode_rm(struct bch_fs *, u64); -int bch_inode_find_by_inum(struct bch_fs *, u64, +int bch2_inode_find_by_inum(struct bch_fs *, u64, struct bch_inode_unpacked *); -int bch_cached_dev_inode_find_by_uuid(struct bch_fs *, uuid_le *, +int bch2_cached_dev_inode_find_by_uuid(struct bch_fs *, uuid_le *, struct bkey_i_inode_blockdev *); -static inline struct timespec bch_time_to_timespec(struct bch_fs *c, u64 time) +static inline struct timespec bch2_time_to_timespec(struct bch_fs *c, u64 time) { return ns_to_timespec(time * c->sb.time_precision + c->sb.time_base_lo); } -static inline u64 timespec_to_bch_time(struct bch_fs *c, struct timespec ts) +static inline u64 timespec_to_bch2_time(struct bch_fs *c, struct timespec ts) { s64 ns = timespec_to_ns(&ts) - c->sb.time_base_lo; diff --git a/libbcache/io.c b/libbcachefs/io.c index 753c8a3d..212a5a65 100644 --- a/libbcache/io.c +++ b/libbcachefs/io.c @@ -5,7 +5,7 @@ * Copyright 2012 Google, Inc. 
*/ -#include "bcache.h" +#include "bcachefs.h" #include "alloc.h" #include "bset.h" #include "btree_update.h" @@ -20,14 +20,12 @@ #include "journal.h" #include "keylist.h" #include "move.h" -#include "notify.h" -#include "stats.h" #include "super-io.h" #include <linux/blkdev.h> #include <linux/random.h> -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> static inline void __bio_inc_remaining(struct bio *bio) { @@ -36,37 +34,9 @@ static inline void __bio_inc_remaining(struct bio *bio) atomic_inc(&bio->__bi_remaining); } -void bch_generic_make_request(struct bio *bio, struct bch_fs *c) -{ - if (current->bio_list) { - spin_lock(&c->bio_submit_lock); - bio_list_add(&c->bio_submit_list, bio); - spin_unlock(&c->bio_submit_lock); - queue_work(bcache_io_wq, &c->bio_submit_work); - } else { - generic_make_request(bio); - } -} - -void bch_bio_submit_work(struct work_struct *work) -{ - struct bch_fs *c = container_of(work, struct bch_fs, - bio_submit_work); - struct bio_list bl; - struct bio *bio; - - spin_lock(&c->bio_submit_lock); - bl = c->bio_submit_list; - bio_list_init(&c->bio_submit_list); - spin_unlock(&c->bio_submit_lock); - - while ((bio = bio_list_pop(&bl))) - generic_make_request(bio); -} - /* Allocate, free from mempool: */ -void bch_bio_free_pages_pool(struct bch_fs *c, struct bio *bio) +void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio) { struct bio_vec *bv; unsigned i; @@ -77,7 +47,7 @@ void bch_bio_free_pages_pool(struct bch_fs *c, struct bio *bio) bio->bi_vcnt = 0; } -static void bch_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio, +static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio, bool *using_mempool) { struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++]; @@ -99,7 +69,7 @@ pool_alloc: bv->bv_offset = 0; } -void bch_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio, +void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio, size_t bytes) { bool using_mempool = false; @@ -107,7 +77,7 @@ void bch_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio, bio->bi_iter.bi_size = bytes; while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE)) - bch_bio_alloc_page_pool(c, bio, &using_mempool); + bch2_bio_alloc_page_pool(c, bio, &using_mempool); if (using_mempool) mutex_unlock(&c->bio_bounce_pages_lock); @@ -115,9 +85,8 @@ void bch_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio, /* Bios with headers */ -static void bch_submit_wbio(struct bch_fs *c, struct bch_write_bio *wbio, - struct bch_dev *ca, const struct bch_extent_ptr *ptr, - bool punt) +static void bch2_submit_wbio(struct bch_fs *c, struct bch_write_bio *wbio, + struct bch_dev *ca, const struct bch_extent_ptr *ptr) { wbio->ca = ca; wbio->submit_time_us = local_clock_us(); @@ -126,14 +95,12 @@ static void bch_submit_wbio(struct bch_fs *c, struct bch_write_bio *wbio, if (!ca) bcache_io_error(c, &wbio->bio, "device has been removed"); - else if (punt) - bch_generic_make_request(&wbio->bio, c); else generic_make_request(&wbio->bio); } -void bch_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, - const struct bkey_i *k, bool punt) +void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, + const struct bkey_i *k) { struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k); const struct bch_extent_ptr *ptr; @@ -148,7 +115,7 @@ void bch_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, extent_for_each_ptr(e, ptr) { ca = c->devs[ptr->dev]; if (!percpu_ref_tryget(&ca->io_ref)) { - bch_submit_wbio(c, wbio, NULL, ptr, 
punt); + bch2_submit_wbio(c, wbio, NULL, ptr); break; } @@ -172,7 +139,7 @@ void bch_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, if (!journal_flushes_device(ca)) n->bio.bi_opf |= REQ_FUA; - bch_submit_wbio(c, n, ca, ptr, punt); + bch2_submit_wbio(c, n, ca, ptr); } } @@ -187,20 +154,20 @@ static struct workqueue_struct *index_update_wq(struct bch_write_op *op) : op->c->wq; } -static void __bch_write(struct closure *); +static void __bch2_write(struct closure *); -static void bch_write_done(struct closure *cl) +static void bch2_write_done(struct closure *cl) { struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); BUG_ON(!(op->flags & BCH_WRITE_DONE)); if (!op->error && (op->flags & BCH_WRITE_FLUSH)) - op->error = bch_journal_error(&op->c->journal); + op->error = bch2_journal_error(&op->c->journal); - bch_disk_reservation_put(op->c, &op->res); + bch2_disk_reservation_put(op->c, &op->res); percpu_ref_put(&op->c->writes); - bch_keylist_free(&op->insert_keys, op->inline_keys); + bch2_keylist_free(&op->insert_keys, op->inline_keys); closure_return(cl); } @@ -215,19 +182,19 @@ static u64 keylist_sectors(struct keylist *keys) return ret; } -static int bch_write_index_default(struct bch_write_op *op) +static int bch2_write_index_default(struct bch_write_op *op) { struct keylist *keys = &op->insert_keys; struct btree_iter iter; int ret; - bch_btree_iter_init_intent(&iter, op->c, BTREE_ID_EXTENTS, - bkey_start_pos(&bch_keylist_front(keys)->k)); + bch2_btree_iter_init_intent(&iter, op->c, BTREE_ID_EXTENTS, + bkey_start_pos(&bch2_keylist_front(keys)->k)); - ret = bch_btree_insert_list_at(&iter, keys, &op->res, + ret = bch2_btree_insert_list_at(&iter, keys, &op->res, NULL, op_journal_seq(op), BTREE_INSERT_NOFAIL); - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret; } @@ -235,7 +202,7 @@ static int bch_write_index_default(struct bch_write_op *op) /** * bch_write_index - after a write, update index to point to new data */ -static void bch_write_index(struct closure *cl) +static void bch2_write_index(struct closure *cl) { struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); struct bch_fs *c = op->c; @@ -244,7 +211,7 @@ static void bch_write_index(struct closure *cl) op->flags |= BCH_WRITE_LOOPED; - if (!bch_keylist_empty(keys)) { + if (!bch2_keylist_empty(keys)) { u64 sectors_start = keylist_sectors(keys); int ret = op->index_update_fn(op); @@ -260,22 +227,22 @@ static void bch_write_index(struct closure *cl) for (i = 0; i < ARRAY_SIZE(op->open_buckets); i++) if (op->open_buckets[i]) { - bch_open_bucket_put(c, - c->open_buckets + - op->open_buckets[i]); + bch2_open_bucket_put(c, + c->open_buckets + + op->open_buckets[i]); op->open_buckets[i] = 0; } if (!(op->flags & BCH_WRITE_DONE)) - continue_at(cl, __bch_write, op->io_wq); + continue_at(cl, __bch2_write, op->io_wq); if (!op->error && (op->flags & BCH_WRITE_FLUSH)) { - bch_journal_flush_seq_async(&c->journal, - *op_journal_seq(op), - cl); - continue_at(cl, bch_write_done, index_update_wq(op)); + bch2_journal_flush_seq_async(&c->journal, + *op_journal_seq(op), + cl); + continue_at(cl, bch2_write_done, index_update_wq(op)); } else { - continue_at_nobarrier(cl, bch_write_done, NULL); + continue_at_nobarrier(cl, bch2_write_done, NULL); } } @@ -285,7 +252,7 @@ static void bch_write_index(struct closure *cl) * Used to implement discard, and to handle when writethrough write hits * a write error on the cache device. 
*/ -static void bch_write_discard(struct closure *cl) +static void bch2_write_discard(struct closure *cl) { struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); struct bio *bio = &op->bio->bio; @@ -293,20 +260,20 @@ static void bch_write_discard(struct closure *cl) end.offset += bio_sectors(bio); - op->error = bch_discard(op->c, op->pos, end, op->version, + op->error = bch2_discard(op->c, op->pos, end, op->version, &op->res, NULL, NULL); } /* * Convert extents to be inserted to discards after an error: */ -static void bch_write_io_error(struct closure *cl) +static void bch2_write_io_error(struct closure *cl) { struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) { - struct bkey_i *src = bch_keylist_front(&op->insert_keys); - struct bkey_i *dst = bch_keylist_front(&op->insert_keys); + struct bkey_i *src = bch2_keylist_front(&op->insert_keys); + struct bkey_i *dst = bch2_keylist_front(&op->insert_keys); /* * Our data write just errored, which means we've got a bunch @@ -334,17 +301,17 @@ static void bch_write_io_error(struct closure *cl) op->flags |= BCH_WRITE_DISCARD; } else { /* TODO: We could try to recover from this. */ - while (!bch_keylist_empty(&op->insert_keys)) - bch_keylist_pop_front(&op->insert_keys); + while (!bch2_keylist_empty(&op->insert_keys)) + bch2_keylist_pop_front(&op->insert_keys); op->error = -EIO; op->flags |= BCH_WRITE_DONE; } - bch_write_index(cl); + bch2_write_index(cl); } -static void bch_write_endio(struct bio *bio) +static void bch2_write_endio(struct bio *bio) { struct closure *cl = bio->bi_private; struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); @@ -353,13 +320,11 @@ static void bch_write_endio(struct bio *bio) struct bio *orig = wbio->orig; struct bch_dev *ca = wbio->ca; - if (bch_dev_nonfatal_io_err_on(bio->bi_error, ca, + if (bch2_dev_nonfatal_io_err_on(bio->bi_error, ca, "data write")) { - set_closure_fn(cl, bch_write_io_error, index_update_wq(op)); + set_closure_fn(cl, bch2_write_io_error, index_update_wq(op)); } - bch_account_io_completion_time(ca, wbio->submit_time_us, - REQ_OP_WRITE); if (ca) percpu_ref_put(&ca->io_ref); @@ -367,7 +332,7 @@ static void bch_write_endio(struct bio *bio) orig->bi_error = bio->bi_error; if (wbio->bounce) - bch_bio_free_pages_pool(c, bio); + bch2_bio_free_pages_pool(c, bio); if (wbio->put_bio) bio_put(bio); @@ -409,19 +374,19 @@ static void init_append_extent(struct bch_write_op *op, e->k.version = op->version; bkey_extent_set_cached(&e->k, op->flags & BCH_WRITE_CACHED); - bch_extent_crc_append(e, compressed_size, + bch2_extent_crc_append(e, compressed_size, uncompressed_size, compression_type, nonce, csum, csum_type); - bch_alloc_sectors_append_ptrs(op->c, e, op->nr_replicas, + bch2_alloc_sectors_append_ptrs(op->c, e, op->nr_replicas, ob, compressed_size); bkey_extent_set_cached(&e->k, (op->flags & BCH_WRITE_CACHED)); - bch_keylist_push(&op->insert_keys); + bch2_keylist_push(&op->insert_keys); } -static int bch_write_extent(struct bch_write_op *op, +static int bch2_write_extent(struct bch_write_op *op, struct open_bucket *ob, struct bio *orig) { @@ -444,7 +409,7 @@ static int bch_write_extent(struct bch_write_op *op, crc_compressed_size(NULL, &op->crc) > ob->sectors_free)) { int ret; - ret = bch_bio_uncompress_inplace(c, orig, op->size, op->crc); + ret = bch2_bio_uncompress_inplace(c, orig, op->size, op->crc); if (ret) return ret; @@ -472,7 +437,7 @@ static int bch_write_extent(struct bch_write_op *op, /* all units here in bytes 
*/ unsigned total_output = 0, output_available = min(ob->sectors_free << 9, orig->bi_iter.bi_size); - unsigned crc_nonce = bch_csum_type_is_encryption(csum_type) + unsigned crc_nonce = bch2_csum_type_is_encryption(csum_type) ? op->nonce : 0; struct bch_csum csum; struct nonce nonce; @@ -484,7 +449,7 @@ static int bch_write_extent(struct bch_write_op *op, * XXX: can't use mempool for more than * BCH_COMPRESSED_EXTENT_MAX worth of pages */ - bch_bio_alloc_pages_pool(c, bio, output_available); + bch2_bio_alloc_pages_pool(c, bio, output_available); /* copy WRITE_SYNC flag */ bio->bi_opf = orig->bi_opf; @@ -497,7 +462,7 @@ static int bch_write_extent(struct bch_write_op *op, unsigned fragment_compression_type = compression_type; size_t dst_len, src_len; - bch_bio_compress(c, bio, &dst_len, + bch2_bio_compress(c, bio, &dst_len, orig, &src_len, &fragment_compression_type); @@ -512,9 +477,9 @@ static int bch_write_extent(struct bch_write_op *op, src_len >> 9, compression_type), - bch_encrypt_bio(c, csum_type, nonce, bio); + bch2_encrypt_bio(c, csum_type, nonce, bio); - csum = bch_checksum_bio(c, csum_type, nonce, bio); + csum = bch2_checksum_bio(c, csum_type, nonce, bio); swap(bio->bi_iter.bi_size, dst_len); init_append_extent(op, @@ -527,7 +492,7 @@ static int bch_write_extent(struct bch_write_op *op, bio_advance(orig, src_len); } while (bio->bi_iter.bi_size && orig->bi_iter.bi_size && - !bch_keylist_realloc(&op->insert_keys, + !bch2_keylist_realloc(&op->insert_keys, op->inline_keys, ARRAY_SIZE(op->inline_keys), BKEY_EXTENT_U64s_MAX)); @@ -562,7 +527,7 @@ static int bch_write_extent(struct bch_write_op *op, ret = bio != orig; } - bio->bi_end_io = bch_write_endio; + bio->bi_end_io = bch2_write_endio; bio->bi_private = &op->cl; bio_set_op_attrs(bio, REQ_OP_WRITE, 0); @@ -572,18 +537,13 @@ static int bch_write_extent(struct bch_write_op *op, key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset); - bch_check_mark_super(c, key_to_write, false); + bch2_check_mark_super(c, key_to_write, false); -#ifndef CONFIG_BCACHE_NO_IO - bch_submit_wbio_replicas(to_wbio(bio), c, key_to_write, false); -#else - to_wbio(bio)->ca = NULL; - bio_endio(bio); -#endif + bch2_submit_wbio_replicas(to_wbio(bio), c, key_to_write); return ret; } -static void __bch_write(struct closure *cl) +static void __bch2_write(struct closure *cl) { struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); struct bch_fs *c = op->c; @@ -596,9 +556,9 @@ static void __bch_write(struct closure *cl) if (op->flags & BCH_WRITE_DISCARD) { op->flags |= BCH_WRITE_DONE; - bch_write_discard(cl); + bch2_write_discard(cl); bio_put(bio); - continue_at(cl, bch_write_done, index_update_wq(op)); + continue_at(cl, bch2_write_done, index_update_wq(op)); } /* @@ -612,16 +572,16 @@ static void __bch_write(struct closure *cl) EBUG_ON(!bio_sectors(bio)); if (open_bucket_nr == ARRAY_SIZE(op->open_buckets)) - continue_at(cl, bch_write_index, index_update_wq(op)); + continue_at(cl, bch2_write_index, index_update_wq(op)); /* for the device pointers and 1 for the chksum */ - if (bch_keylist_realloc(&op->insert_keys, + if (bch2_keylist_realloc(&op->insert_keys, op->inline_keys, ARRAY_SIZE(op->inline_keys), BKEY_EXTENT_U64s_MAX)) - continue_at(cl, bch_write_index, index_update_wq(op)); + continue_at(cl, bch2_write_index, index_update_wq(op)); - b = bch_alloc_sectors_start(c, op->wp, + b = bch2_alloc_sectors_start(c, op->wp, op->nr_replicas, c->opts.data_replicas_required, op->alloc_reserve, @@ -639,17 +599,17 @@ static void __bch_write(struct 
closure *cl) * before allocating another open bucket. We only hit * this case if open_bucket_nr > 1. */ - if (!bch_keylist_empty(&op->insert_keys)) - continue_at(cl, bch_write_index, + if (!bch2_keylist_empty(&op->insert_keys)) + continue_at(cl, bch2_write_index, index_update_wq(op)); /* * If we've looped, we're running out of a workqueue - - * not the bch_write() caller's context - and we don't + * not the bch2_write() caller's context - and we don't * want to block the workqueue: */ if (op->flags & BCH_WRITE_LOOPED) - continue_at(cl, __bch_write, op->io_wq); + continue_at(cl, __bch2_write, op->io_wq); /* * Otherwise, we do want to block the caller on alloc @@ -667,16 +627,16 @@ static void __bch_write(struct closure *cl) b - c->open_buckets > U8_MAX); op->open_buckets[open_bucket_nr++] = b - c->open_buckets; - ret = bch_write_extent(op, b, bio); + ret = bch2_write_extent(op, b, bio); - bch_alloc_sectors_done(c, op->wp, b); + bch2_alloc_sectors_done(c, op->wp, b); if (ret < 0) goto err; } while (ret); op->flags |= BCH_WRITE_DONE; - continue_at(cl, bch_write_index, index_update_wq(op)); + continue_at(cl, bch2_write_index, index_update_wq(op)); err: if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) { /* @@ -686,7 +646,7 @@ err: * reclaiming it. */ - bch_write_discard(cl); + bch2_write_discard(cl); } else { /* * Right now we can only error here if we went RO - the @@ -706,12 +666,12 @@ err: * written (especially for a cmpxchg operation that's moving data * around) */ - continue_at(cl, !bch_keylist_empty(&op->insert_keys) - ? bch_write_index - : bch_write_done, index_update_wq(op)); + continue_at(cl, !bch2_keylist_empty(&op->insert_keys) + ? bch2_write_index + : bch2_write_done, index_update_wq(op)); } -void bch_wake_delayed_writes(unsigned long data) +void bch2_wake_delayed_writes(unsigned long data) { struct bch_fs *c = (void *) data; struct bch_write_op *op; @@ -754,39 +714,30 @@ void bch_wake_delayed_writes(unsigned long data) * If op->discard is true, instead of inserting the data it invalidates the * region of the cache represented by op->bio and op->inode. 
*/ -void bch_write(struct closure *cl) +void bch2_write(struct closure *cl) { struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); struct bio *bio = &op->bio->bio; struct bch_fs *c = op->c; u64 inode = op->pos.inode; - trace_bcache_write(c, inode, bio, - !(op->flags & BCH_WRITE_CACHED), - op->flags & BCH_WRITE_DISCARD); - if (c->opts.nochanges || !percpu_ref_tryget(&c->writes)) { __bcache_io_error(c, "read only"); op->error = -EROFS; - bch_disk_reservation_put(c, &op->res); + bch2_disk_reservation_put(c, &op->res); closure_return(cl); } if (bversion_zero(op->version) && - bch_csum_type_is_encryption(op->csum_type)) + bch2_csum_type_is_encryption(op->csum_type)) op->version.lo = atomic64_inc_return(&c->key_version) + 1; if (!(op->flags & BCH_WRITE_DISCARD)) - bch_increment_clock(c, bio_sectors(bio), WRITE); - - if (!(op->flags & BCH_WRITE_DISCARD)) - bch_mark_foreground_write(c, bio_sectors(bio)); - else - bch_mark_discard(c, bio_sectors(bio)); + bch2_increment_clock(c, bio_sectors(bio), WRITE); - /* Don't call bch_next_delay() if rate is >= 1 GB/sec */ + /* Don't call bch2_next_delay() if rate is >= 1 GB/sec */ if (c->foreground_write_ratelimit_enabled && c->foreground_write_pd.rate.rate < (1 << 30) && @@ -795,13 +746,13 @@ void bch_write(struct closure *cl) u64 delay; spin_lock_irqsave(&c->foreground_write_pd_lock, flags); - bch_ratelimit_increment(&c->foreground_write_pd.rate, + bch2_ratelimit_increment(&c->foreground_write_pd.rate, bio->bi_iter.bi_size); - delay = bch_ratelimit_delay(&c->foreground_write_pd.rate); + delay = bch2_ratelimit_delay(&c->foreground_write_pd.rate); if (delay >= HZ / 100) { - trace_bcache_write_throttle(c, inode, bio, delay); + trace_write_throttle(c, inode, bio, delay); closure_get(&op->cl); /* list takes a ref */ @@ -820,16 +771,16 @@ void bch_write(struct closure *cl) spin_unlock_irqrestore(&c->foreground_write_pd_lock, flags); - continue_at(cl, __bch_write, index_update_wq(op)); + continue_at(cl, __bch2_write, index_update_wq(op)); } spin_unlock_irqrestore(&c->foreground_write_pd_lock, flags); } - continue_at_nobarrier(cl, __bch_write, NULL); + continue_at_nobarrier(cl, __bch2_write, NULL); } -void bch_write_op_init(struct bch_write_op *op, struct bch_fs *c, +void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c, struct bch_write_bio *bio, struct disk_reservation res, struct write_point *wp, struct bpos pos, u64 *journal_seq, unsigned flags) @@ -842,7 +793,7 @@ void bch_write_op_init(struct bch_write_op *op, struct bch_fs *c, op->written = 0; op->error = 0; op->flags = flags; - op->csum_type = bch_data_checksum_type(c); + op->csum_type = bch2_data_checksum_type(c); op->compression_type = c->opts.compression; op->nr_replicas = res.nr_replicas; op->alloc_reserve = RESERVE_NONE; @@ -859,11 +810,11 @@ void bch_write_op_init(struct bch_write_op *op, struct bch_fs *c, op->journal_seq = 0; } - op->index_update_fn = bch_write_index_default; + op->index_update_fn = bch2_write_index_default; - bch_keylist_init(&op->insert_keys, - op->inline_keys, - ARRAY_SIZE(op->inline_keys)); + bch2_keylist_init(&op->insert_keys, + op->inline_keys, + ARRAY_SIZE(op->inline_keys)); if (version_stress_test(c)) get_random_bytes(&op->version, sizeof(op->version)); @@ -886,13 +837,13 @@ void bch_write_op_init(struct bch_write_op *op, struct bch_fs *c, * XXX: this needs to be refactored with inode_truncate, or more * appropriately inode_truncate should call this */ -int bch_discard(struct bch_fs *c, struct bpos start, - struct bpos end, struct bversion version, 
- struct disk_reservation *disk_res, - struct extent_insert_hook *hook, - u64 *journal_seq) +int bch2_discard(struct bch_fs *c, struct bpos start, + struct bpos end, struct bversion version, + struct disk_reservation *disk_res, + struct extent_insert_hook *hook, + u64 *journal_seq) { - return bch_btree_delete_range(c, BTREE_ID_EXTENTS, start, end, version, + return bch2_btree_delete_range(c, BTREE_ID_EXTENTS, start, end, version, disk_res, hook, journal_seq); } @@ -910,7 +861,7 @@ static int bio_checksum_uncompress(struct bch_fs *c, struct bch_read_bio *rbio) { struct bio *src = &rbio->bio; - struct bio *dst = &bch_rbio_parent(rbio)->bio; + struct bio *dst = &bch2_rbio_parent(rbio)->bio; struct bvec_iter dst_iter = rbio->parent_iter; struct nonce nonce = extent_nonce(rbio->version, rbio->crc.nonce, @@ -933,8 +884,9 @@ static int bio_checksum_uncompress(struct bch_fs *c, src->bi_iter = rbio->parent_iter; } - csum = bch_checksum_bio(c, rbio->crc.csum_type, nonce, src); - if (bch_dev_nonfatal_io_err_on(bch_crc_cmp(rbio->crc.csum, csum), rbio->ca, + csum = bch2_checksum_bio(c, rbio->crc.csum_type, nonce, src); + if (bch2_dev_nonfatal_io_err_on(bch2_crc_cmp(rbio->crc.csum, csum), + rbio->ca, "data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)", rbio->inode, (u64) rbio->parent_iter.bi_sector << 9, rbio->crc.csum.hi, rbio->crc.csum.lo, csum.hi, csum.lo, @@ -947,8 +899,8 @@ static int bio_checksum_uncompress(struct bch_fs *c, */ if (rbio->crc.compression_type != BCH_COMPRESSION_NONE) { if (!ret) { - bch_encrypt_bio(c, rbio->crc.csum_type, nonce, src); - ret = bch_bio_uncompress(c, src, dst, + bch2_encrypt_bio(c, rbio->crc.csum_type, nonce, src); + ret = bch2_bio_uncompress(c, src, dst, dst_iter, rbio->crc); if (ret) __bcache_io_error(c, "decompression error"); @@ -962,19 +914,19 @@ static int bio_checksum_uncompress(struct bch_fs *c, nonce = nonce_add(nonce, rbio->crc.offset << 9); - bch_encrypt_bio(c, rbio->crc.csum_type, + bch2_encrypt_bio(c, rbio->crc.csum_type, nonce, src); bio_copy_data_iter(dst, dst_iter, src, src->bi_iter); } else { - bch_encrypt_bio(c, rbio->crc.csum_type, nonce, src); + bch2_encrypt_bio(c, rbio->crc.csum_type, nonce, src); } return ret; } -static void bch_rbio_free(struct bch_read_bio *rbio) +static void bch2_rbio_free(struct bch_read_bio *rbio) { struct bch_fs *c = rbio->c; struct bio *bio = &rbio->bio; @@ -985,14 +937,14 @@ static void bch_rbio_free(struct bch_read_bio *rbio) if (rbio->promote) kfree(rbio->promote); if (rbio->bounce) - bch_bio_free_pages_pool(c, bio); + bch2_bio_free_pages_pool(c, bio); bio_put(bio); } -static void bch_rbio_done(struct bch_read_bio *rbio) +static void bch2_rbio_done(struct bch_read_bio *rbio) { - struct bio *orig = &bch_rbio_parent(rbio)->bio; + struct bio *orig = &bch2_rbio_parent(rbio)->bio; percpu_ref_put(&rbio->ca->io_ref); rbio->ca = NULL; @@ -1002,7 +954,7 @@ static void bch_rbio_done(struct bch_read_bio *rbio) orig->bi_error = rbio->bio.bi_error; bio_endio(orig); - bch_rbio_free(rbio); + bch2_rbio_free(rbio); } else { if (rbio->promote) kfree(rbio->promote); @@ -1012,13 +964,13 @@ static void bch_rbio_done(struct bch_read_bio *rbio) } } -static void bch_rbio_error(struct bch_read_bio *rbio, int error) +static void bch2_rbio_error(struct bch_read_bio *rbio, int error) { - bch_rbio_parent(rbio)->bio.bi_error = error; - bch_rbio_done(rbio); + bch2_rbio_parent(rbio)->bio.bi_error = error; + bch2_rbio_done(rbio); } -static void bch_rbio_retry(struct bch_fs *c, struct bch_read_bio *rbio) +static 
void bch2_rbio_retry(struct bch_fs *c, struct bch_read_bio *rbio) { unsigned long flags; @@ -1036,12 +988,12 @@ static void cache_promote_done(struct closure *cl) struct cache_promote_op *op = container_of(cl, struct cache_promote_op, cl); - bch_bio_free_pages_pool(op->write.op.c, &op->write.wbio.bio); + bch2_bio_free_pages_pool(op->write.op.c, &op->write.wbio.bio); kfree(op); } /* Inner part that may run in process context */ -static void __bch_read_endio(struct work_struct *work) +static void __bch2_read_endio(struct work_struct *work) { struct bch_read_bio *rbio = container_of(work, struct bch_read_bio, work); @@ -1057,9 +1009,9 @@ static void __bch_read_endio(struct work_struct *work) */ if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) { rbio->flags |= BCH_READ_FORCE_BOUNCE; - bch_rbio_retry(c, rbio); + bch2_rbio_retry(c, rbio); } else { - bch_rbio_error(rbio, -EIO); + bch2_rbio_error(rbio, -EIO); } return; } @@ -1070,33 +1022,31 @@ static void __bch_read_endio(struct work_struct *work) BUG_ON(!rbio->split || !rbio->bounce); + trace_promote(&rbio->bio); + /* we now own pages: */ swap(promote->write.wbio.bio.bi_vcnt, rbio->bio.bi_vcnt); rbio->promote = NULL; - bch_rbio_done(rbio); + bch2_rbio_done(rbio); closure_init(cl, &c->cl); - closure_call(&promote->write.op.cl, bch_write, c->wq, cl); + closure_call(&promote->write.op.cl, bch2_write, c->wq, cl); closure_return_with_destructor(cl, cache_promote_done); } else { - bch_rbio_done(rbio); + bch2_rbio_done(rbio); } } -static void bch_read_endio(struct bio *bio) +static void bch2_read_endio(struct bio *bio) { struct bch_read_bio *rbio = container_of(bio, struct bch_read_bio, bio); struct bch_fs *c = rbio->c; - if (rbio->flags & BCH_READ_ACCOUNT_TIMES) - bch_account_io_completion_time(rbio->ca, rbio->submit_time_us, - REQ_OP_READ); - - if (bch_dev_nonfatal_io_err_on(bio->bi_error, rbio->ca, "data read")) { + if (bch2_dev_nonfatal_io_err_on(bio->bi_error, rbio->ca, "data read")) { /* XXX: retry IO errors when we have another replica */ - bch_rbio_error(rbio, bio->bi_error); + bch2_rbio_error(rbio, bio->bi_error); return; } @@ -1106,19 +1056,19 @@ static void bch_read_endio(struct bio *bio) atomic_long_inc(&c->cache_read_races); if (rbio->flags & BCH_READ_RETRY_IF_STALE) - bch_rbio_retry(c, rbio); + bch2_rbio_retry(c, rbio); else - bch_rbio_error(rbio, -EINTR); + bch2_rbio_error(rbio, -EINTR); return; } if (rbio->crc.compression_type || - bch_csum_type_is_encryption(rbio->crc.csum_type)) + bch2_csum_type_is_encryption(rbio->crc.csum_type)) queue_work(system_unbound_wq, &rbio->work); else if (rbio->crc.csum_type) queue_work(system_highpri_wq, &rbio->work); else - __bch_read_endio(&rbio->work); + __bch2_read_endio(&rbio->work); } static bool should_promote(struct bch_fs *c, @@ -1134,7 +1084,7 @@ static bool should_promote(struct bch_fs *c, c->fastest_tier < c->tiers + pick->ca->mi.tier; } -void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig, +void bch2_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig, struct bvec_iter iter, struct bkey_s_c k, struct extent_pick_ptr *pick, unsigned flags) { @@ -1143,6 +1093,8 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig, unsigned skip = iter.bi_sector - bkey_start_offset(k.k); bool bounce = false, split, read_full = false; + bch2_increment_clock(c, bio_sectors(&orig->bio), READ); + EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector || k.k->p.offset < bvec_iter_end_sector(iter)); @@ -1155,7 +1107,7 @@ void bch_read_extent_iter(struct bch_fs *c, 
struct bch_read_bio *orig, if (should_promote(c, pick, flags)) { /* * biovec needs to be big enough to hold decompressed data, if - * the bch_write_extent() has to decompress/recompress it: + * the bch2_write_extent() has to decompress/recompress it: */ unsigned sectors = max_t(unsigned, k.k->size, @@ -1182,7 +1134,7 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig, if (pick->crc.compression_type != BCH_COMPRESSION_NONE || (pick->crc.csum_type != BCH_CSUM_NONE && (bvec_iter_sectors(iter) != crc_uncompressed_size(NULL, &pick->crc) || - (bch_csum_type_is_encryption(pick->crc.csum_type) && + (bch2_csum_type_is_encryption(pick->crc.csum_type) && (flags & BCH_READ_USER_MAPPED)) || (flags & BCH_READ_FORCE_BOUNCE)))) { read_full = true; @@ -1199,7 +1151,7 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig, &c->bio_read_split), struct bch_read_bio, bio); - bch_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9); + bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9); split = true; } else if (!(flags & BCH_READ_MAY_REUSE_BIO) || !(flags & BCH_READ_IS_LAST)) { @@ -1249,12 +1201,12 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig, rbio->version = k.k->version; rbio->promote = promote_op; rbio->inode = k.k->p.inode; - INIT_WORK(&rbio->work, __bch_read_endio); + INIT_WORK(&rbio->work, __bch2_read_endio); rbio->bio.bi_bdev = pick->ca->disk_sb.bdev; rbio->bio.bi_opf = orig->bio.bi_opf; rbio->bio.bi_iter.bi_sector = pick->ptr.offset; - rbio->bio.bi_end_io = bch_read_endio; + rbio->bio.bi_end_io = bch2_read_endio; if (promote_op) { struct bio *promote_bio = &promote_op->write.wbio.bio; @@ -1263,7 +1215,7 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig, memcpy(promote_bio->bi_io_vec, rbio->bio.bi_io_vec, sizeof(struct bio_vec) * rbio->bio.bi_vcnt); - bch_migrate_write_init(c, &promote_op->write, + bch2_migrate_write_init(c, &promote_op->write, &c->promote_write_point, k, NULL, BCH_WRITE_ALLOC_NOWAIT| @@ -1302,14 +1254,16 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig, rbio->submit_time_us = local_clock_us(); -#ifndef CONFIG_BCACHE_NO_IO + if (bounce) + trace_read_bounce(&rbio->bio); + + if (!(flags & BCH_READ_IS_LAST)) + trace_read_split(&rbio->bio); + generic_make_request(&rbio->bio); -#else - bio_endio(&rbio->bio); -#endif } -static void bch_read_iter(struct bch_fs *c, struct bch_read_bio *rbio, +static void bch2_read_iter(struct bch_fs *c, struct bch_read_bio *rbio, struct bvec_iter bvec_iter, u64 inode, unsigned flags) { @@ -1331,9 +1285,9 @@ static void bch_read_iter(struct bch_fs *c, struct bch_read_bio *rbio, */ bkey_reassemble(&tmp.k, k); k = bkey_i_to_s_c(&tmp.k); - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); - bch_extent_pick_ptr(c, k, &pick); + bch2_extent_pick_ptr(c, k, &pick); if (IS_ERR(pick.ca)) { bcache_io_error(c, bio, "no device to read from"); bio_endio(bio); @@ -1354,7 +1308,7 @@ static void bch_read_iter(struct bch_fs *c, struct bch_read_bio *rbio, PTR_BUCKET(pick.ca, &pick.ptr)->read_prio = c->prio_clock[READ].hand; - bch_read_extent_iter(c, rbio, bvec_iter, + bch2_read_extent_iter(c, rbio, bvec_iter, k, &pick, flags); flags &= ~BCH_READ_MAY_REUSE_BIO; @@ -1376,17 +1330,15 @@ static void bch_read_iter(struct bch_fs *c, struct bch_read_bio *rbio, * If we get here, it better have been because there was an error * reading a btree node */ - ret = bch_btree_iter_unlock(&iter); + ret = bch2_btree_iter_unlock(&iter); BUG_ON(!ret); bcache_io_error(c, bio, 
"btree IO error %i", ret); bio_endio(bio); } -void bch_read(struct bch_fs *c, struct bch_read_bio *bio, u64 inode) +void bch2_read(struct bch_fs *c, struct bch_read_bio *bio, u64 inode) { - bch_increment_clock(c, bio_sectors(&bio->bio), READ); - - bch_read_iter(c, bio, bio->bio.bi_iter, inode, + bch2_read_iter(c, bio, bio->bio.bi_iter, inode, BCH_READ_RETRY_IF_STALE| BCH_READ_PROMOTE| BCH_READ_MAY_REUSE_BIO| @@ -1394,26 +1346,26 @@ void bch_read(struct bch_fs *c, struct bch_read_bio *bio, u64 inode) } /** - * bch_read_retry - re-submit a bio originally from bch_read() + * bch_read_retry - re-submit a bio originally from bch2_read() */ -static void bch_read_retry(struct bch_fs *c, struct bch_read_bio *rbio) +static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio) { - struct bch_read_bio *parent = bch_rbio_parent(rbio); + struct bch_read_bio *parent = bch2_rbio_parent(rbio); struct bvec_iter iter = rbio->parent_iter; unsigned flags = rbio->flags; u64 inode = rbio->inode; - trace_bcache_read_retry(&rbio->bio); + trace_read_retry(&rbio->bio); if (rbio->split) - bch_rbio_free(rbio); + bch2_rbio_free(rbio); else rbio->bio.bi_end_io = rbio->orig_bi_end_io; - bch_read_iter(c, parent, iter, inode, flags); + bch2_read_iter(c, parent, iter, inode, flags); } -void bch_read_retry_work(struct work_struct *work) +void bch2_read_retry_work(struct work_struct *work) { struct bch_fs *c = container_of(work, struct bch_fs, read_retry_work); @@ -1430,6 +1382,6 @@ void bch_read_retry_work(struct work_struct *work) break; rbio = container_of(bio, struct bch_read_bio, bio); - bch_read_retry(c, rbio); + bch2_read_retry(c, rbio); } } diff --git a/libbcachefs/io.h b/libbcachefs/io.h new file mode 100644 index 00000000..253316a4 --- /dev/null +++ b/libbcachefs/io.h @@ -0,0 +1,87 @@ +#ifndef _BCACHE_IO_H +#define _BCACHE_IO_H + +#include "io_types.h" + +#define to_wbio(_bio) \ + container_of((_bio), struct bch_write_bio, bio) + +#define to_rbio(_bio) \ + container_of((_bio), struct bch_read_bio, bio) + +void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *); +void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t); + +enum bch_write_flags { + BCH_WRITE_ALLOC_NOWAIT = (1 << 0), + BCH_WRITE_DISCARD = (1 << 1), + BCH_WRITE_CACHED = (1 << 2), + BCH_WRITE_FLUSH = (1 << 3), + BCH_WRITE_DISCARD_ON_ERROR = (1 << 4), + BCH_WRITE_DATA_COMPRESSED = (1 << 5), + + /* Internal: */ + BCH_WRITE_JOURNAL_SEQ_PTR = (1 << 6), + BCH_WRITE_DONE = (1 << 7), + BCH_WRITE_LOOPED = (1 << 8), +}; + +static inline u64 *op_journal_seq(struct bch_write_op *op) +{ + return (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR) + ? 
op->journal_seq_p : &op->journal_seq; +} + +static inline struct write_point *foreground_write_point(struct bch_fs *c, + unsigned long v) +{ + return c->write_points + + hash_long(v, ilog2(ARRAY_SIZE(c->write_points))); +} + +void bch2_write_op_init(struct bch_write_op *, struct bch_fs *, + struct bch_write_bio *, + struct disk_reservation, struct write_point *, + struct bpos, u64 *, unsigned); +void bch2_write(struct closure *); + +struct cache_promote_op; + +struct extent_pick_ptr; + +void bch2_read_extent_iter(struct bch_fs *, struct bch_read_bio *, + struct bvec_iter, struct bkey_s_c k, + struct extent_pick_ptr *, unsigned); + +static inline void bch2_read_extent(struct bch_fs *c, + struct bch_read_bio *orig, + struct bkey_s_c k, + struct extent_pick_ptr *pick, + unsigned flags) +{ + bch2_read_extent_iter(c, orig, orig->bio.bi_iter, + k, pick, flags); +} + +enum bch_read_flags { + BCH_READ_FORCE_BOUNCE = 1 << 0, + BCH_READ_RETRY_IF_STALE = 1 << 1, + BCH_READ_PROMOTE = 1 << 2, + BCH_READ_IS_LAST = 1 << 3, + BCH_READ_MAY_REUSE_BIO = 1 << 4, + BCH_READ_USER_MAPPED = 1 << 5, +}; + +void bch2_read(struct bch_fs *, struct bch_read_bio *, u64); + +void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *, + const struct bkey_i *); + +int bch2_discard(struct bch_fs *, struct bpos, struct bpos, + struct bversion, struct disk_reservation *, + struct extent_insert_hook *, u64 *); + +void bch2_read_retry_work(struct work_struct *); +void bch2_wake_delayed_writes(unsigned long data); + +#endif /* _BCACHE_IO_H */ diff --git a/libbcache/io_types.h b/libbcachefs/io_types.h index ca1b0192..07ea67c6 100644 --- a/libbcache/io_types.h +++ b/libbcachefs/io_types.h @@ -57,7 +57,7 @@ struct bch_read_bio { }; static inline struct bch_read_bio * -bch_rbio_parent(struct bch_read_bio *rbio) +bch2_rbio_parent(struct bch_read_bio *rbio) { return rbio->split ? rbio->parent : rbio; } diff --git a/libbcache/journal.c b/libbcachefs/journal.c index 585d1205..60c5c9b0 100644 --- a/libbcache/journal.c +++ b/libbcachefs/journal.c @@ -1,10 +1,10 @@ /* - * bcache journalling code, for btree insertions + * bcachefs journalling code, for btree insertions * * Copyright 2012 Google, Inc. 
*/ -#include "bcache.h" +#include "bcachefs.h" #include "alloc.h" #include "bkey_methods.h" #include "buckets.h" @@ -21,7 +21,7 @@ #include "super-io.h" #include "vstructs.h" -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> static void journal_write(struct closure *); static void journal_reclaim_fast(struct journal *); @@ -75,19 +75,19 @@ static inline struct jset_entry *__jset_entry_type_next(struct jset *jset, for_each_jset_entry_type(entry, jset, JOURNAL_ENTRY_BTREE_KEYS) \ vstruct_for_each_safe(entry, k, _n) -static inline void bch_journal_add_entry(struct journal_buf *buf, +static inline void bch2_journal_add_entry(struct journal_buf *buf, const void *data, size_t u64s, unsigned type, enum btree_id id, unsigned level) { struct jset *jset = buf->data; - bch_journal_add_entry_at(buf, data, u64s, type, id, level, + bch2_journal_add_entry_at(buf, data, u64s, type, id, level, le32_to_cpu(jset->u64s)); le32_add_cpu(&jset->u64s, jset_u64s(u64s)); } -static struct jset_entry *bch_journal_find_entry(struct jset *j, unsigned type, +static struct jset_entry *bch2_journal_find_entry(struct jset *j, unsigned type, enum btree_id id) { struct jset_entry *entry; @@ -99,12 +99,12 @@ static struct jset_entry *bch_journal_find_entry(struct jset *j, unsigned type, return NULL; } -struct bkey_i *bch_journal_find_btree_root(struct bch_fs *c, struct jset *j, +struct bkey_i *bch2_journal_find_btree_root(struct bch_fs *c, struct jset *j, enum btree_id id, unsigned *level) { struct bkey_i *k; struct jset_entry *entry = - bch_journal_find_entry(j, JOURNAL_ENTRY_BTREE_ROOT, id); + bch2_journal_find_entry(j, JOURNAL_ENTRY_BTREE_ROOT, id); if (!entry) return NULL; @@ -115,15 +115,15 @@ struct bkey_i *bch_journal_find_btree_root(struct bch_fs *c, struct jset *j, return k; } -static void bch_journal_add_btree_root(struct journal_buf *buf, +static void bch2_journal_add_btree_root(struct journal_buf *buf, enum btree_id id, struct bkey_i *k, unsigned level) { - bch_journal_add_entry(buf, k, k->k.u64s, + bch2_journal_add_entry(buf, k, k->k.u64s, JOURNAL_ENTRY_BTREE_ROOT, id, level); } -static inline void bch_journal_add_prios(struct journal *j, +static inline void bch2_journal_add_prios(struct journal *j, struct journal_buf *buf) { /* @@ -133,7 +133,7 @@ static inline void bch_journal_add_prios(struct journal *j, if (!buf->nr_prio_buckets) return; - bch_journal_add_entry(buf, j->prio_buckets, buf->nr_prio_buckets, + bch2_journal_add_entry(buf, j->prio_buckets, buf->nr_prio_buckets, JOURNAL_ENTRY_PRIO_PTRS, 0, 0); } @@ -163,18 +163,18 @@ static void journal_seq_blacklist_flush(struct journal *j, n = bl->entries[i]; mutex_unlock(&j->blacklist_lock); - bch_btree_iter_init(&iter, c, n.btree_id, n.pos); + bch2_btree_iter_init(&iter, c, n.btree_id, n.pos); iter.is_extents = false; redo_peek: - b = bch_btree_iter_peek_node(&iter); + b = bch2_btree_iter_peek_node(&iter); /* The node might have already been rewritten: */ if (b->data->keys.seq == n.seq && !bkey_cmp(b->key.k.p, n.pos)) { - ret = bch_btree_node_rewrite(&iter, b, &cl); + ret = bch2_btree_node_rewrite(&iter, b, &cl); if (ret) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); closure_sync(&cl); if (ret == -EAGAIN || @@ -187,7 +187,7 @@ redo_peek: } } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); } closure_sync(&cl); @@ -226,7 +226,7 @@ redo_wait: mutex_lock(&j->blacklist_lock); - bch_journal_pin_drop(j, &bl->pin); + bch2_journal_pin_drop(j, &bl->pin); list_del(&bl->list); kfree(bl->entries); kfree(bl); @@ -249,7 
+249,7 @@ journal_seq_blacklist_find(struct journal *j, u64 seq) } static struct journal_seq_blacklist * -bch_journal_seq_blacklisted_new(struct journal *j, u64 seq) +bch2_journal_seq_blacklisted_new(struct journal *j, u64 seq) { struct journal_seq_blacklist *bl; @@ -270,7 +270,7 @@ bch_journal_seq_blacklisted_new(struct journal *j, u64 seq) * as blacklisted so that on future restarts the corresponding data will still * be ignored: */ -int bch_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b) +int bch2_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b) { struct journal *j = &c->journal; struct journal_seq_blacklist *bl = NULL; @@ -301,7 +301,7 @@ int bch_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b) * Decrease this back to j->seq + 2 when we next rev the on disk format: * increasing it temporarily to work around bug in old kernels */ - bch_fs_inconsistent_on(seq > journal_seq + 4, c, + bch2_fs_inconsistent_on(seq > journal_seq + 4, c, "bset journal seq too far in the future: %llu > %llu", seq, journal_seq); @@ -309,14 +309,14 @@ int bch_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b) b->btree_id, b->key.k.p.inode, b->key.k.p.offset, seq); /* - * When we start the journal, bch_journal_start() will skip over @seq: + * When we start the journal, bch2_journal_start() will skip over @seq: */ mutex_lock(&j->blacklist_lock); for (i = journal_seq + 1; i <= seq; i++) { bl = journal_seq_blacklist_find(j, i) ?: - bch_journal_seq_blacklisted_new(j, i); + bch2_journal_seq_blacklisted_new(j, i); if (!bl) { ret = -ENOMEM; @@ -357,7 +357,7 @@ out: /* * Journal replay/recovery: * - * This code is all driven from bch_fs_start(); we first read the journal + * This code is all driven from bch2_fs_start(); we first read the journal * entries, do some other stuff, then we mark all the keys in the journal * entries (same as garbage collection would), then we replay them - reinserting * them into the cache in precisely the same order as they appear in the @@ -505,11 +505,11 @@ static int journal_validate_key(struct bch_fs *c, struct jset *j, } if (JSET_BIG_ENDIAN(j) != CPU_BIG_ENDIAN) - bch_bkey_swab(key_type, NULL, bkey_to_packed(k)); + bch2_bkey_swab(key_type, NULL, bkey_to_packed(k)); - invalid = bkey_invalid(c, key_type, bkey_i_to_s_c(k)); + invalid = bch2_bkey_invalid(c, key_type, bkey_i_to_s_c(k)); if (invalid) { - bch_bkey_val_to_text(c, key_type, buf, sizeof(buf), + bch2_bkey_val_to_text(c, key_type, buf, sizeof(buf), bkey_i_to_s_c(k)); mustfix_fsck_err(c, "invalid %s in journal: %s", type, buf); @@ -555,20 +555,20 @@ static int journal_entry_validate(struct bch_fs *c, if (bytes > sectors_read << 9) return JOURNAL_ENTRY_REREAD; - if (fsck_err_on(!bch_checksum_type_valid(c, JSET_CSUM_TYPE(j)), c, + if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j)), c, "journal entry with unknown csum type %llu sector %lluu", JSET_CSUM_TYPE(j), sector)) return JOURNAL_ENTRY_BAD; csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j); - if (mustfix_fsck_err_on(bch_crc_cmp(csum, j->csum), c, + if (mustfix_fsck_err_on(bch2_crc_cmp(csum, j->csum), c, "journal checksum bad, sector %llu", sector)) { /* XXX: retry IO, when we start retrying checksum errors */ /* XXX: note we might have missing journal entries */ return JOURNAL_ENTRY_BAD; } - bch_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j), + bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j), j->encrypted_start, vstruct_end(j) - (void *) j->encrypted_start); @@ -686,14 
+686,14 @@ reread: sectors_read = min_t(unsigned, bio->bi_iter.bi_sector = offset; bio->bi_iter.bi_size = sectors_read << 9; bio_set_op_attrs(bio, REQ_OP_READ, 0); - bch_bio_map(bio, buf->data); + bch2_bio_map(bio, buf->data); ret = submit_bio_wait(bio); - if (bch_dev_fatal_io_err_on(ret, ca, + if (bch2_dev_fatal_io_err_on(ret, ca, "journal read from sector %llu", offset) || - bch_meta_read_fault("journal")) + bch2_meta_read_fault("journal")) return -EIO; j = buf->data; @@ -761,7 +761,7 @@ next_block: return 0; } -static void bch_journal_read_device(struct closure *cl) +static void bch2_journal_read_device(struct closure *cl) { #define read_bucket(b) \ ({ \ @@ -907,7 +907,7 @@ err: #undef read_bucket } -void bch_journal_entries_free(struct list_head *list) +void bch2_journal_entries_free(struct list_head *list) { while (!list_empty(list)) { @@ -933,7 +933,7 @@ static int journal_seq_blacklist_read(struct journal *j, bch_verbose(c, "blacklisting existing journal seq %llu", seq); - bl = bch_journal_seq_blacklisted_new(j, seq); + bl = bch2_journal_seq_blacklisted_new(j, seq); if (!bl) return -ENOMEM; @@ -958,7 +958,7 @@ static inline bool journal_has_keys(struct list_head *list) return false; } -int bch_journal_read(struct bch_fs *c, struct list_head *list) +int bch2_journal_read(struct bch_fs *c, struct list_head *list) { struct jset_entry *prio_ptrs; struct journal_list jlist; @@ -978,7 +978,7 @@ int bch_journal_read(struct bch_fs *c, struct list_head *list) for_each_readable_member(ca, c, iter) { percpu_ref_get(&ca->io_ref); closure_call(&ca->journal.read, - bch_journal_read_device, + bch2_journal_read_device, system_unbound_wq, &jlist.cl); } @@ -1066,7 +1066,7 @@ int bch_journal_read(struct bch_fs *c, struct list_head *list) cur_seq = le64_to_cpu(i->j.seq) + 1; } - prio_ptrs = bch_journal_find_entry(j, JOURNAL_ENTRY_PRIO_PTRS, 0); + prio_ptrs = bch2_journal_find_entry(j, JOURNAL_ENTRY_PRIO_PTRS, 0); if (prio_ptrs) { memcpy_u64s(c->journal.prio_buckets, prio_ptrs->_data, @@ -1077,7 +1077,7 @@ fsck_err: return ret; } -void bch_journal_mark(struct bch_fs *c, struct list_head *list) +void bch2_journal_mark(struct bch_fs *c, struct list_head *list) { struct bkey_i *k, *n; struct jset_entry *j; @@ -1089,7 +1089,7 @@ void bch_journal_mark(struct bch_fs *c, struct list_head *list) struct bkey_s_c k_s_c = bkey_i_to_s_c(k); if (btree_type_has_ptrs(type)) - bch_btree_mark_key_initial(c, type, k_s_c); + bch2_btree_mark_key_initial(c, type, k_s_c); } } @@ -1098,13 +1098,13 @@ static bool journal_entry_is_open(struct journal *j) return j->reservations.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL; } -void bch_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set) +void bch2_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set) { struct bch_fs *c = container_of(j, struct bch_fs, journal); if (!need_write_just_set && test_bit(JOURNAL_NEED_WRITE, &j->flags)) - __bch_time_stats_update(j->delay_time, + __bch2_time_stats_update(j->delay_time, j->need_write_time); #if 0 closure_call(&j->io, journal_write, NULL, &c->cl); @@ -1116,7 +1116,7 @@ void bch_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set) #endif } -static void __bch_journal_next_entry(struct journal *j) +static void __bch2_journal_next_entry(struct journal *j) { struct journal_entry_pin_list pin_list, *p; struct journal_buf *buf; @@ -1210,24 +1210,24 @@ static enum { BUG_ON(j->prev_buf_sectors > j->cur_buf_sectors); atomic_dec_bug(&fifo_peek_back(&j->pin).count); - __bch_journal_next_entry(j); + 
__bch2_journal_next_entry(j); cancel_delayed_work(&j->write_work); spin_unlock(&j->lock); if (c->bucket_journal_seq > 1 << 14) { c->bucket_journal_seq = 0; - bch_bucket_seq_cleanup(c); + bch2_bucket_seq_cleanup(c); } /* ugh - might be called from __journal_res_get() under wait_event() */ __set_current_state(TASK_RUNNING); - bch_journal_buf_put(j, old.idx, need_write_just_set); + bch2_journal_buf_put(j, old.idx, need_write_just_set); return JOURNAL_UNLOCKED; } -void bch_journal_halt(struct journal *j) +void bch2_journal_halt(struct journal *j) { union journal_res_state old, new; u64 v = atomic64_read(&j->reservations.counter); @@ -1301,7 +1301,7 @@ static int journal_entry_sectors(struct journal *j) * for the previous entry we have to make sure we have space for * it too: */ - if (bch_extent_has_device(e.c, ca->dev_idx)) { + if (bch2_extent_has_device(e.c, ca->dev_idx)) { if (j->prev_buf_sectors > ca->journal.sectors_free) buckets_required++; @@ -1391,7 +1391,7 @@ static int journal_entry_open(struct journal *j) wake_up(&j->wait); if (j->res_get_blocked_start) { - __bch_time_stats_update(j->blocked_time, + __bch2_time_stats_update(j->blocked_time, j->res_get_blocked_start); j->res_get_blocked_start = 0; } @@ -1404,7 +1404,7 @@ static int journal_entry_open(struct journal *j) return ret; } -void bch_journal_start(struct bch_fs *c) +void bch2_journal_start(struct bch_fs *c) { struct journal *j = &c->journal; struct journal_seq_blacklist *bl; @@ -1433,7 +1433,7 @@ void bch_journal_start(struct bch_fs *c) * closes an open journal entry - the very first journal entry gets * initialized here: */ - __bch_journal_next_entry(j); + __bch2_journal_next_entry(j); /* * Adding entries to the next journal entry before allocating space on @@ -1442,7 +1442,7 @@ void bch_journal_start(struct bch_fs *c) */ list_for_each_entry(bl, &j->seq_blacklist, list) if (!bl->written) { - bch_journal_add_entry(journal_cur_buf(j), &bl->seq, 1, + bch2_journal_add_entry(journal_cur_buf(j), &bl->seq, 1, JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED, 0, 0); @@ -1458,7 +1458,7 @@ void bch_journal_start(struct bch_fs *c) queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0); } -int bch_journal_replay(struct bch_fs *c, struct list_head *list) +int bch2_journal_replay(struct bch_fs *c, struct list_head *list) { int ret = 0, keys = 0, entries = 0; struct journal *j = &c->journal; @@ -1480,15 +1480,13 @@ int bch_journal_replay(struct bch_fs *c, struct list_head *list) * We might cause compressed extents to be split, so we * need to pass in a disk_reservation: */ - BUG_ON(bch_disk_reservation_get(c, &disk_res, 0, 0)); + BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0)); - trace_bcache_journal_replay_key(&k->k); - - ret = bch_btree_insert(c, entry->btree_id, k, + ret = bch2_btree_insert(c, entry->btree_id, k, &disk_res, NULL, NULL, BTREE_INSERT_NOFAIL| BTREE_INSERT_JOURNAL_REPLAY); - bch_disk_reservation_put(c, &disk_res); + bch2_disk_reservation_put(c, &disk_res); if (ret) goto err; @@ -1504,7 +1502,7 @@ int bch_journal_replay(struct bch_fs *c, struct list_head *list) } if (keys) { - bch_btree_flush(c); + bch2_btree_flush(c); /* * Write a new journal entry _before_ we start journalling new data - @@ -1512,7 +1510,7 @@ int bch_journal_replay(struct bch_fs *c, struct list_head *list) * arbitrarily far in the future vs. 
the most recently written journal * entry on disk, if we crash before writing the next journal entry: */ - ret = bch_journal_meta(&c->journal); + ret = bch2_journal_meta(&c->journal); if (ret) goto err; } @@ -1520,12 +1518,12 @@ int bch_journal_replay(struct bch_fs *c, struct list_head *list) bch_info(c, "journal replay done, %i keys in %i entries, seq %llu", keys, entries, (u64) atomic64_read(&j->seq)); - bch_journal_set_replay_done(&c->journal); + bch2_journal_set_replay_done(&c->journal); err: if (ret) bch_err(c, "journal replay error: %d", ret); - bch_journal_entries_free(list); + bch2_journal_entries_free(list); return ret; } @@ -1535,7 +1533,7 @@ err: * Allocate more journal space at runtime - not currently making use if it, but * the code works: */ -static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, +static int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, unsigned nr) { struct journal *j = &c->journal; @@ -1559,7 +1557,7 @@ static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, * reservation to ensure we'll actually be able to allocate: */ - if (bch_disk_reservation_get(c, &disk_res, + if (bch2_disk_reservation_get(c, &disk_res, (nr - ja->nr) << ca->bucket_bits, 0)) return -ENOSPC; @@ -1571,7 +1569,7 @@ static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, if (!new_buckets || !new_bucket_seq) goto err; - journal_buckets = bch_sb_resize_journal(&ca->disk_sb, + journal_buckets = bch2_sb_resize_journal(&ca->disk_sb, nr + sizeof(*journal_buckets) / sizeof(u64)); if (!journal_buckets) goto err; @@ -1584,7 +1582,7 @@ static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, while (ja->nr < nr) { /* must happen under journal lock, to avoid racing with gc: */ - u64 b = bch_bucket_alloc(ca, RESERVE_NONE); + u64 b = bch2_bucket_alloc(ca, RESERVE_NONE); if (!b) { if (!closure_wait(&c->freelist_wait, &cl)) { spin_unlock(&j->lock); @@ -1594,9 +1592,9 @@ static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, continue; } - bch_mark_metadata_bucket(ca, &ca->buckets[b], + bch2_mark_metadata_bucket(ca, &ca->buckets[b], BUCKET_JOURNAL, false); - bch_mark_alloc_bucket(ca, &ca->buckets[b], false); + bch2_mark_alloc_bucket(ca, &ca->buckets[b], false); memmove(ja->buckets + ja->last_idx + 1, ja->buckets + ja->last_idx, @@ -1621,9 +1619,9 @@ static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, } spin_unlock(&j->lock); - BUG_ON(bch_validate_journal_layout(ca->disk_sb.sb, ca->mi)); + BUG_ON(bch2_validate_journal_layout(ca->disk_sb.sb, ca->mi)); - bch_write_super(c); + bch2_write_super(c); ret = 0; err: @@ -1631,20 +1629,20 @@ err: kfree(new_bucket_seq); kfree(new_buckets); - bch_disk_reservation_put(c, &disk_res); + bch2_disk_reservation_put(c, &disk_res); return ret; } #endif -int bch_dev_journal_alloc(struct bch_dev *ca) +int bch2_dev_journal_alloc(struct bch_dev *ca) { struct journal_device *ja = &ca->journal; struct bch_sb_field_journal *journal_buckets; unsigned i, nr; u64 b, *p; - if (dynamic_fault("bcache:add:journal_alloc")) + if (dynamic_fault("bcachefs:add:journal_alloc")) return -ENOMEM; /* @@ -1670,7 +1668,7 @@ int bch_dev_journal_alloc(struct bch_dev *ca) ja->buckets = p; - journal_buckets = bch_sb_resize_journal(&ca->disk_sb, + journal_buckets = bch2_sb_resize_journal(&ca->disk_sb, nr + sizeof(*journal_buckets) / sizeof(u64)); if (!journal_buckets) return -ENOMEM; @@ -1680,7 +1678,7 @@ int bch_dev_journal_alloc(struct bch_dev *ca) if 
(!is_available_bucket(ca->buckets[b].mark)) continue; - bch_mark_metadata_bucket(ca, &ca->buckets[b], + bch2_mark_metadata_bucket(ca, &ca->buckets[b], BUCKET_JOURNAL, true); ja->buckets[i] = b; journal_buckets->buckets[i] = cpu_to_le64(b); @@ -1690,7 +1688,7 @@ int bch_dev_journal_alloc(struct bch_dev *ca) if (i < nr) return -ENOSPC; - BUG_ON(bch_validate_journal_layout(ca->disk_sb.sb, ca->mi)); + BUG_ON(bch2_validate_journal_layout(ca->disk_sb.sb, ca->mi)); ja->nr = nr; @@ -1759,7 +1757,7 @@ static void journal_pin_add_entry(struct journal *j, spin_unlock_irq(&j->pin_lock); } -void bch_journal_pin_add(struct journal *j, +void bch2_journal_pin_add(struct journal *j, struct journal_entry_pin *pin, journal_pin_flush_fn flush_fn) { @@ -1782,7 +1780,7 @@ static inline bool __journal_pin_drop(struct journal *j, return atomic_dec_and_test(&pin_list->count); } -void bch_journal_pin_drop(struct journal *j, +void bch2_journal_pin_drop(struct journal *j, struct journal_entry_pin *pin) { unsigned long flags; @@ -1805,7 +1803,7 @@ void bch_journal_pin_drop(struct journal *j, wake_up(&j->wait); } -void bch_journal_pin_add_if_older(struct journal *j, +void bch2_journal_pin_add_if_older(struct journal *j, struct journal_entry_pin *src_pin, struct journal_entry_pin *pin, journal_pin_flush_fn flush_fn) @@ -1846,7 +1844,7 @@ journal_get_next_pin(struct journal *j, u64 seq_to_flush) ret = list_first_entry_or_null(&pin_list->list, struct journal_entry_pin, list); if (ret) { - /* must be list_del_init(), see bch_journal_pin_drop() */ + /* must be list_del_init(), see bch2_journal_pin_drop() */ list_del_init(&ret->list); break; } @@ -1869,14 +1867,14 @@ static bool journal_has_pins(struct journal *j) return ret; } -void bch_journal_flush_pins(struct journal *j) +void bch2_journal_flush_pins(struct journal *j) { struct journal_entry_pin *pin; while ((pin = journal_get_next_pin(j, U64_MAX))) pin->flush(j, pin); - wait_event(j->wait, !journal_has_pins(j) || bch_journal_error(j)); + wait_event(j->wait, !journal_has_pins(j) || bch2_journal_error(j)); } static bool should_discard_bucket(struct journal *j, struct journal_device *ja) @@ -2034,12 +2032,12 @@ static int journal_write_alloc(struct journal *j, unsigned sectors) if (ca->mi.state != BCH_MEMBER_STATE_RW || ca->journal.sectors_free <= sectors) - __bch_extent_drop_ptr(e, ptr); + __bch2_extent_drop_ptr(e, ptr); else ca->journal.sectors_free -= sectors; } - replicas = bch_extent_nr_ptrs(e.c); + replicas = bch2_extent_nr_ptrs(e.c); spin_lock(&j->devs.lock); @@ -2069,7 +2067,7 @@ static int journal_write_alloc(struct journal *j, unsigned sectors) * Check that we can use this device, and aren't already using * it: */ - if (bch_extent_has_device(e.c, ca->dev_idx) || + if (bch2_extent_has_device(e.c, ca->dev_idx) || !journal_dev_buckets_available(j, ca) || sectors > ca->mi.bucket_size) continue; @@ -2085,8 +2083,6 @@ static int journal_write_alloc(struct journal *j, unsigned sectors) .dev = ca->dev_idx, }); replicas++; - - trace_bcache_journal_next_bucket(ca, ja->cur_idx, ja->last_idx); } spin_unlock(&j->devs.lock); @@ -2149,9 +2145,9 @@ static void journal_write_endio(struct bio *bio) struct bch_dev *ca = bio->bi_private; struct journal *j = &ca->fs->journal; - if (bch_dev_fatal_io_err_on(bio->bi_error, ca, "journal write") || - bch_meta_write_fault("journal")) - bch_journal_halt(j); + if (bch2_dev_fatal_io_err_on(bio->bi_error, ca, "journal write") || + bch2_meta_write_fault("journal")) + bch2_journal_halt(j); closure_put(&j->io); percpu_ref_put(&ca->io_ref); @@ 
-2164,7 +2160,7 @@ static void journal_write_done(struct closure *cl) j->last_seq_ondisk = le64_to_cpu(w->data->last_seq); - __bch_time_stats_update(j->write_time, j->write_start_time); + __bch2_time_stats_update(j->write_time, j->write_start_time); BUG_ON(!j->reservations.prev_buf_unwritten); atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v, @@ -2208,14 +2204,14 @@ static void journal_write(struct closure *cl) j->write_start_time = local_clock(); - bch_journal_add_prios(j, w); + bch2_journal_add_prios(j, w); mutex_lock(&c->btree_root_lock); for (i = 0; i < BTREE_ID_NR; i++) { struct btree_root *r = &c->btree_roots[i]; if (r->alive) - bch_journal_add_btree_root(w, i, &r->key, r->level); + bch2_journal_add_btree_root(w, i, &r->key, r->level); } mutex_unlock(&c->btree_root_lock); @@ -2227,9 +2223,9 @@ static void journal_write(struct closure *cl) jset->version = cpu_to_le32(BCACHE_JSET_VERSION); SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN); - SET_JSET_CSUM_TYPE(jset, bch_meta_checksum_type(c)); + SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c)); - bch_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), + bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset->encrypted_start, vstruct_end(jset) - (void *) jset->encrypted_start); @@ -2243,13 +2239,13 @@ static void journal_write(struct closure *cl) memset((void *) w->data + bytes, 0, (sectors << 9) - bytes); if (journal_write_alloc(j, sectors)) { - bch_journal_halt(j); + bch2_journal_halt(j); bch_err(c, "Unable to allocate journal write"); - bch_fatal_error(c); + bch2_fatal_error(c); closure_return_with_destructor(cl, journal_write_done); } - bch_check_mark_super(c, &j->key, true); + bch2_check_mark_super(c, &j->key, true); /* * XXX: we really should just disable the entire journal in nochanges @@ -2277,17 +2273,17 @@ static void journal_write(struct closure *cl) bio->bi_private = ca; bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA); - bch_bio_map(bio, jset); + bch2_bio_map(bio, jset); - trace_bcache_journal_write(bio); - closure_bio_submit_punt(bio, cl, c); + trace_journal_write(bio); + closure_bio_submit(bio, cl); ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(w->data->seq); } for_each_rw_member(ca, c, i) if (journal_flushes_device(ca) && - !bch_extent_has_device(bkey_i_to_s_c_extent(&j->key), i)) { + !bch2_extent_has_device(bkey_i_to_s_c_extent(&j->key), i)) { percpu_ref_get(&ca->io_ref); bio = ca->journal.bio; @@ -2296,7 +2292,7 @@ static void journal_write(struct closure *cl) bio->bi_end_io = journal_write_endio; bio->bi_private = ca; bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); - closure_bio_submit_punt(bio, cl, c); + closure_bio_submit(bio, cl); } no_io: @@ -2322,7 +2318,7 @@ static void journal_write_work(struct work_struct *work) * hasn't yet been flushed, return the journal sequence number that needs to be * flushed: */ -u64 bch_inode_journal_seq(struct journal *j, u64 inode) +u64 bch2_inode_journal_seq(struct journal *j, u64 inode) { size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8)); u64 seq = 0; @@ -2374,7 +2370,7 @@ retry: case JOURNAL_ENTRY_INUSE: /* haven't finished writing out the previous one: */ spin_unlock(&j->lock); - trace_bcache_journal_entry_full(c); + trace_journal_entry_full(c); goto blocked; case JOURNAL_ENTRY_CLOSED: break; @@ -2399,7 +2395,7 @@ retry: */ journal_reclaim_work(&j->reclaim_work.work); - trace_bcache_journal_full(c); + trace_journal_full(c); blocked: if (!j->res_get_blocked_start) j->res_get_blocked_start = 
local_clock() ?: 1; @@ -2407,16 +2403,16 @@ blocked: } /* - * Essentially the entry function to the journaling code. When bcache is doing + * Essentially the entry function to the journaling code. When bcachefs is doing * a btree insert, it calls this function to get the current journal write. * Journal write is the structure used set up journal writes. The calling - * function will then add its keys to the structure, queuing them for the - * next write. + * function will then add its keys to the structure, queuing them for the next + * write. * * To ensure forward progress, the current task must not be holding any * btree node write locks. */ -int bch_journal_res_get_slowpath(struct journal *j, struct journal_res *res, +int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res, unsigned u64s_min, unsigned u64s_max) { int ret; @@ -2427,13 +2423,13 @@ int bch_journal_res_get_slowpath(struct journal *j, struct journal_res *res, return ret < 0 ? ret : 0; } -void bch_journal_wait_on_seq(struct journal *j, u64 seq, struct closure *parent) +void bch2_journal_wait_on_seq(struct journal *j, u64 seq, struct closure *parent) { spin_lock(&j->lock); BUG_ON(seq > atomic64_read(&j->seq)); - if (bch_journal_error(j)) { + if (bch2_journal_error(j)) { spin_unlock(&j->lock); return; } @@ -2450,20 +2446,20 @@ void bch_journal_wait_on_seq(struct journal *j, u64 seq, struct closure *parent) /* check if raced with write completion (or failure) */ if (!j->reservations.prev_buf_unwritten || - bch_journal_error(j)) + bch2_journal_error(j)) closure_wake_up(&journal_prev_buf(j)->wait); } spin_unlock(&j->lock); } -void bch_journal_flush_seq_async(struct journal *j, u64 seq, struct closure *parent) +void bch2_journal_flush_seq_async(struct journal *j, u64 seq, struct closure *parent) { spin_lock(&j->lock); BUG_ON(seq > atomic64_read(&j->seq)); - if (bch_journal_error(j)) { + if (bch2_journal_error(j)) { spin_unlock(&j->lock); return; } @@ -2506,41 +2502,41 @@ void bch_journal_flush_seq_async(struct journal *j, u64 seq, struct closure *par /* check if raced with write completion (or failure) */ if (!j->reservations.prev_buf_unwritten || - bch_journal_error(j)) + bch2_journal_error(j)) closure_wake_up(&journal_prev_buf(j)->wait); } spin_unlock(&j->lock); } -int bch_journal_flush_seq(struct journal *j, u64 seq) +int bch2_journal_flush_seq(struct journal *j, u64 seq) { struct closure cl; u64 start_time = local_clock(); closure_init_stack(&cl); - bch_journal_flush_seq_async(j, seq, &cl); + bch2_journal_flush_seq_async(j, seq, &cl); closure_sync(&cl); - bch_time_stats_update(j->flush_seq_time, start_time); + bch2_time_stats_update(j->flush_seq_time, start_time); - return bch_journal_error(j); + return bch2_journal_error(j); } -void bch_journal_meta_async(struct journal *j, struct closure *parent) +void bch2_journal_meta_async(struct journal *j, struct closure *parent) { struct journal_res res; unsigned u64s = jset_u64s(0); memset(&res, 0, sizeof(res)); - bch_journal_res_get(j, &res, u64s, u64s); - bch_journal_res_put(j, &res); + bch2_journal_res_get(j, &res, u64s, u64s); + bch2_journal_res_put(j, &res); - bch_journal_flush_seq_async(j, res.seq, parent); + bch2_journal_flush_seq_async(j, res.seq, parent); } -int bch_journal_meta(struct journal *j) +int bch2_journal_meta(struct journal *j) { struct journal_res res; unsigned u64s = jset_u64s(0); @@ -2548,16 +2544,16 @@ int bch_journal_meta(struct journal *j) memset(&res, 0, sizeof(res)); - ret = bch_journal_res_get(j, &res, u64s, u64s); + ret = 
bch2_journal_res_get(j, &res, u64s, u64s); if (ret) return ret; - bch_journal_res_put(j, &res); + bch2_journal_res_put(j, &res); - return bch_journal_flush_seq(j, res.seq); + return bch2_journal_flush_seq(j, res.seq); } -void bch_journal_flush_async(struct journal *j, struct closure *parent) +void bch2_journal_flush_async(struct journal *j, struct closure *parent) { u64 seq, journal_seq; @@ -2574,10 +2570,10 @@ void bch_journal_flush_async(struct journal *j, struct closure *parent) } spin_unlock(&j->lock); - bch_journal_flush_seq_async(j, seq, parent); + bch2_journal_flush_seq_async(j, seq, parent); } -int bch_journal_flush(struct journal *j) +int bch2_journal_flush(struct journal *j) { u64 seq, journal_seq; @@ -2594,10 +2590,10 @@ int bch_journal_flush(struct journal *j) } spin_unlock(&j->lock); - return bch_journal_flush_seq(j, seq); + return bch2_journal_flush_seq(j, seq); } -ssize_t bch_journal_print_debug(struct journal *j, char *buf) +ssize_t bch2_journal_print_debug(struct journal *j, char *buf) { union journal_res_state *s = &j->reservations; struct bch_dev *ca; @@ -2652,13 +2648,13 @@ ssize_t bch_journal_print_debug(struct journal *j, char *buf) return ret; } -static bool bch_journal_writing_to_device(struct bch_dev *ca) +static bool bch2_journal_writing_to_device(struct bch_dev *ca) { struct journal *j = &ca->fs->journal; bool ret; spin_lock(&j->lock); - ret = bch_extent_has_device(bkey_i_to_s_c_extent(&j->key), + ret = bch2_extent_has_device(bkey_i_to_s_c_extent(&j->key), ca->dev_idx); spin_unlock(&j->lock); @@ -2677,7 +2673,7 @@ static bool bch_journal_writing_to_device(struct bch_dev *ca) * writeable and pick a new set of devices to write to. */ -int bch_journal_move(struct bch_dev *ca) +int bch2_journal_move(struct bch_dev *ca) { u64 last_flushed_seq; struct journal_device *ja = &ca->journal; @@ -2686,7 +2682,7 @@ int bch_journal_move(struct bch_dev *ca) unsigned i; int ret = 0; /* Success */ - if (bch_journal_writing_to_device(ca)) { + if (bch2_journal_writing_to_device(ca)) { /* * bch_journal_meta will write a record and we'll wait * for the write to complete. @@ -2694,8 +2690,8 @@ int bch_journal_move(struct bch_dev *ca) * will call journal_next_bucket which notices that the * device is no longer writeable, and picks a new one. */ - bch_journal_meta(j); - BUG_ON(bch_journal_writing_to_device(ca)); + bch2_journal_meta(j); + BUG_ON(bch2_journal_writing_to_device(ca)); } /* @@ -2707,14 +2703,14 @@ int bch_journal_move(struct bch_dev *ca) /* * XXX: switch to normal journal reclaim machinery */ - bch_btree_flush(c); + bch2_btree_flush(c); /* * Force a meta-data journal entry to be written so that * we have newer journal entries in devices other than ca, * and wait for the meta data write to complete. 
*/ - bch_journal_meta(j); + bch2_journal_meta(j); /* * Verify that we no longer need any of the journal entries in @@ -2730,7 +2726,7 @@ int bch_journal_move(struct bch_dev *ca) return ret; } -void bch_fs_journal_stop(struct journal *j) +void bch2_fs_journal_stop(struct journal *j) { if (!test_bit(JOURNAL_STARTED, &j->flags)) return; @@ -2740,15 +2736,15 @@ void bch_fs_journal_stop(struct journal *j) * journal entries, then force a brand new empty journal entry to be * written: */ - bch_journal_flush_pins(j); - bch_journal_flush_async(j, NULL); - bch_journal_meta(j); + bch2_journal_flush_pins(j); + bch2_journal_flush_async(j, NULL); + bch2_journal_meta(j); cancel_delayed_work_sync(&j->write_work); cancel_delayed_work_sync(&j->reclaim_work); } -void bch_dev_journal_exit(struct bch_dev *ca) +void bch2_dev_journal_exit(struct bch_dev *ca) { kfree(ca->journal.bio); kfree(ca->journal.buckets); @@ -2759,18 +2755,18 @@ void bch_dev_journal_exit(struct bch_dev *ca) ca->journal.bucket_seq = NULL; } -int bch_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb) +int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb) { struct journal_device *ja = &ca->journal; struct bch_sb_field_journal *journal_buckets = - bch_sb_get_journal(sb); + bch2_sb_get_journal(sb); unsigned i, journal_entry_pages; journal_entry_pages = DIV_ROUND_UP(1U << BCH_SB_JOURNAL_ENTRY_SIZE(sb), PAGE_SECTORS); - ja->nr = bch_nr_journal_buckets(journal_buckets); + ja->nr = bch2_nr_journal_buckets(journal_buckets); ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL); if (!ja->bucket_seq) @@ -2790,7 +2786,7 @@ int bch_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb) return 0; } -void bch_fs_journal_exit(struct journal *j) +void bch2_fs_journal_exit(struct journal *j) { unsigned order = get_order(j->entry_size_max); @@ -2799,7 +2795,7 @@ void bch_fs_journal_exit(struct journal *j) free_fifo(&j->pin); } -int bch_fs_journal_init(struct journal *j, unsigned entry_size_max) +int bch2_fs_journal_init(struct journal *j, unsigned entry_size_max) { static struct lock_class_key res_key; unsigned order = get_order(entry_size_max); diff --git a/libbcache/journal.h b/libbcachefs/journal.h index c83f8104..f5fc465a 100644 --- a/libbcache/journal.h +++ b/libbcachefs/journal.h @@ -27,7 +27,7 @@ * possible, if the write for the previous journal entry was still in flight). * * Synchronous updates are specified by passing a closure (@flush_cl) to - * bch_btree_insert() or bch_btree_insert_node(), which then pass that parameter + * bch2_btree_insert() or bch_btree_insert_node(), which then pass that parameter * down to the journalling code. That closure will will wait on the journal * write to complete (via closure_wait()). 
* @@ -128,25 +128,25 @@ static inline bool journal_pin_active(struct journal_entry_pin *pin) return pin->pin_list != NULL; } -void bch_journal_pin_add(struct journal *, struct journal_entry_pin *, +void bch2_journal_pin_add(struct journal *, struct journal_entry_pin *, journal_pin_flush_fn); -void bch_journal_pin_drop(struct journal *, struct journal_entry_pin *); -void bch_journal_pin_add_if_older(struct journal *, +void bch2_journal_pin_drop(struct journal *, struct journal_entry_pin *); +void bch2_journal_pin_add_if_older(struct journal *, struct journal_entry_pin *, struct journal_entry_pin *, journal_pin_flush_fn); -void bch_journal_flush_pins(struct journal *); +void bch2_journal_flush_pins(struct journal *); struct closure; struct bch_fs; struct keylist; -struct bkey_i *bch_journal_find_btree_root(struct bch_fs *, struct jset *, +struct bkey_i *bch2_journal_find_btree_root(struct bch_fs *, struct jset *, enum btree_id, unsigned *); -int bch_journal_seq_should_ignore(struct bch_fs *, u64, struct btree *); +int bch2_journal_seq_should_ignore(struct bch_fs *, u64, struct btree *); -u64 bch_inode_journal_seq(struct journal *, u64); +u64 bch2_inode_journal_seq(struct journal *, u64); static inline int journal_state_count(union journal_res_state s, int idx) { @@ -159,7 +159,7 @@ static inline void journal_state_inc(union journal_res_state *s) s->buf1_count += s->idx == 1; } -static inline void bch_journal_set_has_inode(struct journal_buf *buf, u64 inum) +static inline void bch2_journal_set_has_inode(struct journal_buf *buf, u64 inum) { set_bit(hash_64(inum, ilog2(sizeof(buf->has_inode) * 8)), buf->has_inode); } @@ -173,7 +173,7 @@ static inline unsigned jset_u64s(unsigned u64s) return u64s + sizeof(struct jset_entry) / sizeof(u64); } -static inline void bch_journal_add_entry_at(struct journal_buf *buf, +static inline void bch2_journal_add_entry_at(struct journal_buf *buf, const void *data, size_t u64s, unsigned type, enum btree_id id, unsigned level, unsigned offset) @@ -189,7 +189,7 @@ static inline void bch_journal_add_entry_at(struct journal_buf *buf, memcpy_u64s(entry->_data, data, u64s); } -static inline void bch_journal_add_keys(struct journal *j, struct journal_res *res, +static inline void bch2_journal_add_keys(struct journal *j, struct journal_res *res, enum btree_id id, const struct bkey_i *k) { struct journal_buf *buf = &j->buf[res->idx]; @@ -198,9 +198,9 @@ static inline void bch_journal_add_keys(struct journal *j, struct journal_res *r EBUG_ON(!res->ref); BUG_ON(actual > res->u64s); - bch_journal_set_has_inode(buf, k->k.p.inode); + bch2_journal_set_has_inode(buf, k->k.p.inode); - bch_journal_add_entry_at(buf, k, k->k.u64s, + bch2_journal_add_entry_at(buf, k, k->k.u64s, JOURNAL_ENTRY_BTREE_KEYS, id, 0, res->offset); @@ -208,9 +208,9 @@ static inline void bch_journal_add_keys(struct journal *j, struct journal_res *r res->u64s -= actual; } -void bch_journal_buf_put_slowpath(struct journal *, bool); +void bch2_journal_buf_put_slowpath(struct journal *, bool); -static inline void bch_journal_buf_put(struct journal *j, unsigned idx, +static inline void bch2_journal_buf_put(struct journal *j, unsigned idx, bool need_write_just_set) { union journal_res_state s; @@ -229,14 +229,14 @@ static inline void bch_journal_buf_put(struct journal *j, unsigned idx, if (s.idx != idx && !journal_state_count(s, idx) && s.cur_entry_offset != JOURNAL_ENTRY_ERROR_VAL) - bch_journal_buf_put_slowpath(j, need_write_just_set); + bch2_journal_buf_put_slowpath(j, need_write_just_set); } /* * This function 
releases the journal write structure so other threads can * then proceed to add their keys as well. */ -static inline void bch_journal_res_put(struct journal *j, +static inline void bch2_journal_res_put(struct journal *j, struct journal_res *res) { if (!res->ref) @@ -245,19 +245,19 @@ static inline void bch_journal_res_put(struct journal *j, lock_release(&j->res_map, 0, _RET_IP_); while (res->u64s) { - bch_journal_add_entry_at(&j->buf[res->idx], NULL, 0, + bch2_journal_add_entry_at(&j->buf[res->idx], NULL, 0, JOURNAL_ENTRY_BTREE_KEYS, 0, 0, res->offset); res->offset += jset_u64s(0); res->u64s -= jset_u64s(0); } - bch_journal_buf_put(j, res->idx, false); + bch2_journal_buf_put(j, res->idx, false); res->ref = 0; } -int bch_journal_res_get_slowpath(struct journal *, struct journal_res *, +int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *, unsigned, unsigned); static inline int journal_res_get_fast(struct journal *j, @@ -293,7 +293,7 @@ static inline int journal_res_get_fast(struct journal *j, return 1; } -static inline int bch_journal_res_get(struct journal *j, struct journal_res *res, +static inline int bch2_journal_res_get(struct journal *j, struct journal_res *res, unsigned u64s_min, unsigned u64s_max) { int ret; @@ -304,7 +304,7 @@ static inline int bch_journal_res_get(struct journal *j, struct journal_res *res if (journal_res_get_fast(j, res, u64s_min, u64s_max)) goto out; - ret = bch_journal_res_get_slowpath(j, res, u64s_min, u64s_max); + ret = bch2_journal_res_get_slowpath(j, res, u64s_min, u64s_max); if (ret) return ret; out: @@ -313,18 +313,18 @@ out: return 0; } -void bch_journal_wait_on_seq(struct journal *, u64, struct closure *); -void bch_journal_flush_seq_async(struct journal *, u64, struct closure *); -void bch_journal_flush_async(struct journal *, struct closure *); -void bch_journal_meta_async(struct journal *, struct closure *); +void bch2_journal_wait_on_seq(struct journal *, u64, struct closure *); +void bch2_journal_flush_seq_async(struct journal *, u64, struct closure *); +void bch2_journal_flush_async(struct journal *, struct closure *); +void bch2_journal_meta_async(struct journal *, struct closure *); -int bch_journal_flush_seq(struct journal *, u64); -int bch_journal_flush(struct journal *); -int bch_journal_meta(struct journal *); +int bch2_journal_flush_seq(struct journal *, u64); +int bch2_journal_flush(struct journal *); +int bch2_journal_meta(struct journal *); -void bch_journal_halt(struct journal *); +void bch2_journal_halt(struct journal *); -static inline int bch_journal_error(struct journal *j) +static inline int bch2_journal_error(struct journal *j) { return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ? 
-EIO : 0; @@ -335,13 +335,13 @@ static inline bool journal_flushes_device(struct bch_dev *ca) return true; } -void bch_journal_start(struct bch_fs *); -void bch_journal_mark(struct bch_fs *, struct list_head *); -void bch_journal_entries_free(struct list_head *); -int bch_journal_read(struct bch_fs *, struct list_head *); -int bch_journal_replay(struct bch_fs *, struct list_head *); +void bch2_journal_start(struct bch_fs *); +void bch2_journal_mark(struct bch_fs *, struct list_head *); +void bch2_journal_entries_free(struct list_head *); +int bch2_journal_read(struct bch_fs *, struct list_head *); +int bch2_journal_replay(struct bch_fs *, struct list_head *); -static inline void bch_journal_set_replay_done(struct journal *j) +static inline void bch2_journal_set_replay_done(struct journal *j) { spin_lock(&j->lock); BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags)); @@ -351,23 +351,23 @@ static inline void bch_journal_set_replay_done(struct journal *j) spin_unlock(&j->lock); } -ssize_t bch_journal_print_debug(struct journal *, char *); +ssize_t bch2_journal_print_debug(struct journal *, char *); -int bch_dev_journal_alloc(struct bch_dev *); +int bch2_dev_journal_alloc(struct bch_dev *); -static inline unsigned bch_nr_journal_buckets(struct bch_sb_field_journal *j) +static inline unsigned bch2_nr_journal_buckets(struct bch_sb_field_journal *j) { return j ? (__le64 *) vstruct_end(&j->field) - j->buckets : 0; } -int bch_journal_move(struct bch_dev *); +int bch2_journal_move(struct bch_dev *); -void bch_fs_journal_stop(struct journal *); -void bch_dev_journal_exit(struct bch_dev *); -int bch_dev_journal_init(struct bch_dev *, struct bch_sb *); -void bch_fs_journal_exit(struct journal *); -int bch_fs_journal_init(struct journal *, unsigned); +void bch2_fs_journal_stop(struct journal *); +void bch2_dev_journal_exit(struct bch_dev *); +int bch2_dev_journal_init(struct bch_dev *, struct bch_sb *); +void bch2_fs_journal_exit(struct journal *); +int bch2_fs_journal_init(struct journal *, unsigned); #endif /* _BCACHE_JOURNAL_H */ diff --git a/libbcache/journal_types.h b/libbcachefs/journal_types.h index ebc340ad..ebc340ad 100644 --- a/libbcache/journal_types.h +++ b/libbcachefs/journal_types.h diff --git a/libbcache/keylist.c b/libbcachefs/keylist.c index adf5eeba..51dd7edc 100644 --- a/libbcache/keylist.c +++ b/libbcachefs/keylist.c @@ -1,8 +1,8 @@ -#include "bcache.h" +#include "bcachefs.h" #include "keylist.h" -int bch_keylist_realloc(struct keylist *l, u64 *inline_u64s, +int bch2_keylist_realloc(struct keylist *l, u64 *inline_u64s, size_t nr_inline_u64s, size_t new_u64s) { size_t oldsize = bch_keylist_u64s(l); @@ -29,7 +29,7 @@ int bch_keylist_realloc(struct keylist *l, u64 *inline_u64s, return 0; } -void bch_keylist_add_in_order(struct keylist *l, struct bkey_i *insert) +void bch2_keylist_add_in_order(struct keylist *l, struct bkey_i *insert) { struct bkey_i *where; @@ -45,9 +45,9 @@ void bch_keylist_add_in_order(struct keylist *l, struct bkey_i *insert) bkey_copy(where, insert); } -void bch_keylist_pop_front(struct keylist *l) +void bch2_keylist_pop_front(struct keylist *l) { - l->top_p -= bch_keylist_front(l)->k.u64s; + l->top_p -= bch2_keylist_front(l)->k.u64s; memmove_u64s_down(l->keys, bkey_next(l->keys), diff --git a/libbcache/keylist.h b/libbcachefs/keylist.h index 1166f941..66628058 100644 --- a/libbcache/keylist.h +++ b/libbcachefs/keylist.h @@ -3,35 +3,35 @@ #include "keylist_types.h" -int bch_keylist_realloc(struct keylist *, u64 *, size_t, size_t); -void bch_keylist_add_in_order(struct 
keylist *, struct bkey_i *); -void bch_keylist_pop_front(struct keylist *); +int bch2_keylist_realloc(struct keylist *, u64 *, size_t, size_t); +void bch2_keylist_add_in_order(struct keylist *, struct bkey_i *); +void bch2_keylist_pop_front(struct keylist *); -static inline void bch_keylist_init(struct keylist *l, u64 *inline_keys, +static inline void bch2_keylist_init(struct keylist *l, u64 *inline_keys, size_t nr_inline_u64s) { l->top_p = l->keys_p = inline_keys; } -static inline void bch_keylist_free(struct keylist *l, u64 *inline_keys) +static inline void bch2_keylist_free(struct keylist *l, u64 *inline_keys) { if (l->keys_p != inline_keys) kfree(l->keys_p); memset(l, 0, sizeof(*l)); } -static inline void bch_keylist_push(struct keylist *l) +static inline void bch2_keylist_push(struct keylist *l) { l->top = bkey_next(l->top); } -static inline void bch_keylist_add(struct keylist *l, const struct bkey_i *k) +static inline void bch2_keylist_add(struct keylist *l, const struct bkey_i *k) { bkey_copy(l->top, k); - bch_keylist_push(l); + bch2_keylist_push(l); } -static inline bool bch_keylist_empty(struct keylist *l) +static inline bool bch2_keylist_empty(struct keylist *l) { return l->top == l->keys; } @@ -41,12 +41,12 @@ static inline size_t bch_keylist_u64s(struct keylist *l) return l->top_p - l->keys_p; } -static inline size_t bch_keylist_bytes(struct keylist *l) +static inline size_t bch2_keylist_bytes(struct keylist *l) { return bch_keylist_u64s(l) * sizeof(u64); } -static inline struct bkey_i *bch_keylist_front(struct keylist *l) +static inline struct bkey_i *bch2_keylist_front(struct keylist *l) { return l->keys; } diff --git a/libbcache/keylist_types.h b/libbcachefs/keylist_types.h index 195785bf..195785bf 100644 --- a/libbcache/keylist_types.h +++ b/libbcachefs/keylist_types.h diff --git a/libbcache/migrate.c b/libbcachefs/migrate.c index 9ef9685e..f79b624d 100644 --- a/libbcache/migrate.c +++ b/libbcachefs/migrate.c @@ -2,7 +2,7 @@ * Code for moving data off a device. */ -#include "bcache.h" +#include "bcachefs.h" #include "btree_update.h" #include "buckets.h" #include "extents.h" @@ -22,7 +22,7 @@ static int issue_migration_move(struct bch_dev *ca, const struct bch_extent_ptr *ptr; int ret; - if (bch_disk_reservation_get(c, &res, k.k->size, 0)) + if (bch2_disk_reservation_get(c, &res, k.k->size, 0)) return -ENOSPC; extent_for_each_ptr(bkey_s_c_to_extent(k), ptr) @@ -33,9 +33,9 @@ static int issue_migration_move(struct bch_dev *ca, found: /* XXX: we need to be doing something with the disk reservation */ - ret = bch_data_move(c, ctxt, &c->migration_write_point, k, ptr); + ret = bch2_data_move(c, ctxt, &c->migration_write_point, k, ptr); if (ret) - bch_disk_reservation_put(c, &res); + bch2_disk_reservation_put(c, &res); return ret; } @@ -55,7 +55,7 @@ found: * land in the same device even if there are others available. 
*/ -int bch_move_data_off_device(struct bch_dev *ca) +int bch2_move_data_off_device(struct bch_dev *ca) { struct moving_context ctxt; struct bch_fs *c = ca->fs; @@ -69,7 +69,7 @@ int bch_move_data_off_device(struct bch_dev *ca) if (!ca->mi.has_data) return 0; - bch_move_ctxt_init(&ctxt, NULL, SECTORS_IN_FLIGHT_PER_DEVICE); + bch2_move_ctxt_init(&ctxt, NULL, SECTORS_IN_FLIGHT_PER_DEVICE); ctxt.avoid = ca; /* @@ -97,25 +97,25 @@ int bch_move_data_off_device(struct bch_dev *ca) atomic_set(&ctxt.error_count, 0); atomic_set(&ctxt.error_flags, 0); - bch_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN); + bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN); - while (!bch_move_ctxt_wait(&ctxt) && - (k = bch_btree_iter_peek(&iter)).k && + while (!bch2_move_ctxt_wait(&ctxt) && + (k = bch2_btree_iter_peek(&iter)).k && !(ret = btree_iter_err(k))) { if (!bkey_extent_is_data(k.k) || - !bch_extent_has_device(bkey_s_c_to_extent(k), + !bch2_extent_has_device(bkey_s_c_to_extent(k), ca->dev_idx)) goto next; ret = issue_migration_move(ca, &ctxt, k); if (ret == -ENOMEM) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); /* * memory allocation failure, wait for some IO * to finish */ - bch_move_ctxt_wait_for_io(&ctxt); + bch2_move_ctxt_wait_for_io(&ctxt); continue; } if (ret == -ENOSPC) @@ -124,12 +124,12 @@ int bch_move_data_off_device(struct bch_dev *ca) seen_key_count++; next: - bch_btree_iter_advance_pos(&iter); - bch_btree_iter_cond_resched(&iter); + bch2_btree_iter_advance_pos(&iter); + bch2_btree_iter_cond_resched(&iter); } - bch_btree_iter_unlock(&iter); - bch_move_ctxt_exit(&ctxt); + bch2_btree_iter_unlock(&iter); + bch2_move_ctxt_exit(&ctxt); if (ret) return ret; @@ -142,10 +142,10 @@ next: } mutex_lock(&c->sb_lock); - mi = bch_sb_get_members(c->disk_sb); + mi = bch2_sb_get_members(c->disk_sb); SET_BCH_MEMBER_HAS_DATA(&mi->members[ca->dev_idx], false); - bch_write_super(c); + bch2_write_super(c); mutex_unlock(&c->sb_lock); return 0; @@ -155,7 +155,7 @@ next: * This walks the btree, and for any node on the relevant device it moves the * node elsewhere. */ -static int bch_move_btree_off(struct bch_dev *ca, enum btree_id id) +static int bch2_move_btree_off(struct bch_dev *ca, enum btree_id id) { struct bch_fs *c = ca->fs; struct btree_iter iter; @@ -170,39 +170,39 @@ static int bch_move_btree_off(struct bch_dev *ca, enum btree_id id) for_each_btree_node(&iter, c, id, POS_MIN, 0, b) { struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key); retry: - if (!bch_extent_has_device(e, ca->dev_idx)) + if (!bch2_extent_has_device(e, ca->dev_idx)) continue; - ret = bch_btree_node_rewrite(&iter, b, &cl); + ret = bch2_btree_node_rewrite(&iter, b, &cl); if (ret == -EINTR || ret == -ENOSPC) { /* * Drop locks to upgrade locks or wait on * reserve: after retaking, recheck in case we * raced. 
*/ - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); closure_sync(&cl); - b = bch_btree_iter_peek_node(&iter); + b = bch2_btree_iter_peek_node(&iter); goto retry; } if (ret) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret; } - bch_btree_iter_set_locks_want(&iter, 0); + bch2_btree_iter_set_locks_want(&iter, 0); } - ret = bch_btree_iter_unlock(&iter); + ret = bch2_btree_iter_unlock(&iter); if (ret) return ret; /* btree IO error */ - if (IS_ENABLED(CONFIG_BCACHE_DEBUG)) { + if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) { for_each_btree_node(&iter, c, id, POS_MIN, 0, b) { struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key); - BUG_ON(bch_extent_has_device(e, ca->dev_idx)); + BUG_ON(bch2_extent_has_device(e, ca->dev_idx)); } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); } return 0; @@ -252,7 +252,7 @@ retry: * is written. */ -int bch_move_metadata_off_device(struct bch_dev *ca) +int bch2_move_metadata_off_device(struct bch_dev *ca) { struct bch_fs *c = ca->fs; struct bch_sb_field_members *mi; @@ -267,7 +267,7 @@ int bch_move_metadata_off_device(struct bch_dev *ca) /* 1st, Move the btree nodes off the device */ for (i = 0; i < BTREE_ID_NR; i++) { - ret = bch_move_btree_off(ca, i); + ret = bch2_move_btree_off(ca, i); if (ret) return ret; } @@ -276,15 +276,15 @@ int bch_move_metadata_off_device(struct bch_dev *ca) /* 2nd. Move the journal off the device */ - ret = bch_journal_move(ca); + ret = bch2_journal_move(ca); if (ret) return ret; mutex_lock(&c->sb_lock); - mi = bch_sb_get_members(c->disk_sb); + mi = bch2_sb_get_members(c->disk_sb); SET_BCH_MEMBER_HAS_METADATA(&mi->members[ca->dev_idx], false); - bch_write_super(c); + bch2_write_super(c); mutex_unlock(&c->sb_lock); return 0; @@ -295,7 +295,7 @@ int bch_move_metadata_off_device(struct bch_dev *ca) * migrate the data off the device. */ -static int bch_flag_key_bad(struct btree_iter *iter, +static int bch2_flag_key_bad(struct btree_iter *iter, struct bch_dev *ca, struct bkey_s_c_extent orig) { @@ -309,16 +309,16 @@ static int bch_flag_key_bad(struct btree_iter *iter, extent_for_each_ptr_backwards(e, ptr) if (ptr->dev == ca->dev_idx) - bch_extent_drop_ptr(e, ptr); + bch2_extent_drop_ptr(e, ptr); /* - * If the new extent no longer has any pointers, bch_extent_normalize() + * If the new extent no longer has any pointers, bch2_extent_normalize() * will do the appropriate thing with it (turning it into a * KEY_TYPE_ERROR key, or just a discard if it was a cached extent) */ - bch_extent_normalize(c, e.s); + bch2_extent_normalize(c, e.s); - return bch_btree_insert_at(c, NULL, NULL, NULL, + return bch2_btree_insert_at(c, NULL, NULL, NULL, BTREE_INSERT_ATOMIC, BTREE_INSERT_ENTRY(iter, &tmp.key)); } @@ -334,25 +334,25 @@ static int bch_flag_key_bad(struct btree_iter *iter, * that we've already tried to move the data MAX_DATA_OFF_ITER times and * are not likely to succeed if we try again. 
*/ -int bch_flag_data_bad(struct bch_dev *ca) +int bch2_flag_data_bad(struct bch_dev *ca) { int ret = 0; struct bkey_s_c k; struct bkey_s_c_extent e; struct btree_iter iter; - bch_btree_iter_init(&iter, ca->fs, BTREE_ID_EXTENTS, POS_MIN); + bch2_btree_iter_init(&iter, ca->fs, BTREE_ID_EXTENTS, POS_MIN); - while ((k = bch_btree_iter_peek(&iter)).k && + while ((k = bch2_btree_iter_peek(&iter)).k && !(ret = btree_iter_err(k))) { if (!bkey_extent_is_data(k.k)) goto advance; e = bkey_s_c_to_extent(k); - if (!bch_extent_has_device(e, ca->dev_idx)) + if (!bch2_extent_has_device(e, ca->dev_idx)) goto advance; - ret = bch_flag_key_bad(&iter, ca, e); + ret = bch2_flag_key_bad(&iter, ca, e); /* * don't want to leave ret == -EINTR, since if we raced and @@ -386,10 +386,10 @@ int bch_flag_data_bad(struct bch_dev *ca) */ continue; advance: - bch_btree_iter_advance_pos(&iter); + bch2_btree_iter_advance_pos(&iter); } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret; } diff --git a/libbcachefs/migrate.h b/libbcachefs/migrate.h new file mode 100644 index 00000000..81776bdc --- /dev/null +++ b/libbcachefs/migrate.h @@ -0,0 +1,8 @@ +#ifndef _BCACHE_MIGRATE_H +#define _BCACHE_MIGRATE_H + +int bch2_move_data_off_device(struct bch_dev *); +int bch2_move_metadata_off_device(struct bch_dev *); +int bch2_flag_data_bad(struct bch_dev *); + +#endif /* _BCACHE_MIGRATE_H */ diff --git a/libbcache/move.c b/libbcachefs/move.c index edee726c..f718f42a 100644 --- a/libbcache/move.c +++ b/libbcachefs/move.c @@ -1,5 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" #include "btree_gc.h" #include "btree_update.h" #include "buckets.h" @@ -10,7 +10,7 @@ #include <linux/ioprio.h> -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> static struct bch_extent_ptr *bkey_find_ptr(struct bch_fs *c, struct bkey_s_extent e, @@ -29,7 +29,7 @@ static struct bch_extent_ptr *bkey_find_ptr(struct bch_fs *c, return NULL; } -static struct bch_extent_ptr *bch_migrate_matching_ptr(struct migrate_write *m, +static struct bch_extent_ptr *bch2_migrate_matching_ptr(struct migrate_write *m, struct bkey_s_extent e) { const struct bch_extent_ptr *ptr; @@ -45,7 +45,7 @@ static struct bch_extent_ptr *bch_migrate_matching_ptr(struct migrate_write *m, return ret; } -static int bch_migrate_index_update(struct bch_write_op *op) +static int bch2_migrate_index_update(struct bch_write_op *op) { struct bch_fs *c = op->c; struct migrate_write *m = @@ -54,19 +54,19 @@ static int bch_migrate_index_update(struct bch_write_op *op) struct btree_iter iter; int ret = 0; - bch_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS, - bkey_start_pos(&bch_keylist_front(keys)->k)); + bch2_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS, + bkey_start_pos(&bch2_keylist_front(keys)->k)); while (1) { struct bkey_s_extent insert = - bkey_i_to_s_extent(bch_keylist_front(keys)); - struct bkey_s_c k = bch_btree_iter_peek_with_holes(&iter); + bkey_i_to_s_extent(bch2_keylist_front(keys)); + struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter); struct bch_extent_ptr *ptr; struct bkey_s_extent e; BKEY_PADDED(k) new; if (!k.k) { - ret = bch_btree_iter_unlock(&iter); + ret = bch2_btree_iter_unlock(&iter); break; } @@ -74,19 +74,19 @@ static int bch_migrate_index_update(struct bch_write_op *op) goto nomatch; bkey_reassemble(&new.k, k); - bch_cut_front(iter.pos, &new.k); - bch_cut_back(insert.k->p, &new.k.k); + bch2_cut_front(iter.pos, &new.k); + bch2_cut_back(insert.k->p, &new.k.k); e = bkey_i_to_s_extent(&new.k); /* hack - promotes can 
race: */ if (m->promote) extent_for_each_ptr(insert, ptr) - if (bch_extent_has_device(e.c, ptr->dev)) + if (bch2_extent_has_device(e.c, ptr->dev)) goto nomatch; - ptr = bch_migrate_matching_ptr(m, e); + ptr = bch2_migrate_matching_ptr(m, e); if (ptr) { - int nr_new_dirty = bch_extent_nr_dirty_ptrs(insert.s_c); + int nr_new_dirty = bch2_extent_nr_dirty_ptrs(insert.s_c); unsigned insert_flags = BTREE_INSERT_ATOMIC| BTREE_INSERT_NOFAIL; @@ -97,7 +97,7 @@ static int bch_migrate_index_update(struct bch_write_op *op) if (m->move) { nr_new_dirty -= !ptr->cached; - __bch_extent_drop_ptr(e, ptr); + __bch2_extent_drop_ptr(e, ptr); } BUG_ON(nr_new_dirty < 0); @@ -107,12 +107,12 @@ static int bch_migrate_index_update(struct bch_write_op *op) bkey_val_u64s(insert.k)); e.k->u64s += bkey_val_u64s(insert.k); - bch_extent_narrow_crcs(e); - bch_extent_drop_redundant_crcs(e); - bch_extent_normalize(c, e.s); - bch_extent_mark_replicas_cached(c, e, nr_new_dirty); + bch2_extent_narrow_crcs(e); + bch2_extent_drop_redundant_crcs(e); + bch2_extent_normalize(c, e.s); + bch2_extent_mark_replicas_cached(c, e, nr_new_dirty); - ret = bch_btree_insert_at(c, &op->res, + ret = bch2_btree_insert_at(c, &op->res, NULL, op_journal_seq(op), insert_flags, BTREE_INSERT_ENTRY(&iter, &new.k)); @@ -120,23 +120,23 @@ static int bch_migrate_index_update(struct bch_write_op *op) break; } else { nomatch: - bch_btree_iter_advance_pos(&iter); + bch2_btree_iter_advance_pos(&iter); } - while (bkey_cmp(iter.pos, bch_keylist_front(keys)->k.p) >= 0) { - bch_keylist_pop_front(keys); - if (bch_keylist_empty(keys)) + while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) { + bch2_keylist_pop_front(keys); + if (bch2_keylist_empty(keys)) goto out; } - bch_cut_front(iter.pos, bch_keylist_front(keys)); + bch2_cut_front(iter.pos, bch2_keylist_front(keys)); } out: - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret; } -void bch_migrate_write_init(struct bch_fs *c, +void bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m, struct write_point *wp, struct bkey_s_c k, @@ -154,7 +154,7 @@ void bch_migrate_write_init(struct bch_fs *c, (move_ptr && move_ptr->cached)) flags |= BCH_WRITE_CACHED; - bch_write_op_init(&m->op, c, &m->wbio, + bch2_write_op_init(&m->op, c, &m->wbio, (struct disk_reservation) { 0 }, wp, bkey_start_pos(k.k), @@ -165,7 +165,7 @@ void bch_migrate_write_init(struct bch_fs *c, m->op.nonce = extent_current_nonce(bkey_s_c_to_extent(k)); m->op.nr_replicas = 1; - m->op.index_update_fn = bch_migrate_index_update; + m->op.index_update_fn = bch2_migrate_index_update; } static void migrate_bio_init(struct moving_io *io, struct bio *bio, @@ -178,7 +178,7 @@ static void migrate_bio_init(struct moving_io *io, struct bio *bio, bio->bi_max_vecs = DIV_ROUND_UP(sectors, PAGE_SECTORS); bio->bi_private = &io->cl; bio->bi_io_vec = io->bi_inline_vecs; - bch_bio_map(bio, NULL); + bch2_bio_map(bio, NULL); } static void moving_io_destructor(struct closure *cl) @@ -189,7 +189,7 @@ static void moving_io_destructor(struct closure *cl) int i; //if (io->replace.failures) - // trace_bcache_copy_collision(q, &io->key.k); + // trace_copy_collision(q, &io->key.k); atomic_sub(io->write.key.k.size, &ctxt->sectors_in_flight); wake_up(&ctxt->wait); @@ -225,7 +225,7 @@ static void write_moving(struct moving_io *io) if (op->error) { closure_return_with_destructor(&io->cl, moving_io_destructor); } else { - closure_call(&op->cl, bch_write, NULL, &io->cl); + closure_call(&op->cl, bch2_write, NULL, &io->cl); 
closure_return_with_destructor(&io->cl, moving_io_after_write); } } @@ -244,7 +244,7 @@ static void read_moving_endio(struct bio *bio) struct moving_io *io = container_of(cl, struct moving_io, cl); struct moving_context *ctxt = io->ctxt; - trace_bcache_move_read_done(&io->write.key.k); + trace_move_read_done(&io->write.key.k); if (bio->bi_error) { io->write.op.error = bio->bi_error; @@ -258,13 +258,13 @@ static void read_moving_endio(struct bio *bio) closure_put(&ctxt->cl); } -static void __bch_data_move(struct closure *cl) +static void __bch2_data_move(struct closure *cl) { struct moving_io *io = container_of(cl, struct moving_io, cl); struct bch_fs *c = io->write.op.c; struct extent_pick_ptr pick; - bch_extent_pick_ptr_avoiding(c, bkey_i_to_s_c(&io->write.key), + bch2_extent_pick_ptr_avoiding(c, bkey_i_to_s_c(&io->write.key), io->ctxt->avoid, &pick); if (IS_ERR_OR_NULL(pick.ca)) closure_return_with_destructor(cl, moving_io_destructor); @@ -279,12 +279,12 @@ static void __bch_data_move(struct closure *cl) */ closure_get(&io->ctxt->cl); - bch_read_extent(c, &io->rbio, + bch2_read_extent(c, &io->rbio, bkey_i_to_s_c(&io->write.key), &pick, BCH_READ_IS_LAST); } -int bch_data_move(struct bch_fs *c, +int bch2_data_move(struct bch_fs *c, struct moving_context *ctxt, struct write_point *wp, struct bkey_s_c k, @@ -311,19 +311,19 @@ int bch_data_move(struct bch_fs *c, bio_get(&io->write.wbio.bio); io->write.wbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k); - bch_migrate_write_init(c, &io->write, wp, k, move_ptr, 0); + bch2_migrate_write_init(c, &io->write, wp, k, move_ptr, 0); - trace_bcache_move_read(&io->write.key.k); + trace_move_read(&io->write.key.k); ctxt->keys_moved++; ctxt->sectors_moved += k.k->size; if (ctxt->rate) - bch_ratelimit_increment(ctxt->rate, k.k->size); + bch2_ratelimit_increment(ctxt->rate, k.k->size); atomic_add(k.k->size, &ctxt->sectors_in_flight); list_add_tail(&io->list, &ctxt->reads); - closure_call(&io->cl, __bch_data_move, NULL, &ctxt->cl); + closure_call(&io->cl, __bch2_data_move, NULL, &ctxt->cl); return 0; } @@ -333,7 +333,7 @@ static void do_pending_writes(struct moving_context *ctxt) while ((io = next_pending_write(ctxt))) { list_del(&io->list); - trace_bcache_move_write(&io->write.key.k); + trace_move_write(&io->write.key.k); write_moving(io); } } @@ -348,18 +348,18 @@ do { \ next_pending_write(_ctxt) || (_cond)); \ } while (1) -int bch_move_ctxt_wait(struct moving_context *ctxt) +int bch2_move_ctxt_wait(struct moving_context *ctxt) { move_ctxt_wait_event(ctxt, atomic_read(&ctxt->sectors_in_flight) < ctxt->max_sectors_in_flight); return ctxt->rate - ? bch_ratelimit_wait_freezable_stoppable(ctxt->rate) + ? 
bch2_ratelimit_wait_freezable_stoppable(ctxt->rate) : 0; } -void bch_move_ctxt_wait_for_io(struct moving_context *ctxt) +void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt) { unsigned sectors_pending = atomic_read(&ctxt->sectors_in_flight); @@ -368,7 +368,7 @@ void bch_move_ctxt_wait_for_io(struct moving_context *ctxt) atomic_read(&ctxt->sectors_in_flight) != sectors_pending); } -void bch_move_ctxt_exit(struct moving_context *ctxt) +void bch2_move_ctxt_exit(struct moving_context *ctxt) { move_ctxt_wait_event(ctxt, !atomic_read(&ctxt->sectors_in_flight)); closure_sync(&ctxt->cl); @@ -377,7 +377,7 @@ void bch_move_ctxt_exit(struct moving_context *ctxt) EBUG_ON(atomic_read(&ctxt->sectors_in_flight)); } -void bch_move_ctxt_init(struct moving_context *ctxt, +void bch2_move_ctxt_init(struct moving_context *ctxt, struct bch_ratelimit *rate, unsigned max_sectors_in_flight) { diff --git a/libbcache/move.h b/libbcachefs/move.h index 317431d6..548f0f0a 100644 --- a/libbcache/move.h +++ b/libbcachefs/move.h @@ -22,7 +22,7 @@ struct migrate_write { struct bch_write_bio wbio; }; -void bch_migrate_write_init(struct bch_fs *, +void bch2_migrate_write_init(struct bch_fs *, struct migrate_write *, struct write_point *, struct bkey_s_c, @@ -71,17 +71,17 @@ struct moving_io { struct bio_vec bi_inline_vecs[0]; }; -int bch_data_move(struct bch_fs *, +int bch2_data_move(struct bch_fs *, struct moving_context *, struct write_point *, struct bkey_s_c, const struct bch_extent_ptr *); -int bch_move_ctxt_wait(struct moving_context *); -void bch_move_ctxt_wait_for_io(struct moving_context *); +int bch2_move_ctxt_wait(struct moving_context *); +void bch2_move_ctxt_wait_for_io(struct moving_context *); -void bch_move_ctxt_exit(struct moving_context *); -void bch_move_ctxt_init(struct moving_context *, struct bch_ratelimit *, +void bch2_move_ctxt_exit(struct moving_context *); +void bch2_move_ctxt_init(struct moving_context *, struct bch_ratelimit *, unsigned); #endif /* _BCACHE_MOVE_H */ diff --git a/libbcache/move_types.h b/libbcachefs/move_types.h index 0e2275e2..0e2275e2 100644 --- a/libbcache/move_types.h +++ b/libbcachefs/move_types.h diff --git a/libbcache/movinggc.c b/libbcachefs/movinggc.c index 9bb2b7a4..8804dbb3 100644 --- a/libbcache/movinggc.c +++ b/libbcachefs/movinggc.c @@ -4,7 +4,7 @@ * Copyright 2012 Google, Inc. 
*/ -#include "bcache.h" +#include "bcachefs.h" #include "btree_iter.h" #include "buckets.h" #include "clock.h" @@ -14,7 +14,7 @@ #include "move.h" #include "movinggc.h" -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/wait.h> @@ -27,7 +27,7 @@ static const struct bch_extent_ptr *moving_pred(struct bch_dev *ca, const struct bch_extent_ptr *ptr; if (bkey_extent_is_data(k.k) && - (ptr = bch_extent_has_device(bkey_s_c_to_extent(k), + (ptr = bch2_extent_has_device(bkey_s_c_to_extent(k), ca->dev_idx)) && PTR_BUCKET(ca, ptr)->mark.copygc) return ptr; @@ -47,11 +47,11 @@ static int issue_moving_gc_move(struct bch_dev *ca, if (!ptr) /* We raced - bucket's been reused */ return 0; - ret = bch_data_move(c, ctxt, &ca->copygc_write_point, k, ptr); + ret = bch2_data_move(c, ctxt, &ca->copygc_write_point, k, ptr); if (!ret) - trace_bcache_gc_copy(k.k); + trace_gc_copy(k.k); else - trace_bcache_moving_gc_alloc_fail(c, k.k->size); + trace_moving_gc_alloc_fail(c, k.k->size); return ret; } @@ -66,17 +66,17 @@ static void read_moving(struct bch_dev *ca, size_t buckets_to_move, u64 sectors_not_moved = 0; size_t buckets_not_moved = 0; - bch_ratelimit_reset(&ca->moving_gc_pd.rate); - bch_move_ctxt_init(&ctxt, &ca->moving_gc_pd.rate, + bch2_ratelimit_reset(&ca->moving_gc_pd.rate); + bch2_move_ctxt_init(&ctxt, &ca->moving_gc_pd.rate, SECTORS_IN_FLIGHT_PER_DEVICE); - bch_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN); + bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN); while (1) { if (kthread_should_stop()) goto out; - if (bch_move_ctxt_wait(&ctxt)) + if (bch2_move_ctxt_wait(&ctxt)) goto out; - k = bch_btree_iter_peek(&iter); + k = bch2_btree_iter_peek(&iter); if (!k.k) break; if (btree_iter_err(k)) @@ -86,24 +86,24 @@ static void read_moving(struct bch_dev *ca, size_t buckets_to_move, goto next; if (issue_moving_gc_move(ca, &ctxt, k)) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); /* memory allocation failure, wait for some IO to finish */ - bch_move_ctxt_wait_for_io(&ctxt); + bch2_move_ctxt_wait_for_io(&ctxt); continue; } next: - bch_btree_iter_advance_pos(&iter); - //bch_btree_iter_cond_resched(&iter); + bch2_btree_iter_advance_pos(&iter); + //bch2_btree_iter_cond_resched(&iter); /* unlock before calling moving_context_wait() */ - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); cond_resched(); } - bch_btree_iter_unlock(&iter); - bch_move_ctxt_exit(&ctxt); - trace_bcache_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved, + bch2_btree_iter_unlock(&iter); + bch2_move_ctxt_exit(&ctxt); + trace_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved, buckets_to_move); /* don't check this if we bailed out early: */ @@ -119,9 +119,9 @@ next: buckets_not_moved, buckets_to_move); return; out: - bch_btree_iter_unlock(&iter); - bch_move_ctxt_exit(&ctxt); - trace_bcache_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved, + bch2_btree_iter_unlock(&iter); + bch2_move_ctxt_exit(&ctxt); + trace_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved, buckets_to_move); } @@ -137,7 +137,7 @@ static bool have_copygc_reserve(struct bch_dev *ca) return ret; } -static void bch_moving_gc(struct bch_dev *ca) +static void bch2_moving_gc(struct bch_dev *ca) { struct bch_fs *c = ca->fs; struct bucket *g; @@ -163,7 +163,7 @@ static void bch_moving_gc(struct bch_dev *ca) reserve_sectors = COPYGC_SECTORS_PER_ITER(ca); - trace_bcache_moving_gc_start(ca); + trace_moving_gc_start(ca); /* * Find buckets 
with lowest sector counts, skipping completely @@ -223,7 +223,7 @@ static void bch_moving_gc(struct bch_dev *ca) read_moving(ca, buckets_to_move, sectors_to_move); } -static int bch_moving_gc_thread(void *arg) +static int bch2_moving_gc_thread(void *arg) { struct bch_dev *ca = arg; struct bch_fs *c = ca->fs; @@ -248,27 +248,27 @@ static int bch_moving_gc_thread(void *arg) if (available > want) { next = last + (available - want) * ca->mi.bucket_size; - bch_kthread_io_clock_wait(clock, next); + bch2_kthread_io_clock_wait(clock, next); continue; } - bch_moving_gc(ca); + bch2_moving_gc(ca); } return 0; } -void bch_moving_gc_stop(struct bch_dev *ca) +void bch2_moving_gc_stop(struct bch_dev *ca) { ca->moving_gc_pd.rate.rate = UINT_MAX; - bch_ratelimit_reset(&ca->moving_gc_pd.rate); + bch2_ratelimit_reset(&ca->moving_gc_pd.rate); if (ca->moving_gc_read) kthread_stop(ca->moving_gc_read); ca->moving_gc_read = NULL; } -int bch_moving_gc_start(struct bch_dev *ca) +int bch2_moving_gc_start(struct bch_dev *ca) { struct task_struct *t; @@ -277,10 +277,10 @@ int bch_moving_gc_start(struct bch_dev *ca) if (ca->fs->opts.nochanges) return 0; - if (bch_fs_init_fault("moving_gc_start")) + if (bch2_fs_init_fault("moving_gc_start")) return -ENOMEM; - t = kthread_create(bch_moving_gc_thread, ca, "bch_copygc_read"); + t = kthread_create(bch2_moving_gc_thread, ca, "bch_copygc_read"); if (IS_ERR(t)) return PTR_ERR(t); @@ -290,8 +290,8 @@ int bch_moving_gc_start(struct bch_dev *ca) return 0; } -void bch_dev_moving_gc_init(struct bch_dev *ca) +void bch2_dev_moving_gc_init(struct bch_dev *ca) { - bch_pd_controller_init(&ca->moving_gc_pd); + bch2_pd_controller_init(&ca->moving_gc_pd); ca->moving_gc_pd.d_term = 0; } diff --git a/libbcache/movinggc.h b/libbcachefs/movinggc.h index 5afbf34f..e27ccc35 100644 --- a/libbcache/movinggc.h +++ b/libbcachefs/movinggc.h @@ -23,8 +23,8 @@ #define COPYGC_SECTORS_PER_ITER(ca) \ ((ca)->mi.bucket_size * COPYGC_BUCKETS_PER_ITER(ca)) -void bch_moving_gc_stop(struct bch_dev *); -int bch_moving_gc_start(struct bch_dev *); -void bch_dev_moving_gc_init(struct bch_dev *); +void bch2_moving_gc_stop(struct bch_dev *); +int bch2_moving_gc_start(struct bch_dev *); +void bch2_dev_moving_gc_init(struct bch_dev *); #endif diff --git a/libbcache/opts.c b/libbcachefs/opts.c index 41780d59..7c4cf804 100644 --- a/libbcache/opts.c +++ b/libbcachefs/opts.c @@ -4,35 +4,35 @@ #include "opts.h" #include "util.h" -const char * const bch_error_actions[] = { +const char * const bch2_error_actions[] = { "continue", "remount-ro", "panic", NULL }; -const char * const bch_csum_types[] = { +const char * const bch2_csum_types[] = { "none", "crc32c", "crc64", NULL }; -const char * const bch_compression_types[] = { +const char * const bch2_compression_types[] = { "none", "lz4", "gzip", NULL }; -const char * const bch_str_hash_types[] = { +const char * const bch2_str_hash_types[] = { "crc32c", "crc64", "siphash", NULL }; -const char * const bch_cache_replacement_policies[] = { +const char * const bch2_cache_replacement_policies[] = { "lru", "fifo", "random", @@ -40,7 +40,7 @@ const char * const bch_cache_replacement_policies[] = { }; /* Default is -1; we skip past it for struct cached_dev's cache mode */ -const char * const bch_cache_modes[] = { +const char * const bch2_cache_modes[] = { "default", "writethrough", "writeback", @@ -49,7 +49,7 @@ const char * const bch_cache_modes[] = { NULL }; -const char * const bch_dev_state[] = { +const char * const bch2_dev_state[] = { "readwrite", "readonly", "failed", @@ -57,7 
+57,7 @@ const char * const bch_dev_state[] = { NULL }; -const struct bch_option bch_opt_table[] = { +const struct bch_option bch2_opt_table[] = { #define OPT_BOOL() .type = BCH_OPT_BOOL #define OPT_UINT(_min, _max) .type = BCH_OPT_UINT, .min = _min, .max = _max #define OPT_STR(_choices) .type = BCH_OPT_STR, .choices = _choices @@ -72,20 +72,20 @@ const struct bch_option bch_opt_table[] = { #undef BCH_OPT }; -static enum bch_opt_id bch_opt_lookup(const char *name) +static enum bch_opt_id bch2_opt_lookup(const char *name) { const struct bch_option *i; - for (i = bch_opt_table; - i < bch_opt_table + ARRAY_SIZE(bch_opt_table); + for (i = bch2_opt_table; + i < bch2_opt_table + ARRAY_SIZE(bch2_opt_table); i++) if (!strcmp(name, i->name)) - return i - bch_opt_table; + return i - bch2_opt_table; return -1; } -static u64 bch_opt_get(struct bch_opts *opts, enum bch_opt_id id) +static u64 bch2_opt_get(struct bch_opts *opts, enum bch_opt_id id) { switch (id) { #define BCH_OPT(_name, ...) \ @@ -100,7 +100,7 @@ static u64 bch_opt_get(struct bch_opts *opts, enum bch_opt_id id) } } -void bch_opt_set(struct bch_opts *opts, enum bch_opt_id id, u64 v) +void bch2_opt_set(struct bch_opts *opts, enum bch_opt_id id, u64 v) { switch (id) { #define BCH_OPT(_name, ...) \ @@ -120,9 +120,9 @@ void bch_opt_set(struct bch_opts *opts, enum bch_opt_id id, u64 v) * Initial options from superblock - here we don't want any options undefined, * any options the superblock doesn't specify are set to 0: */ -struct bch_opts bch_sb_opts(struct bch_sb *sb) +struct bch_opts bch2_sb_opts(struct bch_sb *sb) { - struct bch_opts opts = bch_opts_empty(); + struct bch_opts opts = bch2_opts_empty(); #define BCH_OPT(_name, _mode, _sb_opt, ...) \ if (_sb_opt != NO_SB_OPT) \ @@ -134,9 +134,9 @@ struct bch_opts bch_sb_opts(struct bch_sb *sb) return opts; } -int parse_one_opt(enum bch_opt_id id, const char *val, u64 *res) +static int parse_one_opt(enum bch_opt_id id, const char *val, u64 *res) { - const struct bch_option *opt = &bch_opt_table[id]; + const struct bch_option *opt = &bch2_opt_table[id]; ssize_t ret; switch (opt->type) { @@ -157,7 +157,7 @@ int parse_one_opt(enum bch_opt_id id, const char *val, u64 *res) return -ERANGE; break; case BCH_OPT_STR: - ret = bch_read_string_list(val, opt->choices); + ret = bch2_read_string_list(val, opt->choices); if (ret < 0) return ret; @@ -168,7 +168,7 @@ int parse_one_opt(enum bch_opt_id id, const char *val, u64 *res) return 0; } -int bch_parse_mount_opts(struct bch_opts *opts, char *options) +int bch2_parse_mount_opts(struct bch_opts *opts, char *options) { char *opt, *name, *val; int ret, id; @@ -179,7 +179,7 @@ int bch_parse_mount_opts(struct bch_opts *opts, char *options) val = opt; if (val) { - id = bch_opt_lookup(name); + id = bch2_opt_lookup(name); if (id < 0) return -EINVAL; @@ -187,29 +187,29 @@ int bch_parse_mount_opts(struct bch_opts *opts, char *options) if (ret < 0) return ret; } else { - id = bch_opt_lookup(name); + id = bch2_opt_lookup(name); v = 1; if (id < 0 && !strncmp("no", name, 2)) { - id = bch_opt_lookup(name + 2); + id = bch2_opt_lookup(name + 2); v = 0; } - if (bch_opt_table[id].type != BCH_OPT_BOOL) + if (bch2_opt_table[id].type != BCH_OPT_BOOL) return -EINVAL; } - bch_opt_set(opts, id, v); + bch2_opt_set(opts, id, v); } return 0; } -enum bch_opt_id bch_parse_sysfs_opt(const char *name, const char *val, +enum bch_opt_id bch2_parse_sysfs_opt(const char *name, const char *val, u64 *res) { - enum bch_opt_id id = bch_opt_lookup(name); + enum bch_opt_id id = 
bch2_opt_lookup(name); int ret; if (id < 0) @@ -222,20 +222,20 @@ enum bch_opt_id bch_parse_sysfs_opt(const char *name, const char *val, return id; } -ssize_t bch_opt_show(struct bch_opts *opts, const char *name, +ssize_t bch2_opt_show(struct bch_opts *opts, const char *name, char *buf, size_t size) { - enum bch_opt_id id = bch_opt_lookup(name); + enum bch_opt_id id = bch2_opt_lookup(name); const struct bch_option *opt; u64 v; if (id < 0) return -EINVAL; - v = bch_opt_get(opts, id); - opt = &bch_opt_table[id]; + v = bch2_opt_get(opts, id); + opt = &bch2_opt_table[id]; return opt->type == BCH_OPT_STR - ? bch_snprint_string_list(buf, size, opt->choices, v) + ? bch2_snprint_string_list(buf, size, opt->choices, v) : snprintf(buf, size, "%lli\n", v); } diff --git a/libbcache/opts.h b/libbcachefs/opts.h index 253b7399..6fa707db 100644 --- a/libbcache/opts.h +++ b/libbcachefs/opts.h @@ -1,18 +1,18 @@ #ifndef _BCACHE_OPTS_H #define _BCACHE_OPTS_H -#include <linux/bcache.h> #include <linux/bug.h> #include <linux/log2.h> #include <linux/string.h> +#include "bcachefs_format.h" -extern const char * const bch_error_actions[]; -extern const char * const bch_csum_types[]; -extern const char * const bch_compression_types[]; -extern const char * const bch_str_hash_types[]; -extern const char * const bch_cache_replacement_policies[]; -extern const char * const bch_cache_modes[]; -extern const char * const bch_dev_state[]; +extern const char * const bch2_error_actions[]; +extern const char * const bch2_csum_types[]; +extern const char * const bch2_compression_types[]; +extern const char * const bch2_str_hash_types[]; +extern const char * const bch2_cache_replacement_policies[]; +extern const char * const bch2_cache_modes[]; +extern const char * const bch2_dev_state[]; /* * Mount options; we also store defaults in the superblock. @@ -22,7 +22,7 @@ extern const char * const bch_dev_state[]; * updates the superblock. * * We store options as signed integers, where -1 means undefined. This means we - * can pass the mount options to bch_fs_alloc() as a whole struct, and then only + * can pass the mount options to bch2_fs_alloc() as a whole struct, and then only * apply the options from that struct that are defined. 
*/ @@ -50,7 +50,7 @@ enum opt_type { #define BCH_VISIBLE_OPTS() \ BCH_OPT(errors, 0644, BCH_SB_ERROR_ACTION, \ - s8, OPT_STR(bch_error_actions)) \ + s8, OPT_STR(bch2_error_actions)) \ BCH_OPT(metadata_replicas, 0444, BCH_SB_META_REPLICAS_WANT,\ s8, OPT_UINT(1, BCH_REPLICAS_MAX)) \ BCH_OPT(data_replicas, 0444, BCH_SB_DATA_REPLICAS_WANT,\ @@ -60,13 +60,13 @@ enum opt_type { BCH_OPT(data_replicas_required, 0444, BCH_SB_DATA_REPLICAS_REQ,\ s8, OPT_UINT(1, BCH_REPLICAS_MAX)) \ BCH_OPT(metadata_checksum, 0644, BCH_SB_META_CSUM_TYPE, \ - s8, OPT_STR(bch_csum_types)) \ + s8, OPT_STR(bch2_csum_types)) \ BCH_OPT(data_checksum, 0644, BCH_SB_DATA_CSUM_TYPE, \ - s8, OPT_STR(bch_csum_types)) \ + s8, OPT_STR(bch2_csum_types)) \ BCH_OPT(compression, 0644, BCH_SB_COMPRESSION_TYPE,\ - s8, OPT_STR(bch_compression_types)) \ + s8, OPT_STR(bch2_compression_types)) \ BCH_OPT(str_hash, 0644, BCH_SB_STR_HASH_TYPE, \ - s8, OPT_STR(bch_str_hash_types)) \ + s8, OPT_STR(bch2_str_hash_types)) \ BCH_OPT(inodes_32bit, 0644, BCH_SB_INODE_32BIT, \ s8, OPT_BOOL()) \ BCH_OPT(gc_reserve_percent, 0444, BCH_SB_GC_RESERVE, \ @@ -135,9 +135,9 @@ struct bch_option { }; -extern const struct bch_option bch_opt_table[]; +extern const struct bch_option bch2_opt_table[]; -static inline struct bch_opts bch_opts_empty(void) +static inline struct bch_opts bch2_opts_empty(void) { struct bch_opts ret; @@ -145,7 +145,7 @@ static inline struct bch_opts bch_opts_empty(void) return ret; } -static inline void bch_opts_apply(struct bch_opts *dst, struct bch_opts src) +static inline void bch2_opts_apply(struct bch_opts *dst, struct bch_opts src) { #define BCH_OPT(_name, ...) \ if (src._name >= 0) \ @@ -157,12 +157,12 @@ static inline void bch_opts_apply(struct bch_opts *dst, struct bch_opts src) #define opt_defined(_opt) ((_opt) >= 0) -void bch_opt_set(struct bch_opts *, enum bch_opt_id, u64); -struct bch_opts bch_sb_opts(struct bch_sb *); +void bch2_opt_set(struct bch_opts *, enum bch_opt_id, u64); +struct bch_opts bch2_sb_opts(struct bch_sb *); -int bch_parse_mount_opts(struct bch_opts *, char *); -enum bch_opt_id bch_parse_sysfs_opt(const char *, const char *, u64 *); +int bch2_parse_mount_opts(struct bch_opts *, char *); +enum bch_opt_id bch2_parse_sysfs_opt(const char *, const char *, u64 *); -ssize_t bch_opt_show(struct bch_opts *, const char *, char *, size_t); +ssize_t bch2_opt_show(struct bch_opts *, const char *, char *, size_t); #endif /* _BCACHE_OPTS_H */ diff --git a/libbcache/siphash.c b/libbcachefs/siphash.c index 3a6c9c82..3a6c9c82 100644 --- a/libbcache/siphash.c +++ b/libbcachefs/siphash.c diff --git a/libbcache/siphash.h b/libbcachefs/siphash.h index 7a4b2241..7a4b2241 100644 --- a/libbcache/siphash.h +++ b/libbcachefs/siphash.h diff --git a/libbcache/six.c b/libbcachefs/six.c index 1bb8bfcc..1bb8bfcc 100644 --- a/libbcache/six.c +++ b/libbcachefs/six.c diff --git a/libbcache/six.h b/libbcachefs/six.h index 01ed3385..01ed3385 100644 --- a/libbcache/six.h +++ b/libbcachefs/six.h diff --git a/libbcache/str_hash.h b/libbcachefs/str_hash.h index 1173dfe8..f70fc1a9 100644 --- a/libbcache/str_hash.h +++ b/libbcachefs/str_hash.h @@ -19,7 +19,8 @@ struct bch_hash_info { }; static inline struct bch_hash_info -bch_hash_info_init(const struct bch_inode_unpacked *bi) +bch2_hash_info_init(struct bch_fs *c, + const struct bch_inode_unpacked *bi) { /* XXX ick */ struct bch_hash_info info = { @@ -33,10 +34,10 @@ bch_hash_info_init(const struct bch_inode_unpacked *bi) info.crc_key = bi->i_hash_seed; break; case BCH_STR_HASH_SIPHASH: { - 
SHASH_DESC_ON_STACK(desc, bch_sha256); - u8 digest[crypto_shash_digestsize(bch_sha256)]; + SHASH_DESC_ON_STACK(desc, c->sha256); + u8 digest[crypto_shash_digestsize(c->sha256)]; - desc->tfm = bch_sha256; + desc->tfm = c->sha256; desc->flags = 0; crypto_shash_digest(desc, (void *) &bi->i_hash_seed, @@ -59,7 +60,7 @@ struct bch_str_hash_ctx { }; }; -static inline void bch_str_hash_init(struct bch_str_hash_ctx *ctx, +static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx, const struct bch_hash_info *info) { switch (info->type) { @@ -67,7 +68,7 @@ static inline void bch_str_hash_init(struct bch_str_hash_ctx *ctx, ctx->crc32c = crc32c(~0, &info->crc_key, sizeof(info->crc_key)); break; case BCH_STR_HASH_CRC64: - ctx->crc64 = bch_crc64_update(~0, &info->crc_key, sizeof(info->crc_key)); + ctx->crc64 = bch2_crc64_update(~0, &info->crc_key, sizeof(info->crc_key)); break; case BCH_STR_HASH_SIPHASH: SipHash24_Init(&ctx->siphash, &info->siphash_key); @@ -77,7 +78,7 @@ static inline void bch_str_hash_init(struct bch_str_hash_ctx *ctx, } } -static inline void bch_str_hash_update(struct bch_str_hash_ctx *ctx, +static inline void bch2_str_hash_update(struct bch_str_hash_ctx *ctx, const struct bch_hash_info *info, const void *data, size_t len) { @@ -86,7 +87,7 @@ static inline void bch_str_hash_update(struct bch_str_hash_ctx *ctx, ctx->crc32c = crc32c(ctx->crc32c, data, len); break; case BCH_STR_HASH_CRC64: - ctx->crc64 = bch_crc64_update(ctx->crc64, data, len); + ctx->crc64 = bch2_crc64_update(ctx->crc64, data, len); break; case BCH_STR_HASH_SIPHASH: SipHash24_Update(&ctx->siphash, data, len); @@ -96,7 +97,7 @@ static inline void bch_str_hash_update(struct bch_str_hash_ctx *ctx, } } -static inline u64 bch_str_hash_end(struct bch_str_hash_ctx *ctx, +static inline u64 bch2_str_hash_end(struct bch_str_hash_ctx *ctx, const struct bch_hash_info *info) { switch (info->type) { @@ -123,14 +124,14 @@ struct bch_hash_desc { }; static inline struct bkey_s_c -bch_hash_lookup_at(const struct bch_hash_desc desc, +bch2_hash_lookup_at(const struct bch_hash_desc desc, const struct bch_hash_info *info, struct btree_iter *iter, const void *search) { u64 inode = iter->pos.inode; do { - struct bkey_s_c k = bch_btree_iter_peek_with_holes(iter); + struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter); if (btree_iter_err(k)) return k; @@ -145,21 +146,21 @@ bch_hash_lookup_at(const struct bch_hash_desc desc, break; } - bch_btree_iter_advance_pos(iter); + bch2_btree_iter_advance_pos(iter); } while (iter->pos.inode == inode); return bkey_s_c_err(-ENOENT); } static inline struct bkey_s_c -bch_hash_lookup_bkey_at(const struct bch_hash_desc desc, +bch2_hash_lookup_bkey_at(const struct bch_hash_desc desc, const struct bch_hash_info *info, struct btree_iter *iter, struct bkey_s_c search) { u64 inode = iter->pos.inode; do { - struct bkey_s_c k = bch_btree_iter_peek_with_holes(iter); + struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter); if (btree_iter_err(k)) return k; @@ -174,41 +175,41 @@ bch_hash_lookup_bkey_at(const struct bch_hash_desc desc, break; } - bch_btree_iter_advance_pos(iter); + bch2_btree_iter_advance_pos(iter); } while (iter->pos.inode == inode); return bkey_s_c_err(-ENOENT); } static inline struct bkey_s_c -bch_hash_lookup(const struct bch_hash_desc desc, +bch2_hash_lookup(const struct bch_hash_desc desc, const struct bch_hash_info *info, struct bch_fs *c, u64 inode, struct btree_iter *iter, const void *key) { - bch_btree_iter_init(iter, c, desc.btree_id, + bch2_btree_iter_init(iter, c, 
desc.btree_id, POS(inode, desc.hash_key(info, key))); - return bch_hash_lookup_at(desc, info, iter, key); + return bch2_hash_lookup_at(desc, info, iter, key); } static inline struct bkey_s_c -bch_hash_lookup_intent(const struct bch_hash_desc desc, +bch2_hash_lookup_intent(const struct bch_hash_desc desc, const struct bch_hash_info *info, struct bch_fs *c, u64 inode, struct btree_iter *iter, const void *key) { - bch_btree_iter_init_intent(iter, c, desc.btree_id, + bch2_btree_iter_init_intent(iter, c, desc.btree_id, POS(inode, desc.hash_key(info, key))); - return bch_hash_lookup_at(desc, info, iter, key); + return bch2_hash_lookup_at(desc, info, iter, key); } static inline struct bkey_s_c -bch_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter) +bch2_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter) { while (1) { - struct bkey_s_c k = bch_btree_iter_peek_with_holes(iter); + struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter); if (btree_iter_err(k)) return k; @@ -217,34 +218,34 @@ bch_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter) return k; /* hash collision, keep going */ - bch_btree_iter_advance_pos(iter); + bch2_btree_iter_advance_pos(iter); if (iter->pos.inode != k.k->p.inode) return bkey_s_c_err(-ENOENT); } } -static inline struct bkey_s_c bch_hash_hole(const struct bch_hash_desc desc, +static inline struct bkey_s_c bch2_hash_hole(const struct bch_hash_desc desc, const struct bch_hash_info *info, struct bch_fs *c, u64 inode, struct btree_iter *iter, const void *key) { - bch_btree_iter_init_intent(iter, c, desc.btree_id, + bch2_btree_iter_init_intent(iter, c, desc.btree_id, POS(inode, desc.hash_key(info, key))); - return bch_hash_hole_at(desc, iter); + return bch2_hash_hole_at(desc, iter); } -static inline int bch_hash_needs_whiteout(const struct bch_hash_desc desc, +static inline int bch2_hash_needs_whiteout(const struct bch_hash_desc desc, const struct bch_hash_info *info, struct btree_iter *iter, struct btree_iter *start) { - bch_btree_iter_set_pos(iter, + bch2_btree_iter_set_pos(iter, btree_type_successor(start->btree_id, start->pos)); while (1) { - struct bkey_s_c k = bch_btree_iter_peek_with_holes(iter); + struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter); int ret = btree_iter_err(k); if (ret) @@ -258,14 +259,14 @@ static inline int bch_hash_needs_whiteout(const struct bch_hash_desc desc, desc.hash_bkey(info, k) <= start->pos.offset) return true; - bch_btree_iter_advance_pos(iter); + bch2_btree_iter_advance_pos(iter); } } #define BCH_HASH_SET_MUST_CREATE 1 #define BCH_HASH_SET_MUST_REPLACE 2 -static inline int bch_hash_set(const struct bch_hash_desc desc, +static inline int bch2_hash_set(const struct bch_hash_desc desc, const struct bch_hash_info *info, struct bch_fs *c, u64 inode, u64 *journal_seq, @@ -275,17 +276,17 @@ static inline int bch_hash_set(const struct bch_hash_desc desc, struct bkey_s_c k; int ret; - bch_btree_iter_init_intent(&hashed_slot, c, desc.btree_id, + bch2_btree_iter_init_intent(&hashed_slot, c, desc.btree_id, POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert)))); - bch_btree_iter_init_intent(&iter, c, desc.btree_id, hashed_slot.pos); - bch_btree_iter_link(&hashed_slot, &iter); + bch2_btree_iter_init_intent(&iter, c, desc.btree_id, hashed_slot.pos); + bch2_btree_iter_link(&hashed_slot, &iter); retry: /* * On hash collision, we have to keep the slot we hashed to locked while * we do the insert - to avoid racing with another thread deleting * whatever's in the slot we hashed to: */ - 
ret = bch_btree_iter_traverse(&hashed_slot); + ret = bch2_btree_iter_traverse(&hashed_slot); if (ret) goto err; @@ -293,9 +294,9 @@ retry: * On -EINTR/retry, we dropped locks - always restart from the slot we * hashed to: */ - bch_btree_iter_copy(&iter, &hashed_slot); + bch2_btree_iter_copy(&iter, &hashed_slot); - k = bch_hash_lookup_bkey_at(desc, info, &iter, bkey_i_to_s_c(insert)); + k = bch2_hash_lookup_bkey_at(desc, info, &iter, bkey_i_to_s_c(insert)); ret = btree_iter_err(k); if (ret == -ENOENT) { @@ -310,8 +311,8 @@ retry: * that we could have used, so restart from the * slot we hashed to: */ - bch_btree_iter_copy(&iter, &hashed_slot); - k = bch_hash_hole_at(desc, &iter); + bch2_btree_iter_copy(&iter, &hashed_slot); + k = bch2_hash_hole_at(desc, &iter); if ((ret = btree_iter_err(k))) goto err; } else if (!ret) { @@ -324,7 +325,7 @@ retry: } insert->k.p = iter.pos; - ret = bch_btree_insert_at(c, NULL, NULL, journal_seq, + ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, BTREE_INSERT_ATOMIC, BTREE_INSERT_ENTRY(&iter, insert)); err: @@ -335,12 +336,12 @@ err: * On successful insert, we don't want to clobber ret with error from * iter: */ - bch_btree_iter_unlock(&iter); - bch_btree_iter_unlock(&hashed_slot); + bch2_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&hashed_slot); return ret; } -static inline int bch_hash_delete(const struct bch_hash_desc desc, +static inline int bch2_hash_delete(const struct bch_hash_desc desc, const struct bch_hash_info *info, struct bch_fs *c, u64 inode, u64 *journal_seq, const void *key) @@ -350,17 +351,17 @@ static inline int bch_hash_delete(const struct bch_hash_desc desc, struct bkey_i delete; int ret = -ENOENT; - bch_btree_iter_init_intent(&iter, c, desc.btree_id, + bch2_btree_iter_init_intent(&iter, c, desc.btree_id, POS(inode, desc.hash_key(info, key))); - bch_btree_iter_init(&whiteout_iter, c, desc.btree_id, + bch2_btree_iter_init(&whiteout_iter, c, desc.btree_id, POS(inode, desc.hash_key(info, key))); - bch_btree_iter_link(&iter, &whiteout_iter); + bch2_btree_iter_link(&iter, &whiteout_iter); retry: - k = bch_hash_lookup_at(desc, info, &iter, key); + k = bch2_hash_lookup_at(desc, info, &iter, key); if ((ret = btree_iter_err(k))) goto err; - ret = bch_hash_needs_whiteout(desc, info, &whiteout_iter, &iter); + ret = bch2_hash_needs_whiteout(desc, info, &whiteout_iter, &iter); if (ret < 0) goto err; @@ -368,7 +369,7 @@ retry: delete.k.p = k.k->p; delete.k.type = ret ? 
desc.whiteout_type : KEY_TYPE_DELETED; - ret = bch_btree_insert_at(c, NULL, NULL, journal_seq, + ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, BTREE_INSERT_NOFAIL| BTREE_INSERT_ATOMIC, BTREE_INSERT_ENTRY(&iter, &delete)); @@ -376,8 +377,8 @@ err: if (ret == -EINTR) goto retry; - bch_btree_iter_unlock(&whiteout_iter); - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&whiteout_iter); + bch2_btree_iter_unlock(&iter); return ret; } diff --git a/libbcache/super-io.c b/libbcachefs/super-io.c index 67c03e19..9f41d71d 100644 --- a/libbcache/super-io.c +++ b/libbcachefs/super-io.c @@ -1,6 +1,5 @@ -#include "bcache.h" -#include "blockdev.h" +#include "bcachefs.h" #include "checksum.h" #include "error.h" #include "io.h" @@ -12,12 +11,12 @@ #include <linux/backing-dev.h> #include <linux/sort.h> -static inline void __bch_sb_layout_size_assert(void) +static inline void __bch2_sb_layout_size_assert(void) { BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512); } -struct bch_sb_field *bch_sb_field_get(struct bch_sb *sb, +struct bch_sb_field *bch2_sb_field_get(struct bch_sb *sb, enum bch_sb_field_type type) { struct bch_sb_field *f; @@ -30,7 +29,7 @@ struct bch_sb_field *bch_sb_field_get(struct bch_sb *sb, return NULL; } -void bch_free_super(struct bcache_superblock *sb) +void bch2_free_super(struct bcache_superblock *sb) { if (sb->bio) bio_put(sb->bio); @@ -41,7 +40,7 @@ void bch_free_super(struct bcache_superblock *sb) memset(sb, 0, sizeof(*sb)); } -static int __bch_super_realloc(struct bcache_superblock *sb, unsigned order) +static int __bch2_super_realloc(struct bcache_superblock *sb, unsigned order) { struct bch_sb *new_sb; struct bio *bio; @@ -49,7 +48,7 @@ static int __bch_super_realloc(struct bcache_superblock *sb, unsigned order) if (sb->page_order >= order && sb->sb) return 0; - if (dynamic_fault("bcache:add:super_realloc")) + if (dynamic_fault("bcachefs:add:super_realloc")) return -ENOMEM; bio = bio_kmalloc(GFP_KERNEL, 1 << order); @@ -75,7 +74,7 @@ static int __bch_super_realloc(struct bcache_superblock *sb, unsigned order) return 0; } -static int bch_sb_realloc(struct bcache_superblock *sb, unsigned u64s) +static int bch2_sb_realloc(struct bcache_superblock *sb, unsigned u64s) { u64 new_bytes = __vstruct_bytes(struct bch_sb, u64s); u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits; @@ -88,10 +87,10 @@ static int bch_sb_realloc(struct bcache_superblock *sb, unsigned u64s) return -ENOSPC; } - return __bch_super_realloc(sb, get_order(new_bytes)); + return __bch2_super_realloc(sb, get_order(new_bytes)); } -static int bch_fs_sb_realloc(struct bch_fs *c, unsigned u64s) +static int bch2_fs_sb_realloc(struct bch_fs *c, unsigned u64s) { u64 bytes = __vstruct_bytes(struct bch_sb, u64s); struct bch_sb *sb; @@ -114,7 +113,7 @@ static int bch_fs_sb_realloc(struct bch_fs *c, unsigned u64s) return 0; } -static struct bch_sb_field *__bch_sb_field_resize(struct bch_sb *sb, +static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb *sb, struct bch_sb_field *f, unsigned u64s) { @@ -143,27 +142,27 @@ static struct bch_sb_field *__bch_sb_field_resize(struct bch_sb *sb, return f; } -struct bch_sb_field *bch_sb_field_resize(struct bcache_superblock *sb, +struct bch_sb_field *bch2_sb_field_resize(struct bcache_superblock *sb, enum bch_sb_field_type type, unsigned u64s) { - struct bch_sb_field *f = bch_sb_field_get(sb->sb, type); + struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type); ssize_t old_u64s = f ? 
le32_to_cpu(f->u64s) : 0; ssize_t d = -old_u64s + u64s; - if (bch_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) + if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) return NULL; - f = __bch_sb_field_resize(sb->sb, f, u64s); + f = __bch2_sb_field_resize(sb->sb, f, u64s); f->type = type; return f; } -struct bch_sb_field *bch_fs_sb_field_resize(struct bch_fs *c, +struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *c, enum bch_sb_field_type type, unsigned u64s) { - struct bch_sb_field *f = bch_sb_field_get(c->disk_sb, type); + struct bch_sb_field *f = bch2_sb_field_get(c->disk_sb, type); ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0; ssize_t d = -old_u64s + u64s; struct bch_dev *ca; @@ -171,7 +170,7 @@ struct bch_sb_field *bch_fs_sb_field_resize(struct bch_fs *c, lockdep_assert_held(&c->sb_lock); - if (bch_fs_sb_realloc(c, le32_to_cpu(c->disk_sb->u64s) + d)) + if (bch2_fs_sb_realloc(c, le32_to_cpu(c->disk_sb->u64s) + d)) return NULL; /* XXX: we're not checking that offline device have enough space */ @@ -179,13 +178,13 @@ struct bch_sb_field *bch_fs_sb_field_resize(struct bch_fs *c, for_each_online_member(ca, c, i) { struct bcache_superblock *sb = &ca->disk_sb; - if (bch_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) { + if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) { percpu_ref_put(&ca->ref); return NULL; } } - f = __bch_sb_field_resize(c->disk_sb, f, u64s); + f = __bch2_sb_field_resize(c->disk_sb, f, u64s); f->type = type; return f; } @@ -196,7 +195,7 @@ static const char *validate_sb_layout(struct bch_sb_layout *layout) unsigned i; if (uuid_le_cmp(layout->magic, BCACHE_MAGIC)) - return "Not a bcache superblock layout"; + return "Not a bcachefs superblock layout"; if (layout->layout_type != 0) return "Invalid superblock layout type"; @@ -229,7 +228,7 @@ static int u64_cmp(const void *_l, const void *_r) return l < r ? -1 : l > r ? 
1 : 0; } -const char *bch_validate_journal_layout(struct bch_sb *sb, +const char *bch2_validate_journal_layout(struct bch_sb *sb, struct bch_member_cpu mi) { struct bch_sb_field_journal *journal; @@ -238,11 +237,11 @@ const char *bch_validate_journal_layout(struct bch_sb *sb, unsigned i; u64 *b; - journal = bch_sb_get_journal(sb); + journal = bch2_sb_get_journal(sb); if (!journal) return NULL; - nr = bch_nr_journal_buckets(journal); + nr = bch2_nr_journal_buckets(journal); if (!nr) return NULL; @@ -278,12 +277,12 @@ err: return err; } -static const char *bch_sb_validate_members(struct bch_sb *sb) +static const char *bch2_sb_validate_members(struct bch_sb *sb) { struct bch_sb_field_members *mi; unsigned i; - mi = bch_sb_get_members(sb); + mi = bch2_sb_get_members(sb); if (!mi) return "Invalid superblock: member info area missing"; @@ -292,7 +291,7 @@ static const char *bch_sb_validate_members(struct bch_sb *sb) return "Invalid superblock: bad member info"; for (i = 0; i < sb->nr_devices; i++) { - if (bch_is_zero(mi->members[i].uuid.b, sizeof(uuid_le))) + if (bch2_is_zero(mi->members[i].uuid.b, sizeof(uuid_le))) continue; if (le16_to_cpu(mi->members[i].bucket_size) < @@ -303,7 +302,7 @@ static const char *bch_sb_validate_members(struct bch_sb *sb) return NULL; } -const char *bch_validate_cache_super(struct bcache_superblock *disk_sb) +const char *bch2_validate_cache_super(struct bcache_superblock *disk_sb) { struct bch_sb *sb = disk_sb->sb; struct bch_sb_field *f; @@ -329,10 +328,10 @@ const char *bch_validate_cache_super(struct bcache_superblock *disk_sb) block_size > PAGE_SECTORS) return "Bad block size"; - if (bch_is_zero(sb->user_uuid.b, sizeof(uuid_le))) + if (bch2_is_zero(sb->user_uuid.b, sizeof(uuid_le))) return "Bad user UUID"; - if (bch_is_zero(sb->uuid.b, sizeof(uuid_le))) + if (bch2_is_zero(sb->uuid.b, sizeof(uuid_le))) return "Bad internal UUID"; if (!sb->nr_devices || @@ -405,12 +404,12 @@ const char *bch_validate_cache_super(struct bcache_superblock *disk_sb) return "Invalid superblock: unknown optional field type"; } - err = bch_sb_validate_members(sb); + err = bch2_sb_validate_members(sb); if (err) return err; - sb_mi = bch_sb_get_members(sb); - mi = bch_mi_to_cpu(sb_mi->members + sb->dev_idx); + sb_mi = bch2_sb_get_members(sb); + mi = bch2_mi_to_cpu(sb_mi->members + sb->dev_idx); if (mi.nbuckets > LONG_MAX) return "Too many buckets"; @@ -427,7 +426,7 @@ const char *bch_validate_cache_super(struct bcache_superblock *disk_sb) mi.bucket_size * mi.nbuckets) return "Invalid superblock: device too small"; - err = bch_validate_journal_layout(sb, mi); + err = bch2_validate_journal_layout(sb, mi); if (err) return err; @@ -436,7 +435,7 @@ const char *bch_validate_cache_super(struct bcache_superblock *disk_sb) /* device open: */ -static const char *bch_blkdev_open(const char *path, fmode_t mode, +static const char *bch2_blkdev_open(const char *path, fmode_t mode, void *holder, struct block_device **ret) { struct block_device *bdev; @@ -457,10 +456,10 @@ static const char *bch_blkdev_open(const char *path, fmode_t mode, return NULL; } -static void bch_sb_update(struct bch_fs *c) +static void bch2_sb_update(struct bch_fs *c) { struct bch_sb *src = c->disk_sb; - struct bch_sb_field_members *mi = bch_sb_get_members(src); + struct bch_sb_field_members *mi = bch2_sb_get_members(src); struct bch_dev *ca; unsigned i; @@ -481,7 +480,7 @@ static void bch_sb_update(struct bch_fs *c) c->sb.time_precision = le32_to_cpu(src->time_precision); for_each_member_device(ca, c, i) - ca->mi = 
bch_mi_to_cpu(mi->members + i); + ca->mi = bch2_mi_to_cpu(mi->members + i); } /* doesn't copy member info */ @@ -510,45 +509,45 @@ static void __copy_super(struct bch_sb *dst, struct bch_sb *src) if (src_f->type == BCH_SB_FIELD_journal) continue; - dst_f = bch_sb_field_get(dst, src_f->type); - dst_f = __bch_sb_field_resize(dst, dst_f, + dst_f = bch2_sb_field_get(dst, src_f->type); + dst_f = __bch2_sb_field_resize(dst, dst_f, le32_to_cpu(src_f->u64s)); memcpy(dst_f, src_f, vstruct_bytes(src_f)); } } -int bch_sb_to_fs(struct bch_fs *c, struct bch_sb *src) +int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src) { struct bch_sb_field_journal *journal_buckets = - bch_sb_get_journal(src); + bch2_sb_get_journal(src); unsigned journal_u64s = journal_buckets ? le32_to_cpu(journal_buckets->field.u64s) : 0; lockdep_assert_held(&c->sb_lock); - if (bch_fs_sb_realloc(c, le32_to_cpu(src->u64s) - journal_u64s)) + if (bch2_fs_sb_realloc(c, le32_to_cpu(src->u64s) - journal_u64s)) return -ENOMEM; __copy_super(c->disk_sb, src); - bch_sb_update(c); + bch2_sb_update(c); return 0; } -int bch_sb_from_fs(struct bch_fs *c, struct bch_dev *ca) +int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca) { struct bch_sb *src = c->disk_sb, *dst = ca->disk_sb.sb; struct bch_sb_field_journal *journal_buckets = - bch_sb_get_journal(dst); + bch2_sb_get_journal(dst); unsigned journal_u64s = journal_buckets ? le32_to_cpu(journal_buckets->field.u64s) : 0; unsigned u64s = le32_to_cpu(src->u64s) + journal_u64s; int ret; - ret = bch_sb_realloc(&ca->disk_sb, u64s); + ret = bch2_sb_realloc(&ca->disk_sb, u64s); if (ret) return ret; @@ -570,13 +569,13 @@ reread: sb->bio->bi_iter.bi_sector = offset; sb->bio->bi_iter.bi_size = PAGE_SIZE << sb->page_order; bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META); - bch_bio_map(sb->bio, sb->sb); + bch2_bio_map(sb->bio, sb->sb); if (submit_bio_wait(sb->bio)) return "IO error"; if (uuid_le_cmp(sb->sb->magic, BCACHE_MAGIC)) - return "Not a bcache superblock"; + return "Not a bcachefs superblock"; if (le64_to_cpu(sb->sb->version) != BCACHE_SB_VERSION_CDEV_V4) return "Unsupported superblock version"; @@ -588,7 +587,7 @@ reread: order = get_order(bytes); if (order > sb->page_order) { - if (__bch_super_realloc(sb, order)) + if (__bch2_super_realloc(sb, order)) return "cannot allocate memory"; goto reread; } @@ -600,13 +599,13 @@ reread: csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb), (struct nonce) { 0 }, sb->sb); - if (bch_crc_cmp(csum, sb->sb->csum)) + if (bch2_crc_cmp(csum, sb->sb->csum)) return "bad checksum reading superblock"; return NULL; } -const char *bch_read_super(struct bcache_superblock *sb, +const char *bch2_read_super(struct bcache_superblock *sb, struct bch_opts opts, const char *path) { @@ -624,16 +623,16 @@ const char *bch_read_super(struct bcache_superblock *sb, if (!(opt_defined(opts.nochanges) && opts.nochanges)) sb->mode |= FMODE_WRITE; - err = bch_blkdev_open(path, sb->mode, sb, &sb->bdev); + err = bch2_blkdev_open(path, sb->mode, sb, &sb->bdev); if (err) return err; err = "cannot allocate memory"; - if (__bch_super_realloc(sb, 0)) + if (__bch2_super_realloc(sb, 0)) goto err; err = "dynamic fault"; - if (bch_fs_init_fault("read_super")) + if (bch2_fs_init_fault("read_super")) goto err; err = read_one_super(sb, offset); @@ -660,7 +659,7 @@ const char *bch_read_super(struct bcache_superblock *sb, * use sb buffer to read layout, since sb buffer is page aligned but * layout won't be: */ - bch_bio_map(sb->bio, sb->sb); + bch2_bio_map(sb->bio, sb->sb); err = "IO error"; 
if (submit_bio_wait(sb->bio)) @@ -696,7 +695,7 @@ got_super: return NULL; err: - bch_free_super(sb); + bch2_free_super(sb); return err; } @@ -708,9 +707,7 @@ static void write_super_endio(struct bio *bio) /* XXX: return errors directly */ - bch_dev_fatal_io_err_on(bio->bi_error, ca, "superblock write"); - - bch_account_io_completion(ca); + bch2_dev_fatal_io_err_on(bio->bi_error, ca, "superblock write"); closure_put(&ca->fs->sb_write); percpu_ref_put(&ca->io_ref); @@ -742,13 +739,13 @@ static bool write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx) bio->bi_end_io = write_super_endio; bio->bi_private = ca; bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META); - bch_bio_map(bio, sb); + bch2_bio_map(bio, sb); - closure_bio_submit_punt(bio, &c->sb_write, c); + closure_bio_submit(bio, &c->sb_write); return true; } -void bch_write_super(struct bch_fs *c) +void bch2_write_super(struct bch_fs *c) { struct closure *cl = &c->sb_write; struct bch_dev *ca; @@ -762,7 +759,7 @@ void bch_write_super(struct bch_fs *c) le64_add_cpu(&c->disk_sb->seq, 1); for_each_online_member(ca, c, i) - bch_sb_from_fs(c, ca); + bch2_sb_from_fs(c, ca); if (c->opts.nochanges) goto out; @@ -778,10 +775,10 @@ void bch_write_super(struct bch_fs *c) } while (wrote); out: /* Make new options visible after they're persistent: */ - bch_sb_update(c); + bch2_sb_update(c); } -void bch_check_mark_super_slowpath(struct bch_fs *c, const struct bkey_i *k, +void bch2_check_mark_super_slowpath(struct bch_fs *c, const struct bkey_i *k, bool meta) { struct bch_member *mi; @@ -792,12 +789,12 @@ void bch_check_mark_super_slowpath(struct bch_fs *c, const struct bkey_i *k, mutex_lock(&c->sb_lock); /* recheck, might have raced */ - if (bch_check_super_marked(c, k, meta)) { + if (bch2_check_super_marked(c, k, meta)) { mutex_unlock(&c->sb_lock); return; } - mi = bch_sb_get_members(c->disk_sb)->members; + mi = bch2_sb_get_members(c->disk_sb)->members; extent_for_each_ptr(e, ptr) if (!ptr->cached) { @@ -815,6 +812,6 @@ void bch_check_mark_super_slowpath(struct bch_fs *c, const struct bkey_i *k, ? 
SET_BCH_SB_META_REPLICAS_HAVE : SET_BCH_SB_DATA_REPLICAS_HAVE)(c->disk_sb, nr_replicas); - bch_write_super(c); + bch2_write_super(c); mutex_unlock(&c->sb_lock); } diff --git a/libbcache/super-io.h b/libbcachefs/super-io.h index 1a9bd309..8f0d82db 100644 --- a/libbcache/super-io.h +++ b/libbcachefs/super-io.h @@ -6,10 +6,10 @@ #include <asm/byteorder.h> -struct bch_sb_field *bch_sb_field_get(struct bch_sb *, enum bch_sb_field_type); -struct bch_sb_field *bch_sb_field_resize(struct bcache_superblock *, +struct bch_sb_field *bch2_sb_field_get(struct bch_sb *, enum bch_sb_field_type); +struct bch_sb_field *bch2_sb_field_resize(struct bcache_superblock *, enum bch_sb_field_type, unsigned); -struct bch_sb_field *bch_fs_sb_field_resize(struct bch_fs *, +struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *, enum bch_sb_field_type, unsigned); #define field_to_type(_f, _name) \ @@ -17,23 +17,23 @@ struct bch_sb_field *bch_fs_sb_field_resize(struct bch_fs *, #define BCH_SB_FIELD_TYPE(_name) \ static inline struct bch_sb_field_##_name * \ -bch_sb_get_##_name(struct bch_sb *sb) \ +bch2_sb_get_##_name(struct bch_sb *sb) \ { \ - return field_to_type(bch_sb_field_get(sb, \ + return field_to_type(bch2_sb_field_get(sb, \ BCH_SB_FIELD_##_name), _name); \ } \ \ static inline struct bch_sb_field_##_name * \ -bch_sb_resize_##_name(struct bcache_superblock *sb, unsigned u64s) \ +bch2_sb_resize_##_name(struct bcache_superblock *sb, unsigned u64s) \ { \ - return field_to_type(bch_sb_field_resize(sb, \ + return field_to_type(bch2_sb_field_resize(sb, \ BCH_SB_FIELD_##_name, u64s), _name); \ } \ \ static inline struct bch_sb_field_##_name * \ -bch_fs_sb_resize_##_name(struct bch_fs *c, unsigned u64s) \ +bch2_fs_sb_resize_##_name(struct bch_fs *c, unsigned u64s) \ { \ - return field_to_type(bch_fs_sb_field_resize(c, \ + return field_to_type(bch2_fs_sb_field_resize(c, \ BCH_SB_FIELD_##_name, u64s), _name); \ } @@ -41,8 +41,8 @@ BCH_SB_FIELD_TYPE(journal); BCH_SB_FIELD_TYPE(members); BCH_SB_FIELD_TYPE(crypt); -static inline bool bch_sb_test_feature(struct bch_sb *sb, - enum bch_sb_features f) +static inline bool bch2_sb_test_feature(struct bch_sb *sb, + enum bch_sb_features f) { unsigned w = f / 64; unsigned b = f % 64; @@ -50,10 +50,10 @@ static inline bool bch_sb_test_feature(struct bch_sb *sb, return le64_to_cpu(sb->features[w]) & (1ULL << b); } -static inline void bch_sb_set_feature(struct bch_sb *sb, - enum bch_sb_features f) +static inline void bch2_sb_set_feature(struct bch_sb *sb, + enum bch_sb_features f) { - if (!bch_sb_test_feature(sb, f)) { + if (!bch2_sb_test_feature(sb, f)) { unsigned w = f / 64; unsigned b = f % 64; @@ -61,7 +61,7 @@ static inline void bch_sb_set_feature(struct bch_sb *sb, } } -static inline __le64 bch_sb_magic(struct bch_fs *c) +static inline __le64 bch2_sb_magic(struct bch_fs *c) { __le64 ret; memcpy(&ret, &c->sb.uuid, sizeof(ret)); @@ -70,20 +70,20 @@ static inline __le64 bch_sb_magic(struct bch_fs *c) static inline __u64 jset_magic(struct bch_fs *c) { - return __le64_to_cpu(bch_sb_magic(c) ^ JSET_MAGIC); + return __le64_to_cpu(bch2_sb_magic(c) ^ JSET_MAGIC); } static inline __u64 pset_magic(struct bch_fs *c) { - return __le64_to_cpu(bch_sb_magic(c) ^ PSET_MAGIC); + return __le64_to_cpu(bch2_sb_magic(c) ^ PSET_MAGIC); } static inline __u64 bset_magic(struct bch_fs *c) { - return __le64_to_cpu(bch_sb_magic(c) ^ BSET_MAGIC); + return __le64_to_cpu(bch2_sb_magic(c) ^ BSET_MAGIC); } -static inline struct bch_member_cpu bch_mi_to_cpu(struct bch_member *mi) +static inline struct 
bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi) { return (struct bch_member_cpu) { .nbuckets = le64_to_cpu(mi->nbuckets), @@ -95,29 +95,29 @@ static inline struct bch_member_cpu bch_mi_to_cpu(struct bch_member *mi) .has_data = BCH_MEMBER_HAS_DATA(mi), .replacement = BCH_MEMBER_REPLACEMENT(mi), .discard = BCH_MEMBER_DISCARD(mi), - .valid = !bch_is_zero(mi->uuid.b, sizeof(uuid_le)), + .valid = !bch2_is_zero(mi->uuid.b, sizeof(uuid_le)), }; } -int bch_sb_to_fs(struct bch_fs *, struct bch_sb *); -int bch_sb_from_fs(struct bch_fs *, struct bch_dev *); +int bch2_sb_to_fs(struct bch_fs *, struct bch_sb *); +int bch2_sb_from_fs(struct bch_fs *, struct bch_dev *); -void bch_free_super(struct bcache_superblock *); -int bch_super_realloc(struct bcache_superblock *, unsigned); +void bch2_free_super(struct bcache_superblock *); +int bch2_super_realloc(struct bcache_superblock *, unsigned); -const char *bch_validate_journal_layout(struct bch_sb *, - struct bch_member_cpu); -const char *bch_validate_cache_super(struct bcache_superblock *); +const char *bch2_validate_journal_layout(struct bch_sb *, + struct bch_member_cpu); +const char *bch2_validate_cache_super(struct bcache_superblock *); -const char *bch_read_super(struct bcache_superblock *, +const char *bch2_read_super(struct bcache_superblock *, struct bch_opts, const char *); -void bch_write_super(struct bch_fs *); +void bch2_write_super(struct bch_fs *); -void bch_check_mark_super_slowpath(struct bch_fs *, - const struct bkey_i *, bool); +void bch2_check_mark_super_slowpath(struct bch_fs *, + const struct bkey_i *, bool); -static inline bool bch_check_super_marked(struct bch_fs *c, - const struct bkey_i *k, bool meta) +static inline bool bch2_check_super_marked(struct bch_fs *c, + const struct bkey_i *k, bool meta) { struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k); const struct bch_extent_ptr *ptr; @@ -147,13 +147,13 @@ static inline bool bch_check_super_marked(struct bch_fs *c, return ret; } -static inline void bch_check_mark_super(struct bch_fs *c, - const struct bkey_i *k, bool meta) +static inline void bch2_check_mark_super(struct bch_fs *c, + const struct bkey_i *k, bool meta) { - if (bch_check_super_marked(c, k, meta)) + if (bch2_check_super_marked(c, k, meta)) return; - bch_check_mark_super_slowpath(c, k, meta); + bch2_check_mark_super_slowpath(c, k, meta); } #endif /* _BCACHE_SUPER_IO_H */ diff --git a/libbcache/super.c b/libbcachefs/super.c index f5f74936..8aa5cc00 100644 --- a/libbcache/super.c +++ b/libbcachefs/super.c @@ -1,13 +1,12 @@ /* - * bcache setup/teardown code, and some metadata io - read a superblock and + * bcachefs setup/teardown code, and some metadata io - read a superblock and * figure out what to do with it. * * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> * Copyright 2012 Google, Inc. 
*/ -#include "bcache.h" -#include "blockdev.h" +#include "bcachefs.h" #include "alloc.h" #include "btree_cache.h" #include "btree_gc.h" @@ -28,12 +27,9 @@ #include "move.h" #include "migrate.h" #include "movinggc.h" -#include "notify.h" -#include "stats.h" #include "super.h" #include "super-io.h" #include "tier.h" -#include "writeback.h" #include <linux/backing-dev.h> #include <linux/blkdev.h> @@ -45,11 +41,10 @@ #include <linux/module.h> #include <linux/percpu.h> #include <linux/random.h> -#include <linux/reboot.h> #include <linux/sysfs.h> #include <crypto/hash.h> -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>"); @@ -61,20 +56,18 @@ static const uuid_le invalid_uuid = { } }; -static struct kset *bcache_kset; +static struct kset *bcachefs_kset; static LIST_HEAD(bch_fs_list); static DEFINE_MUTEX(bch_fs_list_lock); static DECLARE_WAIT_QUEUE_HEAD(bch_read_only_wait); -struct workqueue_struct *bcache_io_wq; -struct crypto_shash *bch_sha256; -static void bch_dev_free(struct bch_dev *); -static int bch_dev_alloc(struct bch_fs *, unsigned); -static int bch_dev_sysfs_online(struct bch_dev *); -static void __bch_dev_read_only(struct bch_fs *, struct bch_dev *); +static void bch2_dev_free(struct bch_dev *); +static int bch2_dev_alloc(struct bch_fs *, unsigned); +static int bch2_dev_sysfs_online(struct bch_dev *); +static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *); -struct bch_fs *bch_bdev_to_fs(struct block_device *bdev) +struct bch_fs *bch2_bdev_to_fs(struct block_device *bdev) { struct bch_fs *c; struct bch_dev *ca; @@ -97,7 +90,7 @@ found: return c; } -static struct bch_fs *__bch_uuid_to_fs(uuid_le uuid) +static struct bch_fs *__bch2_uuid_to_fs(uuid_le uuid) { struct bch_fs *c; @@ -110,12 +103,12 @@ static struct bch_fs *__bch_uuid_to_fs(uuid_le uuid) return NULL; } -struct bch_fs *bch_uuid_to_fs(uuid_le uuid) +struct bch_fs *bch2_uuid_to_fs(uuid_le uuid) { struct bch_fs *c; mutex_lock(&bch_fs_list_lock); - c = __bch_uuid_to_fs(uuid); + c = __bch2_uuid_to_fs(uuid); if (c) closure_get(&c->cl); mutex_unlock(&bch_fs_list_lock); @@ -123,7 +116,7 @@ struct bch_fs *bch_uuid_to_fs(uuid_le uuid) return c; } -int bch_congested(struct bch_fs *c, int bdi_bits) +int bch2_congested(struct bch_fs *c, int bdi_bits) { struct backing_dev_info *bdi; struct bch_dev *ca; @@ -160,11 +153,11 @@ int bch_congested(struct bch_fs *c, int bdi_bits) return ret; } -static int bch_congested_fn(void *data, int bdi_bits) +static int bch2_congested_fn(void *data, int bdi_bits) { struct bch_fs *c = data; - return bch_congested(c, bdi_bits); + return bch2_congested(c, bdi_bits); } /* Filesystem RO/RW: */ @@ -184,27 +177,27 @@ static int bch_congested_fn(void *data, int bdi_bits) * - allocator depends on the journal (when it rewrites prios and gens) */ -static void __bch_fs_read_only(struct bch_fs *c) +static void __bch2_fs_read_only(struct bch_fs *c) { struct bch_dev *ca; unsigned i; - bch_tiering_stop(c); + bch2_tiering_stop(c); for_each_member_device(ca, c, i) - bch_moving_gc_stop(ca); + bch2_moving_gc_stop(ca); - bch_gc_thread_stop(c); + bch2_gc_thread_stop(c); - bch_btree_flush(c); + bch2_btree_flush(c); for_each_member_device(ca, c, i) - bch_dev_allocator_stop(ca); + bch2_dev_allocator_stop(ca); - bch_fs_journal_stop(&c->journal); + bch2_fs_journal_stop(&c->journal); } -static void bch_writes_disabled(struct percpu_ref *writes) +static void bch2_writes_disabled(struct percpu_ref *writes) { struct bch_fs 
*c = container_of(writes, struct bch_fs, writes); @@ -212,7 +205,7 @@ static void bch_writes_disabled(struct percpu_ref *writes) wake_up(&bch_read_only_wait); } -void bch_fs_read_only(struct bch_fs *c) +void bch2_fs_read_only(struct bch_fs *c) { mutex_lock(&c->state_lock); if (c->state != BCH_FS_STARTING && @@ -222,15 +215,13 @@ void bch_fs_read_only(struct bch_fs *c) if (test_bit(BCH_FS_ERROR, &c->flags)) goto out; - trace_fs_read_only(c); - /* * Block new foreground-end write operations from starting - any new * writes will return -EROFS: * * (This is really blocking new _allocations_, writes to previously * allocated space can still happen until stopping the allocator in - * bch_dev_allocator_stop()). + * bch2_dev_allocator_stop()). */ percpu_ref_kill(&c->writes); @@ -238,7 +229,7 @@ void bch_fs_read_only(struct bch_fs *c) cancel_delayed_work(&c->pd_controllers_update); c->foreground_write_pd.rate.rate = UINT_MAX; - bch_wake_delayed_writes((unsigned long) c); + bch2_wake_delayed_writes((unsigned long) c); /* * If we're not doing an emergency shutdown, we want to wait on @@ -255,53 +246,51 @@ void bch_fs_read_only(struct bch_fs *c) test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) || test_bit(BCH_FS_EMERGENCY_RO, &c->flags)); - __bch_fs_read_only(c); + __bch2_fs_read_only(c); wait_event(bch_read_only_wait, test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags)); clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags); - if (!bch_journal_error(&c->journal) && + if (!bch2_journal_error(&c->journal) && !test_bit(BCH_FS_ERROR, &c->flags)) { mutex_lock(&c->sb_lock); SET_BCH_SB_CLEAN(c->disk_sb, true); - bch_write_super(c); + bch2_write_super(c); mutex_unlock(&c->sb_lock); } c->state = BCH_FS_RO; - bch_notify_fs_read_only(c); - trace_fs_read_only_done(c); out: mutex_unlock(&c->state_lock); } -static void bch_fs_read_only_work(struct work_struct *work) +static void bch2_fs_read_only_work(struct work_struct *work) { struct bch_fs *c = container_of(work, struct bch_fs, read_only_work); - bch_fs_read_only(c); + bch2_fs_read_only(c); } -static void bch_fs_read_only_async(struct bch_fs *c) +static void bch2_fs_read_only_async(struct bch_fs *c) { queue_work(system_long_wq, &c->read_only_work); } -bool bch_fs_emergency_read_only(struct bch_fs *c) +bool bch2_fs_emergency_read_only(struct bch_fs *c) { bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags); - bch_fs_read_only_async(c); - bch_journal_halt(&c->journal); + bch2_fs_read_only_async(c); + bch2_journal_halt(&c->journal); wake_up(&bch_read_only_wait); return ret; } -const char *bch_fs_read_write(struct bch_fs *c) +const char *bch2_fs_read_write(struct bch_fs *c) { struct bch_dev *ca; const char *err = NULL; @@ -314,24 +303,24 @@ const char *bch_fs_read_write(struct bch_fs *c) err = "error starting allocator thread"; for_each_rw_member(ca, c, i) - if (bch_dev_allocator_start(ca)) { + if (bch2_dev_allocator_start(ca)) { percpu_ref_put(&ca->io_ref); goto err; } err = "error starting btree GC thread"; - if (bch_gc_thread_start(c)) + if (bch2_gc_thread_start(c)) goto err; err = "error starting moving GC thread"; for_each_rw_member(ca, c, i) - if (bch_moving_gc_start(ca)) { + if (bch2_moving_gc_start(ca)) { percpu_ref_put(&ca->io_ref); goto err; } err = "error starting tiering thread"; - if (bch_tiering_start(c)) + if (bch2_tiering_start(c)) goto err; schedule_delayed_work(&c->pd_controllers_update, 5 * HZ); @@ -345,21 +334,20 @@ out: mutex_unlock(&c->state_lock); return err; err: - __bch_fs_read_only(c); + __bch2_fs_read_only(c); goto out; } /* 
Filesystem startup/shutdown: */ -static void bch_fs_free(struct bch_fs *c) +static void bch2_fs_free(struct bch_fs *c) { - bch_fs_encryption_exit(c); - bch_fs_btree_exit(c); - bch_fs_journal_exit(&c->journal); - bch_io_clock_exit(&c->io_clock[WRITE]); - bch_io_clock_exit(&c->io_clock[READ]); - bch_fs_compress_exit(c); - bch_fs_blockdev_exit(c); + bch2_fs_encryption_exit(c); + bch2_fs_btree_exit(c); + bch2_fs_journal_exit(&c->journal); + bch2_io_clock_exit(&c->io_clock[WRITE]); + bch2_io_clock_exit(&c->io_clock[READ]); + bch2_fs_compress_exit(c); bdi_destroy(&c->bdi); lg_lock_free(&c->usage_lock); free_percpu(c->usage_percpu); @@ -384,25 +372,24 @@ static void bch_fs_free(struct bch_fs *c) module_put(THIS_MODULE); } -static void bch_fs_exit(struct bch_fs *c) +static void bch2_fs_exit(struct bch_fs *c) { unsigned i; del_timer_sync(&c->foreground_write_wakeup); cancel_delayed_work_sync(&c->pd_controllers_update); cancel_work_sync(&c->read_only_work); - cancel_work_sync(&c->bio_submit_work); cancel_work_sync(&c->read_retry_work); for (i = 0; i < c->sb.nr_devices; i++) if (c->devs[i]) - bch_dev_free(c->devs[i]); + bch2_dev_free(c->devs[i]); closure_debug_destroy(&c->cl); kobject_put(&c->kobj); } -static void bch_fs_offline(struct bch_fs *c) +static void bch2_fs_offline(struct bch_fs *c) { struct bch_dev *ca; unsigned i; @@ -415,113 +402,46 @@ static void bch_fs_offline(struct bch_fs *c) if (ca->kobj.state_in_sysfs && ca->disk_sb.bdev) sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj, - "bcache"); + "bcachefs"); if (c->kobj.state_in_sysfs) kobject_del(&c->kobj); - bch_fs_debug_exit(c); - bch_fs_chardev_exit(c); - - bch_cache_accounting_destroy(&c->accounting); + bch2_fs_debug_exit(c); + bch2_fs_chardev_exit(c); kobject_put(&c->time_stats); kobject_put(&c->opts_dir); kobject_put(&c->internal); - __bch_fs_read_only(c); + __bch2_fs_read_only(c); } -/* - * should be __bch_fs_stop4 - block devices are closed, now we can finally - * free it - */ -void bch_fs_release(struct kobject *kobj) +void bch2_fs_release(struct kobject *kobj) { struct bch_fs *c = container_of(kobj, struct bch_fs, kobj); - bch_notify_fs_stopped(c); - bch_fs_free(c); -} - -/* - * All activity on the filesystem should have stopped now - close devices: - */ -static void __bch_fs_stop3(struct closure *cl) -{ - struct bch_fs *c = container_of(cl, struct bch_fs, cl); - - bch_fs_exit(c); -} - -/* - * Openers (i.e. 
block devices) should have exited, shutdown all userspace - * interfaces and wait for &c->cl to hit 0 - */ -static void __bch_fs_stop2(struct closure *cl) -{ - struct bch_fs *c = container_of(cl, struct bch_fs, caching); - - bch_fs_offline(c); - - closure_return(cl); -} - -/* - * First phase of the shutdown process that's kicked off by bch_fs_stop_async(); - * we haven't waited for anything to stop yet, we're just punting to process - * context to shut down block devices: - */ -static void __bch_fs_stop1(struct closure *cl) -{ - struct bch_fs *c = container_of(cl, struct bch_fs, caching); - - bch_blockdevs_stop(c); - - continue_at(cl, __bch_fs_stop2, system_wq); + bch2_fs_free(c); } -void bch_fs_stop_async(struct bch_fs *c) -{ - mutex_lock(&c->state_lock); - if (c->state != BCH_FS_STOPPING) { - c->state = BCH_FS_STOPPING; - closure_queue(&c->caching); - } - mutex_unlock(&c->state_lock); -} - -void bch_fs_stop(struct bch_fs *c) +void bch2_fs_stop(struct bch_fs *c) { mutex_lock(&c->state_lock); BUG_ON(c->state == BCH_FS_STOPPING); c->state = BCH_FS_STOPPING; mutex_unlock(&c->state_lock); - bch_blockdevs_stop(c); - - closure_sync(&c->caching); - closure_debug_destroy(&c->caching); + bch2_fs_offline(c); - bch_fs_offline(c); - - closure_put(&c->cl); closure_sync(&c->cl); - bch_fs_exit(c); -} - -/* Stop, detaching from backing devices: */ -void bch_fs_detach(struct bch_fs *c) -{ - if (!test_and_set_bit(BCH_FS_DETACHING, &c->flags)) - bch_fs_stop_async(c); + bch2_fs_exit(c); } #define alloc_bucket_pages(gfp, ca) \ ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(ca)))) -static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts) +static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) { struct bch_sb_field_members *mi; struct bch_fs *c; @@ -537,11 +457,10 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts) mutex_init(&c->state_lock); mutex_init(&c->sb_lock); - INIT_RADIX_TREE(&c->devices, GFP_KERNEL); mutex_init(&c->btree_cache_lock); mutex_init(&c->bucket_lock); mutex_init(&c->btree_root_lock); - INIT_WORK(&c->read_only_work, bch_fs_read_only_work); + INIT_WORK(&c->read_only_work, bch2_fs_read_only_work); init_rwsem(&c->gc_lock); @@ -550,11 +469,10 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts) BCH_TIME_STATS() #undef BCH_TIME_STAT - bch_fs_allocator_init(c); - bch_fs_tiering_init(c); + bch2_fs_allocator_init(c); + bch2_fs_tiering_init(c); INIT_LIST_HEAD(&c->list); - INIT_LIST_HEAD(&c->cached_devs); INIT_LIST_HEAD(&c->btree_cache); INIT_LIST_HEAD(&c->btree_cache_freeable); INIT_LIST_HEAD(&c->btree_cache_freed); @@ -564,11 +482,9 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts) mutex_init(&c->btree_interior_update_lock); mutex_init(&c->bio_bounce_pages_lock); - INIT_WORK(&c->bio_submit_work, bch_bio_submit_work); - spin_lock_init(&c->bio_submit_lock); bio_list_init(&c->read_retry_list); spin_lock_init(&c->read_retry_lock); - INIT_WORK(&c->read_retry_work, bch_read_retry_work); + INIT_WORK(&c->read_retry_work, bch2_read_retry_work); mutex_init(&c->zlib_workspace_lock); seqcount_init(&c->gc_pos_lock); @@ -578,11 +494,7 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts) c->prio_clock[WRITE].hand = 1; c->prio_clock[WRITE].min_prio = 0; - c->congested_read_threshold_us = 2000; - c->congested_write_threshold_us = 20000; - c->error_limit = 16 << IO_ERROR_SHIFT; init_waitqueue_head(&c->writeback_wait); - c->writeback_pages_max = (256 << 10) 
/ PAGE_SIZE; c->copy_gc_enabled = 1; @@ -596,11 +508,9 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts) c->journal.blocked_time = &c->journal_blocked_time; c->journal.flush_seq_time = &c->journal_flush_seq_time; - mutex_init(&c->uevent_lock); - mutex_lock(&c->sb_lock); - if (bch_sb_to_fs(c, sb)) { + if (bch2_sb_to_fs(c, sb)) { mutex_unlock(&c->sb_lock); goto err; } @@ -609,15 +519,15 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts) scnprintf(c->name, sizeof(c->name), "%pU", &c->sb.user_uuid); - bch_opts_apply(&c->opts, bch_sb_opts(sb)); - bch_opts_apply(&c->opts, opts); + bch2_opts_apply(&c->opts, bch2_sb_opts(sb)); + bch2_opts_apply(&c->opts, opts); c->opts.nochanges |= c->opts.noreplay; c->opts.read_only |= c->opts.nochanges; c->block_bits = ilog2(c->sb.block_size); - if (bch_fs_init_fault("fs_alloc")) + if (bch2_fs_init_fault("fs_alloc")) goto err; iter_size = (btree_blocks(c) + 1) * 2 * @@ -625,11 +535,11 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts) journal_entry_bytes = 512U << BCH_SB_JOURNAL_ENTRY_SIZE(sb); - if (!(c->wq = alloc_workqueue("bcache", + if (!(c->wq = alloc_workqueue("bcachefs", WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) || !(c->copygc_wq = alloc_workqueue("bcache_copygc", WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) || - percpu_ref_init(&c->writes, bch_writes_disabled, 0, GFP_KERNEL) || + percpu_ref_init(&c->writes, bch2_writes_disabled, 0, GFP_KERNEL) || mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1, sizeof(struct btree_reserve)) || mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1, @@ -648,25 +558,24 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts) lg_lock_init(&c->usage_lock) || mempool_init_page_pool(&c->btree_bounce_pool, 1, ilog2(btree_pages(c))) || - bdi_setup_and_register(&c->bdi, "bcache") || - bch_fs_blockdev_init(c) || - bch_io_clock_init(&c->io_clock[READ]) || - bch_io_clock_init(&c->io_clock[WRITE]) || - bch_fs_journal_init(&c->journal, journal_entry_bytes) || - bch_fs_btree_init(c) || - bch_fs_encryption_init(c) || - bch_fs_compress_init(c) || - bch_check_set_has_compressed_data(c, c->opts.compression)) + bdi_setup_and_register(&c->bdi, "bcachefs") || + bch2_io_clock_init(&c->io_clock[READ]) || + bch2_io_clock_init(&c->io_clock[WRITE]) || + bch2_fs_journal_init(&c->journal, journal_entry_bytes) || + bch2_fs_btree_init(c) || + bch2_fs_encryption_init(c) || + bch2_fs_compress_init(c) || + bch2_check_set_has_compressed_data(c, c->opts.compression)) goto err; c->bdi.ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE; - c->bdi.congested_fn = bch_congested_fn; + c->bdi.congested_fn = bch2_congested_fn; c->bdi.congested_data = c; - mi = bch_sb_get_members(c->disk_sb); + mi = bch2_sb_get_members(c->disk_sb); for (i = 0; i < c->sb.nr_devices; i++) - if (!bch_is_zero(mi->members[i].uuid.b, sizeof(uuid_le)) && - bch_dev_alloc(c, i)) + if (!bch2_is_zero(mi->members[i].uuid.b, sizeof(uuid_le)) && + bch2_dev_alloc(c, i)) goto err; /* @@ -675,26 +584,18 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts) */ closure_init(&c->cl, NULL); - c->kobj.kset = bcache_kset; - kobject_init(&c->kobj, &bch_fs_ktype); - kobject_init(&c->internal, &bch_fs_internal_ktype); - kobject_init(&c->opts_dir, &bch_fs_opts_dir_ktype); - kobject_init(&c->time_stats, &bch_fs_time_stats_ktype); - - bch_cache_accounting_init(&c->accounting, &c->cl); - - closure_init(&c->caching, &c->cl); - set_closure_fn(&c->caching, __bch_fs_stop1, 
system_wq); - - closure_get(&c->cl); - continue_at_noreturn(&c->cl, __bch_fs_stop3, system_wq); + c->kobj.kset = bcachefs_kset; + kobject_init(&c->kobj, &bch2_fs_ktype); + kobject_init(&c->internal, &bch2_fs_internal_ktype); + kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype); + kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype); return c; err: - bch_fs_free(c); + bch2_fs_free(c); return NULL; } -static const char *__bch_fs_online(struct bch_fs *c) +static const char *__bch2_fs_online(struct bch_fs *c) { struct bch_dev *ca; const char *err = NULL; @@ -706,35 +607,28 @@ static const char *__bch_fs_online(struct bch_fs *c) if (!list_empty(&c->list)) return NULL; - if (__bch_uuid_to_fs(c->sb.uuid)) + if (__bch2_uuid_to_fs(c->sb.uuid)) return "filesystem UUID already open"; - ret = bch_fs_chardev_init(c); + ret = bch2_fs_chardev_init(c); if (ret) return "error creating character device"; - bch_fs_debug_init(c); + bch2_fs_debug_init(c); if (kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) || kobject_add(&c->internal, &c->kobj, "internal") || kobject_add(&c->opts_dir, &c->kobj, "options") || - kobject_add(&c->time_stats, &c->kobj, "time_stats") || - bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) + kobject_add(&c->time_stats, &c->kobj, "time_stats")) return "error creating sysfs objects"; mutex_lock(&c->state_lock); err = "error creating sysfs objects"; __for_each_member_device(ca, c, i) - if (bch_dev_sysfs_online(ca)) + if (bch2_dev_sysfs_online(ca)) goto err; - err = "can't bring up blockdev volumes"; - if (bch_blockdev_volumes_start(c)) - goto err; - - bch_attach_backing_devs(c); - list_add(&c->list, &bch_fs_list); err = NULL; err: @@ -742,18 +636,18 @@ err: return err; } -static const char *bch_fs_online(struct bch_fs *c) +static const char *bch2_fs_online(struct bch_fs *c) { const char *err; mutex_lock(&bch_fs_list_lock); - err = __bch_fs_online(c); + err = __bch2_fs_online(c); mutex_unlock(&bch_fs_list_lock); return err; } -static const char *__bch_fs_start(struct bch_fs *c) +static const char *__bch2_fs_start(struct bch_fs *c) { const char *err = "cannot allocate memory"; struct bch_sb_field_members *mi; @@ -768,11 +662,11 @@ static const char *__bch_fs_start(struct bch_fs *c) mutex_lock(&c->sb_lock); for_each_online_member(ca, c, i) - bch_sb_from_fs(c, ca); + bch2_sb_from_fs(c, ca); mutex_unlock(&c->sb_lock); if (BCH_SB_INITIALIZED(c->disk_sb)) { - ret = bch_journal_read(c, &journal); + ret = bch2_journal_read(c, &journal); if (ret) goto err; @@ -783,7 +677,7 @@ static const char *__bch_fs_start(struct bch_fs *c) err = "error reading priorities"; for_each_readable_member(ca, c, i) { - ret = bch_prio_read(ca); + ret = bch2_prio_read(ca); if (ret) { percpu_ref_put(&ca->io_ref); goto err; @@ -795,7 +689,7 @@ static const char *__bch_fs_start(struct bch_fs *c) struct bkey_i *k; err = "bad btree root"; - k = bch_journal_find_btree_root(c, j, id, &level); + k = bch2_journal_find_btree_root(c, j, id, &level); if (!k && id == BTREE_ID_EXTENTS) goto err; if (!k) { @@ -804,14 +698,14 @@ static const char *__bch_fs_start(struct bch_fs *c) } err = "error reading btree root"; - if (bch_btree_root_read(c, id, k, level)) + if (bch2_btree_root_read(c, id, k, level)) goto err; } bch_verbose(c, "starting mark and sweep:"); err = "error in recovery"; - if (bch_initial_gc(c, &journal)) + if (bch2_initial_gc(c, &journal)) goto err; if (c->opts.noreplay) @@ -820,15 +714,15 @@ static const char *__bch_fs_start(struct bch_fs *c) bch_verbose(c, "mark and sweep done"); /* - * 
bch_journal_start() can't happen sooner, or btree_gc_finish() + * bch2_journal_start() can't happen sooner, or btree_gc_finish() * will give spurious errors about oldest_gen > bucket_gen - * this is a hack but oh well. */ - bch_journal_start(c); + bch2_journal_start(c); err = "error starting allocator thread"; for_each_rw_member(ca, c, i) - if (bch_dev_allocator_start(ca)) { + if (bch2_dev_allocator_start(ca)) { percpu_ref_put(&ca->io_ref); goto err; } @@ -836,7 +730,7 @@ static const char *__bch_fs_start(struct bch_fs *c) bch_verbose(c, "starting journal replay:"); err = "journal replay failed"; - ret = bch_journal_replay(c, &journal); + ret = bch2_journal_replay(c, &journal); if (ret) goto err; @@ -847,7 +741,7 @@ static const char *__bch_fs_start(struct bch_fs *c) bch_verbose(c, "starting fsck:"); err = "error in fsck"; - ret = bch_fsck(c, !c->opts.nofsck); + ret = bch2_fsck(c, !c->opts.nofsck); if (ret) goto err; @@ -861,11 +755,11 @@ static const char *__bch_fs_start(struct bch_fs *c) bch_notice(c, "initializing new filesystem"); - bch_initial_gc(c, NULL); + bch2_initial_gc(c, NULL); err = "unable to allocate journal buckets"; for_each_rw_member(ca, c, i) - if (bch_dev_journal_alloc(ca)) { + if (bch2_dev_journal_alloc(ca)) { percpu_ref_put(&ca->io_ref); goto err; } @@ -874,19 +768,19 @@ static const char *__bch_fs_start(struct bch_fs *c) * journal_res_get() will crash if called before this has * set up the journal.pin FIFO and journal.cur pointer: */ - bch_journal_start(c); - bch_journal_set_replay_done(&c->journal); + bch2_journal_start(c); + bch2_journal_set_replay_done(&c->journal); err = "error starting allocator thread"; for_each_rw_member(ca, c, i) - if (bch_dev_allocator_start(ca)) { + if (bch2_dev_allocator_start(ca)) { percpu_ref_put(&ca->io_ref); goto err; } err = "cannot allocate new btree root"; for (id = 0; id < BTREE_ID_NR; id++) - if (bch_btree_root_alloc(c, id, &cl)) { + if (bch2_btree_root_alloc(c, id, &cl)) { closure_sync(&cl); goto err; } @@ -894,37 +788,37 @@ static const char *__bch_fs_start(struct bch_fs *c) /* Wait for new btree roots to be written: */ closure_sync(&cl); - bch_inode_init(c, &inode, 0, 0, + bch2_inode_init(c, &inode, 0, 0, S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0); inode.inum = BCACHE_ROOT_INO; - bch_inode_pack(&packed_inode, &inode); + bch2_inode_pack(&packed_inode, &inode); err = "error creating root directory"; - if (bch_btree_insert(c, BTREE_ID_INODES, + if (bch2_btree_insert(c, BTREE_ID_INODES, &packed_inode.inode.k_i, NULL, NULL, NULL, 0)) goto err; err = "error writing first journal entry"; - if (bch_journal_meta(&c->journal)) + if (bch2_journal_meta(&c->journal)) goto err; } recovery_done: err = "dynamic fault"; - if (bch_fs_init_fault("fs_start")) + if (bch2_fs_init_fault("fs_start")) goto err; if (c->opts.read_only) { - bch_fs_read_only(c); + bch2_fs_read_only(c); } else { - err = bch_fs_read_write(c); + err = bch2_fs_read_write(c); if (err) goto err; } mutex_lock(&c->sb_lock); - mi = bch_sb_get_members(c->disk_sb); + mi = bch2_sb_get_members(c->disk_sb); now = ktime_get_seconds(); for_each_member_device(ca, c, i) @@ -934,12 +828,12 @@ recovery_done: SET_BCH_SB_CLEAN(c->disk_sb, false); c->disk_sb->version = BCACHE_SB_VERSION_CDEV; - bch_write_super(c); + bch2_write_super(c); mutex_unlock(&c->sb_lock); err = NULL; out: - bch_journal_entries_free(&journal); + bch2_journal_entries_free(&journal); return err; err: switch (ret) { @@ -973,16 +867,16 @@ err: goto out; } -const char *bch_fs_start(struct bch_fs *c) +const char *bch2_fs_start(struct 
bch_fs *c) { - return __bch_fs_start(c) ?: bch_fs_online(c); + return __bch2_fs_start(c) ?: bch2_fs_online(c); } -static const char *bch_dev_may_add(struct bch_sb *sb, struct bch_fs *c) +static const char *bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c) { struct bch_sb_field_members *sb_mi; - sb_mi = bch_sb_get_members(sb); + sb_mi = bch2_sb_get_members(sb); if (!sb_mi) return "Invalid superblock: member info area missing"; @@ -996,11 +890,11 @@ static const char *bch_dev_may_add(struct bch_sb *sb, struct bch_fs *c) return NULL; } -static const char *bch_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb) +static const char *bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb) { struct bch_sb *newest = le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb; - struct bch_sb_field_members *mi = bch_sb_get_members(newest); + struct bch_sb_field_members *mi = bch2_sb_get_members(newest); if (uuid_le_cmp(fs->uuid, sb->uuid)) return "device not a member of filesystem"; @@ -1008,7 +902,7 @@ static const char *bch_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb) if (sb->dev_idx >= newest->nr_devices) return "device has invalid dev_idx"; - if (bch_is_zero(mi->members[sb->dev_idx].uuid.b, sizeof(uuid_le))) + if (bch2_is_zero(mi->members[sb->dev_idx].uuid.b, sizeof(uuid_le))) return "device has been removed"; if (fs->block_size != sb->block_size) @@ -1019,14 +913,14 @@ static const char *bch_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb) /* Device startup/shutdown: */ -void bch_dev_release(struct kobject *kobj) +void bch2_dev_release(struct kobject *kobj) { struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj); kfree(ca); } -static void bch_dev_free(struct bch_dev *ca) +static void bch2_dev_free(struct bch_dev *ca) { unsigned i; @@ -1035,13 +929,13 @@ static void bch_dev_free(struct bch_dev *ca) if (ca->kobj.state_in_sysfs && ca->disk_sb.bdev) sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj, - "bcache"); + "bcachefs"); if (ca->kobj.state_in_sysfs) kobject_del(&ca->kobj); - bch_free_super(&ca->disk_sb); - bch_dev_journal_exit(ca); + bch2_free_super(&ca->disk_sb); + bch2_dev_journal_exit(ca); free_percpu(ca->sectors_written); bioset_exit(&ca->replica_set); @@ -1062,20 +956,20 @@ static void bch_dev_free(struct bch_dev *ca) kobject_put(&ca->kobj); } -static void bch_dev_io_ref_release(struct percpu_ref *ref) +static void bch2_dev_io_ref_release(struct percpu_ref *ref) { struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref); complete(&ca->offline_complete); } -static void __bch_dev_offline(struct bch_dev *ca) +static void __bch2_dev_offline(struct bch_dev *ca) { struct bch_fs *c = ca->fs; lockdep_assert_held(&c->state_lock); - __bch_dev_read_only(ca->fs, ca); + __bch2_dev_read_only(ca->fs, ca); reinit_completion(&ca->offline_complete); percpu_ref_kill(&ca->io_ref); @@ -1085,22 +979,22 @@ static void __bch_dev_offline(struct bch_dev *ca) struct kobject *block = &part_to_dev(ca->disk_sb.bdev->bd_part)->kobj; - sysfs_remove_link(block, "bcache"); + sysfs_remove_link(block, "bcachefs"); sysfs_remove_link(&ca->kobj, "block"); } - bch_free_super(&ca->disk_sb); - bch_dev_journal_exit(ca); + bch2_free_super(&ca->disk_sb); + bch2_dev_journal_exit(ca); } -static void bch_dev_ref_release(struct percpu_ref *ref) +static void bch2_dev_ref_release(struct percpu_ref *ref) { struct bch_dev *ca = container_of(ref, struct bch_dev, ref); complete(&ca->stop_complete); } -static void bch_dev_stop(struct bch_dev *ca) +static void bch2_dev_stop(struct bch_dev *ca) { struct bch_fs *c = ca->fs; 
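/*
 * [Editorial aside, not part of the patch] The bch2_dev_io_ref_release(),
 * __bch2_dev_offline() and bch2_dev_stop() hunks above all rely on the
 * kernel's percpu_ref + completion idiom to quiesce a device before tearing
 * it down. A minimal, self-contained sketch of that idiom follows; the names
 * my_dev, my_dev_offline() and my_dev_io_ref_release() are hypothetical and
 * only illustrate the pattern, not bcachefs's actual structures.
 */
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct my_dev {
	struct percpu_ref	io_ref;			/* held across in-flight I/O */
	struct completion	offline_complete;	/* signalled when io_ref drains */
};

/*
 * Release callback: runs once the last reference is dropped after
 * percpu_ref_kill(). Assumes setup did
 * percpu_ref_init(&d->io_ref, my_dev_io_ref_release, 0, GFP_KERNEL).
 */
static void my_dev_io_ref_release(struct percpu_ref *ref)
{
	struct my_dev *d = container_of(ref, struct my_dev, io_ref);

	complete(&d->offline_complete);
}

static void my_dev_offline(struct my_dev *d)
{
	reinit_completion(&d->offline_complete);
	percpu_ref_kill(&d->io_ref);			/* refuse new references */
	wait_for_completion(&d->offline_complete);	/* drain existing users */
}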
@@ -1116,7 +1010,7 @@ static void bch_dev_stop(struct bch_dev *ca) wait_for_completion(&ca->stop_complete); } -static int bch_dev_sysfs_online(struct bch_dev *ca) +static int bch2_dev_sysfs_online(struct bch_dev *ca) { struct bch_fs *c = ca->fs; int ret; @@ -1135,7 +1029,7 @@ static int bch_dev_sysfs_online(struct bch_dev *ca) struct kobject *block = &part_to_dev(ca->disk_sb.bdev->bd_part)->kobj; - ret = sysfs_create_link(block, &ca->kobj, "bcache"); + ret = sysfs_create_link(block, &ca->kobj, "bcachefs"); if (ret) return ret; ret = sysfs_create_link(&ca->kobj, block, "block"); @@ -1146,7 +1040,7 @@ static int bch_dev_sysfs_online(struct bch_dev *ca) return 0; } -static int bch_dev_alloc(struct bch_fs *c, unsigned dev_idx) +static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx) { struct bch_member *member; size_t reserve_none, movinggc_reserve, free_inc_reserve, total_reserve; @@ -1154,14 +1048,14 @@ static int bch_dev_alloc(struct bch_fs *c, unsigned dev_idx) unsigned i; struct bch_dev *ca; - if (bch_fs_init_fault("dev_alloc")) + if (bch2_fs_init_fault("dev_alloc")) return -ENOMEM; ca = kzalloc(sizeof(*ca), GFP_KERNEL); if (!ca) return -ENOMEM; - kobject_init(&ca->kobj, &bch_dev_ktype); + kobject_init(&ca->kobj, &bch2_dev_ktype); init_completion(&ca->stop_complete); init_completion(&ca->offline_complete); @@ -1173,16 +1067,16 @@ static int bch_dev_alloc(struct bch_fs *c, unsigned dev_idx) spin_lock_init(&ca->freelist_lock); spin_lock_init(&ca->prio_buckets_lock); mutex_init(&ca->heap_lock); - bch_dev_moving_gc_init(ca); + bch2_dev_moving_gc_init(ca); - INIT_WORK(&ca->io_error_work, bch_nonfatal_io_error_work); + INIT_WORK(&ca->io_error_work, bch2_nonfatal_io_error_work); - if (bch_fs_init_fault("dev_alloc")) + if (bch2_fs_init_fault("dev_alloc")) goto err; - member = bch_sb_get_members(c->disk_sb)->members + dev_idx; + member = bch2_sb_get_members(c->disk_sb)->members + dev_idx; - ca->mi = bch_mi_to_cpu(member); + ca->mi = bch2_mi_to_cpu(member); ca->uuid = member->uuid; ca->bucket_bits = ilog2(ca->mi.bucket_size); scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx); @@ -1198,9 +1092,9 @@ static int bch_dev_alloc(struct bch_fs *c, unsigned dev_idx) free_inc_reserve = movinggc_reserve / 2; heap_size = movinggc_reserve * 8; - if (percpu_ref_init(&ca->ref, bch_dev_ref_release, + if (percpu_ref_init(&ca->ref, bch2_dev_ref_release, 0, GFP_KERNEL) || - percpu_ref_init(&ca->io_ref, bch_dev_io_ref_release, + percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_release, PERCPU_REF_INIT_DEAD, GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_BTREE], BTREE_NODE_RESERVE, GFP_KERNEL) || @@ -1235,16 +1129,16 @@ static int bch_dev_alloc(struct bch_fs *c, unsigned dev_idx) ca->fs = c; rcu_assign_pointer(c->devs[ca->dev_idx], ca); - if (bch_dev_sysfs_online(ca)) + if (bch2_dev_sysfs_online(ca)) pr_warn("error creating sysfs objects"); return 0; err: - bch_dev_free(ca); + bch2_dev_free(ca); return -ENOMEM; } -static int __bch_dev_online(struct bch_fs *c, struct bcache_superblock *sb) +static int __bch2_dev_online(struct bch_fs *c, struct bcache_superblock *sb) { struct bch_dev *ca; int ret; @@ -1253,7 +1147,7 @@ static int __bch_dev_online(struct bch_fs *c, struct bcache_superblock *sb) if (le64_to_cpu(sb->sb->seq) > le64_to_cpu(c->disk_sb->seq)) - bch_sb_to_fs(c, sb->sb); + bch2_sb_to_fs(c, sb->sb); BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices || !c->devs[sb->sb->dev_idx]); @@ -1265,7 +1159,7 @@ static int __bch_dev_online(struct bch_fs *c, 
struct bcache_superblock *sb) return -EINVAL; } - ret = bch_dev_journal_init(ca, sb->sb); + ret = bch2_dev_journal_init(ca, sb->sb); if (ret) return ret; @@ -1288,12 +1182,12 @@ static int __bch_dev_online(struct bch_fs *c, struct bcache_superblock *sb) bdevname(ca->disk_sb.bdev, c->name); bdevname(ca->disk_sb.bdev, ca->name); - if (bch_dev_sysfs_online(ca)) + if (bch2_dev_sysfs_online(ca)) pr_warn("error creating sysfs objects"); lg_local_lock(&c->usage_lock); if (!gc_will_visit(c, gc_phase(GC_PHASE_SB_METADATA))) - bch_mark_dev_metadata(ca->fs, ca); + bch2_mark_dev_metadata(ca->fs, ca); lg_local_unlock(&c->usage_lock); percpu_ref_reinit(&ca->io_ref); @@ -1302,7 +1196,7 @@ static int __bch_dev_online(struct bch_fs *c, struct bcache_superblock *sb) /* Device management: */ -bool bch_fs_may_start(struct bch_fs *c, int flags) +bool bch2_fs_may_start(struct bch_fs *c, int flags) { struct bch_sb_field_members *mi; unsigned meta_missing = 0; @@ -1311,11 +1205,11 @@ bool bch_fs_may_start(struct bch_fs *c, int flags) unsigned i; mutex_lock(&c->sb_lock); - mi = bch_sb_get_members(c->disk_sb); + mi = bch2_sb_get_members(c->disk_sb); for (i = 0; i < c->disk_sb->nr_devices; i++) if (!c->devs[i] && - !bch_is_zero(mi->members[i].uuid.b, sizeof(uuid_le))) { + !bch2_is_zero(mi->members[i].uuid.b, sizeof(uuid_le))) { degraded = true; if (BCH_MEMBER_HAS_METADATA(&mi->members[i])) meta_missing++; @@ -1346,8 +1240,8 @@ bool bch_fs_may_start(struct bch_fs *c, int flags) return true; } -bool bch_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca, - enum bch_member_state new_state, int flags) +bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca, + enum bch_member_state new_state, int flags) { lockdep_assert_held(&c->state_lock); @@ -1375,79 +1269,74 @@ bool bch_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca, return true; } -static void __bch_dev_read_only(struct bch_fs *c, struct bch_dev *ca) +static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca) { - bch_moving_gc_stop(ca); + bch2_moving_gc_stop(ca); /* * This stops new data writes (e.g. to existing open data * buckets) and then waits for all existing writes to * complete. 
*/ - bch_dev_allocator_stop(ca); + bch2_dev_allocator_stop(ca); - bch_dev_group_remove(&c->journal.devs, ca); + bch2_dev_group_remove(&c->journal.devs, ca); } -static const char *__bch_dev_read_write(struct bch_fs *c, struct bch_dev *ca) +static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca) { lockdep_assert_held(&c->state_lock); BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW); - trace_bcache_cache_read_write(ca); - - if (bch_dev_allocator_start(ca)) + if (bch2_dev_allocator_start(ca)) return "error starting allocator thread"; - if (bch_moving_gc_start(ca)) + if (bch2_moving_gc_start(ca)) return "error starting moving GC thread"; - if (bch_tiering_start(c)) + if (bch2_tiering_start(c)) return "error starting tiering thread"; - bch_notify_dev_read_write(ca); - trace_bcache_cache_read_write_done(ca); - return NULL; } -int __bch_dev_set_state(struct bch_fs *c, struct bch_dev *ca, - enum bch_member_state new_state, int flags) +int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, + enum bch_member_state new_state, int flags) { struct bch_sb_field_members *mi; if (ca->mi.state == new_state) return 0; - if (!bch_dev_state_allowed(c, ca, new_state, flags)) + if (!bch2_dev_state_allowed(c, ca, new_state, flags)) return -EINVAL; if (new_state == BCH_MEMBER_STATE_RW) { - if (__bch_dev_read_write(c, ca)) + if (__bch2_dev_read_write(c, ca)) return -ENOMEM; } else { - __bch_dev_read_only(c, ca); + __bch2_dev_read_only(c, ca); } - bch_notice(ca, "%s", bch_dev_state[new_state]); + bch_notice(ca, "%s", bch2_dev_state[new_state]); mutex_lock(&c->sb_lock); - mi = bch_sb_get_members(c->disk_sb); + mi = bch2_sb_get_members(c->disk_sb); SET_BCH_MEMBER_STATE(&mi->members[ca->dev_idx], new_state); - bch_write_super(c); + bch2_write_super(c); mutex_unlock(&c->sb_lock); return 0; } -int bch_dev_set_state(struct bch_fs *c, struct bch_dev *ca, - enum bch_member_state new_state, int flags) +int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, + enum bch_member_state new_state, int flags) { int ret; mutex_lock(&c->state_lock); - ret = __bch_dev_set_state(c, ca, new_state, flags); + ret = __bch2_dev_set_state(c, ca, new_state, flags); mutex_unlock(&c->state_lock); return ret; @@ -1455,7 +1344,7 @@ int bch_dev_set_state(struct bch_fs *c, struct bch_dev *ca, /* Device add/removal: */ -int bch_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) +int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) { struct bch_sb_field_members *mi; unsigned dev_idx = ca->dev_idx; @@ -1470,7 +1359,7 @@ int bch_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) goto err; } - if (!bch_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) { + if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) { bch_err(ca, "Cannot remove without losing data"); goto err; } @@ -1480,7 +1369,7 @@ int bch_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) * * flag_data_bad() does not check btree pointers */ - ret = bch_flag_data_bad(ca); + ret = bch2_flag_data_bad(ca); if (ret) { bch_err(ca, "Remove failed"); goto err; @@ -1499,21 +1388,21 @@ int bch_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags) c->journal.prio_buckets[dev_idx] = 0; spin_unlock(&c->journal.lock); - bch_journal_meta(&c->journal); + bch2_journal_meta(&c->journal); - __bch_dev_offline(ca); - bch_dev_stop(ca); - bch_dev_free(ca); + __bch2_dev_offline(ca); + bch2_dev_stop(ca); + bch2_dev_free(ca); /* * Free this device's slot in the bch_member array - all pointers to * this device must be 
gone: */ mutex_lock(&c->sb_lock); - mi = bch_sb_get_members(c->disk_sb); + mi = bch2_sb_get_members(c->disk_sb); memset(&mi->members[dev_idx].uuid, 0, sizeof(mi->members[dev_idx].uuid)); - bch_write_super(c); + bch2_write_super(c); mutex_unlock(&c->sb_lock); mutex_unlock(&c->state_lock); @@ -1523,7 +1412,7 @@ err: return ret; } -int bch_dev_add(struct bch_fs *c, const char *path) +int bch2_dev_add(struct bch_fs *c, const char *path) { struct bcache_superblock sb; const char *err; @@ -1533,15 +1422,15 @@ int bch_dev_add(struct bch_fs *c, const char *path) unsigned dev_idx, nr_devices, u64s; int ret = -EINVAL; - err = bch_read_super(&sb, bch_opts_empty(), path); + err = bch2_read_super(&sb, bch2_opts_empty(), path); if (err) return -EINVAL; - err = bch_validate_cache_super(&sb); + err = bch2_validate_cache_super(&sb); if (err) return -EINVAL; - err = bch_dev_may_add(sb.sb, c); + err = bch2_dev_may_add(sb.sb, c); if (err) return -EINVAL; @@ -1552,17 +1441,17 @@ int bch_dev_add(struct bch_fs *c, const char *path) * Preserve the old cache member information (esp. tier) * before we start bashing the disk stuff. */ - dev_mi = bch_sb_get_members(sb.sb); + dev_mi = bch2_sb_get_members(sb.sb); saved_mi = dev_mi->members[sb.sb->dev_idx]; saved_mi.last_mount = cpu_to_le64(ktime_get_seconds()); - if (dynamic_fault("bcache:add:no_slot")) + if (dynamic_fault("bcachefs:add:no_slot")) goto no_slot; - mi = bch_sb_get_members(c->disk_sb); + mi = bch2_sb_get_members(c->disk_sb); for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++) if (dev_idx >= c->sb.nr_devices || - bch_is_zero(mi->members[dev_idx].uuid.b, + bch2_is_zero(mi->members[dev_idx].uuid.b, sizeof(uuid_le))) goto have_slot; no_slot: @@ -1576,11 +1465,11 @@ have_slot: sizeof(struct bch_member) * nr_devices) / sizeof(u64); err = "no space in superblock for member info"; - mi = bch_fs_sb_resize_members(c, u64s); + mi = bch2_fs_sb_resize_members(c, u64s); if (!mi) goto err_unlock; - dev_mi = bch_sb_resize_members(&sb, u64s); + dev_mi = bch2_sb_resize_members(&sb, u64s); if (!dev_mi) goto err_unlock; @@ -1596,46 +1485,45 @@ have_slot: c->disk_sb->nr_devices = nr_devices; c->sb.nr_devices = nr_devices; - if (bch_dev_alloc(c, dev_idx)) { + if (bch2_dev_alloc(c, dev_idx)) { err = "cannot allocate memory"; ret = -ENOMEM; goto err_unlock; } - if (__bch_dev_online(c, &sb)) { - err = "bch_dev_online() error"; + if (__bch2_dev_online(c, &sb)) { + err = "bch2_dev_online() error"; ret = -ENOMEM; goto err_unlock; } - bch_write_super(c); + bch2_write_super(c); mutex_unlock(&c->sb_lock); ca = c->devs[dev_idx]; if (ca->mi.state == BCH_MEMBER_STATE_RW) { err = "journal alloc failed"; - if (bch_dev_journal_alloc(ca)) + if (bch2_dev_journal_alloc(ca)) goto err; - err = __bch_dev_read_write(c, ca); + err = __bch2_dev_read_write(c, ca); if (err) goto err; } - bch_notify_dev_added(ca); mutex_unlock(&c->state_lock); return 0; err_unlock: mutex_unlock(&c->sb_lock); err: mutex_unlock(&c->state_lock); - bch_free_super(&sb); + bch2_free_super(&sb); bch_err(c, "Unable to add device: %s", err); return ret ?: -EINVAL; } -int bch_dev_online(struct bch_fs *c, const char *path) +int bch2_dev_online(struct bch_fs *c, const char *path) { struct bcache_superblock sb = { 0 }; struct bch_dev *ca; @@ -1644,19 +1532,19 @@ int bch_dev_online(struct bch_fs *c, const char *path) mutex_lock(&c->state_lock); - err = bch_read_super(&sb, bch_opts_empty(), path); + err = bch2_read_super(&sb, bch2_opts_empty(), path); if (err) goto err; dev_idx = sb.sb->dev_idx; - err = 
bch_dev_in_fs(c->disk_sb, sb.sb); + err = bch2_dev_in_fs(c->disk_sb, sb.sb); if (err) goto err; mutex_lock(&c->sb_lock); - if (__bch_dev_online(c, &sb)) { - err = "__bch_dev_online() error"; + if (__bch2_dev_online(c, &sb)) { + err = "__bch2_dev_online() error"; mutex_unlock(&c->sb_lock); goto err; } @@ -1664,7 +1552,7 @@ int bch_dev_online(struct bch_fs *c, const char *path) ca = c->devs[dev_idx]; if (ca->mi.state == BCH_MEMBER_STATE_RW) { - err = __bch_dev_read_write(c, ca); + err = __bch2_dev_read_write(c, ca); if (err) goto err; } @@ -1673,29 +1561,29 @@ int bch_dev_online(struct bch_fs *c, const char *path) return 0; err: mutex_unlock(&c->state_lock); - bch_free_super(&sb); + bch2_free_super(&sb); bch_err(c, "error bringing %s online: %s", path, err); return -EINVAL; } -int bch_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags) +int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags) { mutex_lock(&c->state_lock); - if (!bch_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) { + if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) { bch_err(ca, "Cannot offline required disk"); mutex_unlock(&c->state_lock); return -EINVAL; } - __bch_dev_read_only(c, ca); - __bch_dev_offline(ca); + __bch2_dev_read_only(c, ca); + __bch2_dev_offline(ca); mutex_unlock(&c->state_lock); return 0; } -int bch_dev_evacuate(struct bch_fs *c, struct bch_dev *ca) +int bch2_dev_evacuate(struct bch_fs *c, struct bch_dev *ca) { int ret; @@ -1709,13 +1597,13 @@ int bch_dev_evacuate(struct bch_fs *c, struct bch_dev *ca) mutex_unlock(&c->state_lock); - ret = bch_move_data_off_device(ca); + ret = bch2_move_data_off_device(ca); if (ret) { bch_err(ca, "Error migrating data: %i", ret); return ret; } - ret = bch_move_metadata_off_device(ca); + ret = bch2_move_metadata_off_device(ca); if (ret) { bch_err(ca, "Error migrating metadata: %i", ret); return ret; @@ -1731,8 +1619,8 @@ int bch_dev_evacuate(struct bch_fs *c, struct bch_dev *ca) /* Filesystem open: */ -const char *bch_fs_open(char * const *devices, unsigned nr_devices, - struct bch_opts opts, struct bch_fs **ret) +const char *bch2_fs_open(char * const *devices, unsigned nr_devices, + struct bch_opts opts, struct bch_fs **ret) { const char *err; struct bch_fs *c = NULL; @@ -1751,7 +1639,7 @@ const char *bch_fs_open(char * const *devices, unsigned nr_devices, goto err; for (i = 0; i < nr_devices; i++) { - err = bch_read_super(&sb[i], opts, devices[i]); + err = bch2_read_super(&sb[i], opts, devices[i]); if (err) goto err; @@ -1759,7 +1647,7 @@ const char *bch_fs_open(char * const *devices, unsigned nr_devices, if (__SB_IS_BDEV(le64_to_cpu(sb[i].sb->version))) goto err; - err = bch_validate_cache_super(&sb[i]); + err = bch2_validate_cache_super(&sb[i]); if (err) goto err; } @@ -1770,36 +1658,36 @@ const char *bch_fs_open(char * const *devices, unsigned nr_devices, best_sb = i; for (i = 0; i < nr_devices; i++) { - err = bch_dev_in_fs(sb[best_sb].sb, sb[i].sb); + err = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb); if (err) goto err; } err = "cannot allocate memory"; - c = bch_fs_alloc(sb[best_sb].sb, opts); + c = bch2_fs_alloc(sb[best_sb].sb, opts); if (!c) goto err; - err = "bch_dev_online() error"; + err = "bch2_dev_online() error"; mutex_lock(&c->sb_lock); for (i = 0; i < nr_devices; i++) - if (__bch_dev_online(c, &sb[i])) { + if (__bch2_dev_online(c, &sb[i])) { mutex_unlock(&c->sb_lock); goto err; } mutex_unlock(&c->sb_lock); err = "insufficient devices"; - if (!bch_fs_may_start(c, 0)) + if (!bch2_fs_may_start(c, 0)) goto err; if 
(!c->opts.nostart) { - err = __bch_fs_start(c); + err = __bch2_fs_start(c); if (err) goto err; } - err = bch_fs_online(c); + err = bch2_fs_online(c); if (err) goto err; @@ -1817,34 +1705,34 @@ out: return err; err: if (c) - bch_fs_stop(c); + bch2_fs_stop(c); for (i = 0; i < nr_devices; i++) - bch_free_super(&sb[i]); + bch2_free_super(&sb[i]); goto out; } -static const char *__bch_fs_open_incremental(struct bcache_superblock *sb, - struct bch_opts opts) +static const char *__bch2_fs_open_incremental(struct bcache_superblock *sb, + struct bch_opts opts) { const char *err; struct bch_fs *c; bool allocated_fs = false; - err = bch_validate_cache_super(sb); + err = bch2_validate_cache_super(sb); if (err) return err; mutex_lock(&bch_fs_list_lock); - c = __bch_uuid_to_fs(sb->sb->uuid); + c = __bch2_uuid_to_fs(sb->sb->uuid); if (c) { closure_get(&c->cl); - err = bch_dev_in_fs(c->disk_sb, sb->sb); + err = bch2_dev_in_fs(c->disk_sb, sb->sb); if (err) goto err; } else { - c = bch_fs_alloc(sb->sb, opts); + c = bch2_fs_alloc(sb->sb, opts); err = "cannot allocate memory"; if (!c) goto err; @@ -1852,22 +1740,22 @@ static const char *__bch_fs_open_incremental(struct bcache_superblock *sb, allocated_fs = true; } - err = "bch_dev_online() error"; + err = "bch2_dev_online() error"; mutex_lock(&c->sb_lock); - if (__bch_dev_online(c, sb)) { + if (__bch2_dev_online(c, sb)) { mutex_unlock(&c->sb_lock); goto err; } mutex_unlock(&c->sb_lock); - if (!c->opts.nostart && bch_fs_may_start(c, 0)) { - err = __bch_fs_start(c); + if (!c->opts.nostart && bch2_fs_may_start(c, 0)) { + err = __bch2_fs_start(c); if (err) goto err; } - err = __bch_fs_online(c); + err = __bch2_fs_online(c); if (err) goto err; @@ -1879,169 +1767,66 @@ err: mutex_unlock(&bch_fs_list_lock); if (allocated_fs) - bch_fs_stop(c); + bch2_fs_stop(c); else if (c) closure_put(&c->cl); return err; } -const char *bch_fs_open_incremental(const char *path) +const char *bch2_fs_open_incremental(const char *path) { struct bcache_superblock sb; - struct bch_opts opts = bch_opts_empty(); + struct bch_opts opts = bch2_opts_empty(); const char *err; - err = bch_read_super(&sb, opts, path); + err = bch2_read_super(&sb, opts, path); if (err) return err; - if (__SB_IS_BDEV(le64_to_cpu(sb.sb->version))) { - mutex_lock(&bch_fs_list_lock); - err = bch_backing_dev_register(&sb); - mutex_unlock(&bch_fs_list_lock); - } else { - err = __bch_fs_open_incremental(&sb, opts); - } + if (!__SB_IS_BDEV(le64_to_cpu(sb.sb->version))) + err = __bch2_fs_open_incremental(&sb, opts); + else + err = "not a bcachefs superblock"; - bch_free_super(&sb); + bch2_free_super(&sb); return err; } /* Global interfaces/init */ -#define kobj_attribute_write(n, fn) \ - static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn) - -#define kobj_attribute_rw(n, show, store) \ - static struct kobj_attribute ksysfs_##n = \ - __ATTR(n, S_IWUSR|S_IRUSR, show, store) - -static ssize_t register_bcache(struct kobject *, struct kobj_attribute *, - const char *, size_t); - -kobj_attribute_write(register, register_bcache); -kobj_attribute_write(register_quiet, register_bcache); - -static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, - const char *buffer, size_t size) +static void bcachefs_exit(void) { - ssize_t ret = -EINVAL; - const char *err = "cannot allocate memory"; - char *path = NULL; - - if (!try_module_get(THIS_MODULE)) - return -EBUSY; - - if (!(path = kstrndup(skip_spaces(buffer), size, GFP_KERNEL))) - goto err; - - err = bch_fs_open_incremental(strim(path)); - if 
(err) - goto err; - - ret = size; -out: - kfree(path); - module_put(THIS_MODULE); - return ret; -err: - pr_err("error opening %s: %s", path, err); - goto out; + bch2_debug_exit(); + bch2_vfs_exit(); + bch2_chardev_exit(); + if (bcachefs_kset) + kset_unregister(bcachefs_kset); } -static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) +static int __init bcachefs_init(void) { - if (code == SYS_DOWN || - code == SYS_HALT || - code == SYS_POWER_OFF) { - struct bch_fs *c; - - mutex_lock(&bch_fs_list_lock); - - if (!list_empty(&bch_fs_list)) - pr_info("Setting all devices read only:"); - - list_for_each_entry(c, &bch_fs_list, list) - bch_fs_read_only_async(c); - - list_for_each_entry(c, &bch_fs_list, list) - bch_fs_read_only(c); - - mutex_unlock(&bch_fs_list_lock); - } - - return NOTIFY_DONE; -} - -static struct notifier_block reboot = { - .notifier_call = bcache_reboot, - .priority = INT_MAX, /* before any real devices */ -}; - -static ssize_t reboot_test(struct kobject *k, struct kobj_attribute *attr, - const char *buffer, size_t size) -{ - bcache_reboot(NULL, SYS_DOWN, NULL); - return size; -} - -kobj_attribute_write(reboot, reboot_test); - -static void bcache_exit(void) -{ - bch_debug_exit(); - bch_vfs_exit(); - bch_blockdev_exit(); - bch_chardev_exit(); - if (bcache_kset) - kset_unregister(bcache_kset); - if (bcache_io_wq) - destroy_workqueue(bcache_io_wq); - if (!IS_ERR_OR_NULL(bch_sha256)) - crypto_free_shash(bch_sha256); - unregister_reboot_notifier(&reboot); -} - -static int __init bcache_init(void) -{ - static const struct attribute *files[] = { - &ksysfs_register.attr, - &ksysfs_register_quiet.attr, - &ksysfs_reboot.attr, - NULL - }; - - register_reboot_notifier(&reboot); - closure_debug_init(); - bkey_pack_test(); - - bch_sha256 = crypto_alloc_shash("sha256", 0, 0); - if (IS_ERR(bch_sha256)) - goto err; + bch2_bkey_pack_test(); - if (!(bcache_io_wq = create_freezable_workqueue("bcache_io")) || - !(bcache_kset = kset_create_and_add("bcache", NULL, fs_kobj)) || - sysfs_create_files(&bcache_kset->kobj, files) || - bch_chardev_init() || - bch_blockdev_init() || - bch_vfs_init() || - bch_debug_init()) + if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) || + bch2_chardev_init() || + bch2_vfs_init() || + bch2_debug_init()) goto err; return 0; err: - bcache_exit(); + bcachefs_exit(); return -ENOMEM; } #define BCH_DEBUG_PARAM(name, description) \ - bool bch_##name; \ - module_param_named(name, bch_##name, bool, 0644); \ + bool bch2_##name; \ + module_param_named(name, bch2_##name, bool, 0644); \ MODULE_PARM_DESC(name, description); BCH_DEBUG_PARAMS() #undef BCH_DEBUG_PARAM -module_exit(bcache_exit); -module_init(bcache_init); +module_exit(bcachefs_exit); +module_init(bcachefs_init); diff --git a/libbcachefs/super.h b/libbcachefs/super.h new file mode 100644 index 00000000..94424414 --- /dev/null +++ b/libbcachefs/super.h @@ -0,0 +1,130 @@ +#ifndef _BCACHE_SUPER_H +#define _BCACHE_SUPER_H + +#include "extents.h" + +#include "bcachefs_ioctl.h" + +static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s) +{ + return s >> ca->bucket_bits; +} + +static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b) +{ + return ((sector_t) b) << ca->bucket_bits; +} + +static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s) +{ + return s & (ca->mi.bucket_size - 1); +} + +static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter) +{ + struct bch_dev *ca = NULL; + + while (*iter < 
c->sb.nr_devices && + !(ca = rcu_dereference_check(c->devs[*iter], + lockdep_is_held(&c->state_lock)))) + (*iter)++; + + return ca; +} + +#define __for_each_member_device(ca, c, iter) \ + for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter))); (iter)++) + +#define for_each_member_device_rcu(ca, c, iter) \ + __for_each_member_device(ca, c, iter) + +static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter) +{ + struct bch_dev *ca; + + rcu_read_lock(); + if ((ca = __bch2_next_dev(c, iter))) + percpu_ref_get(&ca->ref); + rcu_read_unlock(); + + return ca; +} + +/* + * If you break early, you must drop your ref on the current device + */ +#define for_each_member_device(ca, c, iter) \ + for ((iter) = 0; \ + (ca = bch2_get_next_dev(c, &(iter))); \ + percpu_ref_put(&ca->ref), (iter)++) + +static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c, + unsigned *iter, + int state_mask) +{ + struct bch_dev *ca; + + rcu_read_lock(); + while ((ca = __bch2_next_dev(c, iter)) && + (!((1 << ca->mi.state) & state_mask) || + !percpu_ref_tryget(&ca->io_ref))) + (*iter)++; + rcu_read_unlock(); + + return ca; +} + +#define __for_each_online_member(ca, c, iter, state_mask) \ + for ((iter) = 0; \ + (ca = bch2_get_next_online_dev(c, &(iter), state_mask)); \ + percpu_ref_put(&ca->io_ref), (iter)++) + +#define for_each_online_member(ca, c, iter) \ + __for_each_online_member(ca, c, iter, ~0) + +#define for_each_rw_member(ca, c, iter) \ + __for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_RW) + +#define for_each_readable_member(ca, c, iter) \ + __for_each_online_member(ca, c, iter, \ + (1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO)) + +struct bch_fs *bch2_bdev_to_fs(struct block_device *); +struct bch_fs *bch2_uuid_to_fs(uuid_le); +int bch2_congested(struct bch_fs *, int); + +void bch2_dev_release(struct kobject *); + +bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *, + enum bch_member_state, int); +int __bch2_dev_set_state(struct bch_fs *, struct bch_dev *, + enum bch_member_state, int); +int bch2_dev_set_state(struct bch_fs *, struct bch_dev *, + enum bch_member_state, int); + +int bch2_dev_fail(struct bch_dev *, int); +int bch2_dev_remove(struct bch_fs *, struct bch_dev *, int); +int bch2_dev_add(struct bch_fs *, const char *); +int bch2_dev_online(struct bch_fs *, const char *); +int bch2_dev_offline(struct bch_fs *, struct bch_dev *, int); +int bch2_dev_evacuate(struct bch_fs *, struct bch_dev *); + +bool bch2_fs_emergency_read_only(struct bch_fs *); +void bch2_fs_read_only(struct bch_fs *); +const char *bch2_fs_read_write(struct bch_fs *); + +void bch2_fs_release(struct kobject *); +void bch2_fs_stop(struct bch_fs *); + +const char *bch2_fs_start(struct bch_fs *); +const char *bch2_fs_open(char * const *, unsigned, struct bch_opts, + struct bch_fs **); +const char *bch2_fs_open_incremental(const char *path); + +extern struct kobj_type bch2_fs_ktype; +extern struct kobj_type bch2_fs_internal_ktype; +extern struct kobj_type bch2_fs_time_stats_ktype; +extern struct kobj_type bch2_fs_opts_dir_ktype; +extern struct kobj_type bch2_dev_ktype; + +#endif /* _BCACHE_SUPER_H */ diff --git a/libbcache/super_types.h b/libbcachefs/super_types.h index 69c747de..69c747de 100644 --- a/libbcache/super_types.h +++ b/libbcachefs/super_types.h diff --git a/libbcache/sysfs.c b/libbcachefs/sysfs.c index 3536ec0c..11c6cdcc 100644 --- a/libbcache/sysfs.c +++ b/libbcachefs/sysfs.c @@ -5,9 +5,8 @@ * Copyright 2012 Google, Inc. 
*/ -#include "bcache.h" +#include "bcachefs.h" #include "alloc.h" -#include "blockdev.h" #include "compress.h" #include "sysfs.h" #include "btree_cache.h" @@ -20,23 +19,15 @@ #include "keylist.h" #include "move.h" #include "opts.h" -#include "request.h" #include "super-io.h" #include "tier.h" -#include "writeback.h" #include <linux/blkdev.h> #include <linux/sort.h> -write_attribute(attach); -write_attribute(detach); -write_attribute(unregister); -write_attribute(stop); -write_attribute(clear_stats); write_attribute(trigger_btree_coalesce); write_attribute(trigger_gc); write_attribute(prune_cache); -write_attribute(blockdev_volume_create); read_attribute(uuid); read_attribute(minor); @@ -85,37 +76,13 @@ read_attribute(has_metadata); read_attribute(bset_tree_stats); read_attribute(alloc_debug); -read_attribute(state); read_attribute(cache_read_races); -read_attribute(writeback_keys_done); -read_attribute(writeback_keys_failed); -read_attribute(io_errors); -rw_attribute(io_error_limit); -rw_attribute(io_error_halflife); -read_attribute(congested); -rw_attribute(congested_read_threshold_us); -rw_attribute(congested_write_threshold_us); - -rw_attribute(sequential_cutoff); -rw_attribute(cache_mode); -rw_attribute(writeback_metadata); -rw_attribute(writeback_running); -rw_attribute(writeback_percent); -sysfs_pd_controller_attribute(writeback); - -read_attribute(stripe_size); -read_attribute(partial_stripes_expensive); rw_attribute(journal_write_delay_ms); rw_attribute(journal_reclaim_delay_ms); read_attribute(journal_entry_size_max); rw_attribute(discard); -rw_attribute(running); -rw_attribute(label); -rw_attribute(readahead); -rw_attribute(verify); -rw_attribute(bypass_torture_test); rw_attribute(cache_replacement_policy); rw_attribute(foreground_write_ratelimit_enabled); @@ -133,7 +100,6 @@ rw_attribute(pd_controllers_update_seconds); rw_attribute(foreground_target_percent); -rw_attribute(size); read_attribute(meta_replicas_have); read_attribute(data_replicas_have); @@ -161,287 +127,7 @@ static struct attribute sysfs_state_rw = { .mode = S_IRUGO }; -SHOW(bch_cached_dev) -{ - struct cached_dev *dc = container_of(kobj, struct cached_dev, - disk.kobj); - const char *states[] = { "no cache", "clean", "dirty", "inconsistent" }; - -#define var(stat) (dc->stat) - - if (attr == &sysfs_cache_mode) - return bch_snprint_string_list(buf, PAGE_SIZE, - bch_cache_modes + 1, - BDEV_CACHE_MODE(dc->disk_sb.sb)); - - var_printf(verify, "%i"); - var_printf(bypass_torture_test, "%i"); - var_printf(writeback_metadata, "%i"); - var_printf(writeback_running, "%i"); - var_print(writeback_percent); - sysfs_pd_controller_show(writeback, &dc->writeback_pd); - - sysfs_hprint(dirty_data, - bcache_dev_sectors_dirty(&dc->disk) << 9); - sysfs_print(dirty_bytes, - bcache_dev_sectors_dirty(&dc->disk) << 9); - - sysfs_hprint(stripe_size, dc->disk.stripe_size << 9); - var_printf(partial_stripes_expensive, "%u"); - - var_hprint(sequential_cutoff); - var_hprint(readahead); - - sysfs_print(running, atomic_read(&dc->running)); - sysfs_print(state, states[BDEV_STATE(dc->disk_sb.sb)]); - - if (attr == &sysfs_label) { - memcpy(buf, dc->disk_sb.sb->label, BCH_SB_LABEL_SIZE); - buf[BCH_SB_LABEL_SIZE + 1] = '\0'; - strcat(buf, "\n"); - return strlen(buf); - } - -#undef var - return 0; -} - -STORE(bch_cached_dev) -{ - struct cached_dev *dc = container_of(kobj, struct cached_dev, - disk.kobj); - struct kobj_uevent_env *env; - -#define d_strtoul(var) sysfs_strtoul(var, dc->var) -#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 
1, INT_MAX) -#define d_strtoi_h(var) sysfs_hatoi(var, dc->var) - - d_strtoul(verify); - d_strtoul(bypass_torture_test); - d_strtoul(writeback_metadata); - d_strtoul(writeback_running); - sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); - sysfs_pd_controller_store(writeback, &dc->writeback_pd); - - d_strtoi_h(sequential_cutoff); - d_strtoi_h(readahead); - - if (attr == &sysfs_writeback_running) - bch_writeback_queue(dc); - - if (attr == &sysfs_writeback_percent) - schedule_delayed_work(&dc->writeback_pd_update, - dc->writeback_pd_update_seconds * HZ); - - if (attr == &sysfs_clear_stats) - bch_cache_accounting_clear(&dc->accounting); - - if (attr == &sysfs_running && - strtoul_or_return(buf)) - bch_cached_dev_run(dc); - - if (attr == &sysfs_cache_mode) { - ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1); - - if (v < 0) - return v; - - if ((unsigned) v != BDEV_CACHE_MODE(dc->disk_sb.sb)) { - SET_BDEV_CACHE_MODE(dc->disk_sb.sb, v); - bch_write_bdev_super(dc, NULL); - } - } - - if (attr == &sysfs_label) { - u64 journal_seq = 0; - int ret = 0; - - if (size > BCH_SB_LABEL_SIZE) - return -EINVAL; - - mutex_lock(&dc->disk.inode_lock); - - memcpy(dc->disk_sb.sb->label, buf, size); - if (size < BCH_SB_LABEL_SIZE) - dc->disk_sb.sb->label[size] = '\0'; - if (size && dc->disk_sb.sb->label[size - 1] == '\n') - dc->disk_sb.sb->label[size - 1] = '\0'; - - memcpy(dc->disk.inode.v.i_label, - dc->disk_sb.sb->label, BCH_SB_LABEL_SIZE); - - bch_write_bdev_super(dc, NULL); - - if (dc->disk.c) - ret = bch_btree_update(dc->disk.c, BTREE_ID_INODES, - &dc->disk.inode.k_i, - &journal_seq); - - mutex_unlock(&dc->disk.inode_lock); - - if (ret) - return ret; - - if (dc->disk.c) - ret = bch_journal_flush_seq(&dc->disk.c->journal, - journal_seq); - if (ret) - return ret; - - env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); - if (!env) - return -ENOMEM; - add_uevent_var(env, "DRIVER=bcache"); - add_uevent_var(env, "CACHED_UUID=%pU", dc->disk_sb.sb->disk_uuid.b), - add_uevent_var(env, "CACHED_LABEL=%s", buf); - kobject_uevent_env( - &disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp); - kfree(env); - } - - if (attr == &sysfs_attach) { - struct bch_fs *c; - uuid_le uuid; - int ret; - - if (uuid_parse(buf, &uuid)) - return -EINVAL; - - c = bch_uuid_to_fs(uuid); - if (!c) { - pr_err("Can't attach %s: cache set not found", buf); - return -ENOENT; - } - - dc->disk_sb.sb->set_uuid = uuid; - - ret = bch_cached_dev_attach(dc, c); - closure_put(&c->cl); - if (ret) - return ret; - } - - if (attr == &sysfs_detach && dc->disk.c) - bch_cached_dev_detach(dc); - - if (attr == &sysfs_stop) - bch_blockdev_stop(&dc->disk); - - return size; -} - -static struct attribute *bch_cached_dev_files[] = { - &sysfs_attach, - &sysfs_detach, - &sysfs_stop, - &sysfs_cache_mode, - &sysfs_writeback_metadata, - &sysfs_writeback_running, - &sysfs_writeback_percent, - sysfs_pd_controller_files(writeback), - &sysfs_dirty_data, - &sysfs_dirty_bytes, - &sysfs_stripe_size, - &sysfs_partial_stripes_expensive, - &sysfs_sequential_cutoff, - &sysfs_clear_stats, - &sysfs_running, - &sysfs_state, - &sysfs_label, - &sysfs_readahead, -#ifdef CONFIG_BCACHE_DEBUG - &sysfs_verify, - &sysfs_bypass_torture_test, -#endif - NULL -}; -KTYPE(bch_cached_dev); - -SHOW(bch_blockdev_volume) -{ - struct bcache_device *d = container_of(kobj, struct bcache_device, - kobj); - - sysfs_hprint(size, le64_to_cpu(d->inode.v.i_size)); - - if (attr == &sysfs_label) { - memcpy(buf, d->inode.v.i_label, BCH_SB_LABEL_SIZE); - buf[BCH_SB_LABEL_SIZE + 
1] = '\0'; - strcat(buf, "\n"); - return strlen(buf); - } - - return 0; -} - -STORE(bch_blockdev_volume) -{ - struct bcache_device *d = container_of(kobj, struct bcache_device, - kobj); - - if (attr == &sysfs_size) { - u64 journal_seq = 0; - u64 v = strtoi_h_or_return(buf); - int ret; - - mutex_lock(&d->inode_lock); - - if (v < le64_to_cpu(d->inode.v.i_size) ){ - ret = bch_inode_truncate(d->c, d->inode.k.p.inode, - v >> 9, NULL, NULL); - if (ret) { - mutex_unlock(&d->inode_lock); - return ret; - } - } - d->inode.v.i_size = cpu_to_le64(v); - ret = bch_btree_update(d->c, BTREE_ID_INODES, - &d->inode.k_i, &journal_seq); - - mutex_unlock(&d->inode_lock); - - if (ret) - return ret; - - ret = bch_journal_flush_seq(&d->c->journal, journal_seq); - if (ret) - return ret; - - set_capacity(d->disk, v >> 9); - } - - if (attr == &sysfs_label) { - u64 journal_seq = 0; - int ret; - - mutex_lock(&d->inode_lock); - - memcpy(d->inode.v.i_label, buf, BCH_SB_LABEL_SIZE); - ret = bch_btree_update(d->c, BTREE_ID_INODES, - &d->inode.k_i, &journal_seq); - - mutex_unlock(&d->inode_lock); - - return ret ?: bch_journal_flush_seq(&d->c->journal, journal_seq); - } - - if (attr == &sysfs_unregister) { - set_bit(BCACHE_DEV_DETACHING, &d->flags); - bch_blockdev_stop(d); - } - - return size; -} - -static struct attribute *bch_blockdev_volume_files[] = { - &sysfs_unregister, - &sysfs_label, - &sysfs_size, - NULL -}; -KTYPE(bch_blockdev_volume); - -static int bch_bset_print_stats(struct bch_fs *c, char *buf) +static int bch2_bset_print_stats(struct bch_fs *c, char *buf) { struct bset_stats stats; size_t nodes = 0; @@ -454,7 +140,7 @@ static int bch_bset_print_stats(struct bch_fs *c, char *buf) rcu_read_lock(); for_each_cached_btree(b, c, tbl, iter, pos) { - bch_btree_keys_stats(b, &stats); + bch2_btree_keys_stats(b, &stats); nodes++; } rcu_read_unlock(); @@ -484,7 +170,7 @@ static int bch_bset_print_stats(struct bch_fs *c, char *buf) stats.failed_overflow); } -static unsigned bch_root_usage(struct bch_fs *c) +static unsigned bch2_root_usage(struct bch_fs *c) { unsigned bytes = 0; struct bkey_packed *k; @@ -508,7 +194,7 @@ lock_root: return (bytes * 100) / btree_bytes(c); } -static size_t bch_btree_cache_size(struct bch_fs *c) +static size_t bch2_btree_cache_size(struct bch_fs *c) { size_t ret = 0; struct btree *b; @@ -521,20 +207,20 @@ static size_t bch_btree_cache_size(struct bch_fs *c) return ret; } -static unsigned bch_fs_available_percent(struct bch_fs *c) +static unsigned bch2_fs_available_percent(struct bch_fs *c) { return div64_u64((u64) sectors_available(c) * 100, c->capacity ?: 1); } #if 0 -static unsigned bch_btree_used(struct bch_fs *c) +static unsigned bch2_btree_used(struct bch_fs *c) { return div64_u64(c->gc_stats.key_bytes * 100, (c->gc_stats.nodes ?: 1) * btree_bytes(c)); } -static unsigned bch_average_key_size(struct bch_fs *c) +static unsigned bch2_average_key_size(struct bch_fs *c) { return c->gc_stats.nkeys ? 
div64_u64(c->gc_stats.data, c->gc_stats.nkeys) @@ -544,7 +230,7 @@ static unsigned bch_average_key_size(struct bch_fs *c) static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf) { - struct bch_fs_usage stats = bch_fs_usage_read(c); + struct bch_fs_usage stats = bch2_fs_usage_read(c); return scnprintf(buf, PAGE_SIZE, "capacity:\t\t%llu\n" @@ -569,7 +255,7 @@ static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf) stats.online_reserved); } -static ssize_t bch_compression_stats(struct bch_fs *c, char *buf) +static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf) { struct btree_iter iter; struct bkey_s_c k; @@ -600,7 +286,7 @@ static ssize_t bch_compression_stats(struct bch_fs *c, char *buf) break; } } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return snprintf(buf, PAGE_SIZE, "uncompressed data:\n" @@ -617,7 +303,7 @@ static ssize_t bch_compression_stats(struct bch_fs *c, char *buf) compressed_sectors_uncompressed << 9); } -SHOW(bch_fs) +SHOW(bch2_fs) { struct bch_fs *c = container_of(kobj, struct bch_fs, kobj); @@ -632,37 +318,21 @@ SHOW(bch_fs) sysfs_hprint(btree_node_size, c->sb.btree_node_size << 9); sysfs_print(btree_node_size_bytes, c->sb.btree_node_size << 9); - sysfs_hprint(btree_cache_size, bch_btree_cache_size(c)); - sysfs_print(cache_available_percent, bch_fs_available_percent(c)); + sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c)); + sysfs_print(cache_available_percent, bch2_fs_available_percent(c)); sysfs_print(btree_gc_running, c->gc_pos.phase != GC_PHASE_DONE); #if 0 /* XXX: reimplement */ - sysfs_print(btree_used_percent, bch_btree_used(c)); + sysfs_print(btree_used_percent, bch2_btree_used(c)); sysfs_print(btree_nodes, c->gc_stats.nodes); - sysfs_hprint(average_key_size, bch_average_key_size(c)); + sysfs_hprint(average_key_size, bch2_average_key_size(c)); #endif sysfs_print(cache_read_races, atomic_long_read(&c->cache_read_races)); - sysfs_print(writeback_keys_done, - atomic_long_read(&c->writeback_keys_done)); - sysfs_print(writeback_keys_failed, - atomic_long_read(&c->writeback_keys_failed)); - - /* See count_io_errors for why 88 */ - sysfs_print(io_error_halflife, c->error_decay * 88); - sysfs_print(io_error_limit, c->error_limit >> IO_ERROR_SHIFT); - - sysfs_hprint(congested, - ((uint64_t) bch_get_congested(c)) << 9); - sysfs_print(congested_read_threshold_us, - c->congested_read_threshold_us); - sysfs_print(congested_write_threshold_us, - c->congested_write_threshold_us); - sysfs_printf(foreground_write_ratelimit_enabled, "%i", c->foreground_write_ratelimit_enabled); sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); @@ -683,69 +353,35 @@ SHOW(bch_fs) /* Debugging: */ if (attr == &sysfs_journal_debug) - return bch_journal_print_debug(&c->journal, buf); + return bch2_journal_print_debug(&c->journal, buf); #define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name); BCH_DEBUG_PARAMS() #undef BCH_DEBUG_PARAM - if (!bch_fs_running(c)) + if (!bch2_fs_running(c)) return -EPERM; if (attr == &sysfs_bset_tree_stats) - return bch_bset_print_stats(c, buf); + return bch2_bset_print_stats(c, buf); if (attr == &sysfs_alloc_debug) return show_fs_alloc_debug(c, buf); sysfs_print(tree_depth, c->btree_roots[BTREE_ID_EXTENTS].b->level); - sysfs_print(root_usage_percent, bch_root_usage(c)); + sysfs_print(root_usage_percent, bch2_root_usage(c)); if (attr == &sysfs_compression_stats) - return bch_compression_stats(c, buf); + return bch2_compression_stats(c, buf); sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b); return 0; 
} -STORE(__bch_fs) +STORE(__bch2_fs) { struct bch_fs *c = container_of(kobj, struct bch_fs, kobj); - if (attr == &sysfs_unregister) { - bch_fs_detach(c); - return size; - } - - if (attr == &sysfs_stop) { - bch_fs_stop_async(c); - return size; - } - - if (attr == &sysfs_clear_stats) { - atomic_long_set(&c->writeback_keys_done, 0); - atomic_long_set(&c->writeback_keys_failed, 0); - bch_cache_accounting_clear(&c->accounting); - - return size; - } - - sysfs_strtoul(congested_read_threshold_us, - c->congested_read_threshold_us); - sysfs_strtoul(congested_write_threshold_us, - c->congested_write_threshold_us); - - if (attr == &sysfs_io_error_limit) { - c->error_limit = strtoul_or_return(buf) << IO_ERROR_SHIFT; - return size; - } - - /* See count_io_errors() for why 88 */ - if (attr == &sysfs_io_error_halflife) { - c->error_decay = strtoul_or_return(buf) / 88; - return size; - } - sysfs_strtoul(journal_write_delay_ms, c->journal.write_delay_ms); sysfs_strtoul(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms); @@ -768,7 +404,7 @@ STORE(__bch_fs) ssize_t ret = strtoul_safe(buf, c->tiering_enabled) ?: (ssize_t) size; - bch_tiering_start(c); /* issue wakeups */ + bch2_tiering_start(c); /* issue wakeups */ return ret; } @@ -787,30 +423,22 @@ STORE(__bch_fs) BCH_DEBUG_PARAMS() #undef BCH_DEBUG_PARAM - if (!bch_fs_running(c)) + if (!bch2_fs_running(c)) return -EPERM; if (attr == &sysfs_journal_flush) { - bch_journal_meta_async(&c->journal, NULL); + bch2_journal_meta_async(&c->journal, NULL); return size; } - if (attr == &sysfs_blockdev_volume_create) { - u64 v = strtoi_h_or_return(buf); - int r = bch_blockdev_volume_create(c, v); - - if (r) - return r; - } - if (attr == &sysfs_trigger_btree_coalesce) - bch_coalesce(c); + bch2_coalesce(c); /* Debugging: */ if (attr == &sysfs_trigger_gc) - bch_gc(c); + bch2_gc(c); if (attr == &sysfs_prune_cache) { struct shrink_control sc; @@ -823,24 +451,21 @@ STORE(__bch_fs) return size; } -STORE(bch_fs) +STORE(bch2_fs) { struct bch_fs *c = container_of(kobj, struct bch_fs, kobj); mutex_lock(&c->state_lock); - size = __bch_fs_store(kobj, attr, buf, size); + size = __bch2_fs_store(kobj, attr, buf, size); mutex_unlock(&c->state_lock); return size; } -static struct attribute *bch_fs_files[] = { - &sysfs_unregister, - &sysfs_stop, +static struct attribute *bch2_fs_files[] = { &sysfs_journal_write_delay_ms, &sysfs_journal_reclaim_delay_ms, &sysfs_journal_entry_size_max, - &sysfs_blockdev_volume_create, &sysfs_block_size, &sysfs_block_size_bytes, @@ -854,13 +479,6 @@ static struct attribute *bch_fs_files[] = { &sysfs_average_key_size, - &sysfs_io_error_limit, - &sysfs_io_error_halflife, - &sysfs_congested, - &sysfs_congested_read_threshold_us, - &sysfs_congested_write_threshold_us, - &sysfs_clear_stats, - &sysfs_meta_replicas_have, &sysfs_data_replicas_have, @@ -870,27 +488,27 @@ static struct attribute *bch_fs_files[] = { &sysfs_journal_flush, NULL }; -KTYPE(bch_fs); +KTYPE(bch2_fs); /* internal dir - just a wrapper */ -SHOW(bch_fs_internal) +SHOW(bch2_fs_internal) { struct bch_fs *c = container_of(kobj, struct bch_fs, internal); - return bch_fs_show(&c->kobj, attr, buf); + return bch2_fs_show(&c->kobj, attr, buf); } -STORE(bch_fs_internal) +STORE(bch2_fs_internal) { struct bch_fs *c = container_of(kobj, struct bch_fs, internal); - return bch_fs_store(&c->kobj, attr, buf, size); + return bch2_fs_store(&c->kobj, attr, buf, size); } -static void bch_fs_internal_release(struct kobject *k) +static void bch2_fs_internal_release(struct kobject *k) { } -static struct attribute 
*bch_fs_internal_files[] = { +static struct attribute *bch2_fs_internal_files[] = { &sysfs_journal_debug, &sysfs_alloc_debug, @@ -902,8 +520,6 @@ static struct attribute *bch_fs_internal_files[] = { &sysfs_bset_tree_stats, &sysfs_cache_read_races, - &sysfs_writeback_keys_done, - &sysfs_writeback_keys_failed, &sysfs_trigger_btree_coalesce, &sysfs_trigger_gc, @@ -921,34 +537,34 @@ static struct attribute *bch_fs_internal_files[] = { NULL }; -KTYPE(bch_fs_internal); +KTYPE(bch2_fs_internal); /* options */ -SHOW(bch_fs_opts_dir) +SHOW(bch2_fs_opts_dir) { struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir); - return bch_opt_show(&c->opts, attr->name, buf, PAGE_SIZE); + return bch2_opt_show(&c->opts, attr->name, buf, PAGE_SIZE); } -STORE(bch_fs_opts_dir) +STORE(bch2_fs_opts_dir) { struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir); const struct bch_option *opt; enum bch_opt_id id; u64 v; - id = bch_parse_sysfs_opt(attr->name, buf, &v); + id = bch2_parse_sysfs_opt(attr->name, buf, &v); if (id < 0) return id; - opt = &bch_opt_table[id]; + opt = &bch2_opt_table[id]; mutex_lock(&c->sb_lock); if (id == Opt_compression) { - int ret = bch_check_set_has_compressed_data(c, v); + int ret = bch2_check_set_has_compressed_data(c, v); if (ret) { mutex_unlock(&c->sb_lock); return ret; @@ -957,21 +573,21 @@ STORE(bch_fs_opts_dir) if (opt->set_sb != SET_NO_SB_OPT) { opt->set_sb(c->disk_sb, v); - bch_write_super(c); + bch2_write_super(c); } - bch_opt_set(&c->opts, id, v); + bch2_opt_set(&c->opts, id, v); mutex_unlock(&c->sb_lock); return size; } -static void bch_fs_opts_dir_release(struct kobject *k) +static void bch2_fs_opts_dir_release(struct kobject *k) { } -static struct attribute *bch_fs_opts_dir_files[] = { +static struct attribute *bch2_fs_opts_dir_files[] = { #define BCH_OPT(_name, ...) 
\ &sysfs_opt_##_name, @@ -980,11 +596,11 @@ static struct attribute *bch_fs_opts_dir_files[] = { NULL }; -KTYPE(bch_fs_opts_dir); +KTYPE(bch2_fs_opts_dir); /* time stats */ -SHOW(bch_fs_time_stats) +SHOW(bch2_fs_time_stats) { struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats); @@ -997,7 +613,7 @@ SHOW(bch_fs_time_stats) return 0; } -STORE(bch_fs_time_stats) +STORE(bch2_fs_time_stats) { struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats); @@ -1009,11 +625,11 @@ STORE(bch_fs_time_stats) return size; } -static void bch_fs_time_stats_release(struct kobject *k) +static void bch2_fs_time_stats_release(struct kobject *k) { } -static struct attribute *bch_fs_time_stats_files[] = { +static struct attribute *bch2_fs_time_stats_files[] = { #define BCH_TIME_STAT(name, frequency_units, duration_units) \ sysfs_time_stats_attribute_list(name, frequency_units, duration_units) BCH_TIME_STATS() @@ -1021,7 +637,7 @@ static struct attribute *bch_fs_time_stats_files[] = { NULL }; -KTYPE(bch_fs_time_stats); +KTYPE(bch2_fs_time_stats); typedef unsigned (bucket_map_fn)(struct bch_dev *, struct bucket *, void *); @@ -1109,7 +725,7 @@ static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf) static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf) { struct bch_fs *c = ca->fs; - struct bch_dev_usage stats = bch_dev_usage_read(ca); + struct bch_dev_usage stats = bch2_dev_usage_read(ca); return scnprintf(buf, PAGE_SIZE, "free_inc: %zu/%zu\n" @@ -1149,11 +765,11 @@ static u64 sectors_written(struct bch_dev *ca) return ret; } -SHOW(bch_dev) +SHOW(bch2_dev) { struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj); struct bch_fs *c = ca->fs; - struct bch_dev_usage stats = bch_dev_usage_read(ca); + struct bch_dev_usage stats = bch2_dev_usage_read(ca); sysfs_printf(uuid, "%pU\n", ca->uuid.b); @@ -1171,9 +787,6 @@ SHOW(bch_dev) (atomic64_read(&ca->meta_sectors_written) + atomic64_read(&ca->btree_sectors_written)) << 9); - sysfs_print(io_errors, - atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT); - sysfs_hprint(dirty_data, stats.sectors[S_DIRTY] << 9); sysfs_print(dirty_bytes, stats.sectors[S_DIRTY] << 9); sysfs_print(dirty_buckets, stats.buckets_dirty); @@ -1190,16 +803,16 @@ SHOW(bch_dev) sysfs_pd_controller_show(copy_gc, &ca->moving_gc_pd); if (attr == &sysfs_cache_replacement_policy) - return bch_snprint_string_list(buf, PAGE_SIZE, - bch_cache_replacement_policies, - ca->mi.replacement); + return bch2_snprint_string_list(buf, PAGE_SIZE, + bch2_cache_replacement_policies, + ca->mi.replacement); sysfs_print(tier, ca->mi.tier); if (attr == &sysfs_state_rw) - return bch_snprint_string_list(buf, PAGE_SIZE, - bch_dev_state, - ca->mi.state); + return bch2_snprint_string_list(buf, PAGE_SIZE, + bch2_dev_state, + ca->mi.state); if (attr == &sysfs_read_priority_stats) return show_quantiles(ca, buf, bucket_priority_fn, (void *) 0); @@ -1217,7 +830,7 @@ SHOW(bch_dev) return 0; } -STORE(bch_dev) +STORE(bch2_dev) { struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj); struct bch_fs *c = ca->fs; @@ -1229,27 +842,27 @@ STORE(bch_dev) bool v = strtoul_or_return(buf); mutex_lock(&c->sb_lock); - mi = &bch_sb_get_members(c->disk_sb)->members[ca->dev_idx]; + mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx]; if (v != BCH_MEMBER_DISCARD(mi)) { SET_BCH_MEMBER_DISCARD(mi, v); - bch_write_super(c); + bch2_write_super(c); } mutex_unlock(&c->sb_lock); } if (attr == &sysfs_cache_replacement_policy) { - ssize_t v = bch_read_string_list(buf, bch_cache_replacement_policies); + ssize_t v 
= bch2_read_string_list(buf, bch2_cache_replacement_policies); if (v < 0) return v; mutex_lock(&c->sb_lock); - mi = &bch_sb_get_members(c->disk_sb)->members[ca->dev_idx]; + mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx]; if ((unsigned) v != BCH_MEMBER_REPLACEMENT(mi)) { SET_BCH_MEMBER_REPLACEMENT(mi, v); - bch_write_super(c); + bch2_write_super(c); } mutex_unlock(&c->sb_lock); } @@ -1267,34 +880,22 @@ STORE(bch_dev) return size; } - mi = &bch_sb_get_members(c->disk_sb)->members[ca->dev_idx]; + mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx]; SET_BCH_MEMBER_TIER(mi, v); - bch_write_super(c); + bch2_write_super(c); - bch_dev_group_remove(&c->tiers[prev_tier].devs, ca); - bch_dev_group_add(&c->tiers[ca->mi.tier].devs, ca); + bch2_dev_group_remove(&c->tiers[prev_tier].devs, ca); + bch2_dev_group_add(&c->tiers[ca->mi.tier].devs, ca); mutex_unlock(&c->sb_lock); - bch_recalc_capacity(c); - bch_tiering_start(c); - } - - if (attr == &sysfs_clear_stats) { - int cpu; - - for_each_possible_cpu(cpu) - *per_cpu_ptr(ca->sectors_written, cpu) = 0; - - atomic64_set(&ca->btree_sectors_written, 0); - atomic64_set(&ca->meta_sectors_written, 0); - atomic_set(&ca->io_count, 0); - atomic_set(&ca->io_errors, 0); + bch2_recalc_capacity(c); + bch2_tiering_start(c); } return size; } -static struct attribute *bch_dev_files[] = { +static struct attribute *bch2_dev_files[] = { &sysfs_uuid, &sysfs_bucket_size, &sysfs_bucket_size_bytes, @@ -1323,8 +924,6 @@ static struct attribute *bch_dev_files[] = { &sysfs_written, &sysfs_btree_written, &sysfs_metadata_written, - &sysfs_io_errors, - &sysfs_clear_stats, &sysfs_cache_replacement_policy, &sysfs_tier, &sysfs_state_rw, @@ -1333,4 +932,4 @@ static struct attribute *bch_dev_files[] = { sysfs_pd_controller_files(copy_gc), NULL }; -KTYPE(bch_dev); +KTYPE(bch2_dev); diff --git a/libbcache/sysfs.h b/libbcachefs/sysfs.h index 02700246..d1f17cff 100644 --- a/libbcache/sysfs.h +++ b/libbcachefs/sysfs.h @@ -44,7 +44,7 @@ do { \ #define sysfs_hprint(file, val) \ do { \ if (attr == &sysfs_ ## file) { \ - ssize_t ret = bch_hprint(buf, val); \ + ssize_t ret = bch2_hprint(buf, val); \ strcat(buf, "\n"); \ return ret + 1; \ } \ diff --git a/libbcache/tier.c b/libbcachefs/tier.c index 8627ac3e..16d32928 100644 --- a/libbcache/tier.c +++ b/libbcachefs/tier.c @@ -1,5 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" #include "alloc.h" #include "btree_iter.h" #include "buckets.h" @@ -13,7 +13,7 @@ #include <linux/freezer.h> #include <linux/kthread.h> -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> struct tiering_state { struct bch_tier *tier; @@ -83,12 +83,12 @@ static int issue_tiering_move(struct bch_fs *c, { int ret; - ret = bch_data_move(c, ctxt, &s->ca->tiering_write_point, k, NULL); + ret = bch2_data_move(c, ctxt, &s->ca->tiering_write_point, k, NULL); if (!ret) { - trace_bcache_tiering_copy(k.k); + trace_tiering_copy(k.k); s->sectors += k.k->size; } else { - trace_bcache_tiering_alloc_fail(c, k.k->size); + trace_tiering_alloc_fail(c, k.k->size); } return ret; @@ -110,19 +110,19 @@ static s64 read_tiering(struct bch_fs *c, struct bch_tier *tier) if (!nr_devices) return 0; - trace_bcache_tiering_start(c); + trace_tiering_start(c); memset(&s, 0, sizeof(s)); s.tier = tier; s.stripe_size = 2048; /* 1 mb for now */ - bch_move_ctxt_init(&ctxt, &tier->pd.rate, + bch2_move_ctxt_init(&ctxt, &tier->pd.rate, nr_devices * SECTORS_IN_FLIGHT_PER_DEVICE); - bch_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN); + bch2_btree_iter_init(&iter, c, 
BTREE_ID_EXTENTS, POS_MIN); while (!kthread_should_stop() && - !bch_move_ctxt_wait(&ctxt) && - (k = bch_btree_iter_peek(&iter)).k && + !bch2_move_ctxt_wait(&ctxt) && + (k = bch2_btree_iter_peek(&iter)).k && !btree_iter_err(k)) { if (!tiering_pred(c, &s, k)) goto next; @@ -133,30 +133,30 @@ static s64 read_tiering(struct bch_fs *c, struct bch_tier *tier) ret = issue_tiering_move(c, &s, &ctxt, k); if (ret) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); /* memory allocation failure, wait for some IO to finish */ - bch_move_ctxt_wait_for_io(&ctxt); + bch2_move_ctxt_wait_for_io(&ctxt); continue; } next: - bch_btree_iter_advance_pos(&iter); - //bch_btree_iter_cond_resched(&iter); + bch2_btree_iter_advance_pos(&iter); + //bch2_btree_iter_cond_resched(&iter); /* unlock before calling moving_context_wait() */ - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); cond_resched(); } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); tier_put_device(&s); - bch_move_ctxt_exit(&ctxt); - trace_bcache_tiering_end(c, ctxt.sectors_moved, ctxt.keys_moved); + bch2_move_ctxt_exit(&ctxt); + trace_tiering_end(c, ctxt.sectors_moved, ctxt.keys_moved); return ctxt.sectors_moved; } -static int bch_tiering_thread(void *arg) +static int bch2_tiering_thread(void *arg) { struct bch_tier *tier = arg; struct bch_fs *c = container_of(tier, struct bch_fs, tiers[tier->idx]); @@ -196,7 +196,7 @@ static int bch_tiering_thread(void *arg) if (available_sectors < (tier_capacity >> 1)) break; - bch_kthread_io_clock_wait(clock, + bch2_kthread_io_clock_wait(clock, last + available_sectors - (tier_capacity >> 1)); @@ -210,10 +210,10 @@ static int bch_tiering_thread(void *arg) return 0; } -static void __bch_tiering_stop(struct bch_tier *tier) +static void __bch2_tiering_stop(struct bch_tier *tier) { tier->pd.rate.rate = UINT_MAX; - bch_ratelimit_reset(&tier->pd.rate); + bch2_ratelimit_reset(&tier->pd.rate); if (tier->migrate) kthread_stop(tier->migrate); @@ -221,19 +221,19 @@ static void __bch_tiering_stop(struct bch_tier *tier) tier->migrate = NULL; } -void bch_tiering_stop(struct bch_fs *c) +void bch2_tiering_stop(struct bch_fs *c) { struct bch_tier *tier; for (tier = c->tiers; tier < c->tiers + ARRAY_SIZE(c->tiers); tier++) - __bch_tiering_stop(tier); + __bch2_tiering_stop(tier); } -static int __bch_tiering_start(struct bch_tier *tier) +static int __bch2_tiering_start(struct bch_tier *tier) { if (!tier->migrate) { struct task_struct *p = - kthread_create(bch_tiering_thread, tier, + kthread_create(bch2_tiering_thread, tier, "bch_tier[%u]", tier->idx); if (IS_ERR(p)) return PTR_ERR(p); @@ -245,7 +245,7 @@ static int __bch_tiering_start(struct bch_tier *tier) return 0; } -int bch_tiering_start(struct bch_fs *c) +int bch2_tiering_start(struct bch_fs *c) { struct bch_tier *tier; bool have_faster_tier = false; @@ -258,11 +258,11 @@ int bch_tiering_start(struct bch_fs *c) continue; if (have_faster_tier) { - int ret = __bch_tiering_start(tier); + int ret = __bch2_tiering_start(tier); if (ret) return ret; } else { - __bch_tiering_stop(tier); + __bch2_tiering_stop(tier); } have_faster_tier = true; @@ -271,12 +271,12 @@ int bch_tiering_start(struct bch_fs *c) return 0; } -void bch_fs_tiering_init(struct bch_fs *c) +void bch2_fs_tiering_init(struct bch_fs *c) { unsigned i; for (i = 0; i < ARRAY_SIZE(c->tiers); i++) { c->tiers[i].idx = i; - bch_pd_controller_init(&c->tiers[i].pd); + bch2_pd_controller_init(&c->tiers[i].pd); } } diff --git a/libbcachefs/tier.h b/libbcachefs/tier.h new file mode 100644 
index 00000000..a4fd6225 --- /dev/null +++ b/libbcachefs/tier.h @@ -0,0 +1,8 @@ +#ifndef _BCACHE_TIER_H +#define _BCACHE_TIER_H + +void bch2_tiering_stop(struct bch_fs *); +int bch2_tiering_start(struct bch_fs *); +void bch2_fs_tiering_init(struct bch_fs *); + +#endif diff --git a/libbcache/trace.c b/libbcachefs/trace.c index def525d1..13f0fc24 100644 --- a/libbcache/trace.c +++ b/libbcachefs/trace.c @@ -1,11 +1,11 @@ -#include "bcache.h" +#include "bcachefs.h" #include "alloc_types.h" -#include "blockdev_types.h" #include "buckets.h" #include "btree_types.h" #include "keylist.h" #include <linux/blktrace_api.h> +#include "keylist.h" #define CREATE_TRACE_POINTS -#include <trace/events/bcache.h> +#include <trace/events/bcachefs.h> diff --git a/libbcache/util.c b/libbcachefs/util.c index 5f816593..e4cd6317 100644 --- a/libbcache/util.c +++ b/libbcachefs/util.c @@ -23,7 +23,7 @@ #define simple_strtouint(c, end, base) simple_strtoul(c, end, base) #define STRTO_H(name, type) \ -int bch_ ## name ## _h(const char *cp, type *res) \ +int bch2_ ## name ## _h(const char *cp, type *res) \ { \ int u = 0; \ char *e; \ @@ -77,7 +77,7 @@ STRTO_H(strtouint, unsigned int) STRTO_H(strtoll, long long) STRTO_H(strtoull, unsigned long long) -ssize_t bch_hprint(char *buf, s64 v) +ssize_t bch2_hprint(char *buf, s64 v) { static const char units[] = "?kMGTPEZY"; char dec[4] = ""; @@ -101,7 +101,7 @@ ssize_t bch_hprint(char *buf, s64 v) return sprintf(buf, "%lli%s%c", v, dec, units[u]); } -ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[], +ssize_t bch2_snprint_string_list(char *buf, size_t size, const char * const list[], size_t selected) { char *out = buf; @@ -115,7 +115,7 @@ ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[ return out - buf; } -ssize_t bch_read_string_list(const char *buf, const char * const list[]) +ssize_t bch2_read_string_list(const char *buf, const char * const list[]) { size_t i; char *s, *d = kstrndup(buf, PAGE_SIZE - 1, GFP_KERNEL); @@ -136,7 +136,7 @@ ssize_t bch_read_string_list(const char *buf, const char * const list[]) return i; } -bool bch_is_zero(const void *_p, size_t n) +bool bch2_is_zero(const void *_p, size_t n) { const char *p = _p; size_t i; @@ -147,7 +147,7 @@ bool bch_is_zero(const void *_p, size_t n) return true; } -void bch_time_stats_clear(struct time_stats *stats) +void bch2_time_stats_clear(struct time_stats *stats) { spin_lock(&stats->lock); @@ -161,7 +161,7 @@ void bch_time_stats_clear(struct time_stats *stats) spin_unlock(&stats->lock); } -void __bch_time_stats_update(struct time_stats *stats, u64 start_time) +void __bch2_time_stats_update(struct time_stats *stats, u64 start_time) { u64 now, duration, last; @@ -193,22 +193,22 @@ void __bch_time_stats_update(struct time_stats *stats, u64 start_time) stats->last = now ?: 1; } -void bch_time_stats_update(struct time_stats *stats, u64 start_time) +void bch2_time_stats_update(struct time_stats *stats, u64 start_time) { spin_lock(&stats->lock); - __bch_time_stats_update(stats, start_time); + __bch2_time_stats_update(stats, start_time); spin_unlock(&stats->lock); } /** - * bch_ratelimit_delay() - return how long to delay until the next time to do + * bch2_ratelimit_delay() - return how long to delay until the next time to do * some work * * @d - the struct bch_ratelimit to update * * Returns the amount of time to delay by, in jiffies */ -u64 bch_ratelimit_delay(struct bch_ratelimit *d) +u64 bch2_ratelimit_delay(struct bch_ratelimit *d) { u64 now = local_clock(); 
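(Aside, not part of the patch: the util.c hunks on either side of this point only rename bch_ratelimit_delay()/bch_ratelimit_increment() to the bch2_ prefix; the pacing logic itself is untouched. As a rough, self-contained C sketch of what this "next deadline" style limiter does — the struct layout, the clock handling and the clamping constant below are illustrative assumptions, not code from this commit:

/* Illustrative sketch only (assumed semantics, userspace): pace work by
 * tracking the earliest time, in nanoseconds, at which more work may run. */
#include <stdint.h>

struct ratelimit_sketch {
	uint64_t next;		/* deadline, in ns */
	uint64_t ns_per_unit;	/* assumed cost of one unit of work, in ns */
};

/* How many ns to wait before doing more work; 0 means go ahead now. */
static uint64_t sketch_delay(const struct ratelimit_sketch *d, uint64_t now)
{
	return now >= d->next ? 0 : d->next - now;
}

/* Account for @done units by pushing the deadline forward, but do not let
 * it drift unboundedly far behind the current time. */
static void sketch_increment(struct ratelimit_sketch *d, uint64_t now,
			     uint64_t done)
{
	d->next += done * d->ns_per_unit;
	if (d->next + 2000000000ULL < now)	/* ~2s of slack, assumed */
		d->next = now - 2000000000ULL;
}

The kernel code keys the same idea off local_clock() and the rate field declared in the util.h hunks further down.)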
@@ -218,12 +218,12 @@ u64 bch_ratelimit_delay(struct bch_ratelimit *d) } /** - * bch_ratelimit_increment() - increment @d by the amount of work done + * bch2_ratelimit_increment() - increment @d by the amount of work done * * @d - the struct bch_ratelimit to update * @done - the amount of work done, in arbitrary units */ -void bch_ratelimit_increment(struct bch_ratelimit *d, u64 done) +void bch2_ratelimit_increment(struct bch_ratelimit *d, u64 done) { u64 now = local_clock(); @@ -236,10 +236,10 @@ void bch_ratelimit_increment(struct bch_ratelimit *d, u64 done) d->next = now - NSEC_PER_SEC * 2; } -int bch_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *d) +int bch2_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *d) { while (1) { - u64 delay = bch_ratelimit_delay(d); + u64 delay = bch2_ratelimit_delay(d); if (delay) set_current_state(TASK_INTERRUPTIBLE); @@ -263,7 +263,7 @@ int bch_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *d) * @sign: 1 or -1; 1 if increasing the rate makes actual go up, -1 if increasing * it makes actual go down. */ -void bch_pd_controller_update(struct bch_pd_controller *pd, +void bch2_pd_controller_update(struct bch_pd_controller *pd, s64 target, s64 actual, int sign) { s64 proportional, derivative, change; @@ -307,7 +307,7 @@ void bch_pd_controller_update(struct bch_pd_controller *pd, pd->last_target = target; } -void bch_pd_controller_init(struct bch_pd_controller *pd) +void bch2_pd_controller_init(struct bch_pd_controller *pd) { pd->rate.rate = 1024; pd->last_update = jiffies; @@ -317,7 +317,7 @@ void bch_pd_controller_init(struct bch_pd_controller *pd) pd->backpressure = 1; } -size_t bch_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf) +size_t bch2_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf) { /* 2^64 - 1 is 20 digits, plus null byte */ char rate[21]; @@ -328,12 +328,12 @@ size_t bch_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf) char change[21]; s64 next_io; - bch_hprint(rate, pd->rate.rate); - bch_hprint(actual, pd->last_actual); - bch_hprint(target, pd->last_target); - bch_hprint(proportional, pd->last_proportional); - bch_hprint(derivative, pd->last_derivative); - bch_hprint(change, pd->last_change); + bch2_hprint(rate, pd->rate.rate); + bch2_hprint(actual, pd->last_actual); + bch2_hprint(target, pd->last_target); + bch2_hprint(proportional, pd->last_proportional); + bch2_hprint(derivative, pd->last_derivative); + bch2_hprint(change, pd->last_change); next_io = div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC); @@ -349,7 +349,7 @@ size_t bch_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf) derivative, change, next_io); } -void bch_bio_map(struct bio *bio, void *base) +void bch2_bio_map(struct bio *bio, void *base) { size_t size = bio->bi_iter.bi_size; struct bio_vec *bv = bio->bi_io_vec; @@ -377,7 +377,7 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, } } -size_t bch_rand_range(size_t max) +size_t bch2_rand_range(size_t max) { size_t rand; diff --git a/libbcache/util.h b/libbcachefs/util.h index 88cbe301..5f13c824 100644 --- a/libbcache/util.h +++ b/libbcachefs/util.h @@ -3,6 +3,7 @@ #include <linux/bio.h> #include <linux/blkdev.h> +#include <linux/closure.h> #include <linux/errno.h> #include <linux/blkdev.h> #include <linux/freezer.h> @@ -13,14 +14,12 @@ #include <linux/vmalloc.h> #include <linux/workqueue.h> -#include "closure.h" - #define PAGE_SECTOR_SHIFT (PAGE_SHIFT - 9) #define PAGE_SECTORS (1UL << PAGE_SECTOR_SHIFT) struct 
closure; -#ifdef CONFIG_BCACHE_DEBUG +#ifdef CONFIG_BCACHEFS_DEBUG #define EBUG_ON(cond) BUG_ON(cond) #define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0) @@ -245,36 +244,36 @@ do { \ #define ANYSINT_MAX(t) \ ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1) -int bch_strtoint_h(const char *, int *); -int bch_strtouint_h(const char *, unsigned int *); -int bch_strtoll_h(const char *, long long *); -int bch_strtoull_h(const char *, unsigned long long *); +int bch2_strtoint_h(const char *, int *); +int bch2_strtouint_h(const char *, unsigned int *); +int bch2_strtoll_h(const char *, long long *); +int bch2_strtoull_h(const char *, unsigned long long *); -static inline int bch_strtol_h(const char *cp, long *res) +static inline int bch2_strtol_h(const char *cp, long *res) { #if BITS_PER_LONG == 32 - return bch_strtoint_h(cp, (int *) res); + return bch2_strtoint_h(cp, (int *) res); #else - return bch_strtoll_h(cp, (long long *) res); + return bch2_strtoll_h(cp, (long long *) res); #endif } -static inline int bch_strtoul_h(const char *cp, long *res) +static inline int bch2_strtoul_h(const char *cp, long *res) { #if BITS_PER_LONG == 32 - return bch_strtouint_h(cp, (unsigned int *) res); + return bch2_strtouint_h(cp, (unsigned int *) res); #else - return bch_strtoull_h(cp, (unsigned long long *) res); + return bch2_strtoull_h(cp, (unsigned long long *) res); #endif } #define strtoi_h(cp, res) \ - ( type_is(*res, int) ? bch_strtoint_h(cp, (void *) res)\ - : type_is(*res, long) ? bch_strtol_h(cp, (void *) res)\ - : type_is(*res, long long) ? bch_strtoll_h(cp, (void *) res)\ - : type_is(*res, unsigned) ? bch_strtouint_h(cp, (void *) res)\ - : type_is(*res, unsigned long) ? bch_strtoul_h(cp, (void *) res)\ - : type_is(*res, unsigned long long) ? bch_strtoull_h(cp, (void *) res)\ + ( type_is(*res, int) ? bch2_strtoint_h(cp, (void *) res)\ + : type_is(*res, long) ? bch2_strtol_h(cp, (void *) res)\ + : type_is(*res, long long) ? bch2_strtoll_h(cp, (void *) res)\ + : type_is(*res, unsigned) ? bch2_strtouint_h(cp, (void *) res)\ + : type_is(*res, unsigned long) ? bch2_strtoul_h(cp, (void *) res)\ + : type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\ : -EINVAL) #define strtoul_safe(cp, var) \ @@ -317,14 +316,14 @@ static inline int bch_strtoul_h(const char *cp, long *res) : type_is(var, char *) ? 
"%s\n" \ : "%i\n", var) -ssize_t bch_hprint(char *buf, s64 v); +ssize_t bch2_hprint(char *buf, s64 v); -bool bch_is_zero(const void *, size_t); +bool bch2_is_zero(const void *, size_t); -ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[], +ssize_t bch2_snprint_string_list(char *buf, size_t size, const char * const list[], size_t selected); -ssize_t bch_read_string_list(const char *buf, const char * const list[]); +ssize_t bch2_read_string_list(const char *buf, const char * const list[]); struct time_stats { spinlock_t lock; @@ -340,9 +339,9 @@ struct time_stats { u64 last; }; -void bch_time_stats_clear(struct time_stats *stats); -void __bch_time_stats_update(struct time_stats *stats, u64 time); -void bch_time_stats_update(struct time_stats *stats, u64 time); +void bch2_time_stats_clear(struct time_stats *stats); +void __bch2_time_stats_update(struct time_stats *stats, u64 time); +void bch2_time_stats_update(struct time_stats *stats, u64 time); static inline unsigned local_clock_us(void) { @@ -383,7 +382,7 @@ do { \ #define sysfs_clear_time_stats(stats, name) \ do { \ if (attr == &sysfs_ ## name ## _clear) \ - bch_time_stats_clear(stats); \ + bch2_time_stats_clear(stats); \ } while (0) #define sysfs_time_stats_attribute(name, \ @@ -423,19 +422,19 @@ struct bch_ratelimit { /* * Rate at which we want to do work, in units per nanosecond * The units here correspond to the units passed to - * bch_ratelimit_increment() + * bch2_ratelimit_increment() */ unsigned rate; }; -static inline void bch_ratelimit_reset(struct bch_ratelimit *d) +static inline void bch2_ratelimit_reset(struct bch_ratelimit *d) { d->next = local_clock(); } -u64 bch_ratelimit_delay(struct bch_ratelimit *); -void bch_ratelimit_increment(struct bch_ratelimit *, u64); -int bch_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *); +u64 bch2_ratelimit_delay(struct bch_ratelimit *); +void bch2_ratelimit_increment(struct bch_ratelimit *, u64); +int bch2_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *); struct bch_pd_controller { struct bch_ratelimit rate; @@ -454,14 +453,14 @@ struct bch_pd_controller { s64 last_change; s64 last_target; - /* If true, the rate will not increase if bch_ratelimit_delay() + /* If true, the rate will not increase if bch2_ratelimit_delay() * is not being called often enough. 
*/ bool backpressure; }; -void bch_pd_controller_update(struct bch_pd_controller *, s64, s64, int); -void bch_pd_controller_init(struct bch_pd_controller *); -size_t bch_pd_controller_print_debug(struct bch_pd_controller *, char *); +void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int); +void bch2_pd_controller_init(struct bch_pd_controller *); +size_t bch2_pd_controller_print_debug(struct bch_pd_controller *, char *); #define sysfs_pd_controller_attribute(name) \ rw_attribute(name##_rate); \ @@ -485,7 +484,7 @@ do { \ sysfs_print(name##_rate_p_term_inverse, (var)->p_term_inverse); \ \ if (attr == &sysfs_##name##_rate_debug) \ - return bch_pd_controller_print_debug(var, buf); \ + return bch2_pd_controller_print_debug(var, buf); \ } while (0) #define sysfs_pd_controller_store(name, var) \ @@ -601,7 +600,7 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits) return x; } -void bch_bio_map(struct bio *bio, void *base); +void bch2_bio_map(struct bio *bio, void *base); static inline sector_t bdev_sectors(struct block_device *bdev) { @@ -611,13 +610,7 @@ static inline sector_t bdev_sectors(struct block_device *bdev) #define closure_bio_submit(bio, cl) \ do { \ closure_get(cl); \ - generic_make_request(bio); \ -} while (0) - -#define closure_bio_submit_punt(bio, cl, c) \ -do { \ - closure_get(cl); \ - bch_generic_make_request(bio, c); \ + submit_bio(bio); \ } while (0) #define kthread_wait_freezable(cond) \ @@ -640,7 +633,7 @@ do { \ _ret; \ }) -size_t bch_rand_range(size_t); +size_t bch2_rand_range(size_t); void memcpy_to_bio(struct bio *, struct bvec_iter, void *); void memcpy_from_bio(void *, struct bio *, struct bvec_iter); diff --git a/libbcache/vstructs.h b/libbcachefs/vstructs.h index ce2cece0..ce2cece0 100644 --- a/libbcache/vstructs.h +++ b/libbcachefs/vstructs.h diff --git a/libbcache/xattr.c b/libbcachefs/xattr.c index a5c66fa1..62a08897 100644 --- a/libbcache/xattr.c +++ b/libbcachefs/xattr.c @@ -1,5 +1,5 @@ -#include "bcache.h" +#include "bcachefs.h" #include "bkey_methods.h" #include "btree_update.h" #include "extents.h" @@ -18,30 +18,30 @@ struct xattr_search_key { #define X_SEARCH(_type, _name, _len) ((struct xattr_search_key) \ { .type = _type, .name = QSTR_INIT(_name, _len) }) -static u64 bch_xattr_hash(const struct bch_hash_info *info, +static u64 bch2_xattr_hash(const struct bch_hash_info *info, const struct xattr_search_key *key) { struct bch_str_hash_ctx ctx; - bch_str_hash_init(&ctx, info); - bch_str_hash_update(&ctx, info, &key->type, sizeof(key->type)); - bch_str_hash_update(&ctx, info, key->name.name, key->name.len); + bch2_str_hash_init(&ctx, info); + bch2_str_hash_update(&ctx, info, &key->type, sizeof(key->type)); + bch2_str_hash_update(&ctx, info, key->name.name, key->name.len); - return bch_str_hash_end(&ctx, info); + return bch2_str_hash_end(&ctx, info); } #define xattr_val(_xattr) ((_xattr)->x_name + (_xattr)->x_name_len) static u64 xattr_hash_key(const struct bch_hash_info *info, const void *key) { - return bch_xattr_hash(info, key); + return bch2_xattr_hash(info, key); } static u64 xattr_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k) { struct bkey_s_c_xattr x = bkey_s_c_to_xattr(k); - return bch_xattr_hash(info, + return bch2_xattr_hash(info, &X_SEARCH(x.v->x_type, x.v->x_name, x.v->x_name_len)); } @@ -75,7 +75,7 @@ static const struct bch_hash_desc xattr_hash_desc = { .cmp_bkey = xattr_cmp_bkey, }; -static const char *bch_xattr_invalid(const struct bch_fs *c, +static const char *bch2_xattr_invalid(const 
struct bch_fs *c, struct bkey_s_c k) { switch (k.k->type) { @@ -94,7 +94,7 @@ static const char *bch_xattr_invalid(const struct bch_fs *c, } } -static void bch_xattr_to_text(struct bch_fs *c, char *buf, +static void bch2_xattr_to_text(struct bch_fs *c, char *buf, size_t size, struct bkey_s_c k) { struct bkey_s_c_xattr xattr; @@ -132,12 +132,12 @@ static void bch_xattr_to_text(struct bch_fs *c, char *buf, } } -const struct bkey_ops bch_bkey_xattr_ops = { - .key_invalid = bch_xattr_invalid, - .val_to_text = bch_xattr_to_text, +const struct bkey_ops bch2_bkey_xattr_ops = { + .key_invalid = bch2_xattr_invalid, + .val_to_text = bch2_xattr_to_text, }; -int bch_xattr_get(struct bch_fs *c, struct inode *inode, +int bch2_xattr_get(struct bch_fs *c, struct inode *inode, const char *name, void *buffer, size_t size, int type) { struct bch_inode_info *ei = to_bch_ei(inode); @@ -146,11 +146,11 @@ int bch_xattr_get(struct bch_fs *c, struct inode *inode, struct bkey_s_c_xattr xattr; int ret; - k = bch_hash_lookup(xattr_hash_desc, &ei->str_hash, c, + k = bch2_hash_lookup(xattr_hash_desc, &ei->str_hash, c, ei->vfs_inode.i_ino, &iter, &X_SEARCH(type, name, strlen(name))); if (IS_ERR(k.k)) - return bch_btree_iter_unlock(&iter) ?: -ENODATA; + return bch2_btree_iter_unlock(&iter) ?: -ENODATA; xattr = bkey_s_c_to_xattr(k); ret = le16_to_cpu(xattr.v->x_val_len); @@ -161,11 +161,11 @@ int bch_xattr_get(struct bch_fs *c, struct inode *inode, memcpy(buffer, xattr_val(xattr.v), ret); } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret; } -int __bch_xattr_set(struct bch_fs *c, u64 inum, +int __bch2_xattr_set(struct bch_fs *c, u64 inum, const struct bch_hash_info *hash_info, const char *name, const void *value, size_t size, int flags, int type, u64 *journal_seq) @@ -174,7 +174,7 @@ int __bch_xattr_set(struct bch_fs *c, u64 inum, int ret; if (!value) { - ret = bch_hash_delete(xattr_hash_desc, hash_info, + ret = bch2_hash_delete(xattr_hash_desc, hash_info, c, inum, journal_seq, &search); } else { @@ -199,7 +199,7 @@ int __bch_xattr_set(struct bch_fs *c, u64 inum, memcpy(xattr->v.x_name, search.name.name, search.name.len); memcpy(xattr_val(&xattr->v), value, size); - ret = bch_hash_set(xattr_hash_desc, hash_info, c, + ret = bch2_hash_set(xattr_hash_desc, hash_info, c, inum, journal_seq, &xattr->k_i, (flags & XATTR_CREATE ? 
BCH_HASH_SET_MUST_CREATE : 0)| @@ -213,25 +213,25 @@ int __bch_xattr_set(struct bch_fs *c, u64 inum, return ret; } -int bch_xattr_set(struct bch_fs *c, struct inode *inode, +int bch2_xattr_set(struct bch_fs *c, struct inode *inode, const char *name, const void *value, size_t size, int flags, int type) { struct bch_inode_info *ei = to_bch_ei(inode); - return __bch_xattr_set(c, inode->i_ino, &ei->str_hash, + return __bch2_xattr_set(c, inode->i_ino, &ei->str_hash, name, value, size, flags, type, &ei->journal_seq); } -static const struct xattr_handler *bch_xattr_type_to_handler(unsigned); +static const struct xattr_handler *bch2_xattr_type_to_handler(unsigned); -static size_t bch_xattr_emit(struct dentry *dentry, +static size_t bch2_xattr_emit(struct dentry *dentry, const struct bch_xattr *xattr, char *buffer, size_t buffer_size) { const struct xattr_handler *handler = - bch_xattr_type_to_handler(xattr->x_type); + bch2_xattr_type_to_handler(xattr->x_type); if (handler && (!handler->list || handler->list(dentry))) { const char *prefix = handler->prefix ?: handler->name; @@ -251,7 +251,7 @@ static size_t bch_xattr_emit(struct dentry *dentry, } } -ssize_t bch_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) +ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) { struct bch_fs *c = dentry->d_sb->s_fs_info; struct btree_iter iter; @@ -272,10 +272,10 @@ ssize_t bch_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) xattr = bkey_s_c_to_xattr(k).v; - len = bch_xattr_emit(dentry, xattr, buffer, buffer_size); + len = bch2_xattr_emit(dentry, xattr, buffer, buffer_size); if (buffer) { if (len > buffer_size) { - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return -ERANGE; } @@ -286,55 +286,55 @@ ssize_t bch_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) ret += len; } - bch_btree_iter_unlock(&iter); + bch2_btree_iter_unlock(&iter); return ret; } -static int bch_xattr_get_handler(const struct xattr_handler *handler, +static int bch2_xattr_get_handler(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size) { struct bch_fs *c = inode->i_sb->s_fs_info; - return bch_xattr_get(c, inode, name, buffer, size, handler->flags); + return bch2_xattr_get(c, inode, name, buffer, size, handler->flags); } -static int bch_xattr_set_handler(const struct xattr_handler *handler, +static int bch2_xattr_set_handler(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct bch_fs *c = inode->i_sb->s_fs_info; - return bch_xattr_set(c, inode, name, value, size, flags, + return bch2_xattr_set(c, inode, name, value, size, flags, handler->flags); } static const struct xattr_handler bch_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, - .get = bch_xattr_get_handler, - .set = bch_xattr_set_handler, + .get = bch2_xattr_get_handler, + .set = bch2_xattr_set_handler, .flags = BCH_XATTR_INDEX_USER, }; -static bool bch_xattr_trusted_list(struct dentry *dentry) +static bool bch2_xattr_trusted_list(struct dentry *dentry) { return capable(CAP_SYS_ADMIN); } static const struct xattr_handler bch_xattr_trusted_handler = { .prefix = XATTR_TRUSTED_PREFIX, - .list = bch_xattr_trusted_list, - .get = bch_xattr_get_handler, - .set = bch_xattr_set_handler, + .list = bch2_xattr_trusted_list, + .get = bch2_xattr_get_handler, + .set = bch2_xattr_set_handler, .flags = BCH_XATTR_INDEX_TRUSTED, }; 
static const struct xattr_handler bch_xattr_security_handler = { .prefix = XATTR_SECURITY_PREFIX, - .get = bch_xattr_get_handler, - .set = bch_xattr_set_handler, + .get = bch2_xattr_get_handler, + .set = bch2_xattr_set_handler, .flags = BCH_XATTR_INDEX_SECURITY, }; @@ -348,7 +348,7 @@ static const struct xattr_handler *bch_xattr_handler_map[] = { [BCH_XATTR_INDEX_SECURITY] = &bch_xattr_security_handler, }; -const struct xattr_handler *bch_xattr_handlers[] = { +const struct xattr_handler *bch2_xattr_handlers[] = { &bch_xattr_user_handler, &posix_acl_access_xattr_handler, &posix_acl_default_xattr_handler, @@ -357,7 +357,7 @@ const struct xattr_handler *bch_xattr_handlers[] = { NULL }; -static const struct xattr_handler *bch_xattr_type_to_handler(unsigned type) +static const struct xattr_handler *bch2_xattr_type_to_handler(unsigned type) { return type < ARRAY_SIZE(bch_xattr_handler_map) ? bch_xattr_handler_map[type] diff --git a/libbcachefs/xattr.h b/libbcachefs/xattr.h new file mode 100644 index 00000000..14eba241 --- /dev/null +++ b/libbcachefs/xattr.h @@ -0,0 +1,20 @@ +#ifndef _BCACHE_XATTR_H +#define _BCACHE_XATTR_H + +extern const struct bkey_ops bch2_bkey_xattr_ops; + +struct dentry; +struct xattr_handler; +struct bch_hash_info; + +int bch2_xattr_get(struct bch_fs *, struct inode *, + const char *, void *, size_t, int); +int __bch2_xattr_set(struct bch_fs *, u64, const struct bch_hash_info *, + const char *, const void *, size_t, int, int, u64 *); +int bch2_xattr_set(struct bch_fs *, struct inode *, + const char *, const void *, size_t, int, int); +ssize_t bch2_xattr_list(struct dentry *, char *, size_t); + +extern const struct xattr_handler *bch2_xattr_handlers[]; + +#endif /* _BCACHE_XATTR_H */ diff --git a/libbcache/closure.c b/linux/closure.c index f6f4dd99..26a29356 100644 --- a/libbcache/closure.c +++ b/linux/closure.c @@ -5,12 +5,11 @@ * Copyright 2012 Google, Inc. 
*/ +#include <linux/closure.h> #include <linux/debugfs.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/seq_file.h> -#include "closure.h" - static inline void closure_put_after_sub(struct closure *cl, int flags) { int r = flags & CLOSURE_REMAINING_MASK; @@ -128,7 +127,7 @@ void __sched __closure_sync(struct closure *cl) } EXPORT_SYMBOL(__closure_sync); -#ifdef CONFIG_BCACHE_CLOSURES_DEBUG +#ifdef CONFIG_DEBUG_CLOSURES static LIST_HEAD(closure_list); static DEFINE_SPINLOCK(closure_list_lock); @@ -202,9 +201,11 @@ static const struct file_operations debug_ops = { .release = single_release }; -void __init closure_debug_init(void) +static int __init closure_debug_init(void) { debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops); + return 0; } +late_initcall(closure_debug_init) #endif diff --git a/mkfs.bcache b/mkfs.bcache deleted file mode 100755 index 411c1512..00000000 --- a/mkfs.bcache +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -exec bcache format "$@" diff --git a/mkfs.bcachefs b/mkfs.bcachefs new file mode 100755 index 00000000..a1ce6159 --- /dev/null +++ b/mkfs.bcachefs @@ -0,0 +1,3 @@ +#!/bin/sh + +exec bcachefs format "$@" diff --git a/tools-util.c b/tools-util.c index bb2ac47c..bd114af3 100644 --- a/tools-util.c +++ b/tools-util.c @@ -18,7 +18,7 @@ #include "ccan/crc/crc.h" -#include "linux/bcache-ioctl.h" +#include "bcachefs_ioctl.h" #include "linux/sort.h" #include "tools-util.h" #include "util.h" @@ -93,7 +93,7 @@ u64 read_file_u64(int dirfd, const char *path) ssize_t read_string_list_or_die(const char *opt, const char * const list[], const char *msg) { - ssize_t v = bch_read_string_list(opt, list); + ssize_t v = bch2_read_string_list(opt, list); if (v < 0) die("Bad %s %s", msg, opt); @@ -169,12 +169,12 @@ int open_for_format(const char *dev, bool force) /* Global control device: */ int bcachectl_open(void) { - return xopen("/dev/bcache-ctl", O_RDWR); + return xopen("/dev/bcachefs-ctl", O_RDWR); } /* Filesystem handles (ioctl, sysfs dir): */ -#define SYSFS_BASE "/sys/fs/bcache/" +#define SYSFS_BASE "/sys/fs/bcachefs/" struct bcache_handle bcache_fs_open(const char *path) { @@ -187,7 +187,7 @@ struct bcache_handle bcache_fs_open(const char *path) ret.sysfs_fd = xopen(sysfs, O_RDONLY); char *minor = read_file_str(ret.sysfs_fd, "minor"); - char *ctl = mprintf("/dev/bcache%s-ctl", minor); + char *ctl = mprintf("/dev/bcachefs%s-ctl", minor); ret.ioctl_fd = xopen(ctl, O_RDWR); free(sysfs); @@ -319,7 +319,7 @@ unsigned hatoi_validate(const char *s, const char *msg) { u64 v; - if (bch_strtoull_h(s, &v)) + if (bch2_strtoull_h(s, &v)) die("bad %s %s", msg, s); if (v & (v - 1)) |