Diffstat (limited to 'libbcache')
-rw-r--r--  libbcache/alloc.c         |  20
-rw-r--r--  libbcache/alloc.h         |   8
-rw-r--r--  libbcache/bcache.h        |  48
-rw-r--r--  libbcache/bkey_methods.c  |   2
-rw-r--r--  libbcache/blockdev.c      |  24
-rw-r--r--  libbcache/blockdev.h      | 126
-rw-r--r--  libbcache/btree_gc.c      |  27
-rw-r--r--  libbcache/btree_io.c      |  10
-rw-r--r--  libbcache/btree_update.c  |  12
-rw-r--r--  libbcache/buckets.c       |  36
-rw-r--r--  libbcache/buckets.h       |  12
-rw-r--r--  libbcache/chardev.c       |  79
-rw-r--r--  libbcache/chardev.h       |  27
-rw-r--r--  libbcache/checksum.c      |   4
-rw-r--r--  libbcache/checksum.h      |   4
-rw-r--r--  libbcache/compress.c      |  11
-rw-r--r--  libbcache/error.c         |  20
-rw-r--r--  libbcache/error.h         |  48
-rw-r--r--  libbcache/extents.c       |  40
-rw-r--r--  libbcache/fs-gc.c         |   2
-rw-r--r--  libbcache/fs.c            |  54
-rw-r--r--  libbcache/fs.h            |   9
-rw-r--r--  libbcache/inode.h         |   2
-rw-r--r--  libbcache/io.c            |  19
-rw-r--r--  libbcache/journal.c       |  14
-rw-r--r--  libbcache/journal.h       |   2
-rw-r--r--  libbcache/movinggc.c      |   2
-rw-r--r--  libbcache/notify.c        |  20
-rw-r--r--  libbcache/notify.h        |  40
-rw-r--r--  libbcache/opts.c          | 252
-rw-r--r--  libbcache/opts.h          | 169
-rw-r--r--  libbcache/stats.h         |  16
-rw-r--r--  libbcache/super-io.c      |  19
-rw-r--r--  libbcache/super-io.h      |   4
-rw-r--r--  libbcache/super.c         | 513
-rw-r--r--  libbcache/super.h         |  48
-rw-r--r--  libbcache/sysfs.c         | 174
-rw-r--r--  libbcache/writeback.c     |   2
-rw-r--r--  libbcache/writeback.h     |  22
-rw-r--r--  libbcache/xattr.c         |  27
-rw-r--r--  libbcache/xattr.h         |   3
41 files changed, 1110 insertions, 861 deletions
diff --git a/libbcache/alloc.c b/libbcache/alloc.c
index cd22c38..8cb3194 100644
--- a/libbcache/alloc.c
+++ b/libbcache/alloc.c
@@ -78,7 +78,7 @@ static void __bch_bucket_free(struct cache *, struct bucket *);
/* Allocation groups: */
-void bch_cache_group_remove_cache(struct cache_group *grp, struct cache *ca)
+void bch_dev_group_remove(struct cache_group *grp, struct cache *ca)
{
unsigned i;
@@ -96,7 +96,7 @@ void bch_cache_group_remove_cache(struct cache_group *grp, struct cache *ca)
spin_unlock(&grp->lock);
}
-void bch_cache_group_add_cache(struct cache_group *grp, struct cache *ca)
+void bch_dev_group_add(struct cache_group *grp, struct cache *ca)
{
unsigned i;
@@ -318,7 +318,7 @@ static int bch_prio_write(struct cache *ca)
bucket_bytes(ca) - sizeof(p->csum));
ret = prio_io(ca, r, REQ_OP_WRITE);
- if (cache_fatal_io_err_on(ret, ca,
+ if (bch_dev_fatal_io_err_on(ret, ca,
"prio write to bucket %zu", r) ||
bch_meta_write_fault("prio"))
return ret;
@@ -400,7 +400,7 @@ int bch_prio_read(struct cache *ca)
bucket_nr++;
ret = prio_io(ca, bucket, REQ_OP_READ);
- if (cache_fatal_io_err_on(ret, ca,
+ if (bch_dev_fatal_io_err_on(ret, ca,
"prior read from bucket %llu",
bucket) ||
bch_meta_read_fault("prio"))
@@ -1724,7 +1724,7 @@ static bool bch_dev_has_open_write_point(struct cache *ca)
}
/* device goes ro: */
-void bch_cache_allocator_stop(struct cache *ca)
+void bch_dev_allocator_stop(struct cache *ca)
{
struct cache_set *c = ca->set;
struct cache_group *tier = &c->cache_tiers[ca->mi.tier];
@@ -1736,8 +1736,8 @@ void bch_cache_allocator_stop(struct cache *ca)
/* First, remove device from allocation groups: */
- bch_cache_group_remove_cache(tier, ca);
- bch_cache_group_remove_cache(&c->cache_all, ca);
+ bch_dev_group_remove(tier, ca);
+ bch_dev_group_remove(&c->cache_all, ca);
bch_recalc_capacity(c);
@@ -1805,7 +1805,7 @@ void bch_cache_allocator_stop(struct cache *ca)
/*
* Startup the allocator thread for transition to RW mode:
*/
-int bch_cache_allocator_start(struct cache *ca)
+int bch_dev_allocator_start(struct cache *ca)
{
struct cache_set *c = ca->set;
struct cache_group *tier = &c->cache_tiers[ca->mi.tier];
@@ -1824,8 +1824,8 @@ int bch_cache_allocator_start(struct cache *ca)
get_task_struct(k);
ca->alloc_thread = k;
- bch_cache_group_add_cache(tier, ca);
- bch_cache_group_add_cache(&c->cache_all, ca);
+ bch_dev_group_add(tier, ca);
+ bch_dev_group_add(&c->cache_all, ca);
bch_recalc_capacity(c);
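
The renamed group helpers above are what bch_dev_allocator_start()/stop() use to move a device in and out of the allocation groups before recalculating capacity. A minimal userspace sketch of that membership pattern, with illustrative names rather than the kernel implementation:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define MAX_DEVS 8

struct dev {
	size_t nbuckets;
};

struct dev_group {
	pthread_mutex_t lock;
	unsigned        nr;
	struct dev     *devs[MAX_DEVS];
};

/* device goes ro: drop it from the group, then recalculate capacity */
static void dev_group_remove(struct dev_group *grp, struct dev *d)
{
	pthread_mutex_lock(&grp->lock);
	for (unsigned i = 0; i < grp->nr; i++)
		if (grp->devs[i] == d) {
			/* swap with the last member, shrink the group */
			grp->devs[i] = grp->devs[--grp->nr];
			break;
		}
	pthread_mutex_unlock(&grp->lock);
}

/* device goes rw: add it (idempotently), then recalculate capacity */
static void dev_group_add(struct dev_group *grp, struct dev *d)
{
	pthread_mutex_lock(&grp->lock);
	for (unsigned i = 0; i < grp->nr; i++)
		if (grp->devs[i] == d)
			goto out;
	if (grp->nr < MAX_DEVS)
		grp->devs[grp->nr++] = d;
out:
	pthread_mutex_unlock(&grp->lock);
}
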
diff --git a/libbcache/alloc.h b/libbcache/alloc.h
index ac83e4f..09139a5 100644
--- a/libbcache/alloc.h
+++ b/libbcache/alloc.h
@@ -20,8 +20,8 @@ static inline size_t prio_buckets(const struct cache *ca)
return DIV_ROUND_UP((size_t) (ca)->mi.nbuckets, prios_per_bucket(ca));
}
-void bch_cache_group_remove_cache(struct cache_group *, struct cache *);
-void bch_cache_group_add_cache(struct cache_group *, struct cache *);
+void bch_dev_group_remove(struct cache_group *, struct cache *);
+void bch_dev_group_add(struct cache_group *, struct cache *);
int bch_prio_read(struct cache *);
@@ -103,8 +103,8 @@ static inline struct cache *cache_group_next(struct cache_group *devs,
((_ca) = __open_bucket_next_online_device(_c, _ob, _ptr, _ca));\
(_ptr)++)
-void bch_cache_allocator_stop(struct cache *);
-int bch_cache_allocator_start(struct cache *);
+void bch_dev_allocator_stop(struct cache *);
+int bch_dev_allocator_start(struct cache *);
void bch_open_buckets_init(struct cache_set *);
#endif /* _BCACHE_ALLOC_H */
diff --git a/libbcache/bcache.h b/libbcache/bcache.h
index 8a0262f..babc08d 100644
--- a/libbcache/bcache.h
+++ b/libbcache/bcache.h
@@ -203,8 +203,8 @@
#include <linux/dynamic_fault.h>
-#define cache_set_init_fault(name) \
- dynamic_fault("bcache:cache_set_init:" name)
+#define bch_fs_init_fault(name) \
+ dynamic_fault("bcache:bch_fs_init:" name)
#define bch_meta_read_fault(name) \
dynamic_fault("bcache:meta:read:" name)
#define bch_meta_write_fault(name) \
@@ -349,8 +349,8 @@ struct cache_member_rcu {
/* cache->flags: */
enum {
- CACHE_DEV_REMOVING,
- CACHE_DEV_FORCE_REMOVE,
+ BCH_DEV_REMOVING,
+ BCH_DEV_FORCE_REMOVE,
};
struct cache {
@@ -367,7 +367,7 @@ struct cache {
u8 dev_idx;
/*
* Cached version of this device's member info from superblock
- * Committed by bch_write_super() -> bch_cache_set_mi_update()
+ * Committed by bch_write_super() -> bch_fs_mi_update()
*/
struct cache_member_cpu mi;
uuid_le uuid;
@@ -461,34 +461,34 @@ struct cache {
* Flag bits for what phase of startup/shutdown the cache set is at, how we're
* shutting down, etc.:
*
- * CACHE_SET_UNREGISTERING means we're not just shutting down, we're detaching
+ * BCH_FS_UNREGISTERING means we're not just shutting down, we're detaching
* all the backing devices first (their cached data gets invalidated, and they
* won't automatically reattach).
*
- * CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
- * we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
+ * BCH_FS_STOPPING always gets set first when we're closing down a cache set;
+ * we'll continue to run normally for awhile with BCH_FS_STOPPING set (i.e.
* flushing dirty data).
*
- * CACHE_SET_RUNNING means all cache devices have been registered and journal
+ * BCH_FS_RUNNING means all cache devices have been registered and journal
* replay is complete.
*/
enum {
/* Startup: */
- CACHE_SET_INITIAL_GC_DONE,
- CACHE_SET_RUNNING,
+ BCH_FS_INITIAL_GC_DONE,
+ BCH_FS_RUNNING,
/* Shutdown: */
- CACHE_SET_UNREGISTERING,
- CACHE_SET_STOPPING,
- CACHE_SET_RO,
- CACHE_SET_RO_COMPLETE,
- CACHE_SET_EMERGENCY_RO,
- CACHE_SET_WRITE_DISABLE_COMPLETE,
- CACHE_SET_GC_STOPPING,
- CACHE_SET_GC_FAILURE,
- CACHE_SET_BDEV_MOUNTED,
- CACHE_SET_ERROR,
- CACHE_SET_FSCK_FIXED_ERRORS,
+ BCH_FS_DETACHING,
+ BCH_FS_STOPPING,
+ BCH_FS_RO,
+ BCH_FS_RO_COMPLETE,
+ BCH_FS_EMERGENCY_RO,
+ BCH_FS_WRITE_DISABLE_COMPLETE,
+ BCH_FS_GC_STOPPING,
+ BCH_FS_GC_FAILURE,
+ BCH_FS_BDEV_MOUNTED,
+ BCH_FS_ERROR,
+ BCH_FS_FSCK_FIXED_ERRORS,
};
struct btree_debug {
@@ -520,11 +520,11 @@ struct cache_set {
struct cache __rcu *cache[BCH_SB_MEMBERS_MAX];
- struct cache_set_opts opts;
+ struct bch_opts opts;
/*
* Cached copy in native endianness:
- * Set by bch_cache_set_mi_update():
+ * Set by bch_fs_mi_update():
*/
struct cache_member_rcu __rcu *members;
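
The renamed BCH_FS_* bits above are plain atomic flag bits in cache_set->flags, manipulated with set_bit()/test_bit() as the later hunks show. A rough userspace stand-in for that lifecycle, using C11 atomics instead of the kernel bitops, with the ordering taken from the comment block above:

#include <stdatomic.h>
#include <stdbool.h>

enum {
	FS_RUNNING,		/* all devices registered, journal replayed */
	FS_STOPPING,		/* set first on shutdown; keep flushing */
	FS_RO,			/* writes disabled */
};

struct fs {
	atomic_ulong flags;
};

static inline void fs_set_bit(struct fs *fs, unsigned bit)
{
	atomic_fetch_or(&fs->flags, 1UL << bit);
}

static inline bool fs_test_bit(struct fs *fs, unsigned bit)
{
	return atomic_load(&fs->flags) & (1UL << bit);
}

static void fs_begin_shutdown(struct fs *fs)
{
	fs_set_bit(fs, FS_STOPPING);	/* fs keeps running normally for a while */
	/* ... flush dirty data ... */
	fs_set_bit(fs, FS_RO);
}
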
diff --git a/libbcache/bkey_methods.c b/libbcache/bkey_methods.c
index 90f7e5f..5ae97e3 100644
--- a/libbcache/bkey_methods.c
+++ b/libbcache/bkey_methods.c
@@ -80,7 +80,7 @@ void bkey_debugcheck(struct cache_set *c, struct btree *b, struct bkey_s_c k)
char buf[160];
bch_bkey_val_to_text(c, type, buf, sizeof(buf), k);
- cache_set_bug(c, "invalid bkey %s: %s", buf, invalid);
+ bch_fs_bug(c, "invalid bkey %s: %s", buf, invalid);
return;
}
diff --git a/libbcache/blockdev.c b/libbcache/blockdev.c
index d3a373c..82b07f5 100644
--- a/libbcache/blockdev.c
+++ b/libbcache/blockdev.c
@@ -17,7 +17,7 @@
static int bch_blockdev_major;
static DEFINE_IDA(bch_blockdev_minor);
static LIST_HEAD(uncached_devices);
-struct kmem_cache *bch_search_cache;
+static struct kmem_cache *bch_search_cache;
static void write_bdev_super_endio(struct bio *bio)
{
@@ -67,7 +67,7 @@ bool bch_is_open_backing_dev(struct block_device *bdev)
struct cache_set *c, *tc;
struct cached_dev *dc, *t;
- list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+ list_for_each_entry_safe(c, tc, &bch_fs_list, list)
list_for_each_entry_safe(dc, t, &c->cached_devs, list)
if (dc->disk_sb.bdev == bdev)
return true;
@@ -387,10 +387,10 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
return -EINVAL;
}
- if (!test_bit(CACHE_SET_RUNNING, &c->flags))
+ if (!test_bit(BCH_FS_RUNNING, &c->flags))
return 0;
- if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
+ if (test_bit(BCH_FS_STOPPING, &c->flags)) {
pr_err("Can't attach %s: shutting down", buf);
return -EINVAL;
}
@@ -652,7 +652,7 @@ const char *bch_backing_dev_register(struct bcache_superblock *sb)
bdevname(dc->disk_sb.bdev, name));
list_add(&dc->list, &uncached_devices);
- list_for_each_entry(c, &bch_cache_sets, list)
+ list_for_each_entry(c, &bch_fs_list, list)
bch_cached_dev_attach(dc, c);
if (BDEV_STATE(dc->disk_sb.sb) == BDEV_STATE_NONE ||
@@ -742,7 +742,7 @@ int bch_blockdev_volumes_start(struct cache_set *c)
struct bkey_s_c_inode_blockdev inode;
int ret = 0;
- if (test_bit(CACHE_SET_STOPPING, &c->flags))
+ if (test_bit(BCH_FS_STOPPING, &c->flags))
return -EINVAL;
for_each_btree_key(&iter, c, BTREE_ID_INODES, POS_MIN, k) {
@@ -799,7 +799,7 @@ void bch_blockdevs_stop(struct cache_set *c)
d = radix_tree_deref_slot(slot);
if (CACHED_DEV(&d->inode.v) &&
- test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
+ test_bit(BCH_FS_DETACHING, &c->flags)) {
dc = container_of(d, struct cached_dev, disk);
bch_cached_dev_detach(dc);
} else {
@@ -811,6 +811,16 @@ void bch_blockdevs_stop(struct cache_set *c)
mutex_unlock(&bch_register_lock);
}
+void bch_fs_blockdev_exit(struct cache_set *c)
+{
+ mempool_exit(&c->search);
+}
+
+int bch_fs_blockdev_init(struct cache_set *c)
+{
+ return mempool_init_slab_pool(&c->search, 1, bch_search_cache);
+}
+
void bch_blockdev_exit(void)
{
kmem_cache_destroy(bch_search_cache);
diff --git a/libbcache/blockdev.h b/libbcache/blockdev.h
index 0fc0ed1..aa6c12b 100644
--- a/libbcache/blockdev.h
+++ b/libbcache/blockdev.h
@@ -4,6 +4,49 @@
#include "blockdev_types.h"
#include "io_types.h"
+struct search {
+ /* Stack frame for bio_complete */
+ struct closure cl;
+
+ union {
+ struct bch_read_bio rbio;
+ struct bch_write_bio wbio;
+ };
+ /* Not modified */
+ struct bio *orig_bio;
+ struct bcache_device *d;
+
+ unsigned inode;
+ unsigned write:1;
+
+ /* Flags only used for reads */
+ unsigned recoverable:1;
+ unsigned read_dirty_data:1;
+ unsigned cache_miss:1;
+
+ /*
+ * For reads: bypass read from cache and insertion into cache
+ * For writes: discard key range from cache, sending the write to
+ * the backing device (if there is a backing device)
+ */
+ unsigned bypass:1;
+
+ unsigned long start_time;
+
+ /*
+ * Mostly only used for writes. For reads, we still make use of
+ * some trivial fields:
+ * - c
+ * - error
+ */
+ struct bch_write_op iop;
+};
+
+#ifndef NO_BCACHE_BLOCKDEV
+
+extern struct kobj_type bch_cached_dev_ktype;
+extern struct kobj_type bch_blockdev_volume_ktype;
+
void bch_write_bdev_super(struct cached_dev *, struct closure *);
void bch_cached_dev_release(struct kobject *);
@@ -24,9 +67,49 @@ int bch_blockdev_volumes_start(struct cache_set *);
void bch_blockdevs_stop(struct cache_set *);
+void bch_fs_blockdev_exit(struct cache_set *);
+int bch_fs_blockdev_init(struct cache_set *);
void bch_blockdev_exit(void);
int bch_blockdev_init(void);
+#else
+
+static inline void bch_write_bdev_super(struct cached_dev *dc,
+ struct closure *cl) {}
+
+static inline void bch_cached_dev_release(struct kobject *kobj) {}
+static inline void bch_blockdev_volume_release(struct kobject *kobj) {}
+
+static inline int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+{
+ return 0;
+}
+static inline void bch_attach_backing_devs(struct cache_set *c) {}
+
+static inline void bch_cached_dev_detach(struct cached_dev *dc) {}
+static inline void bch_cached_dev_run(struct cached_dev *dc) {}
+static inline void bch_blockdev_stop(struct bcache_device *d) {}
+
+static inline bool bch_is_open_backing_dev(struct block_device *bdev)
+{
+ return false;
+}
+static inline const char *bch_backing_dev_register(struct bcache_superblock *sb)
+{
+ return "not implemented";
+}
+
+static inline int bch_blockdev_volume_create(struct cache_set *c, u64 s) { return 0; }
+static inline int bch_blockdev_volumes_start(struct cache_set *c) { return 0; }
+
+static inline void bch_blockdevs_stop(struct cache_set *c) {}
+static inline void bch_fs_blockdev_exit(struct cache_set *c) {}
+static inline int bch_fs_blockdev_init(struct cache_set *c) { return 0; }
+static inline void bch_blockdev_exit(void) {}
+static inline int bch_blockdev_init(void) { return 0; }
+
+#endif
+
static inline void cached_dev_put(struct cached_dev *dc)
{
if (atomic_dec_and_test(&dc->count))
@@ -53,47 +136,4 @@ static inline struct bcache_device *bch_dev_find(struct cache_set *c, u64 inode)
return radix_tree_lookup(&c->devices, inode);
}
-struct search {
- /* Stack frame for bio_complete */
- struct closure cl;
-
- union {
- struct bch_read_bio rbio;
- struct bch_write_bio wbio;
- };
- /* Not modified */
- struct bio *orig_bio;
- struct bcache_device *d;
-
- unsigned inode;
- unsigned write:1;
-
- /* Flags only used for reads */
- unsigned recoverable:1;
- unsigned read_dirty_data:1;
- unsigned cache_miss:1;
-
- /*
- * For reads: bypass read from cache and insertion into cache
- * For writes: discard key range from cache, sending the write to
- * the backing device (if there is a backing device)
- */
- unsigned bypass:1;
-
- unsigned long start_time;
-
- /*
- * Mostly only used for writes. For reads, we still make use of
- * some trivial fields:
- * - c
- * - error
- */
- struct bch_write_op iop;
-};
-
-extern struct kmem_cache *bch_search_cache;
-
-extern struct kobj_type bch_cached_dev_ktype;
-extern struct kobj_type bch_blockdev_volume_ktype;
-
#endif /* _BCACHE_BLOCKDEV_H */
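
The NO_BCACHE_BLOCKDEV block added above, and the matching ones in chardev.h, notify.h and fs.h below, follow the usual compile-out pattern: the header always exposes the same interface, but when the feature is configured out the declarations become empty static inlines, so callers never need their own #ifdefs. A generic sketch of the pattern with made-up names:

/* feature_foo.h -- illustrative only */
struct fs;				/* opaque to callers */

#ifndef NO_FEATURE_FOO

int  foo_init(struct fs *);
void foo_exit(struct fs *);

#else

static inline int  foo_init(struct fs *fs) { return 0; }
static inline void foo_exit(struct fs *fs) {}

#endif
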
diff --git a/libbcache/btree_gc.c b/libbcache/btree_gc.c
index 5c77b26..0eb7290 100644
--- a/libbcache/btree_gc.c
+++ b/libbcache/btree_gc.c
@@ -54,8 +54,7 @@ static void btree_node_range_checks(struct cache_set *c, struct btree *b,
? btree_type_successor(b->btree_id, l->max)
: l->max;
- cache_set_inconsistent_on(bkey_cmp(b->data->min_key,
- expected_min), c,
+ bch_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c,
"btree node has incorrect min key: %llu:%llu != %llu:%llu",
b->data->min_key.inode,
b->data->min_key.offset,
@@ -67,16 +66,14 @@ static void btree_node_range_checks(struct cache_set *c, struct btree *b,
if (b->level > r->depth) {
l = &r->l[b->level - 1];
- cache_set_inconsistent_on(bkey_cmp(b->data->min_key,
- l->min), c,
+ bch_fs_inconsistent_on(bkey_cmp(b->data->min_key, l->min), c,
"btree node min doesn't match min of child nodes: %llu:%llu != %llu:%llu",
b->data->min_key.inode,
b->data->min_key.offset,
l->min.inode,
l->min.offset);
- cache_set_inconsistent_on(bkey_cmp(b->data->max_key,
- l->max), c,
+ bch_fs_inconsistent_on(bkey_cmp(b->data->max_key, l->max), c,
"btree node max doesn't match max of child nodes: %llu:%llu != %llu:%llu",
b->data->max_key.inode,
b->data->max_key.offset,
@@ -308,7 +305,7 @@ static void bch_mark_pending_btree_node_frees(struct cache_set *c)
&stats);
/*
* Don't apply stats - pending deletes aren't tracked in
- * cache_set_stats:
+ * bch_alloc_stats:
*/
mutex_unlock(&c->btree_interior_update_lock);
@@ -345,7 +342,7 @@ void bch_gc(struct cache_set *c)
* uses, GC could skip past them
*/
- if (test_bit(CACHE_SET_GC_FAILURE, &c->flags))
+ if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
return;
trace_bcache_gc_start(c);
@@ -410,7 +407,7 @@ void bch_gc(struct cache_set *c)
if (ret) {
bch_err(c, "btree gc failed: %d", ret);
- set_bit(CACHE_SET_GC_FAILURE, &c->flags);
+ set_bit(BCH_FS_GC_FAILURE, &c->flags);
up_write(&c->gc_lock);
return;
}
@@ -725,7 +722,7 @@ static int bch_coalesce_btree(struct cache_set *c, enum btree_id btree_id)
lock_seq[0] = merge[0]->lock.state.seq;
- if (test_bit(CACHE_SET_GC_STOPPING, &c->flags)) {
+ if (test_bit(BCH_FS_GC_STOPPING, &c->flags)) {
bch_btree_iter_unlock(&iter);
return -ESHUTDOWN;
}
@@ -756,7 +753,7 @@ void bch_coalesce(struct cache_set *c)
if (btree_gc_coalesce_disabled(c))
return;
- if (test_bit(CACHE_SET_GC_FAILURE, &c->flags))
+ if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
return;
down_read(&c->gc_lock);
@@ -771,7 +768,7 @@ void bch_coalesce(struct cache_set *c)
if (ret) {
if (ret != -ESHUTDOWN)
bch_err(c, "btree coalescing failed: %d", ret);
- set_bit(CACHE_SET_GC_FAILURE, &c->flags);
+ set_bit(BCH_FS_GC_FAILURE, &c->flags);
return;
}
}
@@ -824,7 +821,7 @@ static int bch_gc_thread(void *arg)
void bch_gc_thread_stop(struct cache_set *c)
{
- set_bit(CACHE_SET_GC_STOPPING, &c->flags);
+ set_bit(BCH_FS_GC_STOPPING, &c->flags);
if (!IS_ERR_OR_NULL(c->gc_thread))
kthread_stop(c->gc_thread);
@@ -832,7 +829,7 @@ void bch_gc_thread_stop(struct cache_set *c)
int bch_gc_thread_start(struct cache_set *c)
{
- clear_bit(CACHE_SET_GC_STOPPING, &c->flags);
+ clear_bit(BCH_FS_GC_STOPPING, &c->flags);
c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
if (IS_ERR(c->gc_thread))
@@ -903,7 +900,7 @@ int bch_initial_gc(struct cache_set *c, struct list_head *journal)
bch_mark_metadata(c);
gc_pos_set(c, gc_phase(GC_PHASE_DONE));
- set_bit(CACHE_SET_INITIAL_GC_DONE, &c->flags);
+ set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
return 0;
}
diff --git a/libbcache/btree_io.c b/libbcache/btree_io.c
index e772c6a..ab67591 100644
--- a/libbcache/btree_io.c
+++ b/libbcache/btree_io.c
@@ -873,7 +873,7 @@ static void bset_encrypt(struct cache_set *c, struct bset *i, struct nonce nonce
}
#define btree_node_error(b, c, ptr, fmt, ...) \
- cache_set_inconsistent(c, \
+ bch_fs_inconsistent(c, \
"btree node error at btree %u level %u/%u bucket %zu block %u u64s %u: " fmt,\
(b)->btree_id, (b)->level, btree_node_root(c, b) \
? btree_node_root(c, b)->level : -1, \
@@ -1194,8 +1194,8 @@ void bch_btree_node_read(struct cache_set *c, struct btree *b)
closure_init_stack(&cl);
pick = bch_btree_pick_ptr(c, b);
- if (cache_set_fatal_err_on(!pick.ca, c,
- "no cache device for btree node")) {
+ if (bch_fs_fatal_err_on(!pick.ca, c,
+ "no cache device for btree node")) {
set_btree_node_read_error(b);
return;
}
@@ -1214,7 +1214,7 @@ void bch_btree_node_read(struct cache_set *c, struct btree *b)
bch_generic_make_request(bio, c);
closure_sync(&cl);
- if (cache_fatal_io_err_on(bio->bi_error,
+ if (bch_dev_fatal_io_err_on(bio->bi_error,
pick.ca, "IO error reading bucket %zu",
PTR_BUCKET_NR(pick.ca, &pick.ptr)) ||
bch_meta_read_fault("btree")) {
@@ -1297,7 +1297,7 @@ static void btree_node_write_endio(struct bio *bio)
struct closure *cl = !wbio->split ? wbio->cl : NULL;
struct cache *ca = wbio->ca;
- if (cache_fatal_io_err_on(bio->bi_error, ca, "btree write") ||
+ if (bch_dev_fatal_io_err_on(bio->bi_error, ca, "btree write") ||
bch_meta_write_fault("btree"))
set_btree_node_write_error(b);
diff --git a/libbcache/btree_update.c b/libbcache/btree_update.c
index c3bb209..95d127f 100644
--- a/libbcache/btree_update.c
+++ b/libbcache/btree_update.c
@@ -112,7 +112,7 @@ found:
d->index_update_done = true;
/*
- * Btree nodes are accounted as freed in cache_set_stats when they're
+ * Btree nodes are accounted as freed in bch_alloc_stats when they're
* freed from the index:
*/
stats->s[S_COMPRESSED][S_META] -= c->sb.btree_node_size;
@@ -149,7 +149,7 @@ found:
&tmp, 0);
/*
* Don't apply tmp - pending deletes aren't tracked in
- * cache_set_stats:
+ * bch_alloc_stats:
*/
}
@@ -218,7 +218,7 @@ static void bch_btree_node_free_ondisk(struct cache_set *c,
&stats, 0);
/*
* Don't apply stats - pending deletes aren't tracked in
- * cache_set_stats:
+ * bch_alloc_stats:
*/
}
@@ -384,8 +384,8 @@ static void bch_btree_set_root_inmem(struct cache_set *c, struct btree *b,
bch_btree_node_free_index(c, NULL, old->btree_id,
bkey_i_to_s_c(&old->key),
&stats);
- bch_cache_set_stats_apply(c, &stats, &btree_reserve->disk_res,
- gc_pos_btree_root(b->btree_id));
+ bch_fs_stats_apply(c, &stats, &btree_reserve->disk_res,
+ gc_pos_btree_root(b->btree_id));
}
bch_recalc_btree_reserve(c);
@@ -654,7 +654,7 @@ static void bch_insert_fixup_btree_ptr(struct btree_iter *iter,
bkey_disassemble(b, k, &tmp),
&stats);
- bch_cache_set_stats_apply(c, &stats, disk_res, gc_pos_btree_node(b));
+ bch_fs_stats_apply(c, &stats, disk_res, gc_pos_btree_node(b));
bch_btree_bset_insert_key(iter, b, node_iter, insert);
set_btree_node_dirty(b);
diff --git a/libbcache/buckets.c b/libbcache/buckets.c
index 757bc03..315cfbe 100644
--- a/libbcache/buckets.c
+++ b/libbcache/buckets.c
@@ -75,7 +75,7 @@
#define lg_local_lock lg_global_lock
#define lg_local_unlock lg_global_unlock
-static void bch_cache_set_stats_verify(struct cache_set *c)
+static void bch_fs_stats_verify(struct cache_set *c)
{
struct bucket_stats_cache_set stats =
__bch_bucket_stats_read_cache_set(c);
@@ -98,7 +98,7 @@ static void bch_cache_set_stats_verify(struct cache_set *c)
#else
-static void bch_cache_set_stats_verify(struct cache_set *c) {}
+static void bch_fs_stats_verify(struct cache_set *c) {}
#endif
@@ -199,10 +199,10 @@ static inline int is_cached_bucket(struct bucket_mark m)
return !m.owned_by_allocator && !m.dirty_sectors && !!m.cached_sectors;
}
-void bch_cache_set_stats_apply(struct cache_set *c,
- struct bucket_stats_cache_set *stats,
- struct disk_reservation *disk_res,
- struct gc_pos gc_pos)
+void bch_fs_stats_apply(struct cache_set *c,
+ struct bucket_stats_cache_set *stats,
+ struct disk_reservation *disk_res,
+ struct gc_pos gc_pos)
{
s64 added =
stats->s[S_COMPRESSED][S_META] +
@@ -230,7 +230,7 @@ void bch_cache_set_stats_apply(struct cache_set *c,
if (!gc_will_visit(c, gc_pos))
bucket_stats_add(this_cpu_ptr(c->bucket_stats_percpu), stats);
- bch_cache_set_stats_verify(c);
+ bch_fs_stats_verify(c);
lg_local_unlock(&c->bucket_stats_lock);
memset(stats, 0, sizeof(*stats));
@@ -239,7 +239,7 @@ void bch_cache_set_stats_apply(struct cache_set *c,
static void bucket_stats_update(struct cache *ca,
struct bucket_mark old, struct bucket_mark new,
bool may_make_unavailable,
- struct bucket_stats_cache_set *cache_set_stats)
+ struct bucket_stats_cache_set *bch_alloc_stats)
{
struct cache_set *c = ca->set;
struct bucket_stats_cache *cache_stats;
@@ -249,15 +249,15 @@ static void bucket_stats_update(struct cache *ca,
!is_available_bucket(new) &&
c->gc_pos.phase == GC_PHASE_DONE);
- if (cache_set_stats) {
- cache_set_stats->s[S_COMPRESSED][S_CACHED] +=
+ if (bch_alloc_stats) {
+ bch_alloc_stats->s[S_COMPRESSED][S_CACHED] +=
(int) new.cached_sectors - (int) old.cached_sectors;
- cache_set_stats->s[S_COMPRESSED]
+ bch_alloc_stats->s[S_COMPRESSED]
[old.is_metadata ? S_META : S_DIRTY] -=
old.dirty_sectors;
- cache_set_stats->s[S_COMPRESSED]
+ bch_alloc_stats->s[S_COMPRESSED]
[new.is_metadata ? S_META : S_DIRTY] +=
new.dirty_sectors;
}
@@ -312,7 +312,7 @@ void bch_invalidate_bucket(struct cache *ca, struct bucket *g)
* Ick:
*
* Only stats.sectors_cached should be nonzero: this is important
- * because in this path we modify cache_set_stats based on how the
+ * because in this path we modify bch_alloc_stats based on how the
* bucket_mark was modified, and the sector counts in bucket_mark are
* subject to (saturating) overflow - and if they did overflow, the
* cache set stats will now be off. We can tolerate this for
@@ -620,13 +620,13 @@ void bch_mark_key(struct cache_set *c, struct bkey_s_c k,
__bch_mark_key(c, k, sectors, metadata, false, stats,
gc_will_visit(c, gc_pos), journal_seq);
- bch_cache_set_stats_verify(c);
+ bch_fs_stats_verify(c);
lg_local_unlock(&c->bucket_stats_lock);
}
static u64 __recalc_sectors_available(struct cache_set *c)
{
- return c->capacity - cache_set_sectors_used(c);
+ return c->capacity - bch_fs_sectors_used(c);
}
/* Used by gc when it's starting: */
@@ -653,7 +653,7 @@ void bch_disk_reservation_put(struct cache_set *c,
this_cpu_sub(c->bucket_stats_percpu->online_reserved,
res->sectors);
- bch_cache_set_stats_verify(c);
+ bch_fs_stats_verify(c);
lg_local_unlock(&c->bucket_stats_lock);
res->sectors = 0;
@@ -697,7 +697,7 @@ out:
stats->online_reserved += sectors;
res->sectors += sectors;
- bch_cache_set_stats_verify(c);
+ bch_fs_stats_verify(c);
lg_local_unlock(&c->bucket_stats_lock);
return 0;
@@ -734,7 +734,7 @@ recalculate:
ret = -ENOSPC;
}
- bch_cache_set_stats_verify(c);
+ bch_fs_stats_verify(c);
lg_global_unlock(&c->bucket_stats_lock);
if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
up_read(&c->gc_lock);
diff --git a/libbcache/buckets.h b/libbcache/buckets.h
index 8194dd9..9c6e438 100644
--- a/libbcache/buckets.h
+++ b/libbcache/buckets.h
@@ -195,12 +195,12 @@ static inline u64 buckets_free_cache(struct cache *ca)
struct bucket_stats_cache_set __bch_bucket_stats_read_cache_set(struct cache_set *);
struct bucket_stats_cache_set bch_bucket_stats_read_cache_set(struct cache_set *);
-void bch_cache_set_stats_apply(struct cache_set *,
- struct bucket_stats_cache_set *,
- struct disk_reservation *,
+void bch_fs_stats_apply(struct cache_set *,
+ struct bucket_stats_cache_set *,
+ struct disk_reservation *,
struct gc_pos);
-static inline u64 __cache_set_sectors_used(struct cache_set *c)
+static inline u64 __bch_fs_sectors_used(struct cache_set *c)
{
struct bucket_stats_cache_set stats = __bch_bucket_stats_read_cache_set(c);
u64 reserved = stats.persistent_reserved +
@@ -212,9 +212,9 @@ static inline u64 __cache_set_sectors_used(struct cache_set *c)
(reserved >> 7);
}
-static inline u64 cache_set_sectors_used(struct cache_set *c)
+static inline u64 bch_fs_sectors_used(struct cache_set *c)
{
- return min(c->capacity, __cache_set_sectors_used(c));
+ return min(c->capacity, __bch_fs_sectors_used(c));
}
/* XXX: kill? */
diff --git a/libbcache/chardev.c b/libbcache/chardev.c
index b361b09..b142d7b 100644
--- a/libbcache/chardev.c
+++ b/libbcache/chardev.c
@@ -53,9 +53,7 @@ static long bch_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
}
}
- err = bch_register_cache_set(devs, arg.nr_devs,
- cache_set_opts_empty(),
- NULL);
+ err = bch_fs_open(devs, arg.nr_devs, bch_opts_empty(), NULL);
if (err) {
pr_err("Could not register cache set: %s", err);
ret = -EINVAL;
@@ -84,7 +82,7 @@ static long bch_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg)
if (!path)
return -ENOMEM;
- err = bch_register_one(path);
+ err = bch_fs_open_incremental(path);
kfree(path);
if (err) {
@@ -109,7 +107,7 @@ static long bch_global_ioctl(unsigned cmd, void __user *arg)
static long bch_ioctl_stop(struct cache_set *c)
{
- bch_cache_set_stop(c);
+ bch_fs_stop(c);
return 0;
}
@@ -127,7 +125,7 @@ static long bch_ioctl_disk_add(struct cache_set *c,
if (!path)
return -ENOMEM;
- ret = bch_cache_set_add_cache(c, path);
+ ret = bch_dev_add(c, path);
kfree(path);
return ret;
@@ -175,7 +173,7 @@ static long bch_ioctl_disk_remove(struct cache_set *c,
if (IS_ERR(ca))
return PTR_ERR(ca);
- ret = bch_cache_remove(ca, arg.flags & BCH_FORCE_IF_DATA_MISSING)
+ ret = bch_dev_remove(ca, arg.flags & BCH_FORCE_IF_DATA_MISSING)
? 0 : -EBUSY;
percpu_ref_put(&ca->ref);
@@ -197,7 +195,7 @@ static long bch_ioctl_disk_fail(struct cache_set *c,
return PTR_ERR(ca);
/* XXX: failed not actually implemented yet */
- ret = bch_cache_remove(ca, true);
+ ret = bch_dev_remove(ca, true);
percpu_ref_put(&ca->ref);
return ret;
@@ -268,7 +266,7 @@ static long bch_ioctl_query_uuid(struct cache_set *c,
sizeof(c->sb.user_uuid));
}
-long bch_cache_set_ioctl(struct cache_set *c, unsigned cmd, void __user *arg)
+long bch_fs_ioctl(struct cache_set *c, unsigned cmd, void __user *arg)
{
/* ioctls that don't require admin cap: */
switch (cmd) {
@@ -309,12 +307,71 @@ static long bch_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v)
void __user *arg = (void __user *) v;
return c
- ? bch_cache_set_ioctl(c, cmd, arg)
+ ? bch_fs_ioctl(c, cmd, arg)
: bch_global_ioctl(cmd, arg);
}
-const struct file_operations bch_chardev_fops = {
+static const struct file_operations bch_chardev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = bch_chardev_ioctl,
.open = nonseekable_open,
};
+
+static int bch_chardev_major;
+static struct class *bch_chardev_class;
+static struct device *bch_chardev;
+static DEFINE_IDR(bch_chardev_minor);
+
+void bch_fs_chardev_exit(struct cache_set *c)
+{
+ if (!IS_ERR_OR_NULL(c->chardev))
+ device_unregister(c->chardev);
+ if (c->minor >= 0)
+ idr_remove(&bch_chardev_minor, c->minor);
+}
+
+int bch_fs_chardev_init(struct cache_set *c)
+{
+ c->minor = idr_alloc(&bch_chardev_minor, c, 0, 0, GFP_KERNEL);
+ if (c->minor < 0)
+ return c->minor;
+
+ c->chardev = device_create(bch_chardev_class, NULL,
+ MKDEV(bch_chardev_major, c->minor), NULL,
+ "bcache%u-ctl", c->minor);
+ if (IS_ERR(c->chardev))
+ return PTR_ERR(c->chardev);
+
+ return 0;
+}
+
+void bch_chardev_exit(void)
+{
+ if (!IS_ERR_OR_NULL(bch_chardev_class))
+ device_destroy(bch_chardev_class,
+ MKDEV(bch_chardev_major, 0));
+ if (!IS_ERR_OR_NULL(bch_chardev_class))
+ class_destroy(bch_chardev_class);
+ if (bch_chardev_major > 0)
+ unregister_chrdev(bch_chardev_major, "bcache");
+
+}
+
+int __init bch_chardev_init(void)
+{
+ bch_chardev_major = register_chrdev(0, "bcache-ctl", &bch_chardev_fops);
+ if (bch_chardev_major < 0)
+ return bch_chardev_major;
+
+ bch_chardev_class = class_create(THIS_MODULE, "bcache");
+ if (IS_ERR(bch_chardev_class))
+ return PTR_ERR(bch_chardev_class);
+
+ bch_chardev = device_create(bch_chardev_class, NULL,
+ MKDEV(bch_chardev_major, 255),
+ NULL, "bcache-ctl");
+ if (IS_ERR(bch_chardev))
+ return PTR_ERR(bch_chardev);
+
+ return 0;
+}
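
With bch_fs_chardev_init() each cache set now gets its own control node ("bcache%u-ctl", minor allocated from the IDR), while minor 255 stays the global bcache-ctl device; bch_chardev_ioctl() dispatches to bch_fs_ioctl() or bch_global_ioctl() accordingly. A hypothetical userspace sketch of reaching the per-fs node (the device path and ioctl request are assumptions, taken to match the names created above):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* per-filesystem control node created by bch_fs_chardev_init() */
	int fd = open("/dev/bcache0-ctl", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/bcache0-ctl");
		return 1;
	}

	/* e.g. ioctl(fd, BCH_IOCTL_QUERY_UUID, &arg); -- the request
	 * numbers come from the bcache ioctl headers, not shown here */

	close(fd);
	return 0;
}
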
diff --git a/libbcache/chardev.h b/libbcache/chardev.h
index 657bf2b..15310c1 100644
--- a/libbcache/chardev.h
+++ b/libbcache/chardev.h
@@ -1,7 +1,30 @@
#ifndef _BCACHE_CHARDEV_H
#define _BCACHE_CHARDEV_H
-long bch_cache_set_ioctl(struct cache_set *, unsigned, void __user *);
-extern const struct file_operations bch_chardev_fops;
+#ifndef NO_BCACHE_CHARDEV
+
+long bch_fs_ioctl(struct cache_set *, unsigned, void __user *);
+
+void bch_fs_chardev_exit(struct cache_set *);
+int bch_fs_chardev_init(struct cache_set *);
+
+void bch_chardev_exit(void);
+int __init bch_chardev_init(void);
+
+#else
+
+static inline long bch_fs_ioctl(struct cache_set *c,
+ unsigned cmd, void __user * arg)
+{
+ return -ENOSYS;
+}
+
+static inline void bch_fs_chardev_exit(struct cache_set *c) {}
+static inline int bch_fs_chardev_init(struct cache_set *c) { return 0; }
+
+static inline void bch_chardev_exit(void) {}
+static inline int __init bch_chardev_init(void) { return 0; }
+
+#endif
#endif /* _BCACHE_CHARDEV_H */
diff --git a/libbcache/checksum.c b/libbcache/checksum.c
index eb41f2e..dae52d4 100644
--- a/libbcache/checksum.c
+++ b/libbcache/checksum.c
@@ -560,7 +560,7 @@ err:
return ret;
}
-void bch_cache_set_encryption_free(struct cache_set *c)
+void bch_fs_encryption_free(struct cache_set *c)
{
if (!IS_ERR_OR_NULL(c->poly1305))
crypto_free_shash(c->poly1305);
@@ -568,7 +568,7 @@ void bch_cache_set_encryption_free(struct cache_set *c)
crypto_free_blkcipher(c->chacha20);
}
-int bch_cache_set_encryption_init(struct cache_set *c)
+int bch_fs_encryption_init(struct cache_set *c)
{
struct bch_sb_field_crypt *crypt;
struct bch_key key;
diff --git a/libbcache/checksum.h b/libbcache/checksum.h
index a9a1758..137c915 100644
--- a/libbcache/checksum.h
+++ b/libbcache/checksum.h
@@ -43,8 +43,8 @@ void bch_encrypt_bio(struct cache_set *, unsigned,
int bch_disable_encryption(struct cache_set *);
int bch_enable_encryption(struct cache_set *, bool);
-void bch_cache_set_encryption_free(struct cache_set *);
-int bch_cache_set_encryption_init(struct cache_set *);
+void bch_fs_encryption_free(struct cache_set *);
+int bch_fs_encryption_init(struct cache_set *);
static inline unsigned bch_data_checksum_type(struct cache_set *c)
{
diff --git a/libbcache/compress.c b/libbcache/compress.c
index e76850b..f81a814 100644
--- a/libbcache/compress.c
+++ b/libbcache/compress.c
@@ -119,6 +119,13 @@ static void bio_unmap_or_unbounce(struct cache_set *c, void *data,
}
}
+static inline void zlib_set_workspace(z_stream *strm, void *workspace)
+{
+#ifdef __KERNEL__
+ strm->workspace = workspace;
+#endif
+}
+
static int __bio_uncompress(struct cache_set *c, struct bio *src,
void *dst_data, struct bch_extent_crc128 crc)
{
@@ -150,11 +157,11 @@ static int __bio_uncompress(struct cache_set *c, struct bio *src,
workspace = c->zlib_workspace;
}
- strm.workspace = workspace;
strm.next_in = src_data;
strm.avail_in = src_len;
strm.next_out = dst_data;
strm.avail_out = dst_len;
+ zlib_set_workspace(&strm, workspace);
zlib_inflateInit2(&strm, -MAX_WBITS);
ret = zlib_inflate(&strm, Z_FINISH);
@@ -310,12 +317,12 @@ static int __bio_compress(struct cache_set *c,
workspace = c->zlib_workspace;
}
- strm.workspace = workspace;
strm.next_in = src_data;
strm.avail_in = min(src->bi_iter.bi_size,
dst->bi_iter.bi_size);
strm.next_out = dst_data;
strm.avail_out = dst->bi_iter.bi_size;
+ zlib_set_workspace(&strm, workspace);
zlib_deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL,
Z_DEFAULT_STRATEGY);
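
The new zlib_set_workspace() helper exists because the kernel's z_stream carries a caller-supplied workspace member (the `strm.workspace = workspace` assignments removed above), which userspace zlib's z_stream presumably lacks; guarding the assignment with __KERNEL__ lets the same file build in both environments. For comparison, a minimal userspace sketch of the same raw-deflate inflate sequence, using plain zlib and assumed buffers:

#include <string.h>
#include <zlib.h>

static int uncompress_raw(const void *src, unsigned src_len,
			  void *dst, unsigned dst_len)
{
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	strm.next_in   = (Bytef *) src;
	strm.avail_in  = src_len;
	strm.next_out  = dst;
	strm.avail_out = dst_len;

	/* -MAX_WBITS selects a raw deflate stream, matching the kernel-side call */
	if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)
		return -1;

	ret = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);

	return ret == Z_STREAM_END ? 0 : -1;
}
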
diff --git a/libbcache/error.c b/libbcache/error.c
index 9ba33ef..9f39be1 100644
--- a/libbcache/error.c
+++ b/libbcache/error.c
@@ -6,19 +6,19 @@
void bch_inconsistent_error(struct cache_set *c)
{
- set_bit(CACHE_SET_ERROR, &c->flags);
+ set_bit(BCH_FS_ERROR, &c->flags);
switch (c->opts.errors) {
case BCH_ON_ERROR_CONTINUE:
break;
case BCH_ON_ERROR_RO:
- if (!test_bit(CACHE_SET_INITIAL_GC_DONE, &c->flags)) {
+ if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {
/* XXX do something better here? */
- bch_cache_set_stop(c);
+ bch_fs_stop(c);
return;
}
- if (bch_cache_set_emergency_read_only(c))
+ if (bch_fs_emergency_read_only(c))
bch_err(c, "emergency read only");
break;
case BCH_ON_ERROR_PANIC:
@@ -29,7 +29,7 @@ void bch_inconsistent_error(struct cache_set *c)
void bch_fatal_error(struct cache_set *c)
{
- if (bch_cache_set_emergency_read_only(c))
+ if (bch_fs_emergency_read_only(c))
bch_err(c, "emergency read only");
}
@@ -116,15 +116,15 @@ void bch_nonfatal_io_error_work(struct work_struct *work)
bool dev;
if (errors < c->error_limit) {
- bch_notify_cache_error(ca, false);
+ bch_notify_dev_error(ca, false);
} else {
- bch_notify_cache_error(ca, true);
+ bch_notify_dev_error(ca, true);
mutex_lock(&bch_register_lock);
- dev = bch_cache_may_remove(ca);
+ dev = bch_dev_may_remove(ca);
if (dev
- ? bch_cache_read_only(ca)
- : bch_cache_set_emergency_read_only(c))
+ ? bch_dev_read_only(ca)
+ : bch_fs_emergency_read_only(c))
bch_err(c,
"too many IO errors on %s, setting %s RO",
bdevname(ca->disk_sb.bdev, buf),
diff --git a/libbcache/error.h b/libbcache/error.h
index 33a28c4..3f12bbe 100644
--- a/libbcache/error.h
+++ b/libbcache/error.h
@@ -13,7 +13,7 @@ struct cache_set;
/* Error messages: */
-#define __bch_cache_error(ca, fmt, ...) \
+#define __bch_dev_error(ca, fmt, ...) \
do { \
char _buf[BDEVNAME_SIZE]; \
bch_err((ca)->set, "%s: " fmt, \
@@ -28,16 +28,16 @@ do { \
* XXX: audit and convert to inconsistent() checks
*/
-#define cache_set_bug(c, ...) \
+#define bch_fs_bug(c, ...) \
do { \
bch_err(c, __VA_ARGS__); \
BUG(); \
} while (0)
-#define cache_set_bug_on(cond, c, ...) \
+#define bch_fs_bug_on(cond, c, ...) \
do { \
if (cond) \
- cache_set_bug(c, __VA_ARGS__); \
+ bch_fs_bug(c, __VA_ARGS__); \
} while (0)
/*
@@ -53,18 +53,18 @@ do { \
void bch_inconsistent_error(struct cache_set *);
-#define cache_set_inconsistent(c, ...) \
+#define bch_fs_inconsistent(c, ...) \
do { \
bch_err(c, __VA_ARGS__); \
bch_inconsistent_error(c); \
} while (0)
-#define cache_set_inconsistent_on(cond, c, ...) \
+#define bch_fs_inconsistent_on(cond, c, ...) \
({ \
int _ret = !!(cond); \
\
if (_ret) \
- cache_set_inconsistent(c, __VA_ARGS__); \
+ bch_fs_inconsistent(c, __VA_ARGS__); \
_ret; \
})
@@ -73,18 +73,18 @@ do { \
* entire cache set:
*/
-#define cache_inconsistent(ca, ...) \
+#define bch_dev_inconsistent(ca, ...) \
do { \
- __bch_cache_error(ca, __VA_ARGS__); \
+ __bch_dev_error(ca, __VA_ARGS__); \
bch_inconsistent_error((ca)->set); \
} while (0)
-#define cache_inconsistent_on(cond, ca, ...) \
+#define bch_dev_inconsistent_on(cond, ca, ...) \
({ \
int _ret = !!(cond); \
\
if (_ret) \
- cache_inconsistent(ca, __VA_ARGS__); \
+ bch_dev_inconsistent(ca, __VA_ARGS__); \
_ret; \
})
@@ -112,7 +112,7 @@ enum {
\
if (_can_fix && (c)->opts.fix_errors) { \
bch_err(c, msg ", fixing", ##__VA_ARGS__); \
- set_bit(CACHE_SET_FSCK_FIXED_ERRORS, &(c)->flags); \
+ set_bit(BCH_FS_FSCK_FIXED_ERRORS, &(c)->flags); \
_fix = true; \
} else if (_can_ignore && \
(c)->opts.errors == BCH_ON_ERROR_CONTINUE) { \
@@ -154,28 +154,28 @@ enum {
void bch_fatal_error(struct cache_set *);
-#define cache_set_fatal_error(c, ...) \
+#define bch_fs_fatal_error(c, ...) \
do { \
bch_err(c, __VA_ARGS__); \
bch_fatal_error(c); \
} while (0)
-#define cache_set_fatal_err_on(cond, c, ...) \
+#define bch_fs_fatal_err_on(cond, c, ...) \
({ \
int _ret = !!(cond); \
\
if (_ret) \
- cache_set_fatal_error(c, __VA_ARGS__); \
+ bch_fs_fatal_error(c, __VA_ARGS__); \
_ret; \
})
-#define cache_fatal_error(ca, ...) \
+#define bch_dev_fatal_error(ca, ...) \
do { \
- __bch_cache_error(ca, __VA_ARGS__); \
+ __bch_dev_error(ca, __VA_ARGS__); \
bch_fatal_error(c); \
} while (0)
-#define cache_fatal_io_error(ca, fmt, ...) \
+#define bch_dev_fatal_io_error(ca, fmt, ...) \
do { \
char _buf[BDEVNAME_SIZE]; \
\
@@ -185,12 +185,12 @@ do { \
bch_fatal_error((ca)->set); \
} while (0)
-#define cache_fatal_io_err_on(cond, ca, ...) \
+#define bch_dev_fatal_io_err_on(cond, ca, ...) \
({ \
int _ret = !!(cond); \
\
if (_ret) \
- cache_fatal_io_error(ca, __VA_ARGS__); \
+ bch_dev_fatal_io_error(ca, __VA_ARGS__); \
_ret; \
})
@@ -209,7 +209,7 @@ void bch_nonfatal_io_error_work(struct work_struct *);
void bch_nonfatal_io_error(struct cache *);
#if 0
-#define cache_set_nonfatal_io_error(c, ...) \
+#define bch_fs_nonfatal_io_error(c, ...) \
do { \
bch_err(c, __VA_ARGS__); \
bch_nonfatal_io_error(c); \
@@ -217,7 +217,7 @@ do { \
#endif
/* Logs message and handles the error: */
-#define cache_nonfatal_io_error(ca, fmt, ...) \
+#define bch_dev_nonfatal_io_error(ca, fmt, ...) \
do { \
char _buf[BDEVNAME_SIZE]; \
\
@@ -227,12 +227,12 @@ do { \
bch_nonfatal_io_error(ca); \
} while (0)
-#define cache_nonfatal_io_err_on(cond, ca, ...) \
+#define bch_dev_nonfatal_io_err_on(cond, ca, ...) \
({ \
bool _ret = (cond); \
\
if (_ret) \
- cache_nonfatal_io_error(ca, __VA_ARGS__); \
+ bch_dev_nonfatal_io_error(ca, __VA_ARGS__); \
_ret; \
})
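
All of the renamed *_err_on()/*_inconsistent_on() helpers share the same shape: evaluate the condition once, log and invoke the error handler if it fired, and hand the condition back so the caller can branch on it inline. A simplified stand-alone sketch of that shape, with fprintf standing in for bch_err() and no real error handler:

#include <stdbool.h>
#include <stdio.h>

#define fs_inconsistent_on(cond, fmt, ...)				\
({									\
	bool _ret = (cond);						\
									\
	if (_ret) {							\
		fprintf(stderr, fmt "\n", ##__VA_ARGS__);		\
		/* the real macro also calls bch_inconsistent_error(c) */ \
	}								\
	_ret;								\
})

/* usage: the check reads like an if(), but also logs and flags the error */
static int check_size(unsigned size, unsigned max)
{
	if (fs_inconsistent_on(size > max, "size %u > max %u", size, max))
		return -1;

	return 0;
}
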
diff --git a/libbcache/extents.c b/libbcache/extents.c
index 4b8a266..523f3f4 100644
--- a/libbcache/extents.c
+++ b/libbcache/extents.c
@@ -561,7 +561,7 @@ static void btree_ptr_debugcheck(struct cache_set *c, struct btree *b,
if (replicas < c->sb.meta_replicas_have) {
bch_bkey_val_to_text(c, btree_node_type(b),
buf, sizeof(buf), k);
- cache_set_bug(c,
+ bch_fs_bug(c,
"btree key bad (too few replicas, %u < %u): %s",
replicas, c->sb.meta_replicas_have, buf);
return;
@@ -570,7 +570,7 @@ static void btree_ptr_debugcheck(struct cache_set *c, struct btree *b,
return;
err:
bch_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k);
- cache_set_bug(c, "%s btree pointer %s: bucket %zi prio %i "
+ bch_fs_bug(c, "%s btree pointer %s: bucket %zi prio %i "
"gen %i last_gc %i mark %08x",
err, buf, PTR_BUCKET_NR(ca, ptr),
g->read_prio, PTR_BUCKET(ca, ptr)->mark.gen,
@@ -609,13 +609,13 @@ bch_btree_pick_ptr(struct cache_set *c, const struct btree *b)
extent_for_each_online_device_crc(c, e, crc, ptr, ca) {
struct btree *root = btree_node_root(c, b);
- if (cache_set_inconsistent_on(crc, c,
+ if (bch_fs_inconsistent_on(crc, c,
"btree node pointer with crc at btree %u level %u/%u bucket %zu",
b->btree_id, b->level, root ? root->level : -1,
PTR_BUCKET_NR(ca, ptr)))
break;
- if (cache_inconsistent_on(ptr_stale(ca, ptr), ca,
+ if (bch_dev_inconsistent_on(ptr_stale(ca, ptr), ca,
"stale btree node pointer at btree %u level %u/%u bucket %zu",
b->btree_id, b->level, root ? root->level : -1,
PTR_BUCKET_NR(ca, ptr)))
@@ -1556,8 +1556,8 @@ next:
stop:
extent_insert_committed(s);
- bch_cache_set_stats_apply(c, &s->stats, s->trans->disk_res,
- gc_pos_btree_node(b));
+ bch_fs_stats_apply(c, &s->stats, s->trans->disk_res,
+ gc_pos_btree_node(b));
EBUG_ON(bkey_cmp(iter->pos, s->committed));
EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) != iter->at_end_of_leaf);
@@ -1718,8 +1718,8 @@ stop:
bkey_start_offset(&insert->k->k),
insert->k->k.size);
- bch_cache_set_stats_apply(c, &s.stats, trans->disk_res,
- gc_pos_btree_node(b));
+ bch_fs_stats_apply(c, &s.stats, trans->disk_res,
+ gc_pos_btree_node(b));
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
EBUG_ON(bkey_cmp(iter->pos, s.committed));
@@ -1870,10 +1870,10 @@ static void bch_extent_debugcheck_extent(struct cache_set *c, struct btree *b,
stale = ptr_stale(ca, ptr);
- cache_set_bug_on(stale && !ptr->cached, c,
+ bch_fs_bug_on(stale && !ptr->cached, c,
"stale dirty pointer");
- cache_set_bug_on(stale > 96, c,
+ bch_fs_bug_on(stale > 96, c,
"key too stale: %i",
stale);
@@ -1897,7 +1897,7 @@ static void bch_extent_debugcheck_extent(struct cache_set *c, struct btree *b,
if (replicas > BCH_REPLICAS_MAX) {
bch_bkey_val_to_text(c, btree_node_type(b), buf,
sizeof(buf), e.s_c);
- cache_set_bug(c,
+ bch_fs_bug(c,
"extent key bad (too many replicas: %u): %s",
replicas, buf);
return;
@@ -1907,7 +1907,7 @@ static void bch_extent_debugcheck_extent(struct cache_set *c, struct btree *b,
replicas < c->sb.data_replicas_have) {
bch_bkey_val_to_text(c, btree_node_type(b), buf,
sizeof(buf), e.s_c);
- cache_set_bug(c,
+ bch_fs_bug(c,
"extent key bad (too few replicas, %u < %u): %s",
replicas, c->sb.data_replicas_have, buf);
return;
@@ -1918,20 +1918,20 @@ static void bch_extent_debugcheck_extent(struct cache_set *c, struct btree *b,
bad_device:
bch_bkey_val_to_text(c, btree_node_type(b), buf,
sizeof(buf), e.s_c);
- cache_set_bug(c, "extent pointer to dev %u missing device: %s",
- ptr->dev, buf);
+ bch_fs_bug(c, "extent pointer to dev %u missing device: %s",
+ ptr->dev, buf);
cache_member_info_put();
return;
bad_ptr:
bch_bkey_val_to_text(c, btree_node_type(b), buf,
sizeof(buf), e.s_c);
- cache_set_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu prio %i "
- "gen %i last_gc %i mark 0x%08x",
- buf, PTR_BUCKET_NR(ca, ptr),
- g->read_prio, PTR_BUCKET(ca, ptr)->mark.gen,
- ca->oldest_gens[PTR_BUCKET_NR(ca, ptr)],
- (unsigned) g->mark.counter);
+ bch_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu prio %i "
+ "gen %i last_gc %i mark 0x%08x",
+ buf, PTR_BUCKET_NR(ca, ptr),
+ g->read_prio, PTR_BUCKET(ca, ptr)->mark.gen,
+ ca->oldest_gens[PTR_BUCKET_NR(ca, ptr)],
+ (unsigned) g->mark.counter);
cache_member_info_put();
return;
}
diff --git a/libbcache/fs-gc.c b/libbcache/fs-gc.c
index a758e89..e9585fd 100644
--- a/libbcache/fs-gc.c
+++ b/libbcache/fs-gc.c
@@ -634,7 +634,7 @@ static int bch_gc_do_inode(struct cache_set *c,
bool do_update = false;
ret = bch_inode_unpack(inode, &u);
- if (cache_set_inconsistent_on(ret, c,
+ if (bch_fs_inconsistent_on(ret, c,
"error unpacking inode %llu in fs-gc",
inode.k->p.inode))
return ret;
diff --git a/libbcache/fs.c b/libbcache/fs.c
index 76948e7..ab0d972 100644
--- a/libbcache/fs.c
+++ b/libbcache/fs.c
@@ -831,7 +831,8 @@ static int bch_inode_user_flags_set(struct bch_inode_info *ei,
#define FS_IOC_GOINGDOWN _IOR ('X', 125, __u32)
-static long bch_fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+static long bch_fs_file_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
@@ -893,12 +894,12 @@ setflags_out:
down_write(&sb->s_umount);
sb->s_flags |= MS_RDONLY;
- bch_cache_set_emergency_read_only(c);
+ bch_fs_emergency_read_only(c);
up_write(&sb->s_umount);
return 0;
default:
- return bch_cache_set_ioctl(c, cmd, (void __user *) arg);
+ return bch_fs_ioctl(c, cmd, (void __user *) arg);
}
}
@@ -916,7 +917,7 @@ static long bch_compat_fs_ioctl(struct file *file, unsigned int cmd, unsigned lo
default:
return -ENOIOCTLCMD;
}
- return bch_fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+ return bch_fs_file_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
@@ -946,7 +947,7 @@ static const struct file_operations bch_file_operations = {
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = bch_fallocate_dispatch,
- .unlocked_ioctl = bch_fs_ioctl,
+ .unlocked_ioctl = bch_fs_file_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = bch_compat_fs_ioctl,
#endif
@@ -982,7 +983,7 @@ static const struct file_operations bch_dir_file_operations = {
.read = generic_read_dir,
.iterate = bch_vfs_readdir,
.fsync = bch_fsync,
- .unlocked_ioctl = bch_fs_ioctl,
+ .unlocked_ioctl = bch_fs_file_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = bch_compat_fs_ioctl,
#endif
@@ -1156,7 +1157,7 @@ static int bch_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_type = BCACHE_STATFS_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = c->capacity >> PAGE_SECTOR_SHIFT;
- buf->f_bfree = (c->capacity - cache_set_sectors_used(c)) >> PAGE_SECTOR_SHIFT;
+ buf->f_bfree = (c->capacity - bch_fs_sectors_used(c)) >> PAGE_SECTOR_SHIFT;
buf->f_bavail = buf->f_bfree;
buf->f_files = atomic_long_read(&c->nr_inodes);
buf->f_ffree = U64_MAX;
@@ -1190,7 +1191,7 @@ static struct cache_set *bdev_to_cache_set(struct block_device *bdev)
rcu_read_lock();
- list_for_each_entry(c, &bch_cache_sets, list)
+ list_for_each_entry(c, &bch_fs_list, list)
for_each_cache_rcu(ca, c, i)
if (ca->disk_sb.bdev == bdev) {
rcu_read_unlock();
@@ -1203,7 +1204,7 @@ static struct cache_set *bdev_to_cache_set(struct block_device *bdev)
}
static struct cache_set *bch_open_as_blockdevs(const char *_dev_name,
- struct cache_set_opts opts)
+ struct bch_opts opts)
{
size_t nr_devs = 0, i = 0;
char *dev_name, *s, **devs;
@@ -1226,7 +1227,7 @@ static struct cache_set *bch_open_as_blockdevs(const char *_dev_name,
(s = strchr(s, ':')) && (*s++ = '\0'))
devs[i++] = s;
- err = bch_register_cache_set(devs, nr_devs, opts, &c);
+ err = bch_fs_open(devs, nr_devs, opts, &c);
if (err) {
/*
* Already open?
@@ -1256,7 +1257,7 @@ static struct cache_set *bch_open_as_blockdevs(const char *_dev_name,
if (!c)
goto err_unlock;
- if (!test_bit(CACHE_SET_RUNNING, &c->flags)) {
+ if (!test_bit(BCH_FS_RUNNING, &c->flags)) {
err = "incomplete cache set";
c = NULL;
goto err_unlock;
@@ -1266,7 +1267,7 @@ static struct cache_set *bch_open_as_blockdevs(const char *_dev_name,
mutex_unlock(&bch_register_lock);
}
- set_bit(CACHE_SET_BDEV_MOUNTED, &c->flags);
+ set_bit(BCH_FS_BDEV_MOUNTED, &c->flags);
err:
kfree(devs);
kfree(dev_name);
@@ -1281,10 +1282,12 @@ err_unlock:
static int bch_remount(struct super_block *sb, int *flags, char *data)
{
struct cache_set *c = sb->s_fs_info;
- struct cache_set_opts opts;
+ struct bch_opts opts = bch_opts_empty();
int ret;
- ret = bch_parse_options(&opts, *flags, data);
+ opts.read_only = (*flags & MS_RDONLY) != 0;
+
+ ret = bch_parse_mount_opts(&opts, data);
if (ret)
return ret;
@@ -1295,11 +1298,11 @@ static int bch_remount(struct super_block *sb, int *flags, char *data)
const char *err = NULL;
if (opts.read_only) {
- bch_cache_set_read_only_sync(c);
+ bch_fs_read_only_sync(c);
sb->s_flags |= MS_RDONLY;
} else {
- err = bch_cache_set_read_write(c);
+ err = bch_fs_read_write(c);
if (err) {
bch_err(c, "error going rw: %s", err);
ret = -EINVAL;
@@ -1355,11 +1358,13 @@ static struct dentry *bch_mount(struct file_system_type *fs_type,
struct cache *ca;
struct super_block *sb;
struct inode *inode;
- struct cache_set_opts opts;
+ struct bch_opts opts = bch_opts_empty();
unsigned i;
int ret;
- ret = bch_parse_options(&opts, flags, data);
+ opts.read_only = (flags & MS_RDONLY) != 0;
+
+ ret = bch_parse_mount_opts(&opts, data);
if (ret)
return ERR_PTR(ret);
@@ -1443,16 +1448,9 @@ static void bch_kill_sb(struct super_block *sb)
generic_shutdown_super(sb);
- if (test_bit(CACHE_SET_BDEV_MOUNTED, &c->flags)) {
- DECLARE_COMPLETION_ONSTACK(complete);
-
- c->stop_completion = &complete;
- bch_cache_set_stop(c);
- closure_put(&c->cl);
-
- /* Killable? */
- wait_for_completion(&complete);
- } else
+ if (test_bit(BCH_FS_BDEV_MOUNTED, &c->flags))
+ bch_fs_stop_sync(c);
+ else
closure_put(&c->cl);
}
diff --git a/libbcache/fs.h b/libbcache/fs.h
index aec6159..933fb6d 100644
--- a/libbcache/fs.h
+++ b/libbcache/fs.h
@@ -41,6 +41,8 @@ static inline unsigned nlink_bias(umode_t mode)
struct bch_inode_unpacked;
+#ifndef NO_BCACHE_FS
+
/* returns 0 if we want to do the update, or error is passed up */
typedef int (*inode_set_fn)(struct bch_inode_info *,
struct bch_inode_unpacked *, void *);
@@ -53,4 +55,11 @@ int __must_check bch_write_inode(struct cache_set *,
void bch_fs_exit(void);
int bch_fs_init(void);
+#else
+
+static inline void bch_fs_exit(void) {}
+static inline int bch_fs_init(void) { return 0; }
+
+#endif
+
#endif /* _BCACHE_FS_H */
diff --git a/libbcache/inode.h b/libbcache/inode.h
index 81dccf6..46abc2b 100644
--- a/libbcache/inode.h
+++ b/libbcache/inode.h
@@ -1,6 +1,8 @@
#ifndef _BCACHE_INODE_H
#define _BCACHE_INODE_H
+#include <linux/math64.h>
+
extern const struct bkey_ops bch_bkey_inode_ops;
struct bch_inode_unpacked {
diff --git a/libbcache/io.c b/libbcache/io.c
index 2f0e48a..be99a97 100644
--- a/libbcache/io.c
+++ b/libbcache/io.c
@@ -358,8 +358,8 @@ static void bch_write_endio(struct bio *bio)
struct bio *orig = wbio->orig;
struct cache *ca = wbio->ca;
- if (cache_nonfatal_io_err_on(bio->bi_error, ca,
- "data write"))
+ if (bch_dev_nonfatal_io_err_on(bio->bi_error, ca,
+ "data write"))
set_closure_fn(cl, bch_write_io_error, index_update_wq(op));
bch_account_io_completion_time(ca, wbio->submit_time_us,
@@ -722,8 +722,8 @@ void bch_wake_delayed_writes(unsigned long data)
spin_lock_irqsave(&c->foreground_write_pd_lock, flags);
while ((op = c->write_wait_head)) {
- if (!test_bit(CACHE_SET_RO, &c->flags) &&
- !test_bit(CACHE_SET_STOPPING, &c->flags) &&
+ if (!test_bit(BCH_FS_RO, &c->flags) &&
+ !test_bit(BCH_FS_STOPPING, &c->flags) &&
time_after(op->expires, jiffies)) {
mod_timer(&c->foreground_write_wakeup, op->expires);
break;
@@ -938,7 +938,7 @@ static int bio_checksum_uncompress(struct cache_set *c,
}
csum = bch_checksum_bio(c, rbio->crc.csum_type, nonce, src);
- if (cache_nonfatal_io_err_on(bch_crc_cmp(rbio->crc.csum, csum), rbio->ca,
+ if (bch_dev_nonfatal_io_err_on(bch_crc_cmp(rbio->crc.csum, csum), rbio->ca,
"data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)",
rbio->inode, (u64) rbio->parent_iter.bi_sector << 9,
rbio->crc.csum.hi, rbio->crc.csum.lo, csum.hi, csum.lo,
@@ -1069,8 +1069,8 @@ static void __bch_read_endio(struct cache_set *c, struct bch_read_bio *rbio)
}
if (rbio->promote &&
- !test_bit(CACHE_SET_RO, &c->flags) &&
- !test_bit(CACHE_SET_STOPPING, &c->flags)) {
+ !test_bit(BCH_FS_RO, &c->flags) &&
+ !test_bit(BCH_FS_STOPPING, &c->flags)) {
struct cache_promote_op *promote = rbio->promote;
struct closure *cl = &promote->cl;
@@ -1119,14 +1119,15 @@ static void bch_read_endio(struct bio *bio)
bch_account_io_completion_time(rbio->ca, rbio->submit_time_us, REQ_OP_READ);
- cache_nonfatal_io_err_on(bio->bi_error, rbio->ca, "data read");
+ bch_dev_nonfatal_io_err_on(bio->bi_error, rbio->ca, "data read");
if (error) {
bch_read_error_maybe_retry(c, rbio, error);
return;
}
- if (rbio->crc.compression_type != BCH_COMPRESSION_NONE) {
+ if (rbio->crc.compression_type != BCH_COMPRESSION_NONE ||
+ bch_csum_type_is_encryption(rbio->crc.csum_type)) {
struct bio_decompress_worker *d;
preempt_disable();
diff --git a/libbcache/journal.c b/libbcache/journal.c
index 3bb9e3c..99dd9f2 100644
--- a/libbcache/journal.c
+++ b/libbcache/journal.c
@@ -285,7 +285,7 @@ int bch_journal_seq_should_ignore(struct cache_set *c, u64 seq, struct btree *b)
/* Interior updates aren't journalled: */
BUG_ON(b->level);
- BUG_ON(seq > journal_seq && test_bit(CACHE_SET_INITIAL_GC_DONE, &c->flags));
+ BUG_ON(seq > journal_seq && test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags));
if (seq <= journal_seq) {
if (list_empty_careful(&j->seq_blacklist))
@@ -301,7 +301,7 @@ int bch_journal_seq_should_ignore(struct cache_set *c, u64 seq, struct btree *b)
* Decrease this back to j->seq + 2 when we next rev the on disk format:
* increasing it temporarily to work around bug in old kernels
*/
- cache_set_inconsistent_on(seq > journal_seq + 4, c,
+ bch_fs_inconsistent_on(seq > journal_seq + 4, c,
"bset journal seq too far in the future: %llu > %llu",
seq, journal_seq);
@@ -691,7 +691,7 @@ reread: sectors_read = min_t(unsigned,
ret = submit_bio_wait(bio);
- if (cache_fatal_io_err_on(ret, ca,
+ if (bch_dev_fatal_io_err_on(ret, ca,
"journal read from sector %llu",
offset) ||
bch_meta_read_fault("journal"))
@@ -1412,7 +1412,7 @@ void bch_journal_start(struct cache_set *c)
for_each_cache(ca, c, i)
if (is_journal_device(ca))
- bch_cache_group_add_cache(&c->journal.devs, ca);
+ bch_dev_group_add(&c->journal.devs, ca);
list_for_each_entry(bl, &j->seq_blacklist, list)
new_seq = max(new_seq, bl->seq);
@@ -1566,7 +1566,7 @@ static int bch_set_nr_journal_buckets(struct cache *ca, unsigned nr)
return 0;
}
-int bch_cache_journal_alloc(struct cache *ca)
+int bch_dev_journal_alloc(struct cache *ca)
{
struct journal_device *ja = &ca->journal;
struct bch_sb_field_journal *journal_buckets;
@@ -1882,7 +1882,7 @@ static void journal_reclaim_work(struct work_struct *work)
j->last_flushed = jiffies;
}
- if (!test_bit(CACHE_SET_RO, &c->flags))
+ if (!test_bit(BCH_FS_RO, &c->flags))
queue_delayed_work(system_freezable_wq, &j->reclaim_work,
msecs_to_jiffies(j->reclaim_delay_ms));
}
@@ -2014,7 +2014,7 @@ static void journal_write_endio(struct bio *bio)
struct cache *ca = bio->bi_private;
struct journal *j = &ca->set->journal;
- if (cache_fatal_io_err_on(bio->bi_error, ca, "journal write") ||
+ if (bch_dev_fatal_io_err_on(bio->bi_error, ca, "journal write") ||
bch_meta_write_fault("journal"))
bch_journal_halt(j);
diff --git a/libbcache/journal.h b/libbcache/journal.h
index 9274831..02a6e67 100644
--- a/libbcache/journal.h
+++ b/libbcache/journal.h
@@ -361,7 +361,7 @@ int bch_journal_alloc(struct journal *, unsigned);
ssize_t bch_journal_print_debug(struct journal *, char *);
-int bch_cache_journal_alloc(struct cache *);
+int bch_dev_journal_alloc(struct cache *);
static inline unsigned bch_nr_journal_buckets(struct bch_sb_field_journal *j)
{
diff --git a/libbcache/movinggc.c b/libbcache/movinggc.c
index 83407eb..e40dfbc 100644
--- a/libbcache/movinggc.c
+++ b/libbcache/movinggc.c
@@ -274,7 +274,7 @@ int bch_moving_gc_thread_start(struct cache *ca)
if (ca->set->opts.nochanges)
return 0;
- if (cache_set_init_fault("moving_gc_start"))
+ if (bch_fs_init_fault("moving_gc_start"))
return -ENOMEM;
t = kthread_create(bch_moving_gc_thread, ca, "bch_copygc_read");
diff --git a/libbcache/notify.c b/libbcache/notify.c
index 3a50f8f..00b7999 100644
--- a/libbcache/notify.c
+++ b/libbcache/notify.c
@@ -47,28 +47,28 @@ static void notify_put(struct cache_set *c)
mutex_unlock(&c->uevent_lock);
}
-void bch_notify_cache_set_read_write(struct cache_set *c)
+void bch_notify_fs_read_write(struct cache_set *c)
{
notify_get(c);
notify_var(c, "STATE=active");
notify_put(c);
}
-void bch_notify_cache_set_read_only(struct cache_set *c)
+void bch_notify_fs_read_only(struct cache_set *c)
{
notify_get(c);
notify_var(c, "STATE=readonly");
notify_put(c);
}
-void bch_notify_cache_set_stopped(struct cache_set *c)
+void bch_notify_fs_stopped(struct cache_set *c)
{
notify_get(c);
notify_var(c, "STATE=stopped");
notify_put(c);
}
-void bch_notify_cache_read_write(struct cache *ca)
+void bch_notify_dev_read_write(struct cache *ca)
{
struct cache_set *c = ca->set;
@@ -77,7 +77,7 @@ void bch_notify_cache_read_write(struct cache *ca)
notify_put(c);
}
-void bch_notify_cache_read_only(struct cache *ca)
+void bch_notify_dev_read_only(struct cache *ca)
{
struct cache_set *c = ca->set;
@@ -86,7 +86,7 @@ void bch_notify_cache_read_only(struct cache *ca)
notify_put(c);
}
-void bch_notify_cache_added(struct cache *ca)
+void bch_notify_dev_added(struct cache *ca)
{
struct cache_set *c = ca->set;
@@ -95,7 +95,7 @@ void bch_notify_cache_added(struct cache *ca)
notify_put(c);
}
-void bch_notify_cache_removing(struct cache *ca)
+void bch_notify_dev_removing(struct cache *ca)
{
struct cache_set *c = ca->set;
@@ -104,7 +104,7 @@ void bch_notify_cache_removing(struct cache *ca)
notify_put(c);
}
-void bch_notify_cache_remove_failed(struct cache *ca)
+void bch_notify_dev_remove_failed(struct cache *ca)
{
struct cache_set *c = ca->set;
@@ -113,7 +113,7 @@ void bch_notify_cache_remove_failed(struct cache *ca)
notify_put(c);
}
-void bch_notify_cache_removed(struct cache *ca)
+void bch_notify_dev_removed(struct cache *ca)
{
struct cache_set *c = ca->set;
@@ -122,7 +122,7 @@ void bch_notify_cache_removed(struct cache *ca)
notify_put(c);
}
-void bch_notify_cache_error(struct cache *ca, bool fatal)
+void bch_notify_dev_error(struct cache *ca, bool fatal)
{
struct cache_set *c = ca->set;
diff --git a/libbcache/notify.h b/libbcache/notify.h
index 80d6587..e1971db 100644
--- a/libbcache/notify.h
+++ b/libbcache/notify.h
@@ -7,16 +7,34 @@
#ifndef _NOTIFY_H
#define _NOTIFY_H
-void bch_notify_cache_set_read_write(struct cache_set *);
-void bch_notify_cache_set_read_only(struct cache_set *);
-void bch_notify_cache_set_stopped(struct cache_set *);
-
-void bch_notify_cache_read_write(struct cache *);
-void bch_notify_cache_read_only(struct cache *);
-void bch_notify_cache_added(struct cache *);
-void bch_notify_cache_removing(struct cache *);
-void bch_notify_cache_removed(struct cache *);
-void bch_notify_cache_remove_failed(struct cache *);
-void bch_notify_cache_error(struct cache *, bool);
+#ifndef NO_BCACHE_NOTIFY
+
+void bch_notify_fs_read_write(struct cache_set *);
+void bch_notify_fs_read_only(struct cache_set *);
+void bch_notify_fs_stopped(struct cache_set *);
+
+void bch_notify_dev_read_write(struct cache *);
+void bch_notify_dev_read_only(struct cache *);
+void bch_notify_dev_added(struct cache *);
+void bch_notify_dev_removing(struct cache *);
+void bch_notify_dev_removed(struct cache *);
+void bch_notify_dev_remove_failed(struct cache *);
+void bch_notify_dev_error(struct cache *, bool);
+
+#else
+
+static inline void bch_notify_fs_read_write(struct cache_set *c) {}
+static inline void bch_notify_fs_read_only(struct cache_set *c) {}
+static inline void bch_notify_fs_stopped(struct cache_set *c) {}
+
+static inline void bch_notify_dev_read_write(struct cache *ca) {}
+static inline void bch_notify_dev_read_only(struct cache *ca) {}
+static inline void bch_notify_dev_added(struct cache *ca) {}
+static inline void bch_notify_dev_removing(struct cache *ca) {}
+static inline void bch_notify_dev_removed(struct cache *ca) {}
+static inline void bch_notify_dev_remove_failed(struct cache *ca) {}
+static inline void bch_notify_dev_error(struct cache *ca, bool b) {}
+
+#endif
#endif /* _NOTIFY_H */
diff --git a/libbcache/opts.c b/libbcache/opts.c
index 333654e..ea71dfb 100644
--- a/libbcache/opts.c
+++ b/libbcache/opts.c
@@ -49,7 +49,7 @@ const char * const bch_cache_modes[] = {
NULL
};
-const char * const bch_cache_state[] = {
+const char * const bch_dev_state[] = {
"active",
"readonly",
"failed",
@@ -57,148 +57,186 @@ const char * const bch_cache_state[] = {
NULL
};
-
-const char * const bch_bool_opt[] = {
- "0",
- "1",
- NULL
-};
-
-const char * const bch_uint_opt[] = {
- NULL
-};
-
-enum bch_opts {
-#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _perm) \
- Opt_##_name,
-
+const struct bch_option bch_opt_table[] = {
+#define OPT_BOOL() .type = BCH_OPT_BOOL
+#define OPT_UINT(_min, _max) .type = BCH_OPT_UINT, .min = _min, .max = _max
+#define OPT_STR(_choices) .type = BCH_OPT_STR, .choices = _choices
+
+#define BCH_OPT(_name, _mode, _sb_opt, _bits, _type) \
+ [Opt_##_name] = { \
+ .name = #_name, \
+ .set_sb = SET_##_sb_opt, \
+ _type \
+ },
BCH_VISIBLE_OPTS()
#undef BCH_OPT
-
- Opt_bad_opt,
};
-struct bch_option {
- const char *name;
- const char * const *opts;
- unsigned long min, max;
-};
-
-struct bch_opt_result {
- enum bch_opts opt;
- unsigned val;
-};
-
-static int parse_bool_opt(const struct bch_option *opt, const char *s)
+static enum bch_opt_id bch_opt_lookup(const char *name)
{
- if (!strcmp(opt->name, s))
- return true;
+ const struct bch_option *i;
- if (!strncmp("no", s, 2) && !strcmp(opt->name, s + 2))
- return false;
+ for (i = bch_opt_table;
+ i < bch_opt_table + ARRAY_SIZE(bch_opt_table);
+ i++)
+ if (!strcmp(name, i->name))
+ return i - bch_opt_table;
return -1;
}
-static int parse_uint_opt(const struct bch_option *opt, const char *s)
+static u64 bch_opt_get(struct bch_opts *opts, enum bch_opt_id id)
{
- unsigned long v;
- int ret;
-
- if (strncmp(opt->name, s, strlen(opt->name)))
- return -1;
+ switch (id) {
+#define BCH_OPT(_name, ...) \
+ case Opt_##_name: \
+ return opts->_name; \
- s += strlen(opt->name);
-
- if (*s != '=')
- return -1;
+ BCH_VISIBLE_OPTS()
+#undef BCH_OPT
- s++;
+ default:
+ BUG();
+ }
+}
- ret = kstrtoul(s, 10, &v);
- if (ret)
- return ret;
+void bch_opt_set(struct bch_opts *opts, enum bch_opt_id id, u64 v)
+{
+ switch (id) {
+#define BCH_OPT(_name, ...) \
+ case Opt_##_name: \
+ opts->_name = v; \
+ break;
- if (v < opt->min || v >= opt->max)
- return -ERANGE;
+ BCH_VISIBLE_OPTS()
+#undef BCH_OPT
- return 0;
+ default:
+ BUG();
+ }
}
-static int parse_string_opt(const struct bch_option *opt, const char *s)
+/*
+ * Initial options from superblock - here we don't want any options undefined;
+ * any options the superblock doesn't specify are set to 0:
+ */
+struct bch_opts bch_sb_opts(struct bch_sb *sb)
{
- if (strncmp(opt->name, s, strlen(opt->name)))
- return -1;
+ struct bch_opts opts = bch_opts_empty();
- s += strlen(opt->name);
+#define BCH_OPT(_name, _mode, _sb_opt, ...) \
+ if (_sb_opt != NO_SB_OPT) \
+ opts._name = _sb_opt(sb);
- if (*s != '=')
- return -1;
-
- s++;
+ BCH_OPTS()
+#undef BCH_OPT
- return bch_read_string_list(s, opt->opts);
+ return opts;
}
-static struct bch_opt_result parse_one_opt(const char *opt)
+int parse_one_opt(enum bch_opt_id id, const char *val, u64 *res)
{
- static const struct bch_option opt_table[] = {
-#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _perm) \
- [Opt_##_name] = { \
- .name = #_name, \
- .opts = _choices, \
- .min = _min, \
- .max = _max, \
- },
- BCH_VISIBLE_OPTS()
-#undef BCH_OPT
- }, *i;
-
- for (i = opt_table;
- i < opt_table + ARRAY_SIZE(opt_table);
- i++) {
- int res = i->opts == bch_bool_opt ? parse_bool_opt(i, opt)
- : i->opts == bch_uint_opt ? parse_uint_opt(i, opt)
- : parse_string_opt(i, opt);
-
- if (res >= 0)
- return (struct bch_opt_result) {
- i - opt_table, res
- };
+ const struct bch_option *opt = &bch_opt_table[id];
+ ssize_t ret;
+
+ switch (opt->type) {
+ case BCH_OPT_BOOL:
+ ret = kstrtou64(val, 10, res);
+ if (ret < 0)
+ return ret;
+
+ if (*res > 1)
+ return -ERANGE;
+ break;
+ case BCH_OPT_UINT:
+ ret = kstrtou64(val, 10, res);
+ if (ret < 0)
+ return ret;
+
+ if (*res < opt->min || *res >= opt->max)
+ return -ERANGE;
+ break;
+ case BCH_OPT_STR:
+ ret = bch_read_string_list(val, opt->choices);
+ if (ret < 0)
+ return ret;
+
+ *res = ret;
+ break;
}
- return (struct bch_opt_result) { Opt_bad_opt };
+ return 0;
}
-int bch_parse_options(struct cache_set_opts *opts, int flags, char *options)
+int bch_parse_mount_opts(struct bch_opts *opts, char *options)
{
- char *p;
+ char *opt, *name, *val;
+ enum bch_opt_id id;
+ int ret;
+ u64 v;
+
+ while ((opt = strsep(&options, ",")) != NULL) {
+ name = strsep(&opt, "=");
+ val = opt;
+
+ if (val) {
+ id = bch_opt_lookup(name);
+ if (id < 0)
+ return -EINVAL;
+
+ ret = parse_one_opt(id, val, &v);
+ if (ret < 0)
+ return ret;
+ } else {
+ id = bch_opt_lookup(name);
+ v = 1;
+
+ if (id < 0 &&
+ !strncmp("no", name, 2)) {
+ id = bch_opt_lookup(name + 2);
+ v = 0;
+ }
+
+			if (id < 0 || bch_opt_table[id].type != BCH_OPT_BOOL)
+				return -EINVAL;
+ }
- *opts = cache_set_opts_empty();
+ bch_opt_set(opts, id, v);
+ }
- opts->read_only = (flags & MS_RDONLY) != 0;
+ return 0;
+}
- if (!options)
- return 0;
+enum bch_opt_id bch_parse_sysfs_opt(const char *name, const char *val,
+ u64 *res)
+{
+ enum bch_opt_id id = bch_opt_lookup(name);
+ int ret;
- while ((p = strsep(&options, ",")) != NULL) {
- struct bch_opt_result res = parse_one_opt(p);
+ if (id < 0)
+ return -EINVAL;
- switch (res.opt) {
-#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _perm) \
- case Opt_##_name: \
- opts->_name = res.val; \
- break;
+ ret = parse_one_opt(id, val, res);
+ if (ret < 0)
+ return ret;
- BCH_VISIBLE_OPTS()
-#undef BCH_OPT
+ return id;
+}
- case Opt_bad_opt:
- return -EINVAL;
- default:
- BUG();
- }
- }
+ssize_t bch_opt_show(struct bch_opts *opts, const char *name,
+ char *buf, size_t size)
+{
+ enum bch_opt_id id = bch_opt_lookup(name);
+ const struct bch_option *opt;
+ u64 v;
- return 0;
+ if (id < 0)
+ return -EINVAL;
+
+ v = bch_opt_get(opts, id);
+ opt = &bch_opt_table[id];
+
+ return opt->type == BCH_OPT_STR
+ ? bch_snprint_string_list(buf, size, opt->choices, v)
+ : snprintf(buf, size, "%lli\n", v);
}
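
A quick illustration of the new table-driven parser above: a minimal sketch, assuming the declarations from opts.h. The option names come from BCH_VISIBLE_OPTS(); the specific values in the mount string are illustrative only.

	static int example_parse(struct bch_opts *opts)
	{
		/* strsep() modifies its argument, so the string must be writable: */
		char buf[] = "metadata_replicas=2,nofsck,compression=lz4";

		*opts = bch_opts_empty();	/* every field starts at -1 (undefined) */

		/* 0 on success, -EINVAL for unknown names, -ERANGE for bad values: */
		return bch_parse_mount_opts(opts, buf);
	}
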
diff --git a/libbcache/opts.h b/libbcache/opts.h
index 1d30848..95184db 100644
--- a/libbcache/opts.h
+++ b/libbcache/opts.h
@@ -12,7 +12,7 @@ extern const char * const bch_compression_types[];
extern const char * const bch_str_hash_types[];
extern const char * const bch_cache_replacement_policies[];
extern const char * const bch_cache_modes[];
-extern const char * const bch_cache_state[];
+extern const char * const bch_dev_state[];
/*
* Mount options; we also store defaults in the superblock.
@@ -22,92 +22,135 @@ extern const char * const bch_cache_state[];
* updates the superblock.
*
* We store options as signed integers, where -1 means undefined. This means we
- * can pass the mount options to cache_set_alloc() as a whole struct, and then
- * only apply the options from that struct that are defined.
+ * can pass the mount options to bch_fs_alloc() as a whole struct, and then only
+ * apply the options from that struct that are defined.
*/
-extern const char * const bch_bool_opt[];
-extern const char * const bch_uint_opt[];
-
/* dummy option, for options that aren't stored in the superblock */
LE64_BITMASK(NO_SB_OPT, struct bch_sb, flags[0], 0, 0);
-#define BCH_VISIBLE_OPTS() \
- BCH_OPT(verbose_recovery, \
- bch_bool_opt, 0, 2, \
- NO_SB_OPT, false) \
- BCH_OPT(posix_acl, \
- bch_bool_opt, 0, 2, \
- NO_SB_OPT, false) \
- BCH_OPT(journal_flush_disabled, \
- bch_bool_opt, 0, 2, \
- NO_SB_OPT, true) \
- BCH_OPT(nofsck, \
- bch_bool_opt, 0, 2, \
- NO_SB_OPT, true) \
- BCH_OPT(fix_errors, \
- bch_bool_opt, 0, 2, \
- NO_SB_OPT, true) \
- BCH_OPT(nochanges, \
- bch_bool_opt, 0, 2, \
- NO_SB_OPT, 0) \
- BCH_OPT(noreplay, \
- bch_bool_opt, 0, 2, \
- NO_SB_OPT, 0) \
- BCH_OPT(norecovery, \
- bch_bool_opt, 0, 2, \
- NO_SB_OPT, 0) \
- BCH_SB_OPTS()
-
-#define BCH_OPTS() \
- BCH_OPT(read_only, \
- bch_bool_opt, 0, 2, \
- NO_SB_OPT, 0) \
+/**
+ * BCH_OPT(name, mode, sb_opt, bits, type)
+ *
+ * @name - name of mount option, sysfs attribute, and struct bch_opts
+ * member
+ *
+ * @mode - sysfs attr permissions
+ *
+ * @sb_opt - name of corresponding superblock option
+ *
+ * @type - one of OPT_BOOL, OPT_UINT, OPT_STR
+ */
+
+enum opt_type {
+ BCH_OPT_BOOL,
+ BCH_OPT_UINT,
+ BCH_OPT_STR,
+};
+
+#define BCH_VISIBLE_OPTS() \
+ BCH_OPT(errors, 0644, BCH_SB_ERROR_ACTION, \
+ s8, OPT_STR(bch_error_actions)) \
+ BCH_OPT(metadata_replicas, 0444, BCH_SB_META_REPLICAS_WANT,\
+ s8, OPT_UINT(0, BCH_REPLICAS_MAX)) \
+ BCH_OPT(data_replicas, 0444, BCH_SB_DATA_REPLICAS_WANT,\
+ s8, OPT_UINT(0, BCH_REPLICAS_MAX)) \
+ BCH_OPT(metadata_checksum, 0644, BCH_SB_META_CSUM_TYPE, \
+ s8, OPT_STR(bch_csum_types)) \
+ BCH_OPT(data_checksum, 0644, BCH_SB_DATA_CSUM_TYPE, \
+ s8, OPT_STR(bch_csum_types)) \
+ BCH_OPT(compression, 0644, BCH_SB_COMPRESSION_TYPE,\
+ s8, OPT_STR(bch_compression_types)) \
+ BCH_OPT(str_hash, 0644, BCH_SB_STR_HASH_TYPE, \
+ s8, OPT_STR(bch_str_hash_types)) \
+ BCH_OPT(inodes_32bit, 0644, BCH_SB_INODE_32BIT, \
+ s8, OPT_BOOL()) \
+ BCH_OPT(gc_reserve_percent, 0444, BCH_SB_GC_RESERVE, \
+ s8, OPT_UINT(5, 21)) \
+ BCH_OPT(root_reserve_percent, 0444, BCH_SB_ROOT_RESERVE, \
+ s8, OPT_UINT(0, 100)) \
+ BCH_OPT(wide_macs, 0644, BCH_SB_128_BIT_MACS, \
+ s8, OPT_BOOL()) \
+ BCH_OPT(verbose_recovery, 0444, NO_SB_OPT, \
+ s8, OPT_BOOL()) \
+ BCH_OPT(posix_acl, 0444, NO_SB_OPT, \
+ s8, OPT_BOOL()) \
+ BCH_OPT(journal_flush_disabled, 0644, NO_SB_OPT, \
+ s8, OPT_BOOL()) \
+ BCH_OPT(nofsck, 0444, NO_SB_OPT, \
+ s8, OPT_BOOL()) \
+ BCH_OPT(fix_errors, 0444, NO_SB_OPT, \
+ s8, OPT_BOOL()) \
+ BCH_OPT(nochanges, 0444, NO_SB_OPT, \
+ s8, OPT_BOOL()) \
+ BCH_OPT(noreplay, 0444, NO_SB_OPT, \
+ s8, OPT_BOOL()) \
+ BCH_OPT(norecovery, 0444, NO_SB_OPT, \
+ s8, OPT_BOOL())
+
+#define BCH_OPTS() \
+ BCH_OPT(read_only, 0444, NO_SB_OPT, \
+ s8, OPT_BOOL()) \
BCH_VISIBLE_OPTS()
-struct cache_set_opts {
-#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _perm)\
- s8 _name;
+struct bch_opts {
+#define BCH_OPT(_name, _mode, _sb_opt, _bits, ...) \
+ _bits _name;
BCH_OPTS()
#undef BCH_OPT
};
-static inline struct cache_set_opts cache_set_opts_empty(void)
+enum bch_opt_id {
+#define BCH_OPT(_name, ...) \
+ Opt_##_name,
+
+ BCH_VISIBLE_OPTS()
+#undef BCH_OPT
+};
+
+struct bch_option {
+ const char *name;
+ void (*set_sb)(struct bch_sb *, u64);
+ enum opt_type type;
+
+ union {
+ struct {
+ u64 min, max;
+ };
+ struct {
+ const char * const *choices;
+ };
+ };
+
+};
+
+extern const struct bch_option bch_opt_table[];
+
+static inline struct bch_opts bch_opts_empty(void)
{
- struct cache_set_opts ret;
+ struct bch_opts ret;
memset(&ret, 255, sizeof(ret));
return ret;
}
-/*
- * Initial options from superblock - here we don't want any options undefined,
- * any options the superblock doesn't specify are set to 0:
- */
-static inline struct cache_set_opts cache_superblock_opts(struct bch_sb *sb)
+static inline void bch_opts_apply(struct bch_opts *dst, struct bch_opts src)
{
- return (struct cache_set_opts) {
-#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _perm)\
- ._name = _sb_opt##_BITS ? _sb_opt(sb) : 0,
+#define BCH_OPT(_name, ...) \
+ if (src._name >= 0) \
+ dst->_name = src._name;
- BCH_SB_OPTS()
+ BCH_OPTS()
#undef BCH_OPT
- };
}
-static inline void cache_set_opts_apply(struct cache_set_opts *dst,
- struct cache_set_opts src)
-{
-#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _perm)\
- BUILD_BUG_ON(_max > S8_MAX); \
- if (src._name >= 0) \
- dst->_name = src._name;
+void bch_opt_set(struct bch_opts *, enum bch_opt_id, u64);
+struct bch_opts bch_sb_opts(struct bch_sb *);
- BCH_SB_OPTS()
-#undef BCH_OPT
-}
+int bch_parse_mount_opts(struct bch_opts *, char *);
+enum bch_opt_id bch_parse_sysfs_opt(const char *, const char *, u64 *);
-int bch_parse_options(struct cache_set_opts *, int, char *);
+ssize_t bch_opt_show(struct bch_opts *, const char *, char *, size_t);
#endif /* _BCACHE_OPTS_H */
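
The undefined-means-minus-one scheme described in the comment above is what lets superblock defaults and mount options layer cleanly. A minimal sketch of that layering, assuming the helpers declared in this header; it mirrors what bch_fs_alloc() does in super.c:

	static struct bch_opts example_effective_opts(struct bch_sb *sb,
						      struct bch_opts mount_opts)
	{
		struct bch_opts opts = bch_opts_empty();	/* all fields -1 */

		/* fields with a superblock counterpart take the superblock value: */
		bch_opts_apply(&opts, bch_sb_opts(sb));

		/* only mount options the user actually set (>= 0) override: */
		bch_opts_apply(&opts, mount_opts);

		return opts;
	}
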
diff --git a/libbcache/stats.h b/libbcache/stats.h
index 39877f9..c177ce3 100644
--- a/libbcache/stats.h
+++ b/libbcache/stats.h
@@ -7,11 +7,27 @@ struct cache_set;
struct cached_dev;
struct bcache_device;
+#ifndef NO_BCACHE_ACCOUNTING
+
void bch_cache_accounting_init(struct cache_accounting *, struct closure *);
int bch_cache_accounting_add_kobjs(struct cache_accounting *, struct kobject *);
void bch_cache_accounting_clear(struct cache_accounting *);
void bch_cache_accounting_destroy(struct cache_accounting *);
+#else
+
+static inline void bch_cache_accounting_init(struct cache_accounting *acc,
+ struct closure *cl) {}
+static inline int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
+ struct kobject *cl)
+{
+ return 0;
+}
+static inline void bch_cache_accounting_clear(struct cache_accounting *acc) {}
+static inline void bch_cache_accounting_destroy(struct cache_accounting *acc) {}
+
+#endif
+
static inline void mark_cache_stats(struct cache_stat_collector *stats,
bool hit, bool bypass)
{
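
Both this header and notify.h above use the same compile-out pattern: when the userspace build defines NO_BCACHE_ACCOUNTING or NO_BCACHE_NOTIFY, the declarations collapse to empty static inlines, so call sites stay unconditional. A hedged sketch of the pattern, using a hypothetical facility name:

	#ifndef NO_BCACHE_FOO
	void bch_foo_event(struct cache_set *);
	#else
	static inline void bch_foo_event(struct cache_set *c) {}	/* compiled out */
	#endif

	/* callers never need their own #ifdef: */
	static inline void example_caller(struct cache_set *c)
	{
		bch_foo_event(c);
	}
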
diff --git a/libbcache/super-io.c b/libbcache/super-io.c
index 66338a1..be27d3e 100644
--- a/libbcache/super-io.c
+++ b/libbcache/super-io.c
@@ -370,7 +370,7 @@ static bool bch_is_open_cache(struct block_device *bdev)
unsigned i;
rcu_read_lock();
- list_for_each_entry(c, &bch_cache_sets, list)
+ list_for_each_entry(c, &bch_fs_list, list)
for_each_cache_rcu(ca, c, i)
if (ca->disk_sb.bdev == bdev) {
rcu_read_unlock();
@@ -388,7 +388,7 @@ static bool bch_is_open(struct block_device *bdev)
}
static const char *bch_blkdev_open(const char *path, void *holder,
- struct cache_set_opts opts,
+ struct bch_opts opts,
struct block_device **ret)
{
struct block_device *bdev;
@@ -423,9 +423,8 @@ static const char *bch_blkdev_open(const char *path, void *holder,
}
/* Update cached mi: */
-int bch_cache_set_mi_update(struct cache_set *c,
- struct bch_member *mi,
- unsigned nr_devices)
+int bch_fs_mi_update(struct cache_set *c, struct bch_member *mi,
+ unsigned nr_devices)
{
struct cache_member_rcu *new, *old;
struct cache *ca;
@@ -529,7 +528,7 @@ int bch_sb_to_cache_set(struct cache_set *c, struct bch_sb *src)
if (bch_fs_sb_realloc(c, le32_to_cpu(src->u64s) - journal_u64s))
return -ENOMEM;
- if (bch_cache_set_mi_update(c, members->members, src->nr_devices))
+ if (bch_fs_mi_update(c, members->members, src->nr_devices))
return -ENOMEM;
__copy_super(c->disk_sb, src);
@@ -608,7 +607,7 @@ reread:
}
const char *bch_read_super(struct bcache_superblock *sb,
- struct cache_set_opts opts,
+ struct bch_opts opts,
const char *path)
{
struct bch_sb_layout layout;
@@ -628,7 +627,7 @@ const char *bch_read_super(struct bcache_superblock *sb,
goto err;
err = "dynamic fault";
- if (cache_set_init_fault("read_super"))
+ if (bch_fs_init_fault("read_super"))
goto err;
err = read_one_super(sb, BCH_SB_SECTOR);
@@ -698,7 +697,7 @@ static void write_super_endio(struct bio *bio)
/* XXX: return errors directly */
- cache_fatal_io_err_on(bio->bi_error, ca, "superblock write");
+ bch_dev_fatal_io_err_on(bio->bi_error, ca, "superblock write");
bch_account_io_completion(ca);
@@ -766,7 +765,7 @@ void bch_write_super(struct cache_set *c)
} while (wrote);
/* Make new options visible after they're persistent: */
- bch_cache_set_mi_update(c, members->members, c->sb.nr_devices);
+ bch_fs_mi_update(c, members->members, c->sb.nr_devices);
bch_sb_update(c);
}
diff --git a/libbcache/super-io.h b/libbcache/super-io.h
index 1eda57b..665de81 100644
--- a/libbcache/super-io.h
+++ b/libbcache/super-io.h
@@ -80,7 +80,7 @@ static inline struct cache_member_cpu cache_mi_to_cpu_mi(struct bch_member *mi)
};
}
-int bch_cache_set_mi_update(struct cache_set *, struct bch_member *, unsigned);
+int bch_fs_mi_update(struct cache_set *, struct bch_member *, unsigned);
int bch_sb_to_cache_set(struct cache_set *, struct bch_sb *);
int bch_sb_from_cache_set(struct cache_set *, struct cache *);
@@ -96,7 +96,7 @@ int bch_super_realloc(struct bcache_superblock *, unsigned);
const char *bch_validate_cache_super(struct bcache_superblock *);
const char *bch_read_super(struct bcache_superblock *,
- struct cache_set_opts, const char *);
+ struct bch_opts, const char *);
void bch_write_super(struct cache_set *);
void bch_check_mark_super_slowpath(struct cache_set *,
diff --git a/libbcache/super.c b/libbcache/super.c
index c026c0d..fab3480 100644
--- a/libbcache/super.c
+++ b/libbcache/super.c
@@ -63,18 +63,14 @@ static const uuid_le invalid_uuid = {
static struct kset *bcache_kset;
struct mutex bch_register_lock;
-LIST_HEAD(bch_cache_sets);
+LIST_HEAD(bch_fs_list);
-static int bch_chardev_major;
-static struct class *bch_chardev_class;
-static struct device *bch_chardev;
-static DEFINE_IDR(bch_chardev_minor);
static DECLARE_WAIT_QUEUE_HEAD(bch_read_only_wait);
struct workqueue_struct *bcache_io_wq;
struct crypto_shash *bch_sha256;
-static void bch_cache_stop(struct cache *);
-static int bch_cache_online(struct cache *);
+static void bch_dev_stop(struct cache *);
+static int bch_dev_online(struct cache *);
static int bch_congested_fn(void *data, int bdi_bits)
{
@@ -128,7 +124,7 @@ static int bch_congested_fn(void *data, int bdi_bits)
* - allocator depends on the journal (when it rewrites prios and gens)
*/
-static void __bch_cache_set_read_only(struct cache_set *c)
+static void __bch_fs_read_only(struct cache_set *c)
{
struct cache *ca;
unsigned i;
@@ -145,7 +141,7 @@ static void __bch_cache_set_read_only(struct cache_set *c)
bch_btree_flush(c);
for_each_cache(ca, c, i)
- bch_cache_allocator_stop(ca);
+ bch_dev_allocator_stop(ca);
/*
* Write a journal entry after flushing the btree, so we don't end up
@@ -167,11 +163,11 @@ static void bch_writes_disabled(struct percpu_ref *writes)
{
struct cache_set *c = container_of(writes, struct cache_set, writes);
- set_bit(CACHE_SET_WRITE_DISABLE_COMPLETE, &c->flags);
+ set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
wake_up(&bch_read_only_wait);
}
-static void bch_cache_set_read_only_work(struct work_struct *work)
+static void bch_fs_read_only_work(struct work_struct *work)
{
struct cache_set *c =
container_of(work, struct cache_set, read_only_work);
@@ -184,19 +180,19 @@ static void bch_cache_set_read_only_work(struct work_struct *work)
c->foreground_write_pd.rate.rate = UINT_MAX;
bch_wake_delayed_writes((unsigned long) c);
- if (!test_bit(CACHE_SET_EMERGENCY_RO, &c->flags)) {
+ if (!test_bit(BCH_FS_EMERGENCY_RO, &c->flags)) {
/*
* If we're not doing an emergency shutdown, we want to wait on
* outstanding writes to complete so they don't see spurious
* errors due to shutting down the allocator:
*/
wait_event(bch_read_only_wait,
- test_bit(CACHE_SET_WRITE_DISABLE_COMPLETE, &c->flags));
+ test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
- __bch_cache_set_read_only(c);
+ __bch_fs_read_only(c);
if (!bch_journal_error(&c->journal) &&
- !test_bit(CACHE_SET_ERROR, &c->flags)) {
+ !test_bit(BCH_FS_ERROR, &c->flags)) {
mutex_lock(&c->sb_lock);
SET_BCH_SB_CLEAN(c->disk_sb, true);
bch_write_super(c);
@@ -210,25 +206,25 @@ static void bch_cache_set_read_only_work(struct work_struct *work)
* we do need to wait on them before returning and signalling
* that going RO is complete:
*/
- __bch_cache_set_read_only(c);
+ __bch_fs_read_only(c);
wait_event(bch_read_only_wait,
- test_bit(CACHE_SET_WRITE_DISABLE_COMPLETE, &c->flags));
+ test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
}
- bch_notify_cache_set_read_only(c);
- trace_bcache_cache_set_read_only_done(c);
+ bch_notify_fs_read_only(c);
+ trace_fs_read_only_done(c);
- set_bit(CACHE_SET_RO_COMPLETE, &c->flags);
+ set_bit(BCH_FS_RO_COMPLETE, &c->flags);
wake_up(&bch_read_only_wait);
}
-bool bch_cache_set_read_only(struct cache_set *c)
+bool bch_fs_read_only(struct cache_set *c)
{
- if (test_and_set_bit(CACHE_SET_RO, &c->flags))
+ if (test_and_set_bit(BCH_FS_RO, &c->flags))
return false;
- trace_bcache_cache_set_read_only(c);
+ trace_fs_read_only(c);
percpu_ref_get(&c->writes);
@@ -238,7 +234,7 @@ bool bch_cache_set_read_only(struct cache_set *c)
*
* (This is really blocking new _allocations_, writes to previously
* allocated space can still happen until stopping the allocator in
- * bch_cache_allocator_stop()).
+ * bch_dev_allocator_stop()).
*/
percpu_ref_kill(&c->writes);
@@ -246,30 +242,30 @@ bool bch_cache_set_read_only(struct cache_set *c)
return true;
}
-bool bch_cache_set_emergency_read_only(struct cache_set *c)
+bool bch_fs_emergency_read_only(struct cache_set *c)
{
- bool ret = !test_and_set_bit(CACHE_SET_EMERGENCY_RO, &c->flags);
+ bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
- bch_cache_set_read_only(c);
+ bch_fs_read_only(c);
bch_journal_halt(&c->journal);
wake_up(&bch_read_only_wait);
return ret;
}
-void bch_cache_set_read_only_sync(struct cache_set *c)
+void bch_fs_read_only_sync(struct cache_set *c)
{
- /* so we don't race with bch_cache_set_read_write() */
+ /* so we don't race with bch_fs_read_write() */
lockdep_assert_held(&bch_register_lock);
- bch_cache_set_read_only(c);
+ bch_fs_read_only(c);
wait_event(bch_read_only_wait,
- test_bit(CACHE_SET_RO_COMPLETE, &c->flags) &&
- test_bit(CACHE_SET_WRITE_DISABLE_COMPLETE, &c->flags));
+ test_bit(BCH_FS_RO_COMPLETE, &c->flags) &&
+ test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
}
-static const char *__bch_cache_set_read_write(struct cache_set *c)
+static const char *__bch_fs_read_write(struct cache_set *c)
{
struct cache *ca;
const char *err;
@@ -280,7 +276,7 @@ static const char *__bch_cache_set_read_write(struct cache_set *c)
err = "error starting allocator thread";
for_each_cache(ca, c, i)
if (ca->mi.state == BCH_MEMBER_STATE_ACTIVE &&
- bch_cache_allocator_start(ca)) {
+ bch_dev_allocator_start(ca)) {
percpu_ref_put(&ca->ref);
goto err;
}
@@ -308,35 +304,35 @@ static const char *__bch_cache_set_read_write(struct cache_set *c)
return NULL;
err:
- __bch_cache_set_read_only(c);
+ __bch_fs_read_only(c);
return err;
}
-const char *bch_cache_set_read_write(struct cache_set *c)
+const char *bch_fs_read_write(struct cache_set *c)
{
const char *err;
lockdep_assert_held(&bch_register_lock);
- if (!test_bit(CACHE_SET_RO_COMPLETE, &c->flags))
+ if (!test_bit(BCH_FS_RO_COMPLETE, &c->flags))
return NULL;
- err = __bch_cache_set_read_write(c);
+ err = __bch_fs_read_write(c);
if (err)
return err;
percpu_ref_reinit(&c->writes);
- clear_bit(CACHE_SET_WRITE_DISABLE_COMPLETE, &c->flags);
- clear_bit(CACHE_SET_EMERGENCY_RO, &c->flags);
- clear_bit(CACHE_SET_RO_COMPLETE, &c->flags);
- clear_bit(CACHE_SET_RO, &c->flags);
+ clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
+ clear_bit(BCH_FS_EMERGENCY_RO, &c->flags);
+ clear_bit(BCH_FS_RO_COMPLETE, &c->flags);
+ clear_bit(BCH_FS_RO, &c->flags);
return NULL;
}
/* Cache set startup/shutdown: */
-static void cache_set_free(struct cache_set *c)
+static void bch_fs_free(struct cache_set *c)
{
del_timer_sync(&c->foreground_write_wakeup);
cancel_delayed_work_sync(&c->pd_controllers_update);
@@ -344,12 +340,13 @@ static void cache_set_free(struct cache_set *c)
cancel_work_sync(&c->bio_submit_work);
cancel_work_sync(&c->read_retry_work);
- bch_cache_set_encryption_free(c);
+ bch_fs_encryption_free(c);
bch_btree_cache_free(c);
bch_journal_free(&c->journal);
bch_io_clock_exit(&c->io_clock[WRITE]);
bch_io_clock_exit(&c->io_clock[READ]);
bch_compress_free(c);
+ bch_fs_blockdev_exit(c);
bdi_destroy(&c->bdi);
lg_lock_free(&c->bucket_stats_lock);
free_percpu(c->bucket_stats_percpu);
@@ -362,7 +359,6 @@ static void cache_set_free(struct cache_set *c)
mempool_exit(&c->btree_interior_update_pool);
mempool_exit(&c->btree_reserve_pool);
mempool_exit(&c->fill_iter);
- mempool_exit(&c->search);
percpu_ref_exit(&c->writes);
if (c->copygc_wq)
@@ -377,18 +373,18 @@ static void cache_set_free(struct cache_set *c)
}
/*
- * should be __cache_set_stop4 - block devices are closed, now we can finally
+ * should be __bch_fs_stop4 - block devices are closed, now we can finally
* free it
*/
-void bch_cache_set_release(struct kobject *kobj)
+void bch_fs_release(struct kobject *kobj)
{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
struct completion *stop_completion = c->stop_completion;
- bch_notify_cache_set_stopped(c);
+ bch_notify_fs_stopped(c);
bch_info(c, "stopped");
- cache_set_free(c);
+ bch_fs_free(c);
if (stop_completion)
complete(stop_completion);
@@ -397,7 +393,7 @@ void bch_cache_set_release(struct kobject *kobj)
/*
* All activity on the cache_set should have stopped now - close devices:
*/
-static void __cache_set_stop3(struct closure *cl)
+static void __bch_fs_stop3(struct closure *cl)
{
struct cache_set *c = container_of(cl, struct cache_set, cl);
struct cache *ca;
@@ -405,13 +401,9 @@ static void __cache_set_stop3(struct closure *cl)
mutex_lock(&bch_register_lock);
for_each_cache(ca, c, i)
- bch_cache_stop(ca);
- mutex_unlock(&bch_register_lock);
+ bch_dev_stop(ca);
- mutex_lock(&bch_register_lock);
list_del(&c->list);
- if (c->minor >= 0)
- idr_remove(&bch_chardev_minor, c->minor);
mutex_unlock(&bch_register_lock);
closure_debug_destroy(&c->cl);
@@ -422,14 +414,12 @@ static void __cache_set_stop3(struct closure *cl)
* Openers (i.e. block devices) should have exited, shutdown all userspace
* interfaces and wait for &c->cl to hit 0
*/
-static void __cache_set_stop2(struct closure *cl)
+static void __bch_fs_stop2(struct closure *cl)
{
struct cache_set *c = container_of(cl, struct cache_set, caching);
bch_debug_exit_cache_set(c);
-
- if (!IS_ERR_OR_NULL(c->chardev))
- device_unregister(c->chardev);
+ bch_fs_chardev_exit(c);
if (c->kobj.state_in_sysfs)
kobject_del(&c->kobj);
@@ -441,39 +431,52 @@ static void __cache_set_stop2(struct closure *cl)
kobject_put(&c->internal);
mutex_lock(&bch_register_lock);
- bch_cache_set_read_only_sync(c);
+ bch_fs_read_only_sync(c);
mutex_unlock(&bch_register_lock);
closure_return(cl);
}
/*
- * First phase of the shutdown process that's kicked off by cache_set_stop(); we
+ * First phase of the shutdown process that's kicked off by bch_fs_stop(); we
* haven't waited for anything to stop yet, we're just punting to process
* context to shut down block devices:
*/
-static void __cache_set_stop1(struct closure *cl)
+static void __bch_fs_stop1(struct closure *cl)
{
struct cache_set *c = container_of(cl, struct cache_set, caching);
bch_blockdevs_stop(c);
- continue_at(cl, __cache_set_stop2, system_wq);
+ continue_at(cl, __bch_fs_stop2, system_wq);
}
-void bch_cache_set_stop(struct cache_set *c)
+void bch_fs_stop(struct cache_set *c)
{
- if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
+ if (!test_and_set_bit(BCH_FS_STOPPING, &c->flags))
closure_queue(&c->caching);
}
-void bch_cache_set_unregister(struct cache_set *c)
+void bch_fs_stop_sync(struct cache_set *c)
{
- if (!test_and_set_bit(CACHE_SET_UNREGISTERING, &c->flags))
- bch_cache_set_stop(c);
+ DECLARE_COMPLETION_ONSTACK(complete);
+
+ c->stop_completion = &complete;
+ bch_fs_stop(c);
+ closure_put(&c->cl);
+
+ /* Killable? */
+ wait_for_completion(&complete);
}
-static unsigned cache_set_nr_devices(struct cache_set *c)
+/* Stop, detaching from backing devices: */
+void bch_fs_detach(struct cache_set *c)
+{
+ if (!test_and_set_bit(BCH_FS_DETACHING, &c->flags))
+ bch_fs_stop(c);
+}
+
+static unsigned bch_fs_nr_devices(struct cache_set *c)
{
struct bch_sb_field_members *mi;
unsigned i, nr = 0;
@@ -490,7 +493,7 @@ static unsigned cache_set_nr_devices(struct cache_set *c)
return nr;
}
-static unsigned cache_set_nr_online_devices(struct cache_set *c)
+static unsigned bch_fs_nr_online_devices(struct cache_set *c)
{
unsigned i, nr = 0;
@@ -504,8 +507,7 @@ static unsigned cache_set_nr_online_devices(struct cache_set *c)
#define alloc_bucket_pages(gfp, ca) \
((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(ca))))
-static struct cache_set *bch_cache_set_alloc(struct bch_sb *sb,
- struct cache_set_opts opts)
+static struct cache_set *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
{
struct cache_set *c;
unsigned iter_size, journal_entry_bytes;
@@ -523,7 +525,7 @@ static struct cache_set *bch_cache_set_alloc(struct bch_sb *sb,
mutex_init(&c->btree_cache_lock);
mutex_init(&c->bucket_lock);
mutex_init(&c->btree_root_lock);
- INIT_WORK(&c->read_only_work, bch_cache_set_read_only_work);
+ INIT_WORK(&c->read_only_work, bch_fs_read_only_work);
init_rwsem(&c->gc_lock);
@@ -591,15 +593,15 @@ static struct cache_set *bch_cache_set_alloc(struct bch_sb *sb,
scnprintf(c->name, sizeof(c->name), "%pU", &c->sb.user_uuid);
- c->opts = cache_superblock_opts(sb);
- cache_set_opts_apply(&c->opts, opts);
+ bch_opts_apply(&c->opts, bch_sb_opts(sb));
+ bch_opts_apply(&c->opts, opts);
c->opts.nochanges |= c->opts.noreplay;
c->opts.read_only |= c->opts.nochanges;
c->block_bits = ilog2(c->sb.block_size);
- if (cache_set_init_fault("cache_set_alloc"))
+ if (bch_fs_init_fault("fs_alloc"))
goto err;
iter_size = (btree_blocks(c) + 1) * 2 *
@@ -612,7 +614,6 @@ static struct cache_set *bch_cache_set_alloc(struct bch_sb *sb,
!(c->copygc_wq = alloc_workqueue("bcache_copygc",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
percpu_ref_init(&c->writes, bch_writes_disabled, 0, GFP_KERNEL) ||
- mempool_init_slab_pool(&c->search, 1, bch_search_cache) ||
mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1,
sizeof(struct btree_reserve)) ||
mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
@@ -632,11 +633,12 @@ static struct cache_set *bch_cache_set_alloc(struct bch_sb *sb,
mempool_init_page_pool(&c->btree_bounce_pool, 1,
ilog2(btree_pages(c))) ||
bdi_setup_and_register(&c->bdi, "bcache") ||
+ bch_fs_blockdev_init(c) ||
bch_io_clock_init(&c->io_clock[READ]) ||
bch_io_clock_init(&c->io_clock[WRITE]) ||
bch_journal_alloc(&c->journal, journal_entry_bytes) ||
bch_btree_cache_alloc(c) ||
- bch_cache_set_encryption_init(c) ||
+ bch_fs_encryption_init(c) ||
bch_compress_init(c) ||
bch_check_set_has_compressed_data(c, c->opts.compression))
goto err;
@@ -652,42 +654,39 @@ static struct cache_set *bch_cache_set_alloc(struct bch_sb *sb,
closure_init(&c->cl, NULL);
c->kobj.kset = bcache_kset;
- kobject_init(&c->kobj, &bch_cache_set_ktype);
- kobject_init(&c->internal, &bch_cache_set_internal_ktype);
- kobject_init(&c->opts_dir, &bch_cache_set_opts_dir_ktype);
- kobject_init(&c->time_stats, &bch_cache_set_time_stats_ktype);
+ kobject_init(&c->kobj, &bch_fs_ktype);
+ kobject_init(&c->internal, &bch_fs_internal_ktype);
+ kobject_init(&c->opts_dir, &bch_fs_opts_dir_ktype);
+ kobject_init(&c->time_stats, &bch_fs_time_stats_ktype);
bch_cache_accounting_init(&c->accounting, &c->cl);
closure_init(&c->caching, &c->cl);
- set_closure_fn(&c->caching, __cache_set_stop1, system_wq);
+ set_closure_fn(&c->caching, __bch_fs_stop1, system_wq);
- continue_at_noreturn(&c->cl, __cache_set_stop3, system_wq);
+ continue_at_noreturn(&c->cl, __bch_fs_stop3, system_wq);
return c;
err:
- cache_set_free(c);
+ bch_fs_free(c);
return NULL;
}
-static int bch_cache_set_online(struct cache_set *c)
+static int bch_fs_online(struct cache_set *c)
{
struct cache *ca;
unsigned i;
+ int ret;
lockdep_assert_held(&bch_register_lock);
- if (c->kobj.state_in_sysfs)
+ if (!list_empty(&c->list))
return 0;
- c->minor = idr_alloc(&bch_chardev_minor, c, 0, 0, GFP_KERNEL);
- if (c->minor < 0)
- return c->minor;
+ list_add(&c->list, &bch_fs_list);
- c->chardev = device_create(bch_chardev_class, NULL,
- MKDEV(bch_chardev_major, c->minor), NULL,
- "bcache%u-ctl", c->minor);
- if (IS_ERR(c->chardev))
- return PTR_ERR(c->chardev);
+ ret = bch_fs_chardev_init(c);
+ if (ret)
+ return ret;
if (kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ||
kobject_add(&c->internal, &c->kobj, "internal") ||
@@ -697,16 +696,15 @@ static int bch_cache_set_online(struct cache_set *c)
return -1;
for_each_cache(ca, c, i)
- if (bch_cache_online(ca)) {
+ if (bch_dev_online(ca)) {
percpu_ref_put(&ca->ref);
return -1;
}
- list_add(&c->list, &bch_cache_sets);
return 0;
}
-static const char *run_cache_set(struct cache_set *c)
+static const char *bch_fs_start(struct cache_set *c)
{
const char *err = "cannot allocate memory";
struct bch_sb_field_members *mi;
@@ -718,7 +716,7 @@ static const char *run_cache_set(struct cache_set *c)
int ret = -EINVAL;
lockdep_assert_held(&bch_register_lock);
- BUG_ON(test_bit(CACHE_SET_RUNNING, &c->flags));
+ BUG_ON(test_bit(BCH_FS_RUNNING, &c->flags));
/* We don't want bch_fatal_error() to free underneath us */
closure_get(&c->caching);
@@ -756,12 +754,6 @@ static const char *run_cache_set(struct cache_set *c)
bch_recalc_min_prio(ca, WRITE);
}
- /*
- * If bch_prio_read() fails it'll call cache_set_error and we'll
- * tear everything down right away, but if we perhaps checked
- * sooner we could avoid journal replay.
- */
-
for (id = 0; id < BTREE_ID_NR; id++) {
unsigned level;
struct bkey_i *k;
@@ -801,7 +793,7 @@ static const char *run_cache_set(struct cache_set *c)
err = "error starting allocator thread";
for_each_cache(ca, c, i)
if (ca->mi.state == BCH_MEMBER_STATE_ACTIVE &&
- bch_cache_allocator_start(ca)) {
+ bch_dev_allocator_start(ca)) {
percpu_ref_put(&ca->ref);
goto err;
}
@@ -836,7 +828,7 @@ static const char *run_cache_set(struct cache_set *c)
err = "unable to allocate journal buckets";
for_each_cache(ca, c, i)
- if (bch_cache_journal_alloc(ca)) {
+ if (bch_dev_journal_alloc(ca)) {
percpu_ref_put(&ca->ref);
goto err;
}
@@ -853,7 +845,7 @@ static const char *run_cache_set(struct cache_set *c)
err = "error starting allocator thread";
for_each_cache(ca, c, i)
if (ca->mi.state == BCH_MEMBER_STATE_ACTIVE &&
- bch_cache_allocator_start(ca)) {
+ bch_dev_allocator_start(ca)) {
percpu_ref_put(&ca->ref);
goto err;
}
@@ -886,9 +878,9 @@ static const char *run_cache_set(struct cache_set *c)
}
recovery_done:
if (c->opts.read_only) {
- bch_cache_set_read_only_sync(c);
+ bch_fs_read_only_sync(c);
} else {
- err = __bch_cache_set_read_write(c);
+ err = __bch_fs_read_write(c);
if (err)
goto err;
}
@@ -910,11 +902,11 @@ recovery_done:
mutex_unlock(&c->sb_lock);
err = "dynamic fault";
- if (cache_set_init_fault("run_cache_set"))
+ if (bch_fs_init_fault("fs_start"))
goto err;
err = "error creating kobject";
- if (bch_cache_set_online(c))
+ if (bch_fs_online(c))
goto err;
err = "can't bring up blockdev volumes";
@@ -922,10 +914,10 @@ recovery_done:
goto err;
bch_debug_init_cache_set(c);
- set_bit(CACHE_SET_RUNNING, &c->flags);
+ set_bit(BCH_FS_RUNNING, &c->flags);
bch_attach_backing_devs(c);
- bch_notify_cache_set_read_write(c);
+ bch_notify_fs_read_write(c);
err = NULL;
out:
bch_journal_entries_free(&journal);
@@ -959,12 +951,11 @@ err:
}
BUG_ON(!err);
- set_bit(CACHE_SET_ERROR, &c->flags);
+ set_bit(BCH_FS_ERROR, &c->flags);
goto out;
}
-static const char *can_add_cache(struct bch_sb *sb,
- struct cache_set *c)
+static const char *bch_dev_may_add(struct bch_sb *sb, struct cache_set *c)
{
struct bch_sb_field_members *sb_mi;
@@ -982,14 +973,14 @@ static const char *can_add_cache(struct bch_sb *sb,
return NULL;
}
-static const char *can_attach_cache(struct bch_sb *sb, struct cache_set *c)
+static const char *bch_dev_in_fs(struct bch_sb *sb, struct cache_set *c)
{
struct bch_sb_field_members *mi = bch_sb_get_members(c->disk_sb);
struct bch_sb_field_members *dev_mi = bch_sb_get_members(sb);
uuid_le dev_uuid = dev_mi->members[sb->dev_idx].uuid;
const char *err;
- err = can_add_cache(sb, c);
+ err = bch_dev_may_add(sb, c);
if (err)
return err;
@@ -1010,7 +1001,7 @@ static const char *can_attach_cache(struct bch_sb *sb, struct cache_set *c)
/* Cache device */
-bool bch_cache_read_only(struct cache *ca)
+bool bch_dev_read_only(struct cache *ca)
{
struct cache_set *c = ca->set;
struct bch_sb_field_members *mi;
@@ -1023,9 +1014,9 @@ bool bch_cache_read_only(struct cache *ca)
if (ca->mi.state != BCH_MEMBER_STATE_ACTIVE)
return false;
- if (!bch_cache_may_remove(ca)) {
+ if (!bch_dev_may_remove(ca)) {
bch_err(c, "required member %s going RO, forcing fs RO", buf);
- bch_cache_set_read_only_sync(c);
+ bch_fs_read_only_sync(c);
}
trace_bcache_cache_read_only(ca);
@@ -1037,9 +1028,9 @@ bool bch_cache_read_only(struct cache *ca)
* buckets) and then waits for all existing writes to
* complete.
*/
- bch_cache_allocator_stop(ca);
+ bch_dev_allocator_stop(ca);
- bch_cache_group_remove_cache(&c->journal.devs, ca);
+ bch_dev_group_remove(&c->journal.devs, ca);
/*
* Device data write barrier -- no non-meta-data writes should
@@ -1049,7 +1040,7 @@ bool bch_cache_read_only(struct cache *ca)
trace_bcache_cache_read_only_done(ca);
bch_notice(c, "%s read only", bdevname(ca->disk_sb.bdev, buf));
- bch_notify_cache_read_only(ca);
+ bch_notify_dev_read_only(ca);
mutex_lock(&c->sb_lock);
mi = bch_sb_get_members(c->disk_sb);
@@ -1060,41 +1051,41 @@ bool bch_cache_read_only(struct cache *ca)
return true;
}
-static const char *__bch_cache_read_write(struct cache_set *c, struct cache *ca)
+static const char *__bch_dev_read_write(struct cache_set *c, struct cache *ca)
{
lockdep_assert_held(&bch_register_lock);
if (ca->mi.state == BCH_MEMBER_STATE_ACTIVE)
return NULL;
- if (test_bit(CACHE_DEV_REMOVING, &ca->flags))
+ if (test_bit(BCH_DEV_REMOVING, &ca->flags))
return "removing";
trace_bcache_cache_read_write(ca);
- if (bch_cache_allocator_start(ca))
+ if (bch_dev_allocator_start(ca))
return "error starting allocator thread";
if (bch_moving_gc_thread_start(ca))
return "error starting moving GC thread";
- bch_cache_group_add_cache(&c->journal.devs, ca);
+ bch_dev_group_add(&c->journal.devs, ca);
wake_up_process(c->tiering_read);
- bch_notify_cache_read_write(ca);
+ bch_notify_dev_read_write(ca);
trace_bcache_cache_read_write_done(ca);
return NULL;
}
-const char *bch_cache_read_write(struct cache *ca)
+const char *bch_dev_read_write(struct cache *ca)
{
struct cache_set *c = ca->set;
struct bch_sb_field_members *mi;
const char *err;
- err = __bch_cache_read_write(c, ca);
+ err = __bch_dev_read_write(c, ca);
if (err)
return err;
@@ -1109,11 +1100,11 @@ const char *bch_cache_read_write(struct cache *ca)
}
/*
- * bch_cache_stop has already returned, so we no longer hold the register
+ * bch_dev_stop has already returned, so we no longer hold the register
* lock at the point this is called.
*/
-void bch_cache_release(struct kobject *kobj)
+void bch_dev_release(struct kobject *kobj)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
@@ -1121,7 +1112,7 @@ void bch_cache_release(struct kobject *kobj)
kfree(ca);
}
-static void bch_cache_free_work(struct work_struct *work)
+static void bch_dev_free_work(struct work_struct *work)
{
struct cache *ca = container_of(work, struct cache, free_work);
struct cache_set *c = ca->set;
@@ -1142,7 +1133,7 @@ static void bch_cache_free_work(struct work_struct *work)
bch_free_super(&ca->disk_sb);
/*
- * bch_cache_stop can be called in the middle of initialization
+ * bch_dev_stop can be called in the middle of initialization
* of the struct cache object.
* As such, not all the sub-structures may be initialized.
* However, they were zeroed when the object was allocated.
@@ -1170,23 +1161,23 @@ static void bch_cache_free_work(struct work_struct *work)
kobject_put(&c->kobj);
}
-static void bch_cache_percpu_ref_release(struct percpu_ref *ref)
+static void bch_dev_percpu_ref_release(struct percpu_ref *ref)
{
struct cache *ca = container_of(ref, struct cache, ref);
schedule_work(&ca->free_work);
}
-static void bch_cache_free_rcu(struct rcu_head *rcu)
+static void bch_dev_free_rcu(struct rcu_head *rcu)
{
struct cache *ca = container_of(rcu, struct cache, free_rcu);
/*
* This decrements the ref count to ca, and once the ref count
* is 0 (outstanding bios to the ca also incremented it and
- * decrement it on completion/error), bch_cache_percpu_ref_release
- * is called, and that eventually results in bch_cache_free_work
- * being called, which in turn results in bch_cache_release being
+ * decrement it on completion/error), bch_dev_percpu_ref_release
+ * is called, and that eventually results in bch_dev_free_work
+ * being called, which in turn results in bch_dev_release being
* called.
*
* In particular, these functions won't be called until there are no
@@ -1198,7 +1189,7 @@ static void bch_cache_free_rcu(struct rcu_head *rcu)
percpu_ref_kill(&ca->ref);
}
-static void bch_cache_stop(struct cache *ca)
+static void bch_dev_stop(struct cache *ca)
{
struct cache_set *c = ca->set;
@@ -1209,16 +1200,16 @@ static void bch_cache_stop(struct cache *ca)
rcu_assign_pointer(c->cache[ca->dev_idx], NULL);
}
- call_rcu(&ca->free_rcu, bch_cache_free_rcu);
+ call_rcu(&ca->free_rcu, bch_dev_free_rcu);
}
-static void bch_cache_remove_work(struct work_struct *work)
+static void bch_dev_remove_work(struct work_struct *work)
{
struct cache *ca = container_of(work, struct cache, remove_work);
struct bch_sb_field_members *mi;
struct cache_set *c = ca->set;
char name[BDEVNAME_SIZE];
- bool force = test_bit(CACHE_DEV_FORCE_REMOVE, &ca->flags);
+ bool force = test_bit(BCH_DEV_FORCE_REMOVE, &ca->flags);
unsigned dev_idx = ca->dev_idx;
bdevname(ca->disk_sb.bdev, name);
@@ -1226,8 +1217,8 @@ static void bch_cache_remove_work(struct work_struct *work)
/*
* Device should already be RO, now migrate data off:
*
- * XXX: locking is sketchy, bch_cache_read_write() has to check
- * CACHE_DEV_REMOVING bit
+ * XXX: locking is sketchy, bch_dev_read_write() has to check
+ * BCH_DEV_REMOVING bit
*/
if (!ca->mi.has_data) {
/* Nothing to do: */
@@ -1250,7 +1241,7 @@ static void bch_cache_remove_work(struct work_struct *work)
} else {
bch_err(c, "Remove of %s failed, unable to migrate data off",
name);
- clear_bit(CACHE_DEV_REMOVING, &ca->flags);
+ clear_bit(BCH_DEV_REMOVING, &ca->flags);
return;
}
@@ -1268,7 +1259,7 @@ static void bch_cache_remove_work(struct work_struct *work)
} else {
bch_err(c, "Remove of %s failed, unable to migrate metadata off",
name);
- clear_bit(CACHE_DEV_REMOVING, &ca->flags);
+ clear_bit(BCH_DEV_REMOVING, &ca->flags);
return;
}
@@ -1276,7 +1267,7 @@ static void bch_cache_remove_work(struct work_struct *work)
* Ok, really doing the remove:
* Drop device's prio pointer before removing it from superblock:
*/
- bch_notify_cache_removed(ca);
+ bch_notify_dev_removed(ca);
spin_lock(&c->journal.lock);
c->journal.prio_buckets[dev_idx] = 0;
@@ -1291,7 +1282,7 @@ static void bch_cache_remove_work(struct work_struct *work)
closure_get(&c->cl);
mutex_lock(&bch_register_lock);
- bch_cache_stop(ca);
+ bch_dev_stop(ca);
/*
* RCU barrier between dropping between c->cache and dropping from
@@ -1317,27 +1308,27 @@ static void bch_cache_remove_work(struct work_struct *work)
closure_put(&c->cl);
}
-bool bch_cache_remove(struct cache *ca, bool force)
+bool bch_dev_remove(struct cache *ca, bool force)
{
mutex_lock(&bch_register_lock);
- if (test_bit(CACHE_DEV_REMOVING, &ca->flags))
+ if (test_bit(BCH_DEV_REMOVING, &ca->flags))
return false;
- if (!bch_cache_may_remove(ca)) {
+ if (!bch_dev_may_remove(ca)) {
bch_err(ca->set, "Can't remove last device in tier %u",
ca->mi.tier);
- bch_notify_cache_remove_failed(ca);
+ bch_notify_dev_remove_failed(ca);
return false;
}
/* First, go RO before we try to migrate data off: */
- bch_cache_read_only(ca);
+ bch_dev_read_only(ca);
if (force)
- set_bit(CACHE_DEV_FORCE_REMOVE, &ca->flags);
- set_bit(CACHE_DEV_REMOVING, &ca->flags);
- bch_notify_cache_removing(ca);
+ set_bit(BCH_DEV_FORCE_REMOVE, &ca->flags);
+ set_bit(BCH_DEV_REMOVING, &ca->flags);
+ bch_notify_dev_removing(ca);
mutex_unlock(&bch_register_lock);
@@ -1347,7 +1338,7 @@ bool bch_cache_remove(struct cache *ca, bool force)
return true;
}
-static int bch_cache_online(struct cache *ca)
+static int bch_dev_online(struct cache *ca)
{
char buf[12];
@@ -1365,9 +1356,9 @@ static int bch_cache_online(struct cache *ca)
return 0;
}
-static const char *cache_alloc(struct bcache_superblock *sb,
- struct cache_set *c,
- struct cache **ret)
+static const char *bch_dev_alloc(struct bcache_superblock *sb,
+ struct cache_set *c,
+ struct cache **ret)
{
struct bch_member *member;
size_t reserve_none, movinggc_reserve, free_inc_reserve, total_reserve;
@@ -1379,28 +1370,28 @@ static const char *cache_alloc(struct bcache_superblock *sb,
if (c->sb.nr_devices == 1)
bdevname(sb->bdev, c->name);
- if (cache_set_init_fault("cache_alloc"))
+ if (bch_fs_init_fault("dev_alloc"))
return err;
ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
return err;
- if (percpu_ref_init(&ca->ref, bch_cache_percpu_ref_release,
+ if (percpu_ref_init(&ca->ref, bch_dev_percpu_ref_release,
0, GFP_KERNEL)) {
kfree(ca);
return err;
}
- kobject_init(&ca->kobj, &bch_cache_ktype);
+ kobject_init(&ca->kobj, &bch_dev_ktype);
spin_lock_init(&ca->self.lock);
ca->self.nr_devices = 1;
rcu_assign_pointer(ca->self.d[0].dev, ca);
ca->dev_idx = sb->sb->dev_idx;
- INIT_WORK(&ca->free_work, bch_cache_free_work);
- INIT_WORK(&ca->remove_work, bch_cache_remove_work);
+ INIT_WORK(&ca->free_work, bch_dev_free_work);
+ INIT_WORK(&ca->remove_work, bch_dev_remove_work);
spin_lock_init(&ca->freelist_lock);
spin_lock_init(&ca->prio_buckets_lock);
mutex_init(&ca->heap_lock);
@@ -1413,7 +1404,7 @@ static const char *cache_alloc(struct bcache_superblock *sb,
INIT_WORK(&ca->io_error_work, bch_nonfatal_io_error_work);
err = "dynamic fault";
- if (cache_set_init_fault("cache_alloc"))
+ if (bch_fs_init_fault("dev_alloc"))
goto err;
member = bch_sb_get_members(ca->disk_sb.sb)->members +
@@ -1490,7 +1481,7 @@ static const char *cache_alloc(struct bcache_superblock *sb,
err = "error creating kobject";
if (c->kobj.state_in_sysfs &&
- bch_cache_online(ca))
+ bch_dev_online(ca))
goto err;
if (ret)
@@ -1499,73 +1490,24 @@ static const char *cache_alloc(struct bcache_superblock *sb,
kobject_put(&ca->kobj);
return NULL;
err:
- bch_cache_stop(ca);
+ bch_dev_stop(ca);
return err;
}
-static struct cache_set *cache_set_lookup(uuid_le uuid)
+static struct cache_set *bch_fs_lookup(uuid_le uuid)
{
struct cache_set *c;
lockdep_assert_held(&bch_register_lock);
- list_for_each_entry(c, &bch_cache_sets, list)
+ list_for_each_entry(c, &bch_fs_list, list)
if (!memcmp(&c->disk_sb->uuid, &uuid, sizeof(uuid_le)))
return c;
return NULL;
}
-static const char *register_cache(struct bcache_superblock *sb,
- struct cache_set_opts opts)
-{
- char name[BDEVNAME_SIZE];
- const char *err;
- struct cache_set *c;
- bool allocated_cache_set = false;
-
- err = bch_validate_cache_super(sb);
- if (err)
- return err;
-
- bdevname(sb->bdev, name);
-
- c = cache_set_lookup(sb->sb->uuid);
- if (c) {
- err = can_attach_cache(sb->sb, c);
- if (err)
- return err;
- } else {
- c = bch_cache_set_alloc(sb->sb, opts);
- if (!c)
- return "cannot allocate memory";
-
- allocated_cache_set = true;
- }
-
- err = cache_alloc(sb, c, NULL);
- if (err)
- goto err;
-
- if (cache_set_nr_online_devices(c) == cache_set_nr_devices(c)) {
- err = run_cache_set(c);
- if (err)
- goto err;
- } else {
- err = "error creating kobject";
- if (bch_cache_set_online(c))
- goto err;
- }
-
- bch_info(c, "started");
- return NULL;
-err:
- if (allocated_cache_set)
- bch_cache_set_stop(c);
- return err;
-}
-
-int bch_cache_set_add_cache(struct cache_set *c, const char *path)
+int bch_dev_add(struct cache_set *c, const char *path)
{
struct bcache_superblock sb;
const char *err;
@@ -1588,7 +1530,7 @@ int bch_cache_set_add_cache(struct cache_set *c, const char *path)
mutex_lock(&c->sb_lock);
- err = can_add_cache(sb.sb, c);
+ err = bch_dev_may_add(sb.sb, c);
if (err)
goto err_unlock;
@@ -1605,7 +1547,7 @@ int bch_cache_set_add_cache(struct cache_set *c, const char *path)
if (dynamic_fault("bcache:add:no_slot"))
goto no_slot;
- if (test_bit(CACHE_SET_GC_FAILURE, &c->flags))
+ if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
goto no_slot;
mi = bch_sb_get_members(c->disk_sb);
@@ -1646,7 +1588,7 @@ have_slot:
sb.sb->dev_idx = dev_idx;
sb.sb->nr_devices = nr_devices;
- if (bch_cache_set_mi_update(c, dev_mi->members, nr_devices)) {
+ if (bch_fs_mi_update(c, dev_mi->members, nr_devices)) {
err = "cannot allocate memory";
ret = -ENOMEM;
goto err_unlock;
@@ -1657,20 +1599,20 @@ have_slot:
c->disk_sb->nr_devices = nr_devices;
c->sb.nr_devices = nr_devices;
- err = cache_alloc(&sb, c, &ca);
+ err = bch_dev_alloc(&sb, c, &ca);
if (err)
goto err_unlock;
bch_write_super(c);
err = "journal alloc failed";
- if (bch_cache_journal_alloc(ca))
+ if (bch_dev_journal_alloc(ca))
goto err_put;
- bch_notify_cache_added(ca);
+ bch_notify_dev_added(ca);
if (ca->mi.state == BCH_MEMBER_STATE_ACTIVE) {
- err = __bch_cache_read_write(c, ca);
+ err = __bch_dev_read_write(c, ca);
if (err)
goto err_put;
}
@@ -1680,7 +1622,7 @@ have_slot:
mutex_unlock(&bch_register_lock);
return 0;
err_put:
- bch_cache_stop(ca);
+ bch_dev_stop(ca);
err_unlock:
mutex_unlock(&c->sb_lock);
err_unlock_register:
@@ -1691,9 +1633,8 @@ err_unlock_register:
return ret ?: -EINVAL;
}
-const char *bch_register_cache_set(char * const *devices, unsigned nr_devices,
- struct cache_set_opts opts,
- struct cache_set **ret)
+const char *bch_fs_open(char * const *devices, unsigned nr_devices,
+ struct bch_opts opts, struct cache_set **ret)
{
const char *err;
struct cache_set *c = NULL;
@@ -1736,30 +1677,30 @@ const char *bch_register_cache_set(char * const *devices, unsigned nr_devices,
}
err = "cache set already registered";
- if (cache_set_lookup(sb->sb->uuid))
+ if (bch_fs_lookup(sb->sb->uuid))
goto err_unlock;
err = "cannot allocate memory";
- c = bch_cache_set_alloc(sb[0].sb, opts);
+ c = bch_fs_alloc(sb[0].sb, opts);
if (!c)
goto err_unlock;
for (i = 0; i < nr_devices; i++) {
- err = cache_alloc(&sb[i], c, NULL);
+ err = bch_dev_alloc(&sb[i], c, NULL);
if (err)
goto err_unlock;
}
err = "insufficient devices";
- if (cache_set_nr_online_devices(c) != cache_set_nr_devices(c))
+ if (bch_fs_nr_online_devices(c) != bch_fs_nr_devices(c))
goto err_unlock;
- err = run_cache_set(c);
+ err = bch_fs_start(c);
if (err)
goto err_unlock;
err = "error creating kobject";
- if (bch_cache_set_online(c))
+ if (bch_fs_online(c))
goto err_unlock;
if (ret) {
@@ -1778,7 +1719,7 @@ out:
return err;
err_unlock:
if (c)
- bch_cache_set_stop(c);
+ bch_fs_stop(c);
mutex_unlock(&bch_register_lock);
err:
for (i = 0; i < nr_devices; i++)
@@ -1786,10 +1727,59 @@ err:
goto out;
}
-const char *bch_register_one(const char *path)
+static const char *__bch_fs_open_incremental(struct bcache_superblock *sb,
+ struct bch_opts opts)
+{
+ char name[BDEVNAME_SIZE];
+ const char *err;
+ struct cache_set *c;
+ bool allocated_cache_set = false;
+
+ err = bch_validate_cache_super(sb);
+ if (err)
+ return err;
+
+ bdevname(sb->bdev, name);
+
+ c = bch_fs_lookup(sb->sb->uuid);
+ if (c) {
+ err = bch_dev_in_fs(sb->sb, c);
+ if (err)
+ return err;
+ } else {
+ c = bch_fs_alloc(sb->sb, opts);
+ if (!c)
+ return "cannot allocate memory";
+
+ allocated_cache_set = true;
+ }
+
+ err = bch_dev_alloc(sb, c, NULL);
+ if (err)
+ goto err;
+
+ if (bch_fs_nr_online_devices(c) == bch_fs_nr_devices(c)) {
+ err = bch_fs_start(c);
+ if (err)
+ goto err;
+ } else {
+ err = "error creating kobject";
+ if (bch_fs_online(c))
+ goto err;
+ }
+
+ bch_info(c, "started");
+ return NULL;
+err:
+ if (allocated_cache_set)
+ bch_fs_stop(c);
+ return err;
+}
+
+const char *bch_fs_open_incremental(const char *path)
{
struct bcache_superblock sb;
- struct cache_set_opts opts = cache_set_opts_empty();
+ struct bch_opts opts = bch_opts_empty();
const char *err;
mutex_lock(&bch_register_lock);
@@ -1801,7 +1791,7 @@ const char *bch_register_one(const char *path)
if (__SB_IS_BDEV(le64_to_cpu(sb.sb->version)))
err = bch_backing_dev_register(&sb);
else
- err = register_cache(&sb, opts);
+ err = __bch_fs_open_incremental(&sb, opts);
bch_free_super(&sb);
err:
@@ -1837,7 +1827,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!(path = kstrndup(skip_spaces(buffer), size, GFP_KERNEL)))
goto err;
- err = bch_register_one(strim(path));
+ err = bch_fs_open_incremental(strim(path));
if (err)
goto err;
@@ -1860,14 +1850,14 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
mutex_lock(&bch_register_lock);
- if (!list_empty(&bch_cache_sets))
+ if (!list_empty(&bch_fs_list))
pr_info("Setting all devices read only:");
- list_for_each_entry(c, &bch_cache_sets, list)
- bch_cache_set_read_only(c);
+ list_for_each_entry(c, &bch_fs_list, list)
+ bch_fs_read_only(c);
- list_for_each_entry(c, &bch_cache_sets, list)
- bch_cache_set_read_only_sync(c);
+ list_for_each_entry(c, &bch_fs_list, list)
+ bch_fs_read_only_sync(c);
mutex_unlock(&bch_register_lock);
}
@@ -1894,17 +1884,11 @@ static void bcache_exit(void)
bch_debug_exit();
bch_fs_exit();
bch_blockdev_exit();
+ bch_chardev_exit();
if (bcache_kset)
kset_unregister(bcache_kset);
if (bcache_io_wq)
destroy_workqueue(bcache_io_wq);
- if (!IS_ERR_OR_NULL(bch_chardev_class))
- device_destroy(bch_chardev_class,
- MKDEV(bch_chardev_major, 0));
- if (!IS_ERR_OR_NULL(bch_chardev_class))
- class_destroy(bch_chardev_class);
- if (bch_chardev_major > 0)
- unregister_chrdev(bch_chardev_major, "bcache");
if (!IS_ERR_OR_NULL(bch_sha256))
crypto_free_shash(bch_sha256);
unregister_reboot_notifier(&reboot);
@@ -1928,23 +1912,10 @@ static int __init bcache_init(void)
if (IS_ERR(bch_sha256))
goto err;
- bch_chardev_major = register_chrdev(0, "bcache-ctl", &bch_chardev_fops);
- if (bch_chardev_major < 0)
- goto err;
-
- bch_chardev_class = class_create(THIS_MODULE, "bcache");
- if (IS_ERR(bch_chardev_class))
- goto err;
-
- bch_chardev = device_create(bch_chardev_class, NULL,
- MKDEV(bch_chardev_major, 255),
- NULL, "bcache-ctl");
- if (IS_ERR(bch_chardev))
- goto err;
-
if (!(bcache_io_wq = create_freezable_workqueue("bcache_io")) ||
!(bcache_kset = kset_create_and_add("bcache", NULL, fs_kobj)) ||
sysfs_create_files(&bcache_kset->kobj, files) ||
+ bch_chardev_init() ||
bch_blockdev_init() ||
bch_fs_init() ||
bch_debug_init())
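
Two registration paths survive the rename above: bch_fs_open() takes an explicit device list, while bch_fs_open_incremental() (used by the register_bcache sysfs handler) registers one device at a time and only calls bch_fs_start() once every member is present. A minimal sketch of the first path, assuming the declarations from super.h; the device paths are illustrative only:

	static void example_open_and_stop(void)
	{
		char * const devs[] = { "/dev/sda", "/dev/sdb" };
		struct bch_opts opts = bch_opts_empty();
		struct cache_set *c;
		const char *err;

		err = bch_fs_open(devs, ARRAY_SIZE(devs), opts, &c);
		if (err) {
			pr_err("bch_fs_open error: %s", err);
			return;
		}

		/* ... use the filesystem, then tear it down and wait: */
		bch_fs_stop_sync(c);
	}
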
diff --git a/libbcache/super.h b/libbcache/super.h
index 014d7ae..bcf7d98 100644
--- a/libbcache/super.h
+++ b/libbcache/super.h
@@ -54,7 +54,7 @@ static inline struct cache *bch_get_next_cache(struct cache_set *c,
(ca = bch_get_next_cache(c, &(iter))); \
percpu_ref_put(&ca->ref), (iter)++)
-static inline bool bch_cache_may_remove(struct cache *ca)
+static inline bool bch_dev_may_remove(struct cache *ca)
{
struct cache_set *c = ca->set;
struct cache_group *tier = &c->cache_tiers[ca->mi.tier];
@@ -80,37 +80,37 @@ static inline bool bch_cache_may_remove(struct cache *ca)
rcu_access_pointer(tier->d[0].dev) != ca;
}
-void bch_cache_set_release(struct kobject *);
-void bch_cache_release(struct kobject *);
+void bch_dev_release(struct kobject *);
-void bch_cache_set_unregister(struct cache_set *);
-void bch_cache_set_stop(struct cache_set *);
+bool bch_dev_read_only(struct cache *);
+const char *bch_dev_read_write(struct cache *);
+bool bch_dev_remove(struct cache *, bool force);
+int bch_dev_add(struct cache_set *, const char *);
-const char *bch_register_one(const char *path);
-const char *bch_register_cache_set(char * const *, unsigned,
- struct cache_set_opts,
- struct cache_set **);
+void bch_fs_detach(struct cache_set *);
-bool bch_cache_set_read_only(struct cache_set *);
-bool bch_cache_set_emergency_read_only(struct cache_set *);
-void bch_cache_set_read_only_sync(struct cache_set *);
-const char *bch_cache_set_read_write(struct cache_set *);
+bool bch_fs_read_only(struct cache_set *);
+bool bch_fs_emergency_read_only(struct cache_set *);
+void bch_fs_read_only_sync(struct cache_set *);
+const char *bch_fs_read_write(struct cache_set *);
-bool bch_cache_read_only(struct cache *);
-const char *bch_cache_read_write(struct cache *);
-bool bch_cache_remove(struct cache *, bool force);
-int bch_cache_set_add_cache(struct cache_set *, const char *);
+void bch_fs_release(struct kobject *);
+void bch_fs_stop(struct cache_set *);
+void bch_fs_stop_sync(struct cache_set *);
+
+const char *bch_fs_open(char * const *, unsigned, struct bch_opts,
+ struct cache_set **);
+const char *bch_fs_open_incremental(const char *path);
extern struct mutex bch_register_lock;
-extern struct list_head bch_cache_sets;
-extern struct idr bch_cache_set_minor;
+extern struct list_head bch_fs_list;
extern struct workqueue_struct *bcache_io_wq;
extern struct crypto_shash *bch_sha256;
-extern struct kobj_type bch_cache_set_ktype;
-extern struct kobj_type bch_cache_set_internal_ktype;
-extern struct kobj_type bch_cache_set_time_stats_ktype;
-extern struct kobj_type bch_cache_set_opts_dir_ktype;
-extern struct kobj_type bch_cache_ktype;
+extern struct kobj_type bch_fs_ktype;
+extern struct kobj_type bch_fs_internal_ktype;
+extern struct kobj_type bch_fs_time_stats_ktype;
+extern struct kobj_type bch_fs_opts_dir_ktype;
+extern struct kobj_type bch_dev_ktype;
#endif /* _BCACHE_SUPER_H */
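
Note: the renamed entry points keep the old calling convention: bch_fs_open() takes an array of device paths plus a struct bch_opts and reports errors as a string, handing the cache_set back through the last argument. A minimal caller-side sketch; open_fs() and its parameters are hypothetical stand-ins for the surrounding mount/registration code:

	/* Hypothetical caller of the renamed API, error handling simplified. */
	static int open_fs(char * const *devices, unsigned nr_devices,
			   struct cache_set **ret)
	{
		struct bch_opts opts = bch_opts_empty();
		const char *err;

		err = bch_fs_open(devices, nr_devices, opts, ret);
		if (err) {
			pr_err("bcache: error opening filesystem: %s", err);
			return -EINVAL;
		}

		return 0;
	}
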
diff --git a/libbcache/sysfs.c b/libbcache/sysfs.c
index 57b7dd9..9f45a6b 100644
--- a/libbcache/sysfs.c
+++ b/libbcache/sysfs.c
@@ -142,10 +142,9 @@ read_attribute(tier);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
-#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _perm) \
+#define BCH_OPT(_name, _mode, ...) \
static struct attribute sysfs_opt_##_name = { \
- .name = #_name, \
- .mode = S_IRUGO|(_perm ? S_IWUSR : 0) \
+ .name = #_name, .mode = _mode, \
};
BCH_VISIBLE_OPTS()
@@ -298,7 +297,7 @@ STORE(__cached_dev)
if (uuid_parse(buf, &dc->disk_sb.sb->user_uuid))
return -EINVAL;
- list_for_each_entry(c, &bch_cache_sets, list) {
+ list_for_each_entry(c, &bch_fs_list, list) {
v = bch_cached_dev_attach(dc, c);
if (!v)
return size;
@@ -515,7 +514,7 @@ lock_root:
return (bytes * 100) / btree_bytes(c);
}
-static size_t bch_cache_size(struct cache_set *c)
+static size_t bch_btree_cache_size(struct cache_set *c)
{
size_t ret = 0;
struct btree *b;
@@ -528,7 +527,7 @@ static size_t bch_cache_size(struct cache_set *c)
return ret;
}
-static unsigned bch_cache_available_percent(struct cache_set *c)
+static unsigned bch_fs_available_percent(struct cache_set *c)
{
return div64_u64((u64) sectors_available(c) * 100,
c->capacity ?: 1);
@@ -549,7 +548,7 @@ static unsigned bch_average_key_size(struct cache_set *c)
}
#endif
-static ssize_t show_cache_set_alloc_debug(struct cache_set *c, char *buf)
+static ssize_t show_fs_alloc_debug(struct cache_set *c, char *buf)
{
struct bucket_stats_cache_set stats = bch_bucket_stats_read_cache_set(c);
@@ -624,7 +623,7 @@ static ssize_t bch_compression_stats(struct cache_set *c, char *buf)
compressed_sectors_uncompressed << 9);
}
-SHOW(bch_cache_set)
+SHOW(bch_fs)
{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
@@ -639,8 +638,8 @@ SHOW(bch_cache_set)
sysfs_hprint(btree_node_size, c->sb.btree_node_size << 9);
sysfs_print(btree_node_size_bytes, c->sb.btree_node_size << 9);
- sysfs_hprint(btree_cache_size, bch_cache_size(c));
- sysfs_print(cache_available_percent, bch_cache_available_percent(c));
+ sysfs_hprint(btree_cache_size, bch_btree_cache_size(c));
+ sysfs_print(cache_available_percent, bch_fs_available_percent(c));
sysfs_print(btree_gc_running, c->gc_pos.phase != GC_PHASE_DONE);
@@ -695,13 +694,13 @@ SHOW(bch_cache_set)
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
- if (!test_bit(CACHE_SET_RUNNING, &c->flags))
+ if (!test_bit(BCH_FS_RUNNING, &c->flags))
return -EPERM;
if (attr == &sysfs_bset_tree_stats)
return bch_bset_print_stats(c, buf);
if (attr == &sysfs_alloc_debug)
- return show_cache_set_alloc_debug(c, buf);
+ return show_fs_alloc_debug(c, buf);
sysfs_print(tree_depth, c->btree_roots[BTREE_ID_EXTENTS].b->level);
sysfs_print(root_usage_percent, bch_root_usage(c));
@@ -714,17 +713,17 @@ SHOW(bch_cache_set)
return 0;
}
-STORE(__bch_cache_set)
+STORE(__bch_fs)
{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
if (attr == &sysfs_unregister) {
- bch_cache_set_unregister(c);
+ bch_fs_detach(c);
return size;
}
if (attr == &sysfs_stop) {
- bch_cache_set_stop(c);
+ bch_fs_stop(c);
return size;
}
@@ -800,10 +799,10 @@ STORE(__bch_cache_set)
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
- if (!test_bit(CACHE_SET_RUNNING, &c->flags))
+ if (!test_bit(BCH_FS_RUNNING, &c->flags))
return -EPERM;
- if (test_bit(CACHE_SET_STOPPING, &c->flags))
+ if (test_bit(BCH_FS_STOPPING, &c->flags))
return -EINTR;
if (attr == &sysfs_blockdev_volume_create) {
@@ -833,17 +832,17 @@ STORE(__bch_cache_set)
return size;
}
-STORE(bch_cache_set)
+STORE(bch_fs)
{
struct cache_set *c = container_of(kobj, struct cache_set, kobj);
mutex_lock(&bch_register_lock);
- size = __bch_cache_set_store(kobj, attr, buf, size);
+ size = __bch_fs_store(kobj, attr, buf, size);
mutex_unlock(&bch_register_lock);
if (attr == &sysfs_add_device) {
char *path = kstrdup(buf, GFP_KERNEL);
- int r = bch_cache_set_add_cache(c, strim(path));
+ int r = bch_dev_add(c, strim(path));
kfree(path);
if (r)
@@ -853,7 +852,7 @@ STORE(bch_cache_set)
return size;
}
-static struct attribute *bch_cache_set_files[] = {
+static struct attribute *bch_fs_files[] = {
&sysfs_unregister,
&sysfs_stop,
&sysfs_journal_write_delay_ms,
@@ -890,27 +889,27 @@ static struct attribute *bch_cache_set_files[] = {
&sysfs_journal_flush,
NULL
};
-KTYPE(bch_cache_set);
+KTYPE(bch_fs);
/* internal dir - just a wrapper */
-SHOW(bch_cache_set_internal)
+SHOW(bch_fs_internal)
{
struct cache_set *c = container_of(kobj, struct cache_set, internal);
- return bch_cache_set_show(&c->kobj, attr, buf);
+ return bch_fs_show(&c->kobj, attr, buf);
}
-STORE(bch_cache_set_internal)
+STORE(bch_fs_internal)
{
struct cache_set *c = container_of(kobj, struct cache_set, internal);
- return bch_cache_set_store(&c->kobj, attr, buf, size);
+ return bch_fs_store(&c->kobj, attr, buf, size);
}
-static void bch_cache_set_internal_release(struct kobject *k)
+static void bch_fs_internal_release(struct kobject *k)
{
}
-static struct attribute *bch_cache_set_internal_files[] = {
+static struct attribute *bch_fs_internal_files[] = {
&sysfs_journal_debug,
&sysfs_alloc_debug,
@@ -941,73 +940,58 @@ static struct attribute *bch_cache_set_internal_files[] = {
NULL
};
-KTYPE(bch_cache_set_internal);
+KTYPE(bch_fs_internal);
/* options */
-SHOW(bch_cache_set_opts_dir)
+SHOW(bch_fs_opts_dir)
{
struct cache_set *c = container_of(kobj, struct cache_set, opts_dir);
-#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _perm) \
- if (attr == &sysfs_opt_##_name) \
- return _choices == bch_bool_opt || _choices == bch_uint_opt\
- ? snprintf(buf, PAGE_SIZE, "%i\n", c->opts._name)\
- : bch_snprint_string_list(buf, PAGE_SIZE, \
- _choices, c->opts._name);\
-
- BCH_VISIBLE_OPTS()
-#undef BCH_OPT
-
- return 0;
+ return bch_opt_show(&c->opts, attr->name, buf, PAGE_SIZE);
}
-STORE(bch_cache_set_opts_dir)
+STORE(bch_fs_opts_dir)
{
struct cache_set *c = container_of(kobj, struct cache_set, opts_dir);
+ const struct bch_option *opt;
+ enum bch_opt_id id;
+ u64 v;
+
+ id = bch_parse_sysfs_opt(attr->name, buf, &v);
+ if (id < 0)
+ return id;
+
+ opt = &bch_opt_table[id];
+
+ mutex_lock(&c->sb_lock);
-#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _perm) \
- if (attr == &sysfs_opt_##_name) { \
- ssize_t v = (_choices == bch_bool_opt || \
- _choices == bch_uint_opt) \
- ? strtoul_restrict_or_return(buf, _min, _max - 1)\
- : bch_read_string_list(buf, _choices); \
- \
- if (v < 0) \
- return v; \
- \
- mutex_lock(&c->sb_lock); \
- if (attr == &sysfs_opt_compression) { \
- int ret = bch_check_set_has_compressed_data(c, v);\
- if (ret) { \
- mutex_unlock(&c->sb_lock); \
- return ret; \
- } \
- } \
- \
- if (_sb_opt##_BITS && v != _sb_opt(c->disk_sb)) { \
- SET_##_sb_opt(c->disk_sb, v); \
- bch_write_super(c); \
- } \
- \
- c->opts._name = v; \
- mutex_unlock(&c->sb_lock); \
- \
- return size; \
+ if (id == Opt_compression) {
+ int ret = bch_check_set_has_compressed_data(c, v);
+ if (ret) {
+ mutex_unlock(&c->sb_lock);
+ return ret;
+ }
}
- BCH_VISIBLE_OPTS()
-#undef BCH_OPT
+ if (opt->set_sb != SET_NO_SB_OPT) {
+ opt->set_sb(c->disk_sb, v);
+ bch_write_super(c);
+ }
+
+ bch_opt_set(&c->opts, id, v);
+
+ mutex_unlock(&c->sb_lock);
return size;
}
-static void bch_cache_set_opts_dir_release(struct kobject *k)
+static void bch_fs_opts_dir_release(struct kobject *k)
{
}
-static struct attribute *bch_cache_set_opts_dir_files[] = {
-#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _perm) \
+static struct attribute *bch_fs_opts_dir_files[] = {
+#define BCH_OPT(_name, ...) \
&sysfs_opt_##_name,
BCH_VISIBLE_OPTS()
@@ -1015,11 +999,11 @@ static struct attribute *bch_cache_set_opts_dir_files[] = {
NULL
};
-KTYPE(bch_cache_set_opts_dir);
+KTYPE(bch_fs_opts_dir);
/* time stats */
-SHOW(bch_cache_set_time_stats)
+SHOW(bch_fs_time_stats)
{
struct cache_set *c = container_of(kobj, struct cache_set, time_stats);
@@ -1032,7 +1016,7 @@ SHOW(bch_cache_set_time_stats)
return 0;
}
-STORE(bch_cache_set_time_stats)
+STORE(bch_fs_time_stats)
{
struct cache_set *c = container_of(kobj, struct cache_set, time_stats);
@@ -1044,11 +1028,11 @@ STORE(bch_cache_set_time_stats)
return size;
}
-static void bch_cache_set_time_stats_release(struct kobject *k)
+static void bch_fs_time_stats_release(struct kobject *k)
{
}
-static struct attribute *bch_cache_set_time_stats_files[] = {
+static struct attribute *bch_fs_time_stats_files[] = {
#define BCH_TIME_STAT(name, frequency_units, duration_units) \
sysfs_time_stats_attribute_list(name, frequency_units, duration_units)
BCH_TIME_STATS()
@@ -1056,7 +1040,7 @@ static struct attribute *bch_cache_set_time_stats_files[] = {
NULL
};
-KTYPE(bch_cache_set_time_stats);
+KTYPE(bch_fs_time_stats);
typedef unsigned (bucket_map_fn)(struct cache *, struct bucket *, void *);
@@ -1141,7 +1125,7 @@ static ssize_t show_reserve_stats(struct cache *ca, char *buf)
return ret;
}
-static ssize_t show_cache_alloc_debug(struct cache *ca, char *buf)
+static ssize_t show_dev_alloc_debug(struct cache *ca, char *buf)
{
struct cache_set *c = ca->set;
struct bucket_stats_cache stats = bch_bucket_stats_read_cache(ca);
@@ -1184,7 +1168,7 @@ static u64 sectors_written(struct cache *ca)
return ret;
}
-SHOW(bch_cache)
+SHOW(bch_dev)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
struct cache_set *c = ca->set;
@@ -1233,7 +1217,7 @@ SHOW(bch_cache)
if (attr == &sysfs_state_rw)
return bch_snprint_string_list(buf, PAGE_SIZE,
- bch_cache_state,
+ bch_dev_state,
ca->mi.state);
if (attr == &sysfs_read_priority_stats)
@@ -1247,12 +1231,12 @@ SHOW(bch_cache)
if (attr == &sysfs_reserve_stats)
return show_reserve_stats(ca, buf);
if (attr == &sysfs_alloc_debug)
- return show_cache_alloc_debug(ca, buf);
+ return show_dev_alloc_debug(ca, buf);
return 0;
}
-STORE(__bch_cache)
+STORE(__bch_dev)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
struct cache_set *c = ca->set;
@@ -1292,7 +1276,7 @@ STORE(__bch_cache)
if (attr == &sysfs_state_rw) {
char name[BDEVNAME_SIZE];
const char *err = NULL;
- ssize_t v = bch_read_string_list(buf, bch_cache_state);
+ ssize_t v = bch_read_string_list(buf, bch_dev_state);
if (v < 0)
return v;
@@ -1302,10 +1286,10 @@ STORE(__bch_cache)
switch (v) {
case BCH_MEMBER_STATE_ACTIVE:
- err = bch_cache_read_write(ca);
+ err = bch_dev_read_write(ca);
break;
case BCH_MEMBER_STATE_RO:
- bch_cache_read_only(ca);
+ bch_dev_read_only(ca);
break;
case BCH_MEMBER_STATE_FAILED:
case BCH_MEMBER_STATE_SPARE:
@@ -1314,14 +1298,14 @@ STORE(__bch_cache)
*/
pr_err("can't set %s %s: not supported",
bdevname(ca->disk_sb.bdev, name),
- bch_cache_state[v]);
+ bch_dev_state[v]);
return -EINVAL;
}
if (err) {
pr_err("can't set %s %s: %s",
bdevname(ca->disk_sb.bdev, name),
- bch_cache_state[v], err);
+ bch_dev_state[v], err);
return -EINVAL;
}
}
@@ -1332,7 +1316,7 @@ STORE(__bch_cache)
if (!strncmp(buf, "force", 5) &&
(buf[5] == '\0' || buf[5] == '\n'))
force = true;
- bch_cache_remove(ca, force);
+ bch_dev_remove(ca, force);
}
if (attr == &sysfs_clear_stats) {
@@ -1349,9 +1333,9 @@ STORE(__bch_cache)
return size;
}
-STORE_LOCKED(bch_cache)
+STORE_LOCKED(bch_dev)
-static struct attribute *bch_cache_files[] = {
+static struct attribute *bch_dev_files[] = {
&sysfs_uuid,
&sysfs_unregister,
&sysfs_bucket_size,
@@ -1391,4 +1375,4 @@ static struct attribute *bch_cache_files[] = {
sysfs_pd_controller_files(copy_gc),
NULL
};
-KTYPE(bch_cache);
+KTYPE(bch_dev);
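
Note: the reworked BCH_OPT(_name, _mode, ...) macro above only declares one sysfs attribute per visible option; parsing, printing and superblock persistence are now delegated to bch_parse_sysfs_opt(), bch_opt_show(), bch_opt_set() and the bch_opt_table[] entries' set_sb hooks, all declared in libbcache/opts.[ch] and not shown in this hunk. Per the macro definition above, an entry in BCH_VISIBLE_OPTS() expands to a plain attribute; the option name and mode below are illustrative only:

	/* Illustrative expansion of BCH_OPT(verbose_recovery, S_IRUGO|S_IWUSR, ...) */
	static struct attribute sysfs_opt_verbose_recovery = {
		.name = "verbose_recovery", .mode = S_IRUGO|S_IWUSR,
	};
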
diff --git a/libbcache/writeback.c b/libbcache/writeback.c
index 600bfbf..b19a83c 100644
--- a/libbcache/writeback.c
+++ b/libbcache/writeback.c
@@ -169,7 +169,7 @@ static void read_dirty_endio(struct bio *bio)
{
struct dirty_io *io = container_of(bio, struct dirty_io, bio);
- cache_nonfatal_io_err_on(bio->bi_error, io->ca, "writeback read");
+ bch_dev_nonfatal_io_err_on(bio->bi_error, io->ca, "writeback read");
bch_account_io_completion(io->ca);
diff --git a/libbcache/writeback.h b/libbcache/writeback.h
index 77e5965..250b709 100644
--- a/libbcache/writeback.h
+++ b/libbcache/writeback.h
@@ -87,6 +87,8 @@ static inline void bch_writeback_add(struct cached_dev *dc)
}
}
+#ifndef NO_BCACHE_WRITEBACK
+
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, u64, int);
void bch_writeback_recalc_oldest_gens(struct cache_set *);
@@ -97,4 +99,24 @@ void bch_cached_dev_writeback_free(struct cached_dev *);
int bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);
+#else
+
+static inline void bcache_dev_sectors_dirty_add(struct cache_set *c,
+ unsigned i, u64 o, int n) {}
+static inline void bch_writeback_recalc_oldest_gens(struct cache_set *c) {}
+static inline void bch_sectors_dirty_init(struct cached_dev *dc,
+ struct cache_set *c) {}
+static inline void bch_cached_dev_writeback_stop(struct cached_dev *dc) {}
+static inline void bch_cached_dev_writeback_free(struct cached_dev *dc) {}
+static inline int bch_cached_dev_writeback_init(struct cached_dev *dc)
+{
+ return 0;
+}
+static inline int bch_cached_dev_writeback_start(struct cached_dev *dc)
+{
+ return 0;
+}
+
+#endif
+
#endif
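
Note: the new NO_BCACHE_WRITEBACK branch stubs out the writeback hooks so callers never need their own #ifdefs; with writeback compiled out, the init/start functions simply report success. A hypothetical caller, just to show both configurations compile identically:

	/*
	 * Hypothetical caller: builds the same whether or not
	 * NO_BCACHE_WRITEBACK is defined, since the stubs above return 0.
	 */
	static int cached_dev_setup_writeback(struct cached_dev *dc)
	{
		int ret = bch_cached_dev_writeback_init(dc);

		if (ret)
			return ret;

		return bch_cached_dev_writeback_start(dc);
	}
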
diff --git a/libbcache/xattr.c b/libbcache/xattr.c
index 56a8e8f..7855236 100644
--- a/libbcache/xattr.c
+++ b/libbcache/xattr.c
@@ -165,18 +165,18 @@ int bch_xattr_get(struct cache_set *c, struct inode *inode,
return ret;
}
-int bch_xattr_set(struct cache_set *c, struct inode *inode,
+int __bch_xattr_set(struct cache_set *c, u64 inum,
+ const struct bch_hash_info *hash_info,
const char *name, const void *value, size_t size,
- int flags, int type)
+ int flags, int type, u64 *journal_seq)
{
- struct bch_inode_info *ei = to_bch_ei(inode);
struct xattr_search_key search = X_SEARCH(type, name, strlen(name));
int ret;
if (!value) {
- ret = bch_hash_delete(xattr_hash_desc, &ei->str_hash,
- c, ei->vfs_inode.i_ino,
- &ei->journal_seq, &search);
+ ret = bch_hash_delete(xattr_hash_desc, hash_info,
+ c, inum,
+ journal_seq, &search);
} else {
struct bkey_i_xattr *xattr;
unsigned u64s = BKEY_U64s +
@@ -199,8 +199,8 @@ int bch_xattr_set(struct cache_set *c, struct inode *inode,
memcpy(xattr->v.x_name, search.name.name, search.name.len);
memcpy(xattr_val(&xattr->v), value, size);
- ret = bch_hash_set(xattr_hash_desc, &ei->str_hash, c,
- ei->vfs_inode.i_ino, &ei->journal_seq,
+ ret = bch_hash_set(xattr_hash_desc, hash_info, c,
+ inum, journal_seq,
&xattr->k_i,
(flags & XATTR_CREATE ? BCH_HASH_SET_MUST_CREATE : 0)|
(flags & XATTR_REPLACE ? BCH_HASH_SET_MUST_REPLACE : 0));
@@ -213,6 +213,17 @@ int bch_xattr_set(struct cache_set *c, struct inode *inode,
return ret;
}
+int bch_xattr_set(struct cache_set *c, struct inode *inode,
+ const char *name, const void *value, size_t size,
+ int flags, int type)
+{
+ struct bch_inode_info *ei = to_bch_ei(inode);
+
+ return __bch_xattr_set(c, inode->i_ino, &ei->str_hash,
+ name, value, size, flags, type,
+ &ei->journal_seq);
+}
+
static const struct xattr_handler *bch_xattr_type_to_handler(unsigned);
static size_t bch_xattr_emit(struct dentry *dentry,
diff --git a/libbcache/xattr.h b/libbcache/xattr.h
index 54eb920..429031a 100644
--- a/libbcache/xattr.h
+++ b/libbcache/xattr.h
@@ -5,9 +5,12 @@ extern const struct bkey_ops bch_bkey_xattr_ops;
struct dentry;
struct xattr_handler;
+struct bch_hash_info;
int bch_xattr_get(struct cache_set *, struct inode *,
const char *, void *, size_t, int);
+int __bch_xattr_set(struct cache_set *, u64, const struct bch_hash_info *,
+ const char *, const void *, size_t, int, int, u64 *);
int bch_xattr_set(struct cache_set *, struct inode *,
const char *, const void *, size_t, int, int);
ssize_t bch_xattr_list(struct dentry *, char *, size_t);
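
Note: the split gives xattrs an inode-number based entry point: bch_xattr_set() is now a thin VFS wrapper (see xattr.c above), while __bch_xattr_set() takes the inode number, hash info and journal-seq pointer explicitly, presumably so callers that do not hold a struct inode can still update xattrs. A hypothetical caller of the low-level entry point, showing only the calling convention:

	/*
	 * Hypothetical non-VFS caller; flags and xattr type are passed
	 * straight through, journal_seq tracks the update for a later flush.
	 */
	static int set_xattr_by_inum(struct cache_set *c, u64 inum,
				     const struct bch_hash_info *hash_info,
				     int type, const char *name,
				     const void *val, size_t len)
	{
		u64 journal_seq = 0;

		return __bch_xattr_set(c, inum, hash_info, name, val, len,
				       0 /* flags */, type, &journal_seq);
	}
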