Diffstat (limited to 'fs/bcachefs/super.c')
-rw-r--r--	fs/bcachefs/super.c	145
1 file changed, 66 insertions, 79 deletions
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 30be083b09bf..8673e9744ce1 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -149,44 +149,6 @@ struct bch_fs *bch2_uuid_to_fs(uuid_le uuid)
return c;
}
-int bch2_congested(void *data, int bdi_bits)
-{
- struct bch_fs *c = data;
- struct backing_dev_info *bdi;
- struct bch_dev *ca;
- unsigned i;
- int ret = 0;
-
- rcu_read_lock();
- if (bdi_bits & (1 << WB_sync_congested)) {
- /* Reads - check all devices: */
- for_each_readable_member(ca, c, i) {
- bdi = ca->disk_sb.bdev->bd_bdi;
-
- if (bdi_congested(bdi, bdi_bits)) {
- ret = 1;
- break;
- }
- }
- } else {
- const struct bch_devs_mask *devs =
- bch2_target_to_mask(c, c->opts.foreground_target) ?:
- &c->rw_devs[BCH_DATA_user];
-
- for_each_member_device_rcu(ca, c, i, devs) {
- bdi = ca->disk_sb.bdev->bd_bdi;
-
- if (bdi_congested(bdi, bdi_bits)) {
- ret = 1;
- break;
- }
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
/* Filesystem RO/RW: */
/*
@@ -207,14 +169,15 @@ int bch2_congested(void *data, int bdi_bits)
static void __bch2_fs_read_only(struct bch_fs *c)
{
struct bch_dev *ca;
- bool wrote = false;
unsigned i, clean_passes = 0;
- int ret;
bch2_rebalance_stop(c);
bch2_copygc_stop(c);
bch2_gc_thread_stop(c);
+ bch2_io_timer_del(&c->io_clock[READ], &c->bucket_clock[READ].rescale);
+ bch2_io_timer_del(&c->io_clock[WRITE], &c->bucket_clock[WRITE].rescale);
+
/*
* Flush journal before stopping allocators, because flushing journal
* blacklist entries involves allocating new btree nodes:
@@ -228,20 +191,6 @@ static void __bch2_fs_read_only(struct bch_fs *c)
if (!test_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags))
goto nowrote_alloc;
- bch_verbose(c, "writing alloc info");
- /*
- * This should normally just be writing the bucket read/write clocks:
- */
- ret = bch2_stripes_write(c, BTREE_INSERT_NOCHECK_RW, &wrote) ?:
- bch2_alloc_write(c, BTREE_INSERT_NOCHECK_RW, &wrote);
- bch_verbose(c, "writing alloc info complete");
-
- if (ret && !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
- bch2_fs_inconsistent(c, "error writing out alloc info %i", ret);
-
- if (ret)
- goto nowrote_alloc;
-
bch_verbose(c, "flushing journal and stopping allocators");
bch2_journal_flush_all_pins(&c->journal);
@@ -454,6 +403,9 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
+ bch2_io_timer_add(&c->io_clock[READ], &c->bucket_clock[READ].rescale);
+ bch2_io_timer_add(&c->io_clock[WRITE], &c->bucket_clock[WRITE].rescale);
+
for_each_rw_member(ca, c, i) {
ret = bch2_dev_allocator_start(ca);
if (ret) {
@@ -496,9 +448,10 @@ int bch2_fs_read_write_early(struct bch_fs *c)
/* Filesystem startup/shutdown: */
-static void bch2_fs_free(struct bch_fs *c)
+static void __bch2_fs_free(struct bch_fs *c)
{
unsigned i;
+ int cpu;
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_exit(&c->times[i]);
@@ -523,6 +476,12 @@ static void bch2_fs_free(struct bch_fs *c)
free_percpu(c->usage[1]);
free_percpu(c->usage[0]);
kfree(c->usage_base);
+
+ if (c->btree_iters_bufs)
+ for_each_possible_cpu(cpu)
+ kfree(per_cpu_ptr(c->btree_iters_bufs, cpu)->iter);
+
+ free_percpu(c->btree_iters_bufs);
free_percpu(c->pcpu);
mempool_exit(&c->large_bkey_pool);
mempool_exit(&c->btree_bounce_pool);
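
For reference, a minimal sketch of the per-cpu free pattern used in the hunk above. The stand-in type iter_buf_sketch and the helper name are assumptions (not the real struct btree_iter_buf); only the pattern itself — freeing each CPU's kmalloc'd iter pointer before releasing the per-cpu area — comes from the patch:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct iter_buf_sketch {
	void	*iter;	/* assumed layout: one kmalloc'd buffer per CPU slot */
};

static void free_iter_bufs_sketch(struct iter_buf_sketch __percpu *bufs)
{
	int cpu;

	/* free each CPU's privately allocated buffer first */
	if (bufs)
		for_each_possible_cpu(cpu)
			kfree(per_cpu_ptr(bufs, cpu)->iter);

	/* then release the per-cpu area itself; free_percpu(NULL) is a no-op */
	free_percpu(bufs);
}
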
@@ -533,6 +492,7 @@ static void bch2_fs_free(struct bch_fs *c)
kfree(c->replicas_gc.entries);
kfree(rcu_dereference_protected(c->disk_groups, 1));
kfree(c->journal_seq_blacklist_table);
+ kfree(c->unused_inode_hints);
free_heap(&c->copygc_heap);
if (c->journal_reclaim_wq)
@@ -552,10 +512,10 @@ static void bch2_fs_release(struct kobject *kobj)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
- bch2_fs_free(c);
+ __bch2_fs_free(c);
}
-void bch2_fs_stop(struct bch_fs *c)
+void __bch2_fs_stop(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned i;
@@ -586,13 +546,6 @@ void bch2_fs_stop(struct bch_fs *c)
kobject_put(&c->opts_dir);
kobject_put(&c->internal);
- mutex_lock(&bch_fs_list_lock);
- list_del(&c->list);
- mutex_unlock(&bch_fs_list_lock);
-
- closure_sync(&c->cl);
- closure_debug_destroy(&c->cl);
-
/* btree prefetch might have kicked off reads in the background: */
bch2_btree_flush_all_reads(c);
@@ -605,6 +558,22 @@ void bch2_fs_stop(struct bch_fs *c)
for (i = 0; i < c->sb.nr_devices; i++)
if (c->devs[i])
+ bch2_free_super(&c->devs[i]->disk_sb);
+}
+
+void bch2_fs_free(struct bch_fs *c)
+{
+ unsigned i;
+
+ mutex_lock(&bch_fs_list_lock);
+ list_del(&c->list);
+ mutex_unlock(&bch_fs_list_lock);
+
+ closure_sync(&c->cl);
+ closure_debug_destroy(&c->cl);
+
+ for (i = 0; i < c->sb.nr_devices; i++)
+ if (c->devs[i])
bch2_dev_free(rcu_dereference_protected(c->devs[i], 1));
bch_verbose(c, "shutdown complete");
@@ -612,6 +581,12 @@ void bch2_fs_stop(struct bch_fs *c)
kobject_put(&c->kobj);
}
+void bch2_fs_stop(struct bch_fs *c)
+{
+ __bch2_fs_stop(c);
+ bch2_fs_free(c);
+}
+
static const char *bch2_fs_online(struct bch_fs *c)
{
struct bch_dev *ca;
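
For reference, a hedged sketch of how a caller might use the split teardown introduced above. The wrapper function below is hypothetical; only __bch2_fs_stop() and bch2_fs_free() come from this patch:

static void example_shutdown(struct bch_fs *c)
{
	/* quiesce: stop threads, flush reads, release the devices' superblocks */
	__bch2_fs_stop(c);

	/* struct bch_fs is still allocated here and may be inspected */

	/* drop from bch_fs_list, tear down the closure, free devices, put the kobject */
	bch2_fs_free(c);
}
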
@@ -669,6 +644,14 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
__module_get(THIS_MODULE);
+ closure_init(&c->cl, NULL);
+
+ c->kobj.kset = bcachefs_kset;
+ kobject_init(&c->kobj, &bch2_fs_ktype);
+ kobject_init(&c->internal, &bch2_fs_internal_ktype);
+ kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
+ kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
+
c->minor = -1;
c->disk_sb.fs_sb = true;
@@ -761,11 +744,13 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
(btree_blocks(c) + 1) * 2 *
sizeof(struct sort_iter_set);
+ c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));
+
if (!(c->wq = alloc_workqueue("bcachefs",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
- !(c->copygc_wq = alloc_workqueue("bcache_copygc",
+ !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
- !(c->journal_reclaim_wq = alloc_workqueue("bcache_journal",
+ !(c->journal_reclaim_wq = alloc_workqueue("bcachefs_journal_reclaim",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
percpu_ref_init(&c->writes, bch2_writes_disabled,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
@@ -775,9 +760,12 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
offsetof(struct btree_write_bio, wbio.bio)),
BIOSET_NEED_BVECS) ||
!(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
+ !(c->btree_iters_bufs = alloc_percpu(struct btree_iter_buf)) ||
mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
btree_bytes(c)) ||
mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
+ !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
+ sizeof(u64), GFP_KERNEL)) ||
bch2_io_clock_init(&c->io_clock[READ]) ||
bch2_io_clock_init(&c->io_clock[WRITE]) ||
bch2_fs_journal_init(&c->journal) ||
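
For reference, a minimal sketch of the inode-hint sizing arithmetic from the hunk above; the helper name and the example CPU count are assumptions for illustration:

#include <linux/cpumask.h>
#include <linux/log2.h>
#include <linux/slab.h>

static u64 *alloc_inode_hints_sketch(unsigned *shard_bits)
{
	/* e.g. 6 possible CPUs: roundup_pow_of_two(6) == 8, ilog2(8) == 3 */
	*shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));

	/* one u64 hint slot per shard: 1U << 3 == 8 slots in the example */
	return kcalloc(1U << *shard_bits, sizeof(u64), GFP_KERNEL);
}
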
@@ -799,18 +787,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
bch2_dev_alloc(c, i))
goto err;
- /*
- * Now that all allocations have succeeded, init various refcounty
- * things that let us shutdown:
- */
- closure_init(&c->cl, NULL);
-
- c->kobj.kset = bcachefs_kset;
- kobject_init(&c->kobj, &bch2_fs_ktype);
- kobject_init(&c->internal, &bch2_fs_internal_ktype);
- kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
- kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
-
mutex_lock(&bch_fs_list_lock);
err = bch2_fs_online(c);
mutex_unlock(&bch_fs_list_lock);
@@ -906,6 +882,13 @@ int bch2_fs_start(struct bch_fs *c)
set_bit(BCH_FS_STARTED, &c->flags);
+ /*
+ * Allocator threads don't start filling copygc reserve until after we
+ * set BCH_FS_STARTED - wake them now:
+ */
+ for_each_online_member(ca, c, i)
+ bch2_wake_allocator(ca);
+
if (c->opts.read_only || c->opts.nochanges) {
bch2_fs_read_only(c);
} else {
@@ -1683,6 +1666,11 @@ have_slot:
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
+ err = "alloc write failed";
+ ret = bch2_dev_alloc_write(c, ca, 0);
+ if (ret)
+ goto err;
+
if (ca->mi.state == BCH_MEMBER_STATE_RW) {
err = __bch2_dev_read_write(c, ca);
if (err)
@@ -2037,7 +2025,6 @@ static void bcachefs_exit(void)
static int __init bcachefs_init(void)
{
bch2_bkey_pack_test();
- bch2_inode_pack_test();
if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
bch2_chardev_init() ||