summaryrefslogtreecommitdiff
path: root/libbcachefs/super.c
diff options
context:
space:
mode:
Diffstat (limited to 'libbcachefs/super.c')
-rw-r--r-- libbcachefs/super.c 132
1 files changed, 96 insertions, 36 deletions
diff --git a/libbcachefs/super.c b/libbcachefs/super.c
index 1b389172..3bcc3240 100644
--- a/libbcachefs/super.c
+++ b/libbcachefs/super.c
@@ -289,8 +289,10 @@ static void bch2_writes_disabled(struct percpu_ref *writes)
void bch2_fs_read_only(struct bch_fs *c)
{
- if (c->state == BCH_FS_RO)
+ if (!test_bit(BCH_FS_RW, &c->flags)) {
+ cancel_delayed_work_sync(&c->journal.reclaim_work);
return;
+ }
BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
@@ -332,10 +334,9 @@ void bch2_fs_read_only(struct bch_fs *c)
!test_bit(BCH_FS_ERROR, &c->flags) &&
!test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
test_bit(BCH_FS_STARTED, &c->flags))
- bch2_fs_mark_clean(c, true);
+ bch2_fs_mark_clean(c);
- if (c->state != BCH_FS_STOPPING)
- c->state = BCH_FS_RO;
+ clear_bit(BCH_FS_RW, &c->flags);
}
static void bch2_fs_read_only_work(struct work_struct *work)
@@ -364,55 +365,106 @@ bool bch2_fs_emergency_read_only(struct bch_fs *c)
return ret;
}
-const char *bch2_fs_read_write(struct bch_fs *c)
+static int bch2_fs_read_write_late(struct bch_fs *c)
{
struct bch_dev *ca;
- const char *err = NULL;
unsigned i;
+ int ret;
- if (c->state == BCH_FS_RW)
- return NULL;
+ ret = bch2_gc_thread_start(c);
+ if (ret) {
+ bch_err(c, "error starting gc thread");
+ return ret;
+ }
+
+ for_each_rw_member(ca, c, i) {
+ ret = bch2_copygc_start(c, ca);
+ if (ret) {
+ bch_err(c, "error starting copygc threads");
+ percpu_ref_put(&ca->io_ref);
+ return ret;
+ }
+ }
+
+ ret = bch2_rebalance_start(c);
+ if (ret) {
+ bch_err(c, "error starting rebalance thread");
+ return ret;
+ }
+
+ schedule_delayed_work(&c->pd_controllers_update, 5 * HZ);
+
+ return 0;
+}
+
+int __bch2_fs_read_write(struct bch_fs *c, bool early)
+{
+ struct bch_dev *ca;
+ unsigned i;
+ int ret;
+
+ if (test_bit(BCH_FS_RW, &c->flags))
+ return 0;
- bch2_fs_mark_clean(c, false);
+ ret = bch2_fs_mark_dirty(c);
+ if (ret)
+ goto err;
for_each_rw_member(ca, c, i)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
- err = "error starting allocator thread";
- for_each_rw_member(ca, c, i)
- if (bch2_dev_allocator_start(ca)) {
- percpu_ref_put(&ca->io_ref);
+ if (!test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
+ ret = bch2_fs_allocator_start(c);
+ if (ret) {
+ bch_err(c, "error initializing allocator");
goto err;
}
- set_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
-
- err = "error starting btree GC thread";
- if (bch2_gc_thread_start(c))
- goto err;
+ set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);
+ }
- err = "error starting copygc thread";
- for_each_rw_member(ca, c, i)
- if (bch2_copygc_start(c, ca)) {
+ for_each_rw_member(ca, c, i) {
+ ret = bch2_dev_allocator_start(ca);
+ if (ret) {
+ bch_err(c, "error starting allocator threads");
percpu_ref_put(&ca->io_ref);
goto err;
}
+ }
- err = "error starting rebalance thread";
- if (bch2_rebalance_start(c))
- goto err;
+ set_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
- schedule_delayed_work(&c->pd_controllers_update, 5 * HZ);
+ if (!early) {
+ ret = bch2_fs_read_write_late(c);
+ if (ret)
+ goto err;
+ }
- if (c->state != BCH_FS_STARTING)
- percpu_ref_reinit(&c->writes);
+ percpu_ref_reinit(&c->writes);
+ set_bit(BCH_FS_RW, &c->flags);
- c->state = BCH_FS_RW;
- return NULL;
+ queue_delayed_work(c->journal_reclaim_wq,
+ &c->journal.reclaim_work, 0);
+ return 0;
err:
__bch2_fs_read_only(c);
- return err;
+ return ret;
+}
+
+int bch2_fs_read_write(struct bch_fs *c)
+{
+ return __bch2_fs_read_write(c, false);
+}
+
+int bch2_fs_read_write_early(struct bch_fs *c)
+{
+ lockdep_assert_held(&c->state_lock);
+
+ if (c->opts.read_only)
+ return -EROFS;
+
+ return __bch2_fs_read_write(c, true);
}
/* Filesystem startup/shutdown: */
@@ -435,7 +487,7 @@ static void bch2_fs_free(struct bch_fs *c)
bch2_io_clock_exit(&c->io_clock[READ]);
bch2_fs_compress_exit(c);
percpu_free_rwsem(&c->mark_lock);
- free_percpu(c->usage_scratch);
+ kfree(c->usage_scratch);
free_percpu(c->usage[0]);
free_percpu(c->pcpu);
mempool_exit(&c->btree_iters_pool);
@@ -604,6 +656,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
mutex_init(&c->btree_reserve_cache_lock);
mutex_init(&c->btree_interior_update_lock);
+ mutex_init(&c->usage_scratch_lock);
+
mutex_init(&c->bio_bounce_pages_lock);
bio_list_init(&c->btree_write_error_list);
@@ -626,7 +680,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
c->journal.write_time = &c->times[BCH_TIME_journal_write];
c->journal.delay_time = &c->times[BCH_TIME_journal_delay];
- c->journal.blocked_time = &c->times[BCH_TIME_journal_blocked];
+ c->journal.blocked_time = &c->times[BCH_TIME_blocked_journal];
c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq];
bch2_fs_btree_cache_init_early(&c->btree_cache);
@@ -668,7 +722,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
!(c->journal_reclaim_wq = alloc_workqueue("bcache_journal",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
- percpu_ref_init(&c->writes, bch2_writes_disabled, 0, GFP_KERNEL) ||
+ percpu_ref_init(&c->writes, bch2_writes_disabled,
+ PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1,
sizeof(struct btree_reserve)) ||
mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
@@ -742,7 +797,7 @@ const char *bch2_fs_start(struct bch_fs *c)
mutex_lock(&c->state_lock);
- BUG_ON(c->state != BCH_FS_STARTING);
+ BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));
mutex_lock(&c->sb_lock);
@@ -776,9 +831,12 @@ const char *bch2_fs_start(struct bch_fs *c)
if (c->opts.read_only) {
bch2_fs_read_only(c);
} else {
- err = bch2_fs_read_write(c);
- if (err)
+ if (!test_bit(BCH_FS_RW, &c->flags)
+ ? bch2_fs_read_write(c)
+ : bch2_fs_read_write_late(c)) {
+ err = "error going read write";
goto err;
+ }
}
set_bit(BCH_FS_STARTED, &c->flags);
@@ -882,6 +940,7 @@ static void bch2_dev_free(struct bch_dev *ca)
free_percpu(ca->io_done);
bioset_exit(&ca->replica_set);
bch2_dev_buckets_free(ca);
+ kfree(ca->sb_read_scratch);
bch2_time_stats_exit(&ca->io_latency[WRITE]);
bch2_time_stats_exit(&ca->io_latency[READ]);
@@ -995,6 +1054,7 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
0, GFP_KERNEL) ||
percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
+ !(ca->sb_read_scratch = kmalloc(4096, GFP_KERNEL)) ||
bch2_dev_buckets_alloc(c, ca) ||
bioset_init(&ca->replica_set, 4,
offsetof(struct bch_write_bio, bio), 0) ||