Diffstat (limited to 'libbcachefs/journal.c')
 libbcachefs/journal.c | 98
 1 file changed, 61 insertions(+), 37 deletions(-)
diff --git a/libbcachefs/journal.c b/libbcachefs/journal.c
index 26c7ae7d..ac1219fc 100644
--- a/libbcachefs/journal.c
+++ b/libbcachefs/journal.c
@@ -134,6 +134,8 @@ static enum {
c->opts.block_size;
BUG_ON(j->prev_buf_sectors > j->cur_buf_sectors);
+ bkey_extent_init(&buf->key);
+
/*
* We have to set last_seq here, _before_ opening a new journal entry:
*
@@ -334,15 +336,14 @@ u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
}
static int __journal_res_get(struct journal *j, struct journal_res *res,
- unsigned u64s_min, unsigned u64s_max)
+ unsigned flags)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct journal_buf *buf;
int ret;
retry:
- ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
- if (ret)
- return ret;
+ if (journal_res_get_fast(j, res))
+ return 0;
spin_lock(&j->lock);
/*
@@ -350,10 +351,9 @@ retry:
* that just did journal_entry_open() and call journal_entry_close()
* unnecessarily
*/
- ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
- if (ret) {
+ if (journal_res_get_fast(j, res)) {
spin_unlock(&j->lock);
- return 1;
+ return 0;
}
/*
@@ -376,7 +376,12 @@ retry:
spin_unlock(&j->lock);
return -EROFS;
case JOURNAL_ENTRY_INUSE:
- /* haven't finished writing out the previous one: */
+ /*
+ * Haven't finished writing out the previous entry, so we can't open
+ * another one yet; signal to the caller which sequence number we're
+ * trying to open:
+ */
+ res->seq = journal_cur_seq(j) + 1;
spin_unlock(&j->lock);
trace_journal_entry_full(c);
goto blocked;
@@ -388,6 +393,8 @@ retry:
/* We now have a new, closed journal buf - see if we can open it: */
ret = journal_entry_open(j);
+ if (!ret)
+ res->seq = journal_cur_seq(j);
spin_unlock(&j->lock);
if (ret < 0)
@@ -407,7 +414,7 @@ retry:
blocked:
if (!j->res_get_blocked_start)
j->res_get_blocked_start = local_clock() ?: 1;
- return 0;
+ return -EAGAIN;
}
/*
@@ -421,14 +428,14 @@ blocked:
* btree node write locks.
*/
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
- unsigned u64s_min, unsigned u64s_max)
+ unsigned flags)
{
int ret;
wait_event(j->wait,
- (ret = __journal_res_get(j, res, u64s_min,
- u64s_max)));
- return ret < 0 ? ret : 0;
+ (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
+ (flags & JOURNAL_RES_GET_NONBLOCK));
+ return ret;
}
u64 bch2_journal_last_unwritten_seq(struct journal *j)
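With this change the reservation path follows the usual kernel convention: 0 on success, -EAGAIN when the caller must wait for space, and a negative error such as -EROFS when the journal has failed. A minimal caller sketch, assuming bch2_journal_res_get() keeps the (journal, res, u64s, flags) argument order seen in the bch2_journal_meta() hunks below and that JOURNAL_RES_GET_NONBLOCK is the flag tested in the slowpath above; the 'u64s' payload size and the use of res.offset are illustrative:

	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	/* Try to reserve space for 'u64s' worth of keys, without sleeping: */
	ret = bch2_journal_res_get(j, &res, jset_u64s(u64s),
				   JOURNAL_RES_GET_NONBLOCK);
	if (ret == -EAGAIN)
		return ret;	/* no space yet: retry, or call again without NONBLOCK */
	if (ret)
		return ret;	/* e.g. -EROFS after a journal error */

	/* ... copy keys into the reservation at res.offset ... */

	bch2_journal_res_put(j, &res);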
@@ -452,28 +459,55 @@ u64 bch2_journal_last_unwritten_seq(struct journal *j)
* btree root - every journal entry contains the roots of all the btrees, so it
* doesn't need to bother with getting a journal reservation
*/
-int bch2_journal_open_seq_async(struct journal *j, u64 seq, struct closure *parent)
+int bch2_journal_open_seq_async(struct journal *j, u64 seq, struct closure *cl)
{
- int ret;
-
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ bool need_reclaim = false;
+retry:
spin_lock(&j->lock);
- BUG_ON(seq > journal_cur_seq(j));
if (seq < journal_cur_seq(j) ||
journal_entry_is_open(j)) {
spin_unlock(&j->lock);
- return 1;
+ return 0;
+ }
+
+ if (journal_cur_seq(j) < seq) {
+ switch (journal_buf_switch(j, false)) {
+ case JOURNAL_ENTRY_ERROR:
+ spin_unlock(&j->lock);
+ return -EROFS;
+ case JOURNAL_ENTRY_INUSE:
+ /* haven't finished writing out the previous one: */
+ trace_journal_entry_full(c);
+ goto blocked;
+ case JOURNAL_ENTRY_CLOSED:
+ break;
+ case JOURNAL_UNLOCKED:
+ goto retry;
+ }
+ }
+
+ BUG_ON(journal_cur_seq(j) < seq);
+
+ if (!journal_entry_open(j)) {
+ need_reclaim = true;
+ goto blocked;
}
- ret = journal_entry_open(j);
- if (!ret)
- closure_wait(&j->async_wait, parent);
spin_unlock(&j->lock);
- if (!ret)
- bch2_journal_reclaim_work(&j->reclaim_work.work);
+ return 0;
+blocked:
+ if (!j->res_get_blocked_start)
+ j->res_get_blocked_start = local_clock() ?: 1;
- return ret;
+ closure_wait(&j->async_wait, cl);
+ spin_unlock(&j->lock);
+
+ if (need_reclaim)
+ bch2_journal_reclaim_work(&j->reclaim_work.work);
+ return -EAGAIN;
}
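bch2_journal_open_seq_async() no longer blocks: it parks the caller's closure on j->async_wait and returns -EAGAIN. A hypothetical synchronous wrapper, assuming the standard bcachefs closure helpers closure_init_stack() and closure_sync() (not part of this diff):

	struct closure cl;
	int ret;

	closure_init_stack(&cl);

	/* Retry until the entry for 'seq' is open, or the journal errors: */
	while ((ret = bch2_journal_open_seq_async(j, seq, &cl)) == -EAGAIN)
		closure_sync(&cl);	/* sleeps until j->async_wait wakes us */

	return ret;	/* 0 on success, -EROFS after a journal error */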
static int journal_seq_error(struct journal *j, u64 seq)
@@ -593,11 +627,10 @@ int bch2_journal_flush_seq(struct journal *j, u64 seq)
void bch2_journal_meta_async(struct journal *j, struct closure *parent)
{
struct journal_res res;
- unsigned u64s = jset_u64s(0);
memset(&res, 0, sizeof(res));
- bch2_journal_res_get(j, &res, u64s, u64s);
+ bch2_journal_res_get(j, &res, jset_u64s(0), 0);
bch2_journal_res_put(j, &res);
bch2_journal_flush_seq_async(j, res.seq, parent);
@@ -606,12 +639,11 @@ void bch2_journal_meta_async(struct journal *j, struct closure *parent)
int bch2_journal_meta(struct journal *j)
{
struct journal_res res;
- unsigned u64s = jset_u64s(0);
int ret;
memset(&res, 0, sizeof(res));
- ret = bch2_journal_res_get(j, &res, u64s, u64s);
+ ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
if (ret)
return ret;
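Both metadata paths now pass jset_u64s(0) directly: a reservation sized for an entry header with no payload. For reference, a sketch of the helper as commonly defined in the bcachefs headers (assumed here, not part of this diff):

static inline unsigned jset_u64s(unsigned u64s)
{
	/* payload size plus the struct jset_entry header, in u64s */
	return u64s + sizeof(struct jset_entry) / sizeof(u64);
}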
@@ -751,9 +783,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
ca->mi.bucket_size,
gc_phase(GC_PHASE_SB),
- new_fs
- ? BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE
- : 0);
+ 0);
if (c) {
spin_unlock(&c->journal.lock);
@@ -861,10 +891,6 @@ static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
- spin_lock(&j->lock);
- bch2_extent_drop_device(bkey_i_to_s_extent(&j->key), ca->dev_idx);
- spin_unlock(&j->lock);
-
wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}
@@ -1000,8 +1026,6 @@ int bch2_fs_journal_init(struct journal *j)
j->write_delay_ms = 1000;
j->reclaim_delay_ms = 100;
- bkey_extent_init(&j->key);
-
atomic64_set(&j->reservations.counter,
((union journal_res_state)
{ .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);