author    Kent Overstreet <kent.overstreet@gmail.com>  2019-02-28 21:34:16 -0500
committer Kent Overstreet <kent.overstreet@gmail.com>  2019-02-28 22:33:41 -0500
commit    a4eb187a6f0af8041ae2128e6ee82ab7a43cb87c (patch)
tree      9560a9fc7b481ee827ee00ae48a77eca69b398a7  /libbcachefs/journal.c
parent    17c5215c1c542dd7b6b4f891a0da16d8c98e0591 (diff)
Update bcachefs sources to 75e8a078b8 bcachefs: improved flush_held_btree_writes()
Diffstat (limited to 'libbcachefs/journal.c')
-rw-r--r--  libbcachefs/journal.c  446
1 file changed, 232 insertions, 214 deletions
diff --git a/libbcachefs/journal.c b/libbcachefs/journal.c
index 8ff8cfa8..f108a282 100644
--- a/libbcachefs/journal.c
+++ b/libbcachefs/journal.c
@@ -17,23 +17,14 @@
#include <trace/events/bcachefs.h>
-static bool journal_entry_is_open(struct journal *j)
+static bool __journal_entry_is_open(union journal_res_state state)
{
- return j->reservations.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
+ return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}
-void bch2_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set)
+static bool journal_entry_is_open(struct journal *j)
{
- struct journal_buf *w = journal_prev_buf(j);
-
- atomic_dec_bug(&journal_seq_pin(j, le64_to_cpu(w->data->seq))->count);
-
- if (!need_write_just_set &&
- test_bit(JOURNAL_NEED_WRITE, &j->flags))
- bch2_time_stats_update(j->delay_time,
- j->need_write_time);
-
- closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
+ return __journal_entry_is_open(j->reservations);
}
static void journal_pin_new_entry(struct journal *j, int count)
@@ -77,39 +68,71 @@ static inline bool journal_entry_empty(struct jset *j)
return true;
}
-static enum {
- JOURNAL_ENTRY_ERROR,
- JOURNAL_ENTRY_INUSE,
- JOURNAL_ENTRY_CLOSED,
- JOURNAL_UNLOCKED,
-} journal_buf_switch(struct journal *j, bool need_write_just_set)
+void bch2_journal_halt(struct journal *j)
+{
+ union journal_res_state old, new;
+ u64 v = atomic64_read(&j->reservations.counter);
+
+ do {
+ old.v = new.v = v;
+ if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
+ return;
+
+ new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
+ } while ((v = atomic64_cmpxchg(&j->reservations.counter,
+ old.v, new.v)) != old.v);
+
+ journal_wake(j);
+ closure_wake_up(&journal_cur_buf(j)->wait);
+}
+
+/* journal entry close/open: */
+
+void __bch2_journal_buf_put(struct journal *j, bool need_write_just_set)
+{
+ if (!need_write_just_set &&
+ test_bit(JOURNAL_NEED_WRITE, &j->flags))
+ bch2_time_stats_update(j->delay_time,
+ j->need_write_time);
+
+ clear_bit(JOURNAL_NEED_WRITE, &j->flags);
+
+ closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
+}
+
+/*
+ * Returns true if journal entry is now closed:
+ */
+static bool __journal_entry_close(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct journal_buf *buf = journal_cur_buf(j);
union journal_res_state old, new;
u64 v = atomic64_read(&j->reservations.counter);
+ bool set_need_write = false;
+ unsigned sectors;
lockdep_assert_held(&j->lock);
do {
old.v = new.v = v;
if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
- return JOURNAL_ENTRY_CLOSED;
+ return true;
if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
/* this entry will never be written: */
closure_wake_up(&buf->wait);
- return JOURNAL_ENTRY_ERROR;
+ return true;
}
- if (new.prev_buf_unwritten)
- return JOURNAL_ENTRY_INUSE;
+ if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
+ set_bit(JOURNAL_NEED_WRITE, &j->flags);
+ j->need_write_time = local_clock();
+ set_need_write = true;
+ }
- /*
- * avoid race between setting buf->data->u64s and
- * journal_res_put starting write:
- */
- journal_state_inc(&new);
+ if (new.prev_buf_unwritten)
+ return false;
new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
new.idx++;
@@ -119,15 +142,12 @@ static enum {
} while ((v = atomic64_cmpxchg(&j->reservations.counter,
old.v, new.v)) != old.v);
- clear_bit(JOURNAL_NEED_WRITE, &j->flags);
-
buf->data->u64s = cpu_to_le32(old.cur_entry_offset);
- j->prev_buf_sectors =
- vstruct_blocks_plus(buf->data, c->block_bits,
- buf->u64s_reserved) *
- c->opts.block_size;
- BUG_ON(j->prev_buf_sectors > j->cur_buf_sectors);
+ sectors = vstruct_blocks_plus(buf->data, c->block_bits,
+ buf->u64s_reserved) << c->block_bits;
+ BUG_ON(sectors > buf->sectors);
+ buf->sectors = sectors;
bkey_extent_init(&buf->key);
@@ -150,7 +170,6 @@ static enum {
* Hence, we want update/set last_seq on the current journal entry right
* before we open a new one:
*/
- bch2_journal_reclaim_fast(j);
buf->data->last_seq = cpu_to_le64(journal_last_seq(j));
if (journal_entry_empty(buf->data))
@@ -163,32 +182,22 @@ static enum {
bch2_journal_buf_init(j);
cancel_delayed_work(&j->write_work);
- spin_unlock(&j->lock);
- /* ugh - might be called from __journal_res_get() under wait_event() */
- __set_current_state(TASK_RUNNING);
- bch2_journal_buf_put(j, old.idx, need_write_just_set);
+ bch2_journal_space_available(j);
- return JOURNAL_UNLOCKED;
+ bch2_journal_buf_put(j, old.idx, set_need_write);
+ return true;
}
-void bch2_journal_halt(struct journal *j)
+static bool journal_entry_close(struct journal *j)
{
- union journal_res_state old, new;
- u64 v = atomic64_read(&j->reservations.counter);
-
- do {
- old.v = new.v = v;
- if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
- return;
+ bool ret;
- new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
- } while ((v = atomic64_cmpxchg(&j->reservations.counter,
- old.v, new.v)) != old.v);
+ spin_lock(&j->lock);
+ ret = __journal_entry_close(j);
+ spin_unlock(&j->lock);
- journal_wake(j);
- closure_wake_up(&journal_cur_buf(j)->wait);
- closure_wake_up(&journal_prev_buf(j)->wait);
+ return ret;
}
/*
@@ -196,46 +205,39 @@ void bch2_journal_halt(struct journal *j)
* journal reservation - journal entry is open means journal is dirty:
*
* returns:
- * 1: success
- * 0: journal currently full (must wait)
- * -EROFS: insufficient rw devices
- * -EIO: journal error
+ * 0: success
+ * -ENOSPC: journal currently full, must invoke reclaim
+ * -EAGAIN: journal blocked, must wait
+ * -EROFS: insufficient rw devices or journal error
*/
static int journal_entry_open(struct journal *j)
{
struct journal_buf *buf = journal_cur_buf(j);
union journal_res_state old, new;
- ssize_t u64s;
- int sectors;
+ int u64s;
u64 v;
lockdep_assert_held(&j->lock);
BUG_ON(journal_entry_is_open(j));
- if (!fifo_free(&j->pin))
- return 0;
+ if (j->blocked)
+ return -EAGAIN;
- sectors = bch2_journal_entry_sectors(j);
- if (sectors <= 0)
- return sectors;
+ if (j->cur_entry_error)
+ return j->cur_entry_error;
- buf->disk_sectors = sectors;
- buf->u64s_reserved = j->entry_u64s_reserved;
+ BUG_ON(!j->cur_entry_sectors);
- sectors = min_t(unsigned, sectors, buf->size >> 9);
- j->cur_buf_sectors = sectors;
-
- u64s = (sectors << 9) / sizeof(u64);
-
- /* Subtract the journal header */
- u64s -= sizeof(struct jset) / sizeof(u64);
- u64s -= buf->u64s_reserved;
- u64s = max_t(ssize_t, 0L, u64s);
+ buf->u64s_reserved = j->entry_u64s_reserved;
+ buf->disk_sectors = j->cur_entry_sectors;
+ buf->sectors = min(buf->disk_sectors, buf->buf_size >> 9);
- BUG_ON(u64s >= JOURNAL_ENTRY_CLOSED_VAL);
+ u64s = (int) (buf->sectors << 9) / sizeof(u64) -
+ journal_entry_overhead(j);
+ u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
if (u64s <= le32_to_cpu(buf->data->u64s))
- return 0;
+ return -ENOSPC;
/*
* Must be set before marking the journal entry as open:
@@ -246,11 +248,14 @@ static int journal_entry_open(struct journal *j)
do {
old.v = new.v = v;
+ EBUG_ON(journal_state_count(new, new.idx));
+
if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
- return -EIO;
+ return -EROFS;
/* Handle any already added entries */
new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
+ journal_state_inc(&new);
} while ((v = atomic64_cmpxchg(&j->reservations.counter,
old.v, new.v)) != old.v);
@@ -263,37 +268,22 @@ static int journal_entry_open(struct journal *j)
&j->write_work,
msecs_to_jiffies(j->write_delay_ms));
journal_wake(j);
- return 1;
+ return 0;
}
-static bool __journal_entry_close(struct journal *j)
+static bool journal_quiesced(struct journal *j)
{
- bool set_need_write;
-
- if (!journal_entry_is_open(j)) {
- spin_unlock(&j->lock);
- return true;
- }
-
- set_need_write = !test_and_set_bit(JOURNAL_NEED_WRITE, &j->flags);
- if (set_need_write)
- j->need_write_time = local_clock();
+ union journal_res_state state = READ_ONCE(j->reservations);
+ bool ret = !state.prev_buf_unwritten && !__journal_entry_is_open(state);
- switch (journal_buf_switch(j, set_need_write)) {
- case JOURNAL_ENTRY_INUSE:
- spin_unlock(&j->lock);
- return false;
- default:
- spin_unlock(&j->lock);
- case JOURNAL_UNLOCKED:
- return true;
- }
+ if (!ret)
+ journal_entry_close(j);
+ return ret;
}
-static bool journal_entry_close(struct journal *j)
+static void journal_quiesce(struct journal *j)
{
- spin_lock(&j->lock);
- return __journal_entry_close(j);
+ wait_event(j->wait, journal_quiesced(j));
}
static void journal_write_work(struct work_struct *work)
@@ -337,7 +327,11 @@ retry:
if (journal_res_get_fast(j, res, flags))
return 0;
+ if (bch2_journal_error(j))
+ return -EROFS;
+
spin_lock(&j->lock);
+
/*
* Recheck after taking the lock, so we don't race with another thread
* that just did journal_entry_open() and call journal_entry_close()
@@ -355,56 +349,43 @@ retry:
*/
buf = journal_cur_buf(j);
if (journal_entry_is_open(j) &&
- buf->size >> 9 < buf->disk_sectors &&
- buf->size < JOURNAL_ENTRY_SIZE_MAX)
- j->buf_size_want = max(j->buf_size_want, buf->size << 1);
+ buf->buf_size >> 9 < buf->disk_sectors &&
+ buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
+ j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
- /*
- * Close the current journal entry if necessary, then try to start a new
- * one:
- */
- switch (journal_buf_switch(j, false)) {
- case JOURNAL_ENTRY_ERROR:
- spin_unlock(&j->lock);
- return -EROFS;
- case JOURNAL_ENTRY_INUSE:
+ if (journal_entry_is_open(j) &&
+ !__journal_entry_close(j)) {
/*
- * The current journal entry is still open, but we failed to get
- * a journal reservation because there's not enough space in it,
- * and we can't close it and start another because we haven't
- * finished writing out the previous entry:
+ * We failed to get a reservation on the current open journal
+ * entry because it's full, and we can't close it because
+ * there's still a previous one in flight:
*/
- spin_unlock(&j->lock);
trace_journal_entry_full(c);
- goto blocked;
- case JOURNAL_ENTRY_CLOSED:
- break;
- case JOURNAL_UNLOCKED:
- goto retry;
+ ret = -EAGAIN;
+ } else {
+ ret = journal_entry_open(j);
}
- /* We now have a new, closed journal buf - see if we can open it: */
- ret = journal_entry_open(j);
+ if ((ret == -EAGAIN || ret == -ENOSPC) &&
+ !j->res_get_blocked_start)
+ j->res_get_blocked_start = local_clock() ?: 1;
+
spin_unlock(&j->lock);
- if (ret < 0)
- return ret;
- if (ret)
+ if (!ret)
goto retry;
+ if (ret == -ENOSPC) {
+ /*
+ * Journal is full - can't rely on reclaim from work item due to
+ * freezing:
+ */
+ trace_journal_full(c);
+ if (!(flags & JOURNAL_RES_GET_NONBLOCK))
+ bch2_journal_reclaim_work(&j->reclaim_work.work);
+ ret = -EAGAIN;
+ }
- /* Journal's full, we have to wait */
-
- /*
- * Direct reclaim - can't rely on reclaim from work item
- * due to freezing..
- */
- bch2_journal_reclaim_work(&j->reclaim_work.work);
-
- trace_journal_full(c);
-blocked:
- if (!j->res_get_blocked_start)
- j->res_get_blocked_start = local_clock() ?: 1;
- return -EAGAIN;
+ return ret;
}
/*
@@ -422,7 +403,7 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
{
int ret;
- wait_event(j->wait,
+ closure_wait_event(&j->async_wait,
(ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
(flags & JOURNAL_RES_GET_NONBLOCK));
return ret;
@@ -441,9 +422,9 @@ void bch2_journal_entry_res_resize(struct journal *j,
j->entry_u64s_reserved += d;
if (d <= 0)
- goto out_unlock;
+ goto out;
- j->cur_entry_u64s -= d;
+ j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
smp_mb();
state = READ_ONCE(j->reservations);
@@ -454,15 +435,12 @@ void bch2_journal_entry_res_resize(struct journal *j,
* Not enough room in current journal entry, have to flush it:
*/
__journal_entry_close(j);
- goto out;
+ } else {
+ journal_cur_buf(j)->u64s_reserved += d;
}
-
- journal_cur_buf(j)->u64s_reserved += d;
-out_unlock:
- spin_unlock(&j->lock);
out:
+ spin_unlock(&j->lock);
res->u64s += d;
- return;
}
/* journal flushing: */
@@ -492,47 +470,47 @@ int bch2_journal_open_seq_async(struct journal *j, u64 seq, struct closure *cl)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
int ret;
-retry:
+
spin_lock(&j->lock);
- if (seq < journal_cur_seq(j) ||
+ /*
+ * Can't try to open more than one sequence number ahead:
+ */
+ BUG_ON(journal_cur_seq(j) < seq && !journal_entry_is_open(j));
+
+ if (journal_cur_seq(j) > seq ||
journal_entry_is_open(j)) {
spin_unlock(&j->lock);
return 0;
}
- if (journal_cur_seq(j) < seq) {
- switch (journal_buf_switch(j, false)) {
- case JOURNAL_ENTRY_ERROR:
- spin_unlock(&j->lock);
- return -EROFS;
- case JOURNAL_ENTRY_INUSE:
- /* haven't finished writing out the previous one: */
- trace_journal_entry_full(c);
- goto blocked;
- case JOURNAL_ENTRY_CLOSED:
- break;
- case JOURNAL_UNLOCKED:
- goto retry;
- }
- }
-
- BUG_ON(journal_cur_seq(j) < seq);
+ if (journal_cur_seq(j) < seq &&
+ !__journal_entry_close(j)) {
+ /* haven't finished writing out the previous one: */
+ trace_journal_entry_full(c);
+ ret = -EAGAIN;
+ } else {
+ BUG_ON(journal_cur_seq(j) != seq);
- ret = journal_entry_open(j);
- if (ret) {
- spin_unlock(&j->lock);
- return ret < 0 ? ret : 0;
+ ret = journal_entry_open(j);
}
-blocked:
- if (!j->res_get_blocked_start)
+
+ if ((ret == -EAGAIN || ret == -ENOSPC) &&
+ !j->res_get_blocked_start)
j->res_get_blocked_start = local_clock() ?: 1;
- closure_wait(&j->async_wait, cl);
+ if (ret == -EAGAIN || ret == -ENOSPC)
+ closure_wait(&j->async_wait, cl);
+
spin_unlock(&j->lock);
- bch2_journal_reclaim_work(&j->reclaim_work.work);
- return -EAGAIN;
+ if (ret == -ENOSPC) {
+ trace_journal_full(c);
+ bch2_journal_reclaim_work(&j->reclaim_work.work);
+ ret = -EAGAIN;
+ }
+
+ return ret;
}
static int journal_seq_error(struct journal *j, u64 seq)
@@ -615,8 +593,7 @@ void bch2_journal_flush_seq_async(struct journal *j, u64 seq,
if (seq == journal_cur_seq(j))
__journal_entry_close(j);
- else
- spin_unlock(&j->lock);
+ spin_unlock(&j->lock);
}
static int journal_seq_flushed(struct journal *j, u64 seq)
@@ -628,8 +605,7 @@ static int journal_seq_flushed(struct journal *j, u64 seq)
if (seq == journal_cur_seq(j))
__journal_entry_close(j);
- else
- spin_unlock(&j->lock);
+ spin_unlock(&j->lock);
return ret;
}
@@ -721,6 +697,26 @@ int bch2_journal_flush(struct journal *j)
return bch2_journal_flush_seq(j, seq);
}
+/* block/unlock the journal: */
+
+void bch2_journal_unblock(struct journal *j)
+{
+ spin_lock(&j->lock);
+ j->blocked--;
+ spin_unlock(&j->lock);
+
+ journal_wake(j);
+}
+
+void bch2_journal_block(struct journal *j)
+{
+ spin_lock(&j->lock);
+ j->blocked++;
+ spin_unlock(&j->lock);
+
+ journal_quiesce(j);
+}
+
/* allocate journal on a device: */
static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
@@ -743,7 +739,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
goto err;
journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
- nr + sizeof(*journal_buckets) / sizeof(u64));
+ nr + sizeof(*journal_buckets) / sizeof(u64));
if (!journal_buckets)
goto err;
@@ -806,9 +802,9 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
ja->nr++;
bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
- ca->mi.bucket_size,
- gc_phase(GC_PHASE_SB),
- 0);
+ ca->mi.bucket_size,
+ gc_phase(GC_PHASE_SB),
+ 0);
if (c) {
spin_unlock(&c->journal.lock);
@@ -859,7 +855,7 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
*/
if (bch2_disk_reservation_get(c, &disk_res,
- bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
+ bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
mutex_unlock(&c->sb_lock);
return -ENOSPC;
}
@@ -930,8 +926,7 @@ void bch2_fs_journal_stop(struct journal *j)
c->btree_roots_dirty)
bch2_journal_meta(j);
- BUG_ON(journal_entry_is_open(j) ||
- j->reservations.prev_buf_unwritten);
+ journal_quiesce(j);
BUG_ON(!bch2_journal_error(j) &&
test_bit(JOURNAL_NOT_EMPTY, &j->flags));
@@ -957,7 +952,7 @@ void bch2_fs_journal_start(struct journal *j)
journal_pin_new_entry(j, 0);
/*
- * journal_buf_switch() only inits the next journal entry when it
+ * __journal_entry_close() only inits the next journal entry when it
* closes an open journal entry - the very first journal entry gets
* initialized here:
*/
@@ -966,6 +961,7 @@ void bch2_fs_journal_start(struct journal *j)
c->last_bucket_seq_cleanup = journal_cur_seq(j);
+ bch2_journal_space_available(j);
spin_unlock(&j->lock);
/*
@@ -975,7 +971,7 @@ void bch2_fs_journal_start(struct journal *j)
*/
bch2_journal_seq_blacklist_write(j);
- queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
+ queue_delayed_work(c->journal_reclaim_wq, &j->reclaim_work, 0);
}
/* init/exit: */
@@ -1021,8 +1017,8 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
void bch2_fs_journal_exit(struct journal *j)
{
- kvpfree(j->buf[1].data, j->buf[1].size);
- kvpfree(j->buf[0].data, j->buf[0].size);
+ kvpfree(j->buf[1].data, j->buf[1].buf_size);
+ kvpfree(j->buf[0].data, j->buf[0].buf_size);
free_fifo(&j->pin);
}
@@ -1046,8 +1042,8 @@ int bch2_fs_journal_init(struct journal *j)
lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
- j->buf[0].size = JOURNAL_ENTRY_SIZE_MIN;
- j->buf[1].size = JOURNAL_ENTRY_SIZE_MIN;
+ j->buf[0].buf_size = JOURNAL_ENTRY_SIZE_MIN;
+ j->buf[1].buf_size = JOURNAL_ENTRY_SIZE_MIN;
j->write_delay_ms = 1000;
j->reclaim_delay_ms = 100;
@@ -1060,8 +1056,8 @@ int bch2_fs_journal_init(struct journal *j)
{ .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
- !(j->buf[0].data = kvpmalloc(j->buf[0].size, GFP_KERNEL)) ||
- !(j->buf[1].data = kvpmalloc(j->buf[1].size, GFP_KERNEL))) {
+ !(j->buf[0].data = kvpmalloc(j->buf[0].buf_size, GFP_KERNEL)) ||
+ !(j->buf[1].data = kvpmalloc(j->buf[1].buf_size, GFP_KERNEL))) {
ret = -ENOMEM;
goto out;
}
@@ -1078,35 +1074,54 @@ ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
{
struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
- union journal_res_state *s = &j->reservations;
+ union journal_res_state s;
struct bch_dev *ca;
unsigned iter;
rcu_read_lock();
spin_lock(&j->lock);
+ s = READ_ONCE(j->reservations);
pr_buf(&out,
"active journal entries:\t%llu\n"
"seq:\t\t\t%llu\n"
"last_seq:\t\t%llu\n"
"last_seq_ondisk:\t%llu\n"
- "reservation count:\t%u\n"
- "reservation offset:\t%u\n"
- "current entry u64s:\t%u\n"
- "io in flight:\t\t%i\n"
- "need write:\t\t%i\n"
- "dirty:\t\t\t%i\n"
- "replay done:\t\t%i\n",
+ "current entry:\t\t",
fifo_used(&j->pin),
journal_cur_seq(j),
journal_last_seq(j),
- j->last_seq_ondisk,
- journal_state_count(*s, s->idx),
- s->cur_entry_offset,
- j->cur_entry_u64s,
- s->prev_buf_unwritten,
+ j->last_seq_ondisk);
+
+ switch (s.cur_entry_offset) {
+ case JOURNAL_ENTRY_ERROR_VAL:
+ pr_buf(&out, "error\n");
+ break;
+ case JOURNAL_ENTRY_CLOSED_VAL:
+ pr_buf(&out, "closed\n");
+ break;
+ default:
+ pr_buf(&out, "%u/%u\n",
+ s.cur_entry_offset,
+ j->cur_entry_u64s);
+ break;
+ }
+
+ pr_buf(&out,
+ "current entry refs:\t%u\n"
+ "prev entry unwritten:\t",
+ journal_state_count(s, s.idx));
+
+ if (s.prev_buf_unwritten)
+ pr_buf(&out, "yes, ref %u\n",
+ journal_state_count(s, !s.idx));
+ else
+ pr_buf(&out, "no\n");
+
+ pr_buf(&out,
+ "need write:\t\t%i\n"
+ "replay done:\t\t%i\n",
test_bit(JOURNAL_NEED_WRITE, &j->flags),
- journal_entry_is_open(j),
test_bit(JOURNAL_REPLAY_DONE, &j->flags));
for_each_member_device_rcu(ca, c, iter,
@@ -1119,9 +1134,12 @@ ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
pr_buf(&out,
"dev %u:\n"
"\tnr\t\t%u\n"
+ "\tavailable\t%u:%u\n"
"\tcur_idx\t\t%u (seq %llu)\n"
"\tlast_idx\t%u (seq %llu)\n",
iter, ja->nr,
+ bch2_journal_dev_buckets_available(j, ja),
+ ja->sectors_free,
ja->cur_idx, ja->bucket_seq[ja->cur_idx],
ja->last_idx, ja->bucket_seq[ja->last_idx]);
}
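
The functions touched above (bch2_journal_halt(), __journal_entry_close(), journal_entry_open()) all share one lockless pattern: snapshot the packed journal_res_state word, compute the desired next state, and retry with atomic64_cmpxchg() until no other CPU raced in between. The following is a minimal standalone sketch of that pattern using C11 atomics; the union layout, the sentinel values, and the journal_halt() helper are made-up stand-ins for illustration, not bcachefs's real types.

/*
 * Minimal standalone sketch of the cmpxchg-loop state machine used above.
 * The union below is a stand-in, NOT bcachefs's union journal_res_state;
 * field names, sentinel values and journal_halt() are illustrative only.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_CLOSED_VAL 0xfffffffeu	/* hypothetical "closed" sentinel */
#define ENTRY_ERROR_VAL  0xffffffffu	/* hypothetical "error" sentinel  */

union res_state {
	uint64_t v;			/* whole state word, updated atomically */
	struct {
		uint32_t cur_entry_offset;	/* offset or sentinel value   */
		uint32_t buf_count;		/* outstanding reservations   */
	};
};

static _Atomic uint64_t reservations;

/* Shaped like bch2_journal_halt(): flip the state to "error" exactly once. */
static void journal_halt(void)
{
	union res_state old, new;
	uint64_t v = atomic_load(&reservations);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == ENTRY_ERROR_VAL)
			return;		/* already halted by someone else */

		new.cur_entry_offset = ENTRY_ERROR_VAL;
		/*
		 * On failure, compare_exchange refreshes v with the current
		 * value, much like the kernel's atomic64_cmpxchg() loop, so
		 * the next iteration recomputes the transition from fresh
		 * state.
		 */
	} while (!atomic_compare_exchange_weak(&reservations, &v, new.v));
}

int main(void)
{
	union res_state s = { .cur_entry_offset = ENTRY_CLOSED_VAL };

	atomic_store(&reservations, s.v);
	journal_halt();

	s.v = atomic_load(&reservations);
	printf("cur_entry_offset = %#x\n", (unsigned) s.cur_entry_offset);
	return 0;
}

The point of the retry loop is that the whole transition (inspect the current sentinel, choose the next one) is recomputed from scratch on every iteration, so concurrent readers of the state word only ever observe either the old or the new value, never a half-applied update.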