Diffstat (limited to 'libbcachefs/alloc_background.c')
-rw-r--r--  libbcachefs/alloc_background.c  203
1 file changed, 107 insertions, 96 deletions
diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c
index 39d8d317..a78232ed 100644
--- a/libbcachefs/alloc_background.c
+++ b/libbcachefs/alloc_background.c
@@ -386,14 +386,16 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
{
struct bch_alloc_v4 _a;
const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
- const struct bch_backpointer *bps;
unsigned i;
prt_newline(out);
printbuf_indent_add(out, 2);
prt_printf(out, "gen %u oldest_gen %u data_type %s",
- a->gen, a->oldest_gen, bch2_data_types[a->data_type]);
+ a->gen, a->oldest_gen,
+ a->data_type < BCH_DATA_NR
+ ? bch2_data_types[a->data_type]
+ : "(invalid data type)");
prt_newline(out);
prt_printf(out, "journal_seq %llu", a->journal_seq);
prt_newline(out);
@@ -413,33 +415,41 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
prt_newline(out);
prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
prt_newline(out);
- prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a));
- printbuf_indent_add(out, 2);
- bps = alloc_v4_backpointers_c(a);
- for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a); i++) {
+ if (k.k->type == KEY_TYPE_alloc_v4) {
+ struct bkey_s_c_alloc_v4 a_raw = bkey_s_c_to_alloc_v4(k);
+ const struct bch_backpointer *bps = alloc_v4_backpointers_c(a_raw.v);
+
+ prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a_raw.v));
prt_newline(out);
- bch2_backpointer_to_text(out, &bps[i]);
+
+ prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v));
+ printbuf_indent_add(out, 2);
+
+ for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v); i++) {
+ prt_newline(out);
+ bch2_backpointer_to_text(out, &bps[i]);
+ }
+
+ printbuf_indent_sub(out, 2);
}
- printbuf_indent_sub(out, 4);
+ printbuf_indent_sub(out, 2);
}
void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
if (k.k->type == KEY_TYPE_alloc_v4) {
- int d;
+ void *src, *dst;
*out = *bkey_s_c_to_alloc_v4(k).v;
- d = (int) BCH_ALLOC_V4_U64s -
- (int) (BCH_ALLOC_V4_BACKPOINTERS_START(out) ?: BCH_ALLOC_V4_U64s_V0);
- if (unlikely(d > 0)) {
- memset((u64 *) out + BCH_ALLOC_V4_BACKPOINTERS_START(out),
- 0,
- d * sizeof(u64));
- SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
- }
+ src = alloc_v4_backpointers(out);
+ SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
+ dst = alloc_v4_backpointers(out);
+
+ if (src < dst)
+ memset(src, 0, dst - src);
} else {
struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
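
The rewritten __bch2_alloc_to_v4() above drops the signed-delta arithmetic in favour of a pointer comparison: remember where the backpointers started, normalize BCH_ALLOC_V4_BACKPOINTERS_START to BCH_ALLOC_V4_U64s, then zero whatever now lies between the old and new start. A minimal standalone sketch of that pattern follows; the struct, its field names and the CANONICAL_START constant are invented for illustration and are not the real bch_alloc_v4 layout.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define CANONICAL_START	16	/* stand-in for BCH_ALLOC_V4_U64s */

struct fake_alloc {
	uint64_t backpointers_start;	/* offset of backpointers, in u64s */
	uint64_t u64s[32];		/* fixed fields followed by backpointers */
};

/*
 * Upgrade a value written with an older, shorter fixed section: any u64s
 * between the old backpointers start and the canonical one are newly part
 * of the fixed section and must read as zeroes.
 */
static void normalize_start(struct fake_alloc *a)
{
	uint64_t *src = a->u64s + a->backpointers_start;
	uint64_t *dst;

	a->backpointers_start = CANONICAL_START;
	dst = a->u64s + a->backpointers_start;

	if (src < dst)
		memset(src, 0, (dst - src) * sizeof(*src));
}

int main(void)
{
	struct fake_alloc a = { .backpointers_start = 10 };

	normalize_start(&a);
	printf("backpointers now start at u64 %llu\n",
	       (unsigned long long) a.backpointers_start);
	return 0;
}
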
@@ -465,20 +475,20 @@ static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
struct bkey_i_alloc_v4 *ret;
- unsigned bytes = k.k->type == KEY_TYPE_alloc_v4
- ? bkey_bytes(k.k)
- : sizeof(struct bkey_i_alloc_v4);
-
- /*
- * Reserve space for one more backpointer here:
- * Not sketchy at doing it this way, nope...
- */
- ret = bch2_trans_kmalloc(trans, bytes + sizeof(struct bch_backpointer));
- if (IS_ERR(ret))
- return ret;
-
if (k.k->type == KEY_TYPE_alloc_v4) {
- struct bch_backpointer *src, *dst;
+ struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+ unsigned bytes = sizeof(struct bkey_i_alloc_v4) +
+ BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) *
+ sizeof(struct bch_backpointer);
+ void *src, *dst;
+
+ /*
+ * Reserve space for one more backpointer here:
+ * Not sketchy at doing it this way, nope...
+ */
+ ret = bch2_trans_kmalloc(trans, bytes + sizeof(struct bch_backpointer));
+ if (IS_ERR(ret))
+ return ret;
bkey_reassemble(&ret->k_i, k);
@@ -488,9 +498,15 @@ __bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
memmove(dst, src, BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v) *
sizeof(struct bch_backpointer));
- memset(src, 0, dst - src);
+ if (src < dst)
+ memset(src, 0, dst - src);
set_alloc_v4_u64s(ret);
} else {
+ ret = bch2_trans_kmalloc(trans, sizeof(struct bkey_i_alloc_v4) +
+ sizeof(struct bch_backpointer));
+ if (IS_ERR(ret))
+ return ret;
+
bkey_alloc_v4_init(&ret->k_i);
ret->k.p = k.k->p;
bch2_alloc_to_v4(k, &ret->v);
@@ -508,10 +524,8 @@ static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_
*/
struct bkey_i_alloc_v4 *ret =
bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
- if (!IS_ERR(ret)) {
+ if (!IS_ERR(ret))
bkey_reassemble(&ret->k_i, k);
- memset((void *) ret + bkey_bytes(k.k), 0, sizeof(struct bch_backpointer));
- }
return ret;
}
@@ -789,6 +803,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
goto err;
if (ca->mi.freespace_initialized &&
+ test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags) &&
bch2_trans_inconsistent_on(old.k->type != old_type, trans,
"incorrect key when %s %s btree (got %s should be %s)\n"
" for %s",
@@ -900,13 +915,11 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
new_lru = alloc_lru_idx(*new_a);
if (old_lru != new_lru) {
- ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset,
- old_lru, &new_lru, old);
+ ret = bch2_lru_change(trans, new->k.p.inode,
+ bucket_to_u64(new->k.p),
+ old_lru, new_lru);
if (ret)
return ret;
-
- if (new_a->data_type == BCH_DATA_cached)
- new_a->io_time[READ] = new_lru;
}
if (old_a->gen != new_a->gen) {
@@ -1244,7 +1257,15 @@ static int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
}
if (need_update) {
- ret = bch2_trans_update(trans, bucket_gens_iter, &g.k_i, 0);
+ struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(g));
+
+ ret = PTR_ERR_OR_ZERO(k);
+ if (ret)
+ goto err;
+
+ memcpy(k, &g, sizeof(g));
+
+ ret = bch2_trans_update(trans, bucket_gens_iter, k, 0);
if (ret)
goto err;
}
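
The bucket_gens change above is a lifetime fix: bch2_trans_update() remembers the key it is given until the transaction commits, so passing the address of the on-stack key was unsafe once this function returned. The general shape, sketched in-tree style with the same helpers that appear in the hunk (the wrapper name trans_update_copied is hypothetical, and the snippet assumes the bcachefs headers):

/*
 * Sketch: copy an on-stack key into memory owned by the transaction
 * before queueing the update, so it survives until commit.
 */
static int trans_update_copied(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_i *stack_k, size_t bytes)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, bytes);
	int ret = PTR_ERR_OR_ZERO(k);

	if (ret)
		return ret;

	memcpy(k, stack_k, bytes);
	return bch2_trans_update(trans, iter, k, 0);
}
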
@@ -1370,7 +1391,7 @@ static int bch2_check_bucket_gens_key(struct btree_trans *trans,
k = bch2_trans_kmalloc(trans, sizeof(g));
ret = PTR_ERR_OR_ZERO(k);
if (ret)
- return ret;
+ goto out;
memcpy(k, &g, sizeof(g));
ret = bch2_trans_update(trans, iter, k, 0);
@@ -1422,7 +1443,7 @@ int bch2_check_alloc_info(struct bch_fs *c)
&freespace_iter,
&bucket_gens_iter);
if (ret)
- break;
+ goto bkey_err;
} else {
next = k.k->p;
@@ -1488,7 +1509,6 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
const struct bch_alloc_v4 *a;
struct bkey_s_c alloc_k, k;
struct printbuf buf = PRINTBUF;
- struct printbuf buf2 = PRINTBUF;
int ret;
alloc_k = bch2_btree_iter_peek(alloc_iter);
@@ -1505,8 +1525,9 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
return 0;
bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
- POS(alloc_k.k->p.inode, a->io_time[READ]), 0);
-
+ lru_pos(alloc_k.k->p.inode,
+ bucket_to_u64(alloc_k.k->p),
+ a->io_time[READ]), 0);
k = bch2_btree_iter_peek_slot(&lru_iter);
ret = bkey_err(k);
if (ret)
@@ -1517,21 +1538,18 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
" %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
- fsck_err_on(k.k->type != KEY_TYPE_lru ||
- le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != alloc_k.k->p.offset, c,
- "incorrect/missing lru entry\n"
- " %s\n"
+ fsck_err_on(k.k->type != KEY_TYPE_set, c,
+ "missing lru entry\n"
" %s",
(printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
- (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
+ bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
u64 read_time = a->io_time[READ] ?:
atomic64_read(&c->io_clock[READ].now);
ret = bch2_lru_set(trans,
alloc_k.k->p.inode,
- alloc_k.k->p.offset,
- &read_time);
+ bucket_to_u64(alloc_k.k->p),
+ read_time);
if (ret)
goto err;
@@ -1552,7 +1570,6 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
err:
fsck_err:
bch2_trans_iter_exit(trans, &lru_iter);
- printbuf_exit(&buf2);
printbuf_exit(&buf);
return ret;
}
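
The lru positions used above pack several values into one btree position: bucket_to_u64() folds a bucket's (device, offset) pair into a single u64, and lru_pos() combines an lru id, that packed bucket and a time into a position from which lru_pos_time() can later recover the time. A standalone sketch of that style of packing follows; the bit widths and field placement here are assumptions for illustration, not necessarily the exact bcachefs encoding.

#include <stdint.h>
#include <assert.h>

#define LRU_TIME_BITS	 48	/* assumed width of the time field */
#define BUCKET_DEV_SHIFT 48	/* assumed: device index above the bucket offset */

struct pos { uint64_t inode, offset; };	/* stand-in for struct bpos */

/* Fold a bucket's (device, offset) position into a single u64. */
static uint64_t bucket_to_u64(struct pos bucket)
{
	return (bucket.inode << BUCKET_DEV_SHIFT) | bucket.offset;
}

static struct pos u64_to_bucket(uint64_t b)
{
	struct pos p = {
		.inode	= b >> BUCKET_DEV_SHIFT,
		.offset	= b & ((1ULL << BUCKET_DEV_SHIFT) - 1),
	};
	return p;
}

/* lru id above the time in pos.inode, packed bucket as pos.offset. */
static struct pos lru_pos(uint16_t lru_id, uint64_t dev_bucket, uint64_t time)
{
	struct pos p = {
		.inode	= ((uint64_t) lru_id << LRU_TIME_BITS) | time,
		.offset	= dev_bucket,
	};
	return p;
}

static uint64_t lru_pos_time(struct pos p)
{
	return p.inode & ((1ULL << LRU_TIME_BITS) - 1);
}

int main(void)
{
	struct pos bucket = { .inode = 2, .offset = 1024 };	/* device 2, bucket 1024 */
	struct pos p = lru_pos(2, bucket_to_u64(bucket), 12345);

	assert(lru_pos_time(p) == 12345);
	assert(u64_to_bucket(p.offset).offset == 1024);
	return 0;
}
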
@@ -1630,21 +1647,28 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
goto write;
}
- if (bch2_trans_inconsistent_on(a->v.journal_seq > c->journal.flushed_seq_ondisk, trans,
- "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
- "%s",
- a->v.journal_seq,
- c->journal.flushed_seq_ondisk,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = -EIO;
+ if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
+ if (test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
+ bch2_trans_inconsistent(trans,
+ "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
+ "%s",
+ a->v.journal_seq,
+ c->journal.flushed_seq_ondisk,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ }
goto out;
}
- if (bch2_trans_inconsistent_on(a->v.data_type != BCH_DATA_need_discard, trans,
- "bucket incorrectly set in need_discard btree\n"
- "%s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = -EIO;
+ if (a->v.data_type != BCH_DATA_need_discard) {
+ if (test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
+ bch2_trans_inconsistent(trans,
+ "bucket incorrectly set in need_discard btree\n"
+ "%s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ }
+
goto out;
}
@@ -1732,51 +1756,34 @@ void bch2_do_discards(struct bch_fs *c)
}
static int invalidate_one_bucket(struct btree_trans *trans,
- struct btree_iter *lru_iter, struct bkey_s_c k,
- unsigned dev_idx, s64 *nr_to_invalidate)
+ struct btree_iter *lru_iter,
+ struct bpos bucket,
+ s64 *nr_to_invalidate)
{
struct bch_fs *c = trans->c;
struct btree_iter alloc_iter = { NULL };
struct bkey_i_alloc_v4 *a;
- struct bpos bucket;
struct printbuf buf = PRINTBUF;
unsigned cached_sectors;
int ret = 0;
- if (*nr_to_invalidate <= 0 || k.k->p.inode != dev_idx)
+ if (*nr_to_invalidate <= 0)
return 1;
- if (k.k->type != KEY_TYPE_lru) {
- prt_printf(&buf, "non lru key in lru btree:\n ");
- bch2_bkey_val_to_text(&buf, c, k);
-
- if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
- bch_err(c, "%s", buf.buf);
- } else {
- bch2_trans_inconsistent(trans, "%s", buf.buf);
- ret = -EINVAL;
- }
-
- goto out;
- }
-
- bucket = POS(dev_idx, le64_to_cpu(bkey_s_c_to_lru(k).v->idx));
-
a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
- if (k.k->p.offset != alloc_lru_idx(a->v)) {
+ if (lru_pos_time(lru_iter->pos) != alloc_lru_idx(a->v)) {
prt_printf(&buf, "alloc key does not point back to lru entry when invalidating bucket:\n ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
+ bch2_bpos_to_text(&buf, lru_iter->pos);
prt_printf(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, k);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
- if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
- bch_err(c, "%s", buf.buf);
- } else {
- bch2_trans_inconsistent(trans, "%s", buf.buf);
+ bch_err(c, "%s", buf.buf);
+ if (test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
+ bch2_inconsistent_error(c);
ret = -EINVAL;
}
@@ -1827,9 +1834,13 @@ static void bch2_do_invalidates_work(struct work_struct *work)
s64 nr_to_invalidate =
should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
- ret = for_each_btree_key2(&trans, iter, BTREE_ID_lru,
- POS(ca->dev_idx, 0), BTREE_ITER_INTENT, k,
- invalidate_one_bucket(&trans, &iter, k, ca->dev_idx, &nr_to_invalidate));
+ ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_lru,
+ lru_pos(ca->dev_idx, 0, 0),
+ lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
+ BTREE_ITER_INTENT, k,
+ invalidate_one_bucket(&trans, &iter,
+ u64_to_bucket(k.k->p.offset),
+ &nr_to_invalidate));
if (ret < 0) {
percpu_ref_put(&ca->ref);