author     Kent Overstreet <kent.overstreet@linux.dev>   2023-03-06 02:35:56 -0500
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-03-06 04:11:50 -0500
commit     0206d42daf4c4bd3bbcfa15a2bef34319524db49 (patch)
tree       a51d233e8ad9e9e730d1582519950f5af10532cd /libbcachefs/ec.c
parent     61b25f2dd21e1abe11572f4065e75c9c4c304599 (diff)
Update bcachefs sources to 3856459b1b bcachefs: bch2_btree_iter_peek_node_and_restart()
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'libbcachefs/ec.c')
-rw-r--r--  libbcachefs/ec.c | 88
1 file changed, 46 insertions(+), 42 deletions(-)
diff --git a/libbcachefs/ec.c b/libbcachefs/ec.c
index c0342e60..7d43fd4a 100644
--- a/libbcachefs/ec.c
+++ b/libbcachefs/ec.c
@@ -213,8 +213,9 @@ static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
}
}
+/* XXX: this is a non-mempoolified memory allocation: */
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
- unsigned offset, unsigned size)
+ unsigned offset, unsigned size)
{
struct bch_stripe *v = &buf->key.v;
unsigned csum_granularity = 1U << v->csum_granularity_bits;
@@ -241,7 +242,7 @@ static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
return 0;
err:
ec_stripe_buf_exit(buf);
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_stripe_buf;
}
/* Checksumming: */
@@ -914,6 +915,9 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
b = bch2_backpointer_get_node(trans, &node_iter, bucket, *bp_offset, bp);
bch2_trans_iter_exit(trans, &node_iter);
+ if (!b)
+ return 0;
+
prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
bch2_backpointer_to_text(&buf, &bp);
@@ -1099,6 +1103,7 @@ static void ec_stripe_create(struct ec_stripe_new *s)
}
BUG_ON(!s->allocated);
+ BUG_ON(!s->idx);
ec_generate_ec(&s->new_stripe);
@@ -1143,7 +1148,12 @@ err:
}
}
- bch2_stripe_close(c, s);
+ mutex_lock(&c->ec_stripe_new_lock);
+ list_del(&s->list);
+ mutex_unlock(&c->ec_stripe_new_lock);
+
+ if (s->idx)
+ bch2_stripe_close(c, s);
ec_stripe_buf_exit(&s->existing_stripe);
ec_stripe_buf_exit(&s->new_stripe);
@@ -1157,10 +1167,8 @@ static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
mutex_lock(&c->ec_stripe_new_lock);
list_for_each_entry(s, &c->ec_stripe_new_list, list)
- if (!atomic_read(&s->pin)) {
- list_del(&s->list);
+ if (!atomic_read(&s->pin))
goto out;
- }
s = NULL;
out:
mutex_unlock(&c->ec_stripe_new_lock);
@@ -1188,14 +1196,6 @@ void bch2_ec_do_stripe_creates(struct bch_fs *c)
bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
}
-static void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s)
-{
- BUG_ON(atomic_read(&s->pin) <= 0);
-
- if (atomic_dec_and_test(&s->pin))
- bch2_ec_do_stripe_creates(c);
-}
-
static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
{
struct ec_stripe_new *s = h->s;
@@ -1212,14 +1212,6 @@ static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
ec_stripe_new_put(c, s);
}
-/* have a full bucket - hand it off to be erasure coded: */
-void bch2_ec_bucket_written(struct bch_fs *c, struct open_bucket *ob)
-{
- struct ec_stripe_new *s = ob->ec;
-
- ec_stripe_new_put(c, s);
-}
-
void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
{
struct ec_stripe_new *s = ob->ec;
@@ -1236,6 +1228,8 @@ void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
if (!ob)
return NULL;
+ BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);
+
ca = bch_dev_bkey_exists(c, ob->dev);
offset = ca->mi.bucket_size - ob->sectors_free;
@@ -1436,6 +1430,9 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
bool have_cache = true;
int ret = 0;
+ BUG_ON(h->s->new_stripe.key.v.nr_blocks != h->s->nr_data + h->s->nr_parity);
+ BUG_ON(h->s->new_stripe.key.v.nr_redundant != h->s->nr_parity);
+
for_each_set_bit(i, h->s->blocks_gotten, h->s->new_stripe.key.v.nr_blocks) {
__clear_bit(h->s->new_stripe.key.v.ptrs[i].dev, devs.d);
if (i < h->s->nr_data)
@@ -1546,9 +1543,13 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
s64 idx;
int ret;
+ /*
+ * If we can't allocate a new stripe, and there's no stripes with empty
+ * blocks for us to reuse, that means we have to wait on copygc:
+ */
idx = get_existing_stripe(c, h);
if (idx < 0)
- return -BCH_ERR_ENOSPC_stripe_reuse;
+ return -BCH_ERR_stripe_alloc_blocked;
ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
if (ret) {
@@ -1558,12 +1559,14 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
return ret;
}
- if (ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize)) {
- /*
- * this is a problem: we have deleted from the
- * stripes heap already
- */
- BUG();
+ BUG_ON(h->s->existing_stripe.key.v.nr_redundant != h->s->nr_parity);
+ h->s->nr_data = h->s->existing_stripe.key.v.nr_blocks -
+ h->s->existing_stripe.key.v.nr_redundant;
+
+ ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
+ if (ret) {
+ bch2_stripe_close(c, h->s);
+ return ret;
}
BUG_ON(h->s->existing_stripe.size != h->blocksize);
@@ -1675,9 +1678,6 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
bch_err(c, "failed to allocate new stripe");
goto err;
}
-
- if (ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize))
- BUG();
}
if (h->s->allocated)
@@ -1690,7 +1690,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
ret = new_stripe_alloc_buckets(trans, h, RESERVE_stripe, NULL) ?:
__bch2_ec_stripe_head_reserve(trans, h);
if (!ret)
- goto allocated;
+ goto allocate_buf;
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, ENOMEM))
goto err;
@@ -1703,8 +1703,6 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
ret = __bch2_ec_stripe_head_reuse(trans, h);
if (!ret)
break;
- if (ret == -BCH_ERR_ENOSPC_stripe_reuse && cl)
- ret = -BCH_ERR_stripe_alloc_blocked;
if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
goto err;
@@ -1723,10 +1721,16 @@ alloc_existing:
ret = new_stripe_alloc_buckets(trans, h, reserve, cl);
if (ret)
goto err;
-allocated:
+
+allocate_buf:
+ ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
+ if (ret)
+ goto err;
+
h->s->allocated = true;
+allocated:
BUG_ON(!h->s->idx);
-
+ BUG_ON(!h->s->new_stripe.data[0]);
BUG_ON(trans->restarted);
return h;
err:
@@ -1839,8 +1843,8 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
h->target, h->algo, h->redundancy);
if (h->s)
- prt_printf(out, "\tpending: blocks %u+%u allocated %u\n",
- h->s->nr_data, h->s->nr_parity,
+ prt_printf(out, "\tpending: idx %llu blocks %u+%u allocated %u\n",
+ h->s->idx, h->s->nr_data, h->s->nr_parity,
bitmap_weight(h->s->blocks_allocated,
h->s->nr_data));
}
@@ -1848,9 +1852,9 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
mutex_lock(&c->ec_stripe_new_lock);
list_for_each_entry(s, &c->ec_stripe_new_list, list) {
- prt_printf(out, "\tin flight: blocks %u+%u pin %u\n",
- s->nr_data, s->nr_parity,
- atomic_read(&s->pin));
+ prt_printf(out, "\tin flight: idx %llu blocks %u+%u pin %u\n",
+ s->idx, s->nr_data, s->nr_parity,
+ atomic_read(&s->pin));
}
mutex_unlock(&c->ec_stripe_new_lock);
}
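
Note on the error-code change in ec_stripe_buf_init(): the patch replaces the bare -ENOMEM with the private code -BCH_ERR_ENOMEM_stripe_buf. Private bcachefs error codes are tested with bch2_err_matches(), which also matches the parent errno class, so callers that check for ENOMEM (as the bch2_ec_stripe_head_get() hunk above already does) keep working. Below is a minimal caller-side sketch, not part of this commit; it uses only functions visible in the diff, and the surrounding context (h, the err label) is assumed rather than taken from the patch.

	/*
	 * Sketch only: caller of ec_stripe_buf_init() after this patch.
	 * On allocation failure ret is -BCH_ERR_ENOMEM_stripe_buf rather
	 * than a bare -ENOMEM.
	 */
	int ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);

	/* the private code still matches the generic ENOMEM class: */
	if (ret && bch2_err_matches(ret, ENOMEM))
		goto err;	/* out of memory: unwind and return ret */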