author     Kent Overstreet <kent.overstreet@gmail.com>   2021-12-23 21:35:28 -0500
committer  Kent Overstreet <kent.overstreet@linux.dev>   2023-10-22 17:09:19 -0400
commit     e75b2d4c1c829142f8e3e64a9b3cf7faedcfb640 (patch)
tree       75f6894c21e6efb33e63fa06fcb5e2185f93842f
parent     4b674b09a950fb20aa30e902331e4eba12059b80 (diff)
bcachefs: bch2_journal_key_insert() no longer transfers ownership
bch2_journal_key_insert() used to assume that the key passed to it was
allocated with kmalloc(), and on success took ownership. This patch
deletes that behaviour, making it more similar to
bch2_trans_update()/bch2_trans_commit().

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
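As a rough sketch of the two calling conventions after this change (not part of the patch; the example_* helper names are made up for illustration, BTREE_ID_extents is picked only because it appears in the buckets.c hunk below, and the bodies are condensed from the recovery.c and buckets.c hunks):

/*
 * Sketch only, assuming kernel context with the usual bcachefs headers
 * in scope.
 */

/* bch2_journal_key_insert_take(): the key must be heap-allocated; on
 * success the journal keys array takes ownership, on error the caller
 * still has to free it. */
static int example_insert_take(struct bch_fs *c, struct bkey_i *k)
{
	struct bkey_i *n;
	int ret;

	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	bkey_copy(n, k);

	ret = bch2_journal_key_insert_take(c, BTREE_ID_extents, 0, n);
	if (ret)
		kfree(n);
	return ret;
}

/* bch2_journal_key_insert(): the key is copied internally, so the
 * caller keeps ownership and a stack-allocated key is fine. */
static int example_insert_copy(struct bch_fs *c, struct bpos pos, unsigned size)
{
	struct bkey_i_error new;

	bkey_init(&new.k);
	new.k.type = KEY_TYPE_error;
	new.k.p    = pos;
	new.k.size = size;

	return bch2_journal_key_insert(c, BTREE_ID_extents, 0, &new.k_i);
}

With this split, the btree_gc.c callers that already kmalloc() their keys either switch to the _take variant or free their copy unconditionally, while buckets.c can drop its allocation entirely and build the error key on the stack, as the hunks below show.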
-rw-r--r--   fs/bcachefs/btree_gc.c   12
-rw-r--r--   fs/bcachefs/buckets.c    18
-rw-r--r--   fs/bcachefs/recovery.c   35
-rw-r--r--   fs/bcachefs/recovery.h    2
4 files changed, 34 insertions(+), 33 deletions(-)
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 3fa9f5996fca..d525a3045766 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -169,7 +169,7 @@ static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
new->v.min_key = new_min;
SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
- ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
+ ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
if (ret) {
kfree(new);
return ret;
@@ -198,7 +198,7 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
new->k.p = new_max;
SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
- ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
+ ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
if (ret) {
kfree(new);
return ret;
@@ -690,7 +690,7 @@ found:
}
}
- ret = bch2_journal_key_insert(c, btree_id, level, new);
+ ret = bch2_journal_key_insert_take(c, btree_id, level, new);
if (ret)
kfree(new);
else
@@ -1390,8 +1390,7 @@ static int bch2_gc_reflink_done_initial_fn(struct btree_trans *trans,
}
ret = bch2_journal_key_insert(c, BTREE_ID_reflink, 0, new);
- if (ret)
- kfree(new);
+ kfree(new);
}
fsck_err:
return ret;
@@ -1516,8 +1515,7 @@ inconsistent:
stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
ret = bch2_journal_key_insert(c, BTREE_ID_stripes, 0, &new->k_i);
- if (ret)
- kfree(new);
+ kfree(new);
}
fsck_err:
return ret;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 63409ddd975a..1959601fe056 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -1217,19 +1217,13 @@ not_found:
*/
if (fsck_err(c, "%llu:%llu len %u points to nonexistent indirect extent %llu",
p.k->p.inode, p.k->p.offset, p.k->size, *idx)) {
- struct bkey_i_error *new;
+ struct bkey_i_error new;
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if (!new) {
- bch_err(c, "%s: error allocating new key", __func__);
- return -ENOMEM;
- }
-
- bkey_init(&new->k);
- new->k.type = KEY_TYPE_error;
- new->k.p = p.k->p;
- new->k.size = p.k->size;
- ret = bch2_journal_key_insert(c, BTREE_ID_extents, 0, &new->k_i);
+ bkey_init(&new.k);
+ new.k.type = KEY_TYPE_error;
+ new.k.p = p.k->p;
+ new.k.size = p.k->size;
+ ret = bch2_journal_key_insert(c, BTREE_ID_extents, 0, &new.k_i);
}
fsck_err:
return ret;
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index d0ceac0f2b39..118d536b4376 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -109,8 +109,8 @@ static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsign
iter->idx++;
}
-int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
- unsigned level, struct bkey_i *k)
+int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bkey_i *k)
{
struct journal_key n = {
.btree_id = id,
@@ -157,27 +157,34 @@ int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
return 0;
}
-int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
- unsigned level, struct bpos pos)
+int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bkey_i *k)
{
- struct bkey_i *whiteout =
- kmalloc(sizeof(struct bkey), GFP_KERNEL);
+ struct bkey_i *n;
int ret;
- if (!whiteout) {
- bch_err(c, "%s: error allocating new key", __func__);
+ n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
+ if (!n)
return -ENOMEM;
- }
-
- bkey_init(&whiteout->k);
- whiteout->k.p = pos;
- ret = bch2_journal_key_insert(c, id, level, whiteout);
+ bkey_copy(n, k);
+ ret = bch2_journal_key_insert_take(c, id, level, n);
if (ret)
- kfree(whiteout);
+ kfree(n);
return ret;
}
+int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bpos pos)
+{
+ struct bkey_i whiteout;
+
+ bkey_init(&whiteout.k);
+ whiteout.k.p = pos;
+
+ return bch2_journal_key_insert(c, id, level, &whiteout);
+}
+
static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
struct journal_key *k = iter->idx - iter->keys->nr
diff --git a/fs/bcachefs/recovery.h b/fs/bcachefs/recovery.h
index e45c70b3693f..1504e0bdb940 100644
--- a/fs/bcachefs/recovery.h
+++ b/fs/bcachefs/recovery.h
@@ -31,6 +31,8 @@ struct btree_and_journal_iter {
} last;
};
+int bch2_journal_key_insert_take(struct bch_fs *, enum btree_id,
+ unsigned, struct bkey_i *);
int bch2_journal_key_insert(struct bch_fs *, enum btree_id,
unsigned, struct bkey_i *);
int bch2_journal_key_delete(struct bch_fs *, enum btree_id,