author | Kent Overstreet <kent.overstreet@gmail.com> | 2021-12-23 21:35:28 -0500
---|---|---
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2021-12-26 23:35:13 -0500
commit | 764691cb187583673f058fc67759400a7544b03c (patch) |
tree | e73ae877ba92db43fc45bc7d24aab79093259f97 |
parent | 96d4c918bf73de742aedc4d5642d105e673617f6 (diff) |
bcachefs: bch2_journal_key_insert() no longer transfers ownership
bch2_journal_key_insert() used to assume that the key passed to it was
allocated with kmalloc(), and on success took ownership. This patch
deletes that behaviour, making it more similar to
bch2_trans_update()/bch2_trans_commit().
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
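For illustration only (this snippet is not part of the patch, and example_insert_error_key() is a hypothetical caller): under the new convention the key no longer needs to be heap-allocated, because bch2_journal_key_insert() copies it into its own kmalloc()ed buffer. The caller keeps ownership of whatever it passed in and no longer needs an error-path kfree(), mirroring how the patch itself rewrites bch2_journal_key_delete().

/*
 * Hypothetical caller sketching the post-patch convention: the key can
 * live on the stack, since bch2_journal_key_insert() now copies it
 * instead of taking ownership on success.
 */
static int example_insert_error_key(struct bch_fs *c, struct bpos pos)
{
	struct bkey_i k;

	bkey_init(&k.k);
	k.k.type = KEY_TYPE_error;
	k.k.p    = pos;

	/* No kfree() dance on failure: ownership stays with the caller. */
	return bch2_journal_key_insert(c, BTREE_ID_extents, 0, &k);
}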
-rw-r--r-- | fs/bcachefs/btree_gc.c | 24
-rw-r--r-- | fs/bcachefs/buckets.c | 18
-rw-r--r-- | fs/bcachefs/recovery.c | 46
3 files changed, 37 insertions, 51 deletions
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 6d8d61e8cf46..6db92849b9d0 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -170,10 +170,10 @@ static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
 	SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
 
 	ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
-	if (ret) {
-		kfree(new);
+	kfree(new);
+
+	if (ret)
 		return ret;
-	}
 
 	bch2_btree_node_drop_keys_outside_node(b);
 
@@ -199,10 +199,10 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
 	SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
 
 	ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
-	if (ret) {
-		kfree(new);
+	kfree(new);
+
+	if (ret)
 		return ret;
-	}
 
 	bch2_btree_node_drop_keys_outside_node(b);
 
@@ -691,9 +691,9 @@ found:
 	}
 
 	ret = bch2_journal_key_insert(c, btree_id, level, new);
-	if (ret)
-		kfree(new);
-	else
+	kfree(new);
+
+	if (!ret)
 		*k = bkey_i_to_s_c(new);
 }
 fsck_err:
@@ -1390,8 +1390,7 @@ static int bch2_gc_reflink_done_initial_fn(struct btree_trans *trans,
 		}
 
 		ret = bch2_journal_key_insert(c, BTREE_ID_reflink, 0, new);
-		if (ret)
-			kfree(new);
+		kfree(new);
 	}
 fsck_err:
 	return ret;
@@ -1516,8 +1515,7 @@ inconsistent:
 			stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
 
 		ret = bch2_journal_key_insert(c, BTREE_ID_stripes, 0, &new->k_i);
-		if (ret)
-			kfree(new);
+		kfree(new);
 	}
 fsck_err:
 	return ret;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 6477192babf6..43dfc8e84555 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -1220,19 +1220,13 @@ not_found:
 	 */
 	if (fsck_err(c, "%llu:%llu len %u points to nonexistent indirect extent %llu",
		     p.k->p.inode, p.k->p.offset, p.k->size, *idx)) {
-		struct bkey_i_error *new;
+		struct bkey_i_error new;
 
-		new = kmalloc(sizeof(*new), GFP_KERNEL);
-		if (!new) {
-			bch_err(c, "%s: error allocating new key", __func__);
-			return -ENOMEM;
-		}
-
-		bkey_init(&new->k);
-		new->k.type = KEY_TYPE_error;
-		new->k.p = p.k->p;
-		new->k.size = p.k->size;
-		ret = bch2_journal_key_insert(c, BTREE_ID_extents, 0, &new->k_i);
+		bkey_init(&new.k);
+		new.k.type = KEY_TYPE_error;
+		new.k.p = p.k->p;
+		new.k.size = p.k->size;
+		ret = bch2_journal_key_insert(c, BTREE_ID_extents, 0, &new.k_i);
 	}
 fsck_err:
 	return ret;
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 9916fad292be..f870e9a0eac5 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -115,21 +115,12 @@ int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
 	struct journal_key n = {
 		.btree_id	= id,
 		.level		= level,
-		.k		= k,
 		.allocated	= true
 	};
 	struct journal_keys *keys = &c->journal_keys;
 	struct journal_iter *iter;
 	unsigned idx = journal_key_search(keys, id, level, k->k.p);
 
-	if (idx < keys->nr &&
-	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
-		if (keys->d[idx].allocated)
-			kfree(keys->d[idx].k);
-		keys->d[idx] = n;
-		return 0;
-	}
-
 	if (keys->nr == keys->size) {
 		struct journal_keys new_keys = {
 			.nr		= keys->nr,
@@ -149,10 +140,23 @@ int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
 		*keys = new_keys;
 	}
 
-	array_insert_item(keys->d, keys->nr, idx, n);
+	n.k = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
+	if (!n.k)
+		return -ENOMEM;
+
+	bkey_copy(n.k, k);
+
+	if (idx < keys->nr &&
+	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
+		if (keys->d[idx].allocated)
+			kfree(keys->d[idx].k);
+		keys->d[idx] = n;
+	} else {
+		array_insert_item(keys->d, keys->nr, idx, n);
 
-	list_for_each_entry(iter, &c->journal_iters, list)
-		journal_iter_fix(c, iter, idx);
+		list_for_each_entry(iter, &c->journal_iters, list)
+			journal_iter_fix(c, iter, idx);
+	}
 
 	return 0;
 }
@@ -160,22 +164,12 @@ int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
 int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
 {
-	struct bkey_i *whiteout =
-		kmalloc(sizeof(struct bkey), GFP_KERNEL);
-	int ret;
-
-	if (!whiteout) {
-		bch_err(c, "%s: error allocating new key", __func__);
-		return -ENOMEM;
-	}
+	struct bkey_i whiteout;
 
-	bkey_init(&whiteout->k);
-	whiteout->k.p = pos;
+	bkey_init(&whiteout.k);
+	whiteout.k.p = pos;
 
-	ret = bch2_journal_key_insert(c, id, level, whiteout);
-	if (ret)
-		kfree(whiteout);
-	return ret;
+	return bch2_journal_key_insert(c, id, level, &whiteout);
 }
 
 static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)