diff options

author:    Kent Overstreet <kent.overstreet@gmail.com>  2019-03-16 14:27:40 -0400
committer: Kent Overstreet <kent.overstreet@gmail.com>  2019-03-17 14:30:13 -0400
commit:    f61bb0b139797bc42a419d08c9a5891099906cc3 (patch)
tree:      9be42ce67d9bd576f5496d9c648d31420581157e
parent:    0bbf542eca7cfe93e0d6dd8103f58f545848d6a6 (diff)
bcachefs: Always use bch2_extent_trim_atomic()
-rw-r--r--  fs/bcachefs/btree_types.h       |  2
-rw-r--r--  fs/bcachefs/btree_update.h      |  3
-rw-r--r--  fs/bcachefs/btree_update_leaf.c | 80
-rw-r--r--  fs/bcachefs/extents.c           | 34
-rw-r--r--  fs/bcachefs/extents.h           | 14
-rw-r--r--  fs/bcachefs/io.c                | 28

6 files changed, 63 insertions, 98 deletions
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index a6aea023a562..696a07997855 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -489,8 +489,6 @@ struct btree_root { enum btree_insert_ret { BTREE_INSERT_OK, - /* extent spanned multiple leaf nodes: have to traverse to next node: */ - BTREE_INSERT_NEED_TRAVERSE, /* leaf node needs to be split */ BTREE_INSERT_BTREE_NODE_FULL, BTREE_INSERT_ENOSPC, diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h index 1f371b5a1b3d..f98024ed6699 100644 --- a/fs/bcachefs/btree_update.h +++ b/fs/bcachefs/btree_update.h @@ -127,9 +127,6 @@ enum { int bch2_btree_delete_at(struct btree_iter *, unsigned); -int bch2_btree_insert_list_at(struct btree_iter *, struct keylist *, - struct disk_reservation *, u64 *, unsigned); - int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *, struct disk_reservation *, u64 *, int flags); diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c index 008dcc961213..ee9a1fb736df 100644 --- a/fs/bcachefs/btree_update_leaf.c +++ b/fs/bcachefs/btree_update_leaf.c @@ -184,9 +184,8 @@ void bch2_btree_journal_key(struct btree_insert *trans, set_btree_node_dirty(b); } -static enum btree_insert_ret -bch2_insert_fixup_key(struct btree_insert *trans, - struct btree_insert_entry *insert) +static void bch2_insert_fixup_key(struct btree_insert *trans, + struct btree_insert_entry *insert) { struct btree_iter *iter = insert->iter; struct btree_iter_level *l = &iter->l[0]; @@ -198,30 +197,27 @@ bch2_insert_fixup_key(struct btree_insert *trans, if (bch2_btree_bset_insert_key(iter, l->b, &l->iter, insert->k)) bch2_btree_journal_key(trans, iter, insert->k); - - return BTREE_INSERT_OK; } /** * btree_insert_key - insert a key one key into a leaf node */ -static enum btree_insert_ret -btree_insert_key_leaf(struct btree_insert *trans, - struct btree_insert_entry *insert) +static void btree_insert_key_leaf(struct btree_insert *trans, 
+ struct btree_insert_entry *insert) { struct bch_fs *c = trans->c; struct btree_iter *iter = insert->iter; struct btree *b = iter->l[0].b; - enum btree_insert_ret ret; int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s); int old_live_u64s = b->nr.live_u64s; int live_u64s_added, u64s_added; bch2_mark_update(trans, insert); - ret = !btree_node_is_extents(b) - ? bch2_insert_fixup_key(trans, insert) - : bch2_insert_fixup_extent(trans, insert); + if (!btree_node_is_extents(b)) + bch2_insert_fixup_key(trans, insert); + else + bch2_insert_fixup_extent(trans, insert); live_u64s_added = (int) b->nr.live_u64s - old_live_u64s; u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s; @@ -236,7 +232,6 @@ btree_insert_key_leaf(struct btree_insert *trans, bch2_btree_iter_reinit_node(iter, b); trace_btree_insert_key(c, b, insert->k); - return ret; } /* Deferred btree updates: */ @@ -290,9 +285,8 @@ static void deferred_update_flush(struct journal *j, kfree(k); } -static enum btree_insert_ret -btree_insert_key_deferred(struct btree_insert *trans, - struct btree_insert_entry *insert) +static void btree_insert_key_deferred(struct btree_insert *trans, + struct btree_insert_entry *insert) { struct bch_fs *c = trans->c; struct journal *j = &c->journal; @@ -320,8 +314,6 @@ btree_insert_key_deferred(struct btree_insert *trans, bch2_journal_pin_update(j, trans->journal_res.seq, &d->journal, deferred_update_flush); spin_unlock(&d->lock); - - return BTREE_INSERT_OK; } void bch2_deferred_update_free(struct bch_fs *c, @@ -484,13 +476,13 @@ btree_key_can_insert(struct btree_insert *trans, return BTREE_INSERT_OK; } -static inline enum btree_insert_ret -do_btree_insert_one(struct btree_insert *trans, - struct btree_insert_entry *insert) +static inline void do_btree_insert_one(struct btree_insert *trans, + struct btree_insert_entry *insert) { - return likely(!insert->deferred) - ? 
btree_insert_key_leaf(trans, insert) - : btree_insert_key_deferred(trans, insert); + if (likely(!insert->deferred)) + btree_insert_key_leaf(trans, insert); + else + btree_insert_key_deferred(trans, insert); } /* @@ -594,19 +586,8 @@ got_journal_res: } trans->did_work = true; - trans_for_each_entry(trans, i) { - switch (do_btree_insert_one(trans, i)) { - case BTREE_INSERT_OK: - break; - case BTREE_INSERT_NEED_TRAVERSE: - BUG_ON((trans->flags & - (BTREE_INSERT_ATOMIC|BTREE_INSERT_NOUNLOCK))); - ret = -EINTR; - goto out; - default: - BUG(); - } - } + trans_for_each_entry(trans, i) + do_btree_insert_one(trans, i); out: BUG_ON(ret && (trans->flags & BTREE_INSERT_JOURNAL_RESERVED) && @@ -628,6 +609,8 @@ static inline void btree_insert_entry_checks(struct bch_fs *c, if (!i->deferred) { BUG_ON(i->iter->level); BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos)); + EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) && + !bch2_extent_is_atomic(i->k, i->iter)); bch2_btree_iter_verify_locks(i->iter); } @@ -874,28 +857,6 @@ int bch2_btree_delete_at(struct btree_iter *iter, unsigned flags) BTREE_INSERT_ENTRY(iter, &k)); } -int bch2_btree_insert_list_at(struct btree_iter *iter, - struct keylist *keys, - struct disk_reservation *disk_res, - u64 *journal_seq, unsigned flags) -{ - BUG_ON(flags & BTREE_INSERT_ATOMIC); - BUG_ON(bch2_keylist_empty(keys)); - bch2_verify_keylist_sorted(keys); - - while (!bch2_keylist_empty(keys)) { - int ret = bch2_btree_insert_at(iter->c, disk_res, - journal_seq, flags, - BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys))); - if (ret) - return ret; - - bch2_keylist_pop_front(keys); - } - - return 0; -} - /** * bch_btree_insert - insert keys into the extent btree * @c: pointer to struct bch_fs @@ -961,6 +922,7 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id, /* create the biggest key we can */ bch2_key_resize(&delete.k, max_sectors); bch2_cut_back(end, &delete.k); + bch2_extent_trim_atomic(&delete, &iter); } ret = 
bch2_btree_insert_at(c, NULL, journal_seq, diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index 369b100a0a58..d50e985adf93 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -928,15 +928,25 @@ static void extent_insert_committed(struct extent_insert_state *s) insert->k.needs_whiteout = false; } -void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter) +static inline struct bpos +bch2_extent_atomic_end(struct bkey_i *k, struct btree_iter *iter) { struct btree *b = iter->l[0].b; BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK); + BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0); - bch2_cut_back(b->key.k.p, &k->k); + return bpos_min(k->k.p, b->key.k.p); +} - BUG_ON(bkey_cmp(bkey_start_pos(&k->k), b->data->min_key) < 0); +void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter) +{ + bch2_cut_back(bch2_extent_atomic_end(k, iter), &k->k); +} + +bool bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter) +{ + return !bkey_cmp(bch2_extent_atomic_end(k, iter), k->k.p); } enum btree_insert_ret @@ -952,9 +962,6 @@ bch2_extent_can_insert(struct btree_insert *trans, struct bkey_s_c k; int sectors; - BUG_ON(trans->flags & BTREE_INSERT_ATOMIC && - !bch2_extent_is_atomic(&insert->k->k, insert->iter)); - /* * We avoid creating whiteouts whenever possible when deleting, but * those optimizations mean we may potentially insert two whiteouts @@ -1216,12 +1223,10 @@ next: * If the end of iter->pos is not the same as the end of insert, then * key insertion needs to continue/be retried. 
*/ -enum btree_insert_ret -bch2_insert_fixup_extent(struct btree_insert *trans, - struct btree_insert_entry *insert) +void bch2_insert_fixup_extent(struct btree_insert *trans, + struct btree_insert_entry *insert) { struct btree_iter *iter = insert->iter; - struct btree *b = iter->l[0].b; struct extent_insert_state s = { .trans = trans, .insert = insert, @@ -1248,16 +1253,9 @@ bch2_insert_fixup_extent(struct btree_insert *trans, extent_insert_committed(&s); + BUG_ON(insert->k->k.size); EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k))); EBUG_ON(bkey_cmp(iter->pos, s.committed)); - - if (insert->k->k.size) { - /* got to the end of this leaf node */ - BUG_ON(bkey_cmp(iter->pos, b->key.k.p)); - return BTREE_INSERT_NEED_TRAVERSE; - } - - return BTREE_INSERT_OK; } const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k) diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h index 698b25818afb..026960c82d04 100644 --- a/fs/bcachefs/extents.h +++ b/fs/bcachefs/extents.h @@ -406,21 +406,13 @@ enum merge_result bch2_reservation_merge(struct bch_fs *, } void bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *); - -static inline bool bch2_extent_is_atomic(struct bkey *k, - struct btree_iter *iter) -{ - struct btree *b = iter->l[0].b; - - return bkey_cmp(k->p, b->key.k.p) <= 0 && - bkey_cmp(bkey_start_pos(k), b->data->min_key) >= 0; -} +bool bch2_extent_is_atomic(struct bkey_i *, struct btree_iter *); enum btree_insert_ret bch2_extent_can_insert(struct btree_insert *, struct btree_insert_entry *, unsigned *); -enum btree_insert_ret -bch2_insert_fixup_extent(struct btree_insert *, struct btree_insert_entry *); +void bch2_insert_fixup_extent(struct btree_insert *, + struct btree_insert_entry *); void bch2_extent_mark_replicas_cached(struct bch_fs *, struct bkey_s_extent, unsigned, unsigned); diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c index 64637687bf40..bf86bc11ae27 100644 --- a/fs/bcachefs/io.c +++ b/fs/bcachefs/io.c @@ -276,18 
+276,36 @@ static void bch2_write_done(struct closure *cl) int bch2_write_index_default(struct bch_write_op *op) { + struct bch_fs *c = op->c; struct keylist *keys = &op->insert_keys; struct btree_iter iter; int ret; - bch2_btree_iter_init(&iter, op->c, BTREE_ID_EXTENTS, + bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, bkey_start_pos(&bch2_keylist_front(keys)->k), BTREE_ITER_INTENT); - ret = bch2_btree_insert_list_at(&iter, keys, &op->res, - op_journal_seq(op), - BTREE_INSERT_NOFAIL| - BTREE_INSERT_USE_RESERVE); + do { + BKEY_PADDED(k) split; + + bkey_copy(&split.k, bch2_keylist_front(keys)); + + bch2_extent_trim_atomic(&split.k, &iter); + + ret = bch2_btree_insert_at(c, &op->res, + op_journal_seq(op), + BTREE_INSERT_NOFAIL| + BTREE_INSERT_USE_RESERVE, + BTREE_INSERT_ENTRY(&iter, &split.k)); + if (ret) + break; + + if (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) < 0) + bch2_cut_front(iter.pos, bch2_keylist_front(keys)); + else + bch2_keylist_pop_front(keys); + } while (!bch2_keylist_empty(keys)); + bch2_btree_iter_unlock(&iter); return ret; |