Diffstat (limited to 'libbcachefs/btree_write_buffer.c')
-rw-r--r-- | libbcachefs/btree_write_buffer.c | 605
1 file changed, 176 insertions, 429 deletions
diff --git a/libbcachefs/btree_write_buffer.c b/libbcachefs/btree_write_buffer.c index d3c38d2c..6a191568 100644 --- a/libbcachefs/btree_write_buffer.c +++ b/libbcachefs/btree_write_buffer.c @@ -7,130 +7,43 @@ #include "btree_write_buffer.h" #include "error.h" #include "journal.h" -#include "journal_io.h" #include "journal_reclaim.h" -#include <linux/prefetch.h> +#include <linux/sort.h> static int bch2_btree_write_buffer_journal_flush(struct journal *, struct journal_entry_pin *, u64); -static int bch2_journal_keys_to_write_buffer(struct bch_fs *, struct journal_buf *); - -static inline bool __wb_key_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r) -{ - return (cmp_int(l->hi, r->hi) ?: - cmp_int(l->mi, r->mi) ?: - cmp_int(l->lo, r->lo)) >= 0; -} - -static inline bool wb_key_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r) -{ -#ifdef CONFIG_X86_64 - int cmp; - - asm("mov (%[l]), %%rax;" - "sub (%[r]), %%rax;" - "mov 8(%[l]), %%rax;" - "sbb 8(%[r]), %%rax;" - "mov 16(%[l]), %%rax;" - "sbb 16(%[r]), %%rax;" - : "=@ccae" (cmp) - : [l] "r" (l), [r] "r" (r) - : "rax", "cc"); - - EBUG_ON(cmp != __wb_key_cmp(l, r)); - return cmp; -#else - return __wb_key_cmp(l, r); -#endif -} - -/* Compare excluding idx, the low 24 bits: */ -static inline bool wb_key_eq(const void *_l, const void *_r) +static int btree_write_buffered_key_cmp(const void *_l, const void *_r) { - const struct wb_key_ref *l = _l; - const struct wb_key_ref *r = _r; + const struct btree_write_buffered_key *l = _l; + const struct btree_write_buffered_key *r = _r; - return !((l->hi ^ r->hi)| - (l->mi ^ r->mi)| - ((l->lo >> 24) ^ (r->lo >> 24))); + return cmp_int(l->btree, r->btree) ?: + bpos_cmp(l->k.k.p, r->k.k.p) ?: + cmp_int(l->journal_seq, r->journal_seq) ?: + cmp_int(l->journal_offset, r->journal_offset); } -static noinline void wb_sort(struct wb_key_ref *base, size_t num) +static int btree_write_buffered_journal_cmp(const void *_l, const void *_r) { - size_t n = num, a = num / 2; - - if (!a) /* num < 2 || size == 0 */ - return; - - for (;;) { - size_t b, c, d; - - if (a) /* Building heap: sift down --a */ - --a; - else if (--n) /* Sorting: Extract root to --n */ - swap(base[0], base[n]); - else /* Sort complete */ - break; - - /* - * Sift element at "a" down into heap. This is the - * "bottom-up" variant, which significantly reduces - * calls to cmp_func(): we find the sift-down path all - * the way to the leaves (one compare per level), then - * backtrack to find where to insert the target element. - * - * Because elements tend to sift down close to the leaves, - * this uses fewer compares than doing two per level - * on the way down. (A bit more than half as many on - * average, 3/4 worst-case.) - */ - for (b = a; c = 2*b + 1, (d = c + 1) < n;) - b = wb_key_cmp(base + c, base + d) ? 
c : d; - if (d == n) /* Special case last leaf with no sibling */ - b = c; - - /* Now backtrack from "b" to the correct location for "a" */ - while (b != a && wb_key_cmp(base + a, base + b)) - b = (b - 1) / 2; - c = b; /* Where "a" belongs */ - while (b != a) { /* Shift it into place */ - b = (b - 1) / 2; - swap(base[b], base[c]); - } - } -} - -static noinline int wb_flush_one_slowpath(struct btree_trans *trans, - struct btree_iter *iter, - struct btree_write_buffered_key *wb) -{ - bch2_btree_node_unlock_write(trans, iter->path, iter->path->l[0].b); - - trans->journal_res.seq = wb->journal_seq; + const struct btree_write_buffered_key *l = _l; + const struct btree_write_buffered_key *r = _r; - return bch2_trans_update(trans, iter, &wb->k, - BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: - bch2_trans_commit(trans, NULL, NULL, - BCH_TRANS_COMMIT_no_enospc| - BCH_TRANS_COMMIT_no_check_rw| - BCH_TRANS_COMMIT_no_journal_res| - BCH_TRANS_COMMIT_journal_reclaim); + return cmp_int(l->journal_seq, r->journal_seq); } -static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *iter, - struct btree_write_buffered_key *wb, - bool *write_locked, size_t *fast) +static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans, + struct btree_iter *iter, + struct btree_write_buffered_key *wb, + unsigned commit_flags, + bool *write_locked, + size_t *fast) { struct bch_fs *c = trans->c; struct btree_path *path; int ret; - EBUG_ON(!wb->journal_seq); - EBUG_ON(!c->btree_write_buffer.flushing.pin.seq); - EBUG_ON(c->btree_write_buffer.flushing.pin.seq > wb->journal_seq); - ret = bch2_btree_iter_traverse(iter); if (ret) return ret; @@ -153,14 +66,46 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite *write_locked = true; } - if (unlikely(!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s))) { + if (!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s)) { + bch2_btree_node_unlock_write(trans, path, path->l[0].b); *write_locked = false; - return wb_flush_one_slowpath(trans, iter, wb); + goto trans_commit; } bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq); (*fast)++; return 0; +trans_commit: + trans->journal_res.seq = wb->journal_seq; + + return bch2_trans_update(trans, iter, &wb->k, + BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: + bch2_trans_commit(trans, NULL, NULL, + commit_flags| + BCH_TRANS_COMMIT_no_check_rw| + BCH_TRANS_COMMIT_no_enospc| + BCH_TRANS_COMMIT_no_journal_res| + BCH_TRANS_COMMIT_journal_reclaim); +} + +static union btree_write_buffer_state btree_write_buffer_switch(struct btree_write_buffer *wb) +{ + union btree_write_buffer_state old, new; + u64 v = READ_ONCE(wb->state.v); + + do { + old.v = new.v = v; + + new.nr = 0; + new.idx++; + } while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v); + + while (old.idx == 0 ? 
wb->state.ref0 : wb->state.ref1) + cpu_relax(); + + smp_mb(); + + return old; } /* @@ -192,79 +137,31 @@ btree_write_buffered_insert(struct btree_trans *trans, return ret; } -static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb) -{ - struct bch_fs *c = container_of(wb, struct bch_fs, btree_write_buffer); - struct journal *j = &c->journal; - - if (!wb->inc.keys.nr) - return; - - bch2_journal_pin_add(j, wb->inc.keys.data[0].journal_seq, &wb->flushing.pin, - bch2_btree_write_buffer_journal_flush); - - darray_resize(&wb->flushing.keys, min_t(size_t, 1U << 20, wb->flushing.keys.nr + wb->inc.keys.nr)); - darray_resize(&wb->sorted, wb->flushing.keys.size); - - if (!wb->flushing.keys.nr && wb->sorted.size >= wb->inc.keys.nr) { - swap(wb->flushing.keys, wb->inc.keys); - goto out; - } - - size_t nr = min(darray_room(wb->flushing.keys), - wb->sorted.size - wb->flushing.keys.nr); - nr = min(nr, wb->inc.keys.nr); - - memcpy(&darray_top(wb->flushing.keys), - wb->inc.keys.data, - sizeof(wb->inc.keys.data[0]) * nr); - - memmove(wb->inc.keys.data, - wb->inc.keys.data + nr, - sizeof(wb->inc.keys.data[0]) * (wb->inc.keys.nr - nr)); - - wb->flushing.keys.nr += nr; - wb->inc.keys.nr -= nr; -out: - if (!wb->inc.keys.nr) - bch2_journal_pin_drop(j, &wb->inc.pin); - else - bch2_journal_pin_update(j, wb->inc.keys.data[0].journal_seq, &wb->inc.pin, - bch2_btree_write_buffer_journal_flush); - - if (j->watermark) { - spin_lock(&j->lock); - bch2_journal_set_watermark(j); - spin_unlock(&j->lock); - } - - BUG_ON(wb->sorted.size < wb->flushing.keys.nr); -} - -static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) +int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) { struct bch_fs *c = trans->c; struct journal *j = &c->journal; struct btree_write_buffer *wb = &c->btree_write_buffer; - struct wb_key_ref *i; + struct journal_entry_pin pin; + struct btree_write_buffered_key *i, *keys; struct btree_iter iter = { NULL }; - size_t skipped = 0, fast = 0, slowpath = 0; + size_t nr = 0, skipped = 0, fast = 0, slowpath = 0; bool write_locked = false; + union btree_write_buffer_state s; int ret = 0; - bch2_trans_unlock(trans); - bch2_trans_begin(trans); + memset(&pin, 0, sizeof(pin)); - mutex_lock(&wb->inc.lock); - move_keys_from_inc_to_flushing(wb); - mutex_unlock(&wb->inc.lock); + bch2_journal_pin_copy(j, &pin, &wb->journal_pin, + bch2_btree_write_buffer_journal_flush); + bch2_journal_pin_drop(j, &wb->journal_pin); - for (size_t i = 0; i < wb->flushing.keys.nr; i++) { - wb->sorted.data[i].idx = i; - wb->sorted.data[i].btree = wb->flushing.keys.data[i].btree; - memcpy(&wb->sorted.data[i].pos, &wb->flushing.keys.data[i].k.k.p, sizeof(struct bpos)); - } - wb->sorted.nr = wb->flushing.keys.nr; + s = btree_write_buffer_switch(wb); + keys = wb->keys[s.idx]; + nr = s.nr; + + if (race_fault()) + goto slowpath; /* * We first sort so that we can detect and skip redundant updates, and @@ -280,151 +177,111 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) * If that happens, simply skip the key so we can optimistically insert * as many keys as possible in the fast path. 
*/ - wb_sort(wb->sorted.data, wb->sorted.nr); - - darray_for_each(wb->sorted, i) { - struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx]; - - for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++) - prefetch(&wb->flushing.keys.data[n->idx]); - - BUG_ON(!k->journal_seq); - - if (i + 1 < &darray_top(wb->sorted) && - wb_key_eq(i, i + 1)) { - struct btree_write_buffered_key *n = &wb->flushing.keys.data[i[1].idx]; + sort(keys, nr, sizeof(keys[0]), + btree_write_buffered_key_cmp, NULL); + for (i = keys; i < keys + nr; i++) { + if (i + 1 < keys + nr && + i[0].btree == i[1].btree && + bpos_eq(i[0].k.k.p, i[1].k.k.p)) { skipped++; - n->journal_seq = min_t(u64, n->journal_seq, k->journal_seq); - k->journal_seq = 0; + i->journal_seq = 0; continue; } if (write_locked && - (iter.path->btree_id != k->btree || - bpos_gt(k->k.k.p, iter.path->l[0].b->key.k.p))) { + (iter.path->btree_id != i->btree || + bpos_gt(i->k.k.p, iter.path->l[0].b->key.k.p))) { bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b); write_locked = false; } - if (!iter.path || iter.path->btree_id != k->btree) { + if (!iter.path || iter.path->btree_id != i->btree) { bch2_trans_iter_exit(trans, &iter); - bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p, + bch2_trans_iter_init(trans, &iter, i->btree, i->k.k.p, BTREE_ITER_INTENT|BTREE_ITER_ALL_SNAPSHOTS); } - bch2_btree_iter_set_pos(&iter, k->k.k.p); + bch2_btree_iter_set_pos(&iter, i->k.k.p); iter.path->preserve = false; do { - if (race_fault()) { - ret = -BCH_ERR_journal_reclaim_would_deadlock; - break; - } - - ret = wb_flush_one(trans, &iter, k, &write_locked, &fast); + ret = bch2_btree_write_buffer_flush_one(trans, &iter, i, 0, + &write_locked, &fast); if (!write_locked) bch2_trans_begin(trans); } while (bch2_err_matches(ret, BCH_ERR_transaction_restart)); - if (!ret) { - k->journal_seq = 0; - } else if (ret == -BCH_ERR_journal_reclaim_would_deadlock) { + if (ret == -BCH_ERR_journal_reclaim_would_deadlock) { slowpath++; - ret = 0; - } else + continue; + } + if (ret) break; + + i->journal_seq = 0; } if (write_locked) bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b); bch2_trans_iter_exit(trans, &iter); - if (ret) - goto err; - - if (slowpath) { - /* - * Flush in the order they were present in the journal, so that - * we can release journal pins: - * The fastpath zapped the seq of keys that were successfully flushed so - * we can skip those here. 
- */ - trace_write_buffer_flush_slowpath(trans, slowpath, wb->flushing.keys.nr); - - struct btree_write_buffered_key *i; - darray_for_each(wb->flushing.keys, i) { - if (!i->journal_seq) - continue; - - bch2_journal_pin_update(j, i->journal_seq, &wb->flushing.pin, - bch2_btree_write_buffer_journal_flush); - - bch2_trans_begin(trans); - - ret = commit_do(trans, NULL, NULL, - BCH_WATERMARK_reclaim| - BCH_TRANS_COMMIT_no_check_rw| - BCH_TRANS_COMMIT_no_enospc| - BCH_TRANS_COMMIT_no_journal_res| - BCH_TRANS_COMMIT_journal_reclaim, - btree_write_buffered_insert(trans, i)); - if (ret) - goto err; - } - } -err: + trace_write_buffer_flush(trans, nr, skipped, fast, wb->size); + + if (slowpath) + goto slowpath; + bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)); - trace_write_buffer_flush(trans, wb->flushing.keys.nr, skipped, fast, 0); - bch2_journal_pin_drop(j, &wb->flushing.pin); - wb->flushing.keys.nr = 0; +out: + bch2_journal_pin_drop(j, &pin); return ret; -} +slowpath: + trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, nr); -static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 seq) -{ - struct journal *j = &c->journal; - struct journal_buf *buf; - int ret = 0; + /* + * Now sort the rest by journal seq and bump the journal pin as we go. + * The slowpath zapped the seq of keys that were successfully flushed so + * we can skip those here. + */ + sort(keys, nr, sizeof(keys[0]), + btree_write_buffered_journal_cmp, + NULL); - mutex_lock(&j->buf_lock); - while ((buf = bch2_next_write_buffer_flush_journal_buf(j, seq))) - if (bch2_journal_keys_to_write_buffer(c, buf)) { - ret = -ENOMEM; + for (i = keys; i < keys + nr; i++) { + if (!i->journal_seq) + continue; + + bch2_journal_pin_update(j, i->journal_seq, &pin, + bch2_btree_write_buffer_journal_flush); + + ret = commit_do(trans, NULL, NULL, + BCH_WATERMARK_reclaim| + BCH_TRANS_COMMIT_no_check_rw| + BCH_TRANS_COMMIT_no_enospc| + BCH_TRANS_COMMIT_no_journal_res| + BCH_TRANS_COMMIT_journal_reclaim, + btree_write_buffered_insert(trans, i)); + if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret))) break; - } - mutex_unlock(&j->buf_lock); + } - return ret; + goto out; } int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans) { struct bch_fs *c = trans->c; - struct btree_write_buffer *wb = &c->btree_write_buffer; - int ret = 0, fetch_from_journal_err; - - trace_write_buffer_flush_sync(trans, _RET_IP_); -retry: - bch2_trans_unlock(trans); - bch2_journal_block_reservations(&c->journal); - fetch_from_journal_err = fetch_wb_keys_from_journal(c, U64_MAX); - bch2_journal_unblock(&c->journal); - - /* - * On memory allocation failure, bch2_btree_write_buffer_flush_locked() - * is not guaranteed to empty wb->inc: - */ - mutex_lock(&wb->flushing.lock); - while (!ret && - (wb->flushing.keys.nr || wb->inc.keys.nr)) - ret = bch2_btree_write_buffer_flush_locked(trans); - mutex_unlock(&wb->flushing.lock); + if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer)) + return -BCH_ERR_erofs_no_writes; - if (!ret && fetch_from_journal_err) - goto retry; + trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_); + bch2_trans_unlock(trans); + mutex_lock(&c->btree_write_buffer.flush_lock); + int ret = bch2_btree_write_buffer_flush_locked(trans); + mutex_unlock(&c->btree_write_buffer.flush_lock); + bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer); return ret; } @@ -434,9 +291,9 @@ int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans) struct 
btree_write_buffer *wb = &c->btree_write_buffer; int ret = 0; - if (mutex_trylock(&wb->flushing.lock)) { + if (mutex_trylock(&wb->flush_lock)) { ret = bch2_btree_write_buffer_flush_locked(trans); - mutex_unlock(&wb->flushing.lock); + mutex_unlock(&wb->flush_lock); } return ret; @@ -459,195 +316,85 @@ static int bch2_btree_write_buffer_journal_flush(struct journal *j, { struct bch_fs *c = container_of(j, struct bch_fs, journal); struct btree_write_buffer *wb = &c->btree_write_buffer; - int ret, fetch_from_journal_err; - do { - fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq); - - mutex_lock(&wb->flushing.lock); - ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans)); - mutex_unlock(&wb->flushing.lock); - } while (!ret && - (fetch_from_journal_err || - (wb->flushing.pin.seq && wb->flushing.pin.seq <= seq) || - (wb->inc.pin.seq && wb->inc.pin.seq <= seq))); + mutex_lock(&wb->flush_lock); + int ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans)); + mutex_unlock(&wb->flush_lock); return ret; } -static void bch2_btree_write_buffer_flush_work(struct work_struct *work) +static inline u64 btree_write_buffer_ref(int idx) { - struct bch_fs *c = container_of(work, struct bch_fs, btree_write_buffer.flush_work); - struct btree_write_buffer *wb = &c->btree_write_buffer; - int ret; - - mutex_lock(&wb->flushing.lock); - do { - ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans)); - } while (!ret && bch2_btree_write_buffer_should_flush(c)); - mutex_unlock(&wb->flushing.lock); - - bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer); + return ((union btree_write_buffer_state) { + .ref0 = idx == 0, + .ref1 = idx == 1, + }).v; } -int __bch2_journal_key_to_wb(struct bch_fs *c, - struct journal_keys_to_wb *dst, - enum btree_id btree, struct bkey_i *k) +int bch2_btree_insert_keys_write_buffer(struct btree_trans *trans) { + struct bch_fs *c = trans->c; struct btree_write_buffer *wb = &c->btree_write_buffer; - int ret; -retry: - ret = darray_make_room_gfp(&dst->wb->keys, 1, GFP_KERNEL); - if (!ret && dst->wb == &wb->flushing) - ret = darray_resize(&wb->sorted, wb->flushing.keys.size); - - if (unlikely(ret)) { - if (dst->wb == &c->btree_write_buffer.flushing) { - mutex_unlock(&dst->wb->lock); - dst->wb = &c->btree_write_buffer.inc; - bch2_journal_pin_add(&c->journal, dst->seq, &dst->wb->pin, - bch2_btree_write_buffer_journal_flush); - goto retry; - } - - return ret; - } - - dst->room = darray_room(dst->wb->keys); - if (dst->wb == &wb->flushing) - dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr); - BUG_ON(!dst->room); - BUG_ON(!dst->seq); - - struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys); - wb_k->journal_seq = dst->seq; - wb_k->btree = btree; - bkey_copy(&wb_k->k, k); - dst->wb->keys.nr++; - dst->room--; - return 0; -} + struct btree_write_buffered_key *i; + union btree_write_buffer_state old, new; + int ret = 0; + u64 v; -void bch2_journal_keys_to_write_buffer_start(struct bch_fs *c, struct journal_keys_to_wb *dst, u64 seq) -{ - struct btree_write_buffer *wb = &c->btree_write_buffer; + trans_for_each_wb_update(trans, i) { + EBUG_ON(i->k.k.u64s > BTREE_WRITE_BUFERED_U64s_MAX); - if (mutex_trylock(&wb->flushing.lock)) { - mutex_lock(&wb->inc.lock); - move_keys_from_inc_to_flushing(wb); + i->journal_seq = trans->journal_res.seq; + i->journal_offset = trans->journal_res.offset; + } - /* - * Attempt to skip wb->inc, and add keys directly to - * wb->flushing, saving us a copy later: - */ + preempt_disable(); + v = 
READ_ONCE(wb->state.v); + do { + old.v = new.v = v; - if (!wb->inc.keys.nr) { - dst->wb = &wb->flushing; - } else { - mutex_unlock(&wb->flushing.lock); - dst->wb = &wb->inc; + new.v += btree_write_buffer_ref(new.idx); + new.nr += trans->nr_wb_updates; + if (new.nr > wb->size) { + ret = -BCH_ERR_btree_insert_need_flush_buffer; + goto out; } - } else { - mutex_lock(&wb->inc.lock); - dst->wb = &wb->inc; - } + } while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v); - dst->room = darray_room(dst->wb->keys); - if (dst->wb == &wb->flushing) - dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr); - dst->seq = seq; + memcpy(wb->keys[new.idx] + old.nr, + trans->wb_updates, + sizeof(trans->wb_updates[0]) * trans->nr_wb_updates); - bch2_journal_pin_add(&c->journal, seq, &dst->wb->pin, + bch2_journal_pin_add(&c->journal, trans->journal_res.seq, &wb->journal_pin, bch2_btree_write_buffer_journal_flush); -} -void bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_to_wb *dst) -{ - struct btree_write_buffer *wb = &c->btree_write_buffer; - - if (!dst->wb->keys.nr) - bch2_journal_pin_drop(&c->journal, &dst->wb->pin); - - if (bch2_btree_write_buffer_should_flush(c) && - __bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer) && - !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work)) - bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer); - - if (dst->wb == &wb->flushing) - mutex_unlock(&wb->flushing.lock); - mutex_unlock(&wb->inc.lock); -} - -static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf) -{ - struct journal_keys_to_wb dst; - struct jset_entry *entry; - struct bkey_i *k; - int ret = 0; - - bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq)); - - for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) { - jset_entry_for_each_key(entry, k) { - ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k); - if (ret) - goto out; - } - - entry->type = BCH_JSET_ENTRY_btree_keys; - } - - buf->need_flush_to_write_buffer = false; + atomic64_sub_return_release(btree_write_buffer_ref(new.idx), &wb->state.counter); out: - bch2_journal_keys_to_write_buffer_end(c, &dst); - return ret; -} - -static int wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size) -{ - if (wb->keys.size >= new_size) - return 0; - - if (!mutex_trylock(&wb->lock)) - return -EINTR; - - int ret = darray_resize(&wb->keys, new_size); - mutex_unlock(&wb->lock); + preempt_enable(); return ret; } -int bch2_btree_write_buffer_resize(struct bch_fs *c, size_t new_size) -{ - struct btree_write_buffer *wb = &c->btree_write_buffer; - - return wb_keys_resize(&wb->flushing, new_size) ?: - wb_keys_resize(&wb->inc, new_size); -} - void bch2_fs_btree_write_buffer_exit(struct bch_fs *c) { struct btree_write_buffer *wb = &c->btree_write_buffer; - BUG_ON((wb->inc.keys.nr || wb->flushing.keys.nr) && - !bch2_journal_error(&c->journal)); + BUG_ON(wb->state.nr && !bch2_journal_error(&c->journal)); - darray_exit(&wb->sorted); - darray_exit(&wb->flushing.keys); - darray_exit(&wb->inc.keys); + kvfree(wb->keys[1]); + kvfree(wb->keys[0]); } int bch2_fs_btree_write_buffer_init(struct bch_fs *c) { struct btree_write_buffer *wb = &c->btree_write_buffer; - mutex_init(&wb->inc.lock); - mutex_init(&wb->flushing.lock); - INIT_WORK(&wb->flush_work, bch2_btree_write_buffer_flush_work); + mutex_init(&wb->flush_lock); + wb->size = c->opts.btree_write_buffer_size; - /* Will be resized by journal as needed: */ 
- unsigned initial_size = 1 << 16; + wb->keys[0] = kvmalloc_array(wb->size, sizeof(*wb->keys[0]), GFP_KERNEL); + wb->keys[1] = kvmalloc_array(wb->size, sizeof(*wb->keys[1]), GFP_KERNEL); + if (!wb->keys[0] || !wb->keys[1]) + return -BCH_ERR_ENOMEM_fs_btree_write_buffer_init; - return darray_make_room(&wb->inc.keys, initial_size) ?: - darray_make_room(&wb->flushing.keys, initial_size) ?: - darray_make_room(&wb->sorted, initial_size); + return 0; } |
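The wb_sort() removed by this diff is the bottom-up heapsort variant its comment describes: one compare per level while descending to a leaf, then a backtrack to find the insertion point, which cuts the comparison count to a bit more than half of the classic two-compares-per-level sift-down. The following standalone sketch reproduces the same control flow over a plain int array so the comment can be followed concretely; the function and helper names are mine, not bcachefs code.

#include <stddef.h>
#include <stdio.h>

static void swap_int(int *a, int *b)
{
	int t = *a;
	*a = *b;
	*b = t;
}

static void bottom_up_heapsort(int *base, size_t num)
{
	size_t n = num, a = num / 2;

	if (!a)			/* num < 2: nothing to do */
		return;

	for (;;) {
		size_t b, c, d;

		if (a)			/* heap construction: sift down --a */
			--a;
		else if (--n)		/* sorting: move the max to the end */
			swap_int(&base[0], &base[n]);
		else			/* sort complete */
			break;

		/* Descend toward the larger child, one compare per level */
		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = base[c] >= base[d] ? c : d;
		if (d == n)		/* last leaf has no sibling */
			b = c;

		/* Backtrack to where base[a] belongs on that path */
		while (b != a && base[a] >= base[b])
			b = (b - 1) / 2;

		/* Rotate the path so base[a] drops into its slot */
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap_int(&base[b], &base[c]);
		}
	}
}

int main(void)
{
	int v[] = { 5, 1, 4, 1, 5, 9, 2, 6 };
	size_t n = sizeof(v) / sizeof(v[0]);

	bottom_up_heapsort(v, n);
	for (size_t i = 0; i < n; i++)
		printf("%d ", v[i]);
	printf("\n");
	return 0;
}

On the '+' side this hand-rolled sort (and the three-word sbb-based comparator it depended on) is dropped in favour of the kernel's generic sort() with an ordinary comparison callback.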
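On the '+' side, the write buffer is a pair of preallocated key arrays selected by one packed 64-bit state word: writers reserve slots and pin the active array with a single compare-and-swap, and the flush path swaps arrays with another, then waits for in-flight writers to drain. The sketch below models that handshake with C11 atomics; the field widths (23/1/20/20), the struct layout and every name are illustrative assumptions, not the real union btree_write_buffer_state from btree_write_buffer_types.h, which uses the kernel's atomic64 API.

#include <errno.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

union wb_state {
	uint64_t v;
	struct {
		uint64_t nr:23;		/* keys queued in the active array */
		uint64_t idx:1;		/* which of the two key arrays is active */
		uint64_t ref0:20;	/* writers still copying into array 0 */
		uint64_t ref1:20;	/* writers still copying into array 1 */
	};
};

struct wb_key { uint64_t journal_seq; /* + btree id, bkey payload... */ };

struct write_buffer {
	_Atomic uint64_t	state;
	size_t			size;		/* capacity of each key array */
	struct wb_key		*keys[2];
};

/* Writer side (cf. bch2_btree_insert_keys_write_buffer): reserve room and
 * pin the active array in one CAS, copy the keys, then drop the pin. */
static int wb_append(struct write_buffer *wb, const struct wb_key *src, size_t n)
{
	union wb_state old, new, ref = { .v = 0 };
	uint64_t v = atomic_load_explicit(&wb->state, memory_order_relaxed);

	do {
		old.v = new.v = v;
		if (new.idx == 0)
			new.ref0++;
		else
			new.ref1++;
		new.nr += n;
		if (new.nr > wb->size)
			return -ENOSPC;	/* caller must flush first */
	} while (!atomic_compare_exchange_weak_explicit(&wb->state, &v, new.v,
				memory_order_acquire, memory_order_relaxed));

	/* Slots old.nr .. old.nr + n - 1 of the active array are now ours */
	memcpy(wb->keys[new.idx] + old.nr, src, sizeof(*src) * n);

	if (new.idx == 0)
		ref.ref0 = 1;
	else
		ref.ref1 = 1;
	/* Release our pin so a concurrent wb_switch() can finish draining */
	atomic_fetch_sub_explicit(&wb->state, ref.v, memory_order_release);
	return 0;
}

/* Flush side (cf. btree_write_buffer_switch): publish an empty buffer under
 * the other index, then wait out writers still pinning the old array. */
static union wb_state wb_switch(struct write_buffer *wb)
{
	union wb_state old, new;
	uint64_t v = atomic_load_explicit(&wb->state, memory_order_relaxed);

	do {
		old.v = new.v = v;
		new.nr = 0;
		new.idx ^= 1;
	} while (!atomic_compare_exchange_weak_explicit(&wb->state, &v, new.v,
				memory_order_acquire, memory_order_relaxed));

	for (;;) {
		union wb_state cur = {
			.v = atomic_load_explicit(&wb->state, memory_order_acquire),
		};
		if (!(old.idx == 0 ? cur.ref0 : cur.ref1))
			break;
		sched_yield();	/* stand-in for cpu_relax() */
	}

	return old;	/* old.idx / old.nr describe the array to flush */
}

The release/acquire pairing is what lets the flusher read keys[old.idx] without a lock: each writer's memcpy() is ordered before its releasing subtraction, and the flusher only touches the old array after observing, with acquiring loads, that every pin on it has been dropped.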
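The flush path sorts its snapshot of buffered keys by (btree, position, journal sequence), which is what btree_write_buffered_key_cmp() implements, and then skips every update superseded by a newer one at the same position, as the retained comment explains. A minimal standalone model of that pass, with illustrative types rather than the real bcachefs structures:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct buffered_key {
	uint32_t btree;		/* btree id */
	uint64_t pos;		/* stand-in for struct bpos */
	uint64_t journal_seq;	/* zeroed once the update no longer needs flushing */
};

static int cmp_u64(uint64_t l, uint64_t r)
{
	return (l > r) - (l < r);
}

/* Models btree_write_buffered_key_cmp(): btree, then position, then seq */
static int buffered_key_cmp(const void *_l, const void *_r)
{
	const struct buffered_key *l = _l, *r = _r;
	int c = cmp_u64(l->btree, r->btree);

	if (!c)
		c = cmp_u64(l->pos, r->pos);
	if (!c)
		c = cmp_u64(l->journal_seq, r->journal_seq);
	return c;
}

/* After sorting, only the last (newest) update per position keeps its seq */
static size_t skip_superseded(struct buffered_key *keys, size_t nr)
{
	size_t skipped = 0;

	qsort(keys, nr, sizeof(keys[0]), buffered_key_cmp);

	for (size_t i = 0; i + 1 < nr; i++)
		if (keys[i].btree == keys[i + 1].btree &&
		    keys[i].pos == keys[i + 1].pos) {
			keys[i].journal_seq = 0;
			skipped++;
		}
	return skipped;
}

int main(void)
{
	struct buffered_key keys[] = {
		{ .btree = 1, .pos = 10, .journal_seq = 9 },
		{ .btree = 2, .pos =  3, .journal_seq = 8 },
		{ .btree = 1, .pos = 10, .journal_seq = 7 },
	};

	printf("skipped %zu superseded updates\n",
	       skip_superseded(keys, sizeof(keys) / sizeof(keys[0])));	/* 1 */
	return 0;
}

Keys that fail in the fast path with journal_reclaim_would_deadlock keep their journal_seq; the slowpath re-sorts just those by sequence number (btree_write_buffered_journal_cmp) so the journal pin can be advanced monotonically as each one is committed.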