author    Kent Overstreet <kent.overstreet@gmail.com>    2016-09-03 21:48:22 -0800
committer Kent Overstreet <kent.overstreet@gmail.com>    2016-09-08 19:07:20 -0800
commit    aff477f5af5e69778712e03c7b48c28cf1f9186c (patch)
tree      ecaeaa2f34c3ba69b377b1ff7b3daa81ff229216
parent    f5a6dac229214474b886c78392fc39c9533fc0db (diff)
bcache: fix copygc + replication
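
With replication, copygc exposed holes in the extent sector accounting:
bch_key_split() never touched bucket stats, sectors were only added as each
fragment of a key was committed, and splitting a compressed extent changes how
many disk sectors its two halves pin. Rework the accounting so the whole key
is marked up front in bch_insert_fixup_extent(), handle compressed extents
explicitly when splitting at the committed position, subtract whatever wasn't
inserted if we bail out early, and gate the stale-pointer and available-bucket
checks in bch_mark_pointer() on journal replay being done rather than
returning early.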
-rw-r--r--    drivers/md/bcache/buckets.c    18
-rw-r--r--    drivers/md/bcache/extents.c    81
2 files changed, 56 insertions(+), 43 deletions(-)
diff --git a/drivers/md/bcache/buckets.c b/drivers/md/bcache/buckets.c
index 3fa40106c783..818491c46317 100644
--- a/drivers/md/bcache/buckets.c
+++ b/drivers/md/bcache/buckets.c
@@ -438,7 +438,8 @@ static void bch_mark_pointer(struct cache_set *c,
* checked the gen
*/
if (ptr_stale(ca, ptr)) {
- BUG_ON(type == S_META);
+ BUG_ON(type != S_CACHED &&
+ test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
return;
}
@@ -450,17 +451,10 @@ static void bch_mark_pointer(struct cache_set *c,
if (!is_gc && gc_will_visit(c, gc_pos))
goto out;
- /*
- * Disallowed state transition - this means a bkey_cmpxchg()
- * operation is racing; just treat it like the pointer was
- * already stale
- */
- if (!may_make_unavailable &&
- type != S_CACHED &&
- is_available_bucket(old)) {
- BUG_ON(type == S_META);
- return;
- }
+ BUG_ON(type != S_CACHED &&
+ !may_make_unavailable &&
+ is_available_bucket(old) &&
+ test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
BUG_ON((old.dirty_sectors ||
old.cached_sectors) &&
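
Both hunks in buckets.c point the same direction: while journal replay is
still running, a pointer can legitimately look stale and a bucket can look
available, so the old unconditional BUG_ON(type == S_META) checks were wrong
on both sides. Post-replay those states indicate a real bug, so the checks
become BUG_ON()s gated on JOURNAL_REPLAY_DONE; note that in the second hunk
the "racing bkey_cmpxchg" early return is gone entirely, so marking now
proceeds for non-cached pointers whose bucket looked available.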
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 0422fb27f1d7..380ee86d0da4 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -674,20 +674,6 @@ bool bch_cut_back(struct bpos where, struct bkey *k)
return true;
}
-/*
- * Returns a key corresponding to the start of @k split at @where, @k will be
- * the second half of the split
- */
-#define bch_key_split(_where, _k) \
-({ \
- BKEY_PADDED(k) __tmp; \
- \
- bkey_copy(&__tmp.k, _k); \
- bch_cut_back(_where, &__tmp.k.k); \
- bch_cut_front(_where, _k); \
- &__tmp.k; \
-})
-
/**
* bch_key_resize - adjust size of @k
*
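
Two things about the removed macro are worth noting. It returned &__tmp.k, a
pointer to storage declared inside a GCC statement expression, which is
formally out of scope as soon as the ({ ... }) ends; and a single
split-and-return helper cannot express the different stats handling the two
halves now need (see extent_insert_committed() below). A minimal sketch of the
safer, open-coded shape the patch adopts, using made-up toy types rather than
real bkeys:

    #include <stdio.h>

    /* Toy stand-in for a bkey: covers logical sectors [start, end). */
    struct toy_key { unsigned start, end; };

    /*
     * What bch_key_split() did, modeled: it returns the address of a
     * local declared inside the statement expression, whose lifetime
     * formally ends when the ({ ... }) does.
     */
    #define toy_key_split(_where, _k)               \
    ({                                              \
        struct toy_key __tmp = *(_k);               \
        __tmp.end = (_where);                       \
        (_k)->start = (_where);                     \
        &__tmp; /* dangling once the ({ }) ends */  \
    })

    /* The open-coded shape: split storage lives in the caller's frame. */
    static void toy_key_split_safe(unsigned where, struct toy_key *k,
                                   struct toy_key *front)
    {
        *front = *k;        /* caller owns this copy */
        front->end = where; /* front half:  [start, where) */
        k->start = where;   /* remainder:   [where, end)   */
    }

    int main(void)
    {
        struct toy_key k = { 0, 128 }, front;

        toy_key_split_safe(64, &k, &front);
        printf("front [%u,%u) rest [%u,%u)\n",
               front.start, front.end, k.start, k.end);
        return 0;
    }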
@@ -1101,8 +1087,7 @@ static enum btree_insert_ret extent_insert_should_stop(struct btree_insert *trans,
}
static void extent_do_insert(struct btree_iter *iter, struct bkey_i *insert,
- struct journal_res *res, unsigned flags,
- struct bucket_stats_cache_set *stats)
+ struct journal_res *res)
{
struct btree_keys *b = &iter->nodes[0]->keys;
struct btree_node_iter *node_iter = &iter->node_iters[0];
@@ -1112,11 +1097,6 @@ static void extent_do_insert(struct btree_iter *iter, struct bkey_i *insert,
bch_btree_node_iter_bset_pos(node_iter, b, i) ?:
bset_bkey_last(i);
- if (!(flags & BTREE_INSERT_NO_MARK_KEY))
- bch_add_sectors(iter, bkey_i_to_s_c(insert),
- bkey_start_offset(&insert->k),
- insert->k.size, stats);
-
bch_btree_journal_key(iter, insert, res);
if ((prev = bkey_prev(t, where)) &&
@@ -1136,20 +1116,42 @@ static void extent_insert_committed(struct btree_insert *trans,
struct journal_res *res,
struct bucket_stats_cache_set *stats)
{
+ struct btree_iter *iter = insert->iter;
- EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k->k), insert->iter->pos));
+ EBUG_ON(bkey_cmp(bkey_start_pos(&insert->k->k), iter->pos));
EBUG_ON(bkey_cmp(insert->k->k.p, committed_pos) < 0);
- EBUG_ON(bkey_cmp(committed_pos, insert->iter->pos) < 0);
+ EBUG_ON(bkey_cmp(committed_pos, iter->pos) < 0);
- if (bkey_cmp(committed_pos, insert->iter->pos) > 0) {
- struct bkey_i *split;
+ if (bkey_cmp(committed_pos, iter->pos) > 0) {
+ BKEY_PADDED(k) split;
EBUG_ON(bkey_deleted(&insert->k->k) || !insert->k->k.size);
- split = bch_key_split(committed_pos, insert->k);
- extent_do_insert(insert->iter, split, res, trans->flags, stats);
+ bkey_copy(&split.k, insert->k);
+
+ if (!(trans->flags & BTREE_INSERT_NO_MARK_KEY) &&
+ bkey_cmp(committed_pos, insert->k->k.p) &&
+ bkey_extent_is_compressed(trans->c,
+ bkey_i_to_s_c(insert->k))) {
+ /* XXX: possibly need to increase our reservation? */
+ bch_cut_subtract_back(iter, committed_pos,
+ bkey_i_to_s(&split.k), stats);
+ bch_cut_front(committed_pos, insert->k);
+ bch_add_sectors(iter, bkey_i_to_s_c(insert->k),
+ bkey_start_offset(&insert->k->k),
+ insert->k->k.size, stats);
+ } else {
+ bch_cut_back(committed_pos, &split.k.k);
+ bch_cut_front(committed_pos, insert->k);
+ }
+
+ if (debug_check_bkeys(iter->c))
+ bkey_debugcheck(iter->c, iter->nodes[iter->level],
+ bkey_i_to_s_c(&split.k));
+
+ extent_do_insert(iter, &split.k, res);
- bch_btree_iter_set_pos_same_leaf(insert->iter, committed_pos);
+ bch_btree_iter_set_pos_same_leaf(iter, committed_pos);
trans->did_work = true;
}
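
The branch above is the heart of the compressed-extent fix. Cutting an
uncompressed extent divides its sectors between the two halves, so plain
bch_cut_back()/bch_cut_front() leave the totals unchanged; a compressed extent
still pins its whole on-disk payload after a cut, so a split increases the
number of live sectors. That is why the committed half is trimmed with the
subtracting variant and the remainder re-added in full, and why the XXX
wonders whether the reservation needs to grow. A toy model of the arithmetic,
with made-up types rather than bcache's:

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy extent over logical sectors [start, end); when compressed,
     * its payload occupies a fixed number of on-disk sectors. */
    struct toy_extent {
        unsigned start, end;
        bool compressed;
        unsigned payload;   /* on-disk sectors when compressed */
    };

    /* Disk sectors this key pins: a compressed extent pins its whole
     * payload no matter how it has been trimmed. */
    static unsigned disk_sectors(const struct toy_extent *e)
    {
        return e->compressed ? e->payload : e->end - e->start;
    }

    int main(void)
    {
        struct toy_extent e = { 0, 128, true, 32 };
        unsigned before = disk_sectors(&e);

        /* Split at 64: both halves still reference the payload. */
        struct toy_extent front = e, back = e;
        front.end  = 64;
        back.start = 64;

        unsigned after = disk_sectors(&front) + disk_sectors(&back);
        printf("pinned before %u, after split %u (delta +%u)\n",
               before, after, after - before);
        return 0;
    }

With compressed = false the delta is zero, which is exactly the plain
cut_back/cut_front else branch.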
@@ -1184,7 +1186,8 @@ __extent_insert_advance_pos(struct btree_insert *trans,
case BTREE_HOOK_NO_INSERT:
extent_insert_committed(trans, insert, *committed_pos,
res, stats);
- __bch_cut_front(next_pos, bkey_i_to_s(insert->k));
+ bch_cut_subtract_front(insert->iter, next_pos,
+ bkey_i_to_s(insert->k), stats);
bch_btree_iter_set_pos_same_leaf(insert->iter, next_pos);
break;
@@ -1314,6 +1317,11 @@ bch_insert_fixup_extent(struct btree_insert *trans,
*/
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
+ if (!(trans->flags & BTREE_INSERT_NO_MARK_KEY))
+ bch_add_sectors(iter, bkey_i_to_s_c(insert->k),
+ bkey_start_offset(&insert->k->k),
+ insert->k->k.size, &stats);
+
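
This is the other half of moving the accounting out of extent_do_insert(): the
whole key is charged once before the insert loop, and the stop: path at the
bottom of the function refunds whatever never made it in. A minimal sketch of
the charge-up-front/refund pattern, with made-up names:

    #include <stdio.h>

    struct toy_stats { long sectors; };
    struct toy_key   { unsigned size; };

    static void add_sectors(struct toy_stats *s, unsigned n)      { s->sectors += n; }
    static void subtract_sectors(struct toy_stats *s, unsigned n) { s->sectors -= n; }

    /* Commit up to 'budget' sectors, trimming the key as we go. */
    static unsigned commit_some(struct toy_key *k, unsigned budget)
    {
        unsigned n = k->size < budget ? k->size : budget;
        k->size -= n;
        return n;
    }

    int main(void)
    {
        struct toy_stats stats = { 0 };
        struct toy_key k = { .size = 100 };

        add_sectors(&stats, k.size);         /* charge the whole key up front */

        unsigned done = commit_some(&k, 40); /* pretend we must stop early */

        if (k.size)                          /* refund the uninserted remainder */
            subtract_sectors(&stats, k.size);

        printf("inserted %u, accounted %ld\n", done, stats.sectors);
        return 0;
    }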
while (bkey_cmp(committed_pos, insert->k->k.p) < 0 &&
(ret = extent_insert_should_stop(trans, insert, res,
start_time, nr_done)) == BTREE_INSERT_OK &&
@@ -1363,7 +1371,7 @@ bch_insert_fixup_extent(struct btree_insert *trans,
}
/* k is the key currently in the tree, 'insert' is the new key */
- switch (bch_extent_overlap(&insert->k->k, k.k)) {
+ switch (overlap) {
case BCH_EXTENT_OVERLAP_FRONT:
/* insert overlaps with start of k: */
bch_cut_subtract_front(iter, insert->k->k.p, k, &stats);
@@ -1450,11 +1458,13 @@ bch_insert_fixup_extent(struct btree_insert *trans,
bch_cut_back(bkey_start_pos(&insert->k->k), &split.k.k);
BUG_ON(bkey_deleted(&split.k.k));
- __bch_cut_front(bkey_start_pos(&insert->k->k), k);
bch_cut_subtract_front(iter, insert->k->k.p, k, &stats);
BUG_ON(bkey_deleted(k.k));
extent_save(&b->keys, node_iter, _k, k.k);
+ bch_add_sectors(iter, bkey_i_to_s_c(&split.k),
+ bkey_start_offset(&split.k.k),
+ split.k.k.size, &stats);
bch_btree_bset_insert(iter, b, node_iter, &split.k);
break;
}
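
In this middle-overlap case the surviving front fragment split becomes a new
key in the tree: rather than trimming k's front with the unaccounted
__bch_cut_front(), the patch lets bch_cut_subtract_front() subtract everything
up to the insert's end and then re-adds the fragment's sectors with
bch_add_sectors() before reinserting it. Subtract-then-re-add keeps the counts
right even when the extent is compressed and the two pieces don't simply
partition the on-disk sectors.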
@@ -1469,6 +1479,15 @@ bch_insert_fixup_extent(struct btree_insert *trans,
ret = BTREE_INSERT_NEED_TRAVERSE;
stop:
extent_insert_committed(trans, insert, committed_pos, res, &stats);
+ /*
+ * Subtract any remaining sectors from @insert, if we bailed out early
+ * and didn't fully insert @insert:
+ */
+ if (insert->k->k.size &&
+ !(trans->flags & BTREE_INSERT_NO_MARK_KEY))
+ bch_subtract_sectors(iter, bkey_i_to_s_c(insert->k),
+ bkey_start_offset(&insert->k->k),
+ insert->k->k.size, &stats);
bch_cache_set_stats_apply(c, &stats, trans->disk_res,
gc_pos_btree_node(b));