summaryrefslogtreecommitdiff
path: root/drivers/md/bcache/extents.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md/bcache/extents.c')
-rw-r--r--drivers/md/bcache/extents.c67
1 file changed, 33 insertions(+), 34 deletions(-)
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 817346a2db13..541f985c2b1a 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -114,7 +114,7 @@ bch_insert_fixup_key(struct btree_insert_trans *trans,
struct journal_res *res)
{
struct btree_iter *iter = insert->iter;
- struct btree_iter_level *l = &iter->l[0];
+ struct btree_iter_level *l = &iter_s(insert->iter)->l[0];
BUG_ON(iter->level);
@@ -816,14 +816,10 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct btree_keys *b,
return nr;
}
-static void bch_add_sectors(struct btree_iter *iter, struct bkey_s_c k,
- u64 offset, s64 sectors,
+static void bch_add_sectors(struct cache_set *c, struct btree *b,
+ struct bkey_s_c k, u64 offset, s64 sectors,
struct bucket_stats_cache_set *stats)
{
- struct cache_set *c = iter->c;
- struct btree *b = iter->l[0].node;
-
- EBUG_ON(iter->level);
EBUG_ON(bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0);
if (!sectors)
@@ -836,39 +832,40 @@ static void bch_add_sectors(struct btree_iter *iter, struct bkey_s_c k,
bcache_dev_sectors_dirty_add(c, k.k->p.inode, offset, sectors);
}
-static void bch_subtract_sectors(struct btree_iter *iter, struct bkey_s_c k,
- u64 offset, s64 sectors,
+static void bch_subtract_sectors(struct cache_set *c, struct btree *b,
+ struct bkey_s_c k, u64 offset, s64 sectors,
struct bucket_stats_cache_set *stats)
{
- bch_add_sectors(iter, k, offset, -sectors, stats);
+ bch_add_sectors(c, b, k, offset, -sectors, stats);
}
/* These wrappers subtract exactly the sectors that we're removing from @k */
-static void bch_cut_subtract_back(struct btree_iter *iter,
+static void bch_cut_subtract_back(struct cache_set *c, struct btree *b,
struct bpos where, struct bkey_s k,
struct bucket_stats_cache_set *stats)
{
- bch_subtract_sectors(iter, k.s_c, where.offset,
+ bch_subtract_sectors(c, b, k.s_c, where.offset,
k.k->p.offset - where.offset,
stats);
bch_cut_back(where, k.k);
}
-static void bch_cut_subtract_front(struct btree_iter *iter,
+static void bch_cut_subtract_front(struct cache_set *c, struct btree *b,
struct bpos where, struct bkey_s k,
struct bucket_stats_cache_set *stats)
{
- bch_subtract_sectors(iter, k.s_c, bkey_start_offset(k.k),
+ bch_subtract_sectors(c, b, k.s_c, bkey_start_offset(k.k),
where.offset - bkey_start_offset(k.k),
stats);
__bch_cut_front(where, k);
}
-static void bch_drop_subtract(struct btree_iter *iter, struct bkey_s k,
+static void bch_drop_subtract(struct cache_set *c, struct btree *b,
+ struct bkey_s k,
struct bucket_stats_cache_set *stats)
{
if (k.k->size)
- bch_subtract_sectors(iter, k.s_c,
+ bch_subtract_sectors(c, b, k.s_c,
bkey_start_offset(k.k), k.k->size,
stats);
k.k->size = 0;
@@ -994,13 +991,14 @@ static bool bch_extent_merge_inline(struct btree_iter *iter,
#define MAX_LOCK_HOLD_TIME (5 * NSEC_PER_MSEC)
-static enum btree_insert_ret extent_insert_should_stop(struct btree_insert_trans *trans,
- struct btree_trans_entry *insert,
- struct journal_res *res,
- u64 start_time,
- unsigned nr_done)
+static enum btree_insert_ret extent_insert_should_stop(struct cache_set *c,
+ struct btree *b,
+ struct btree_insert_trans *trans,
+ struct btree_trans_entry *insert,
+ struct journal_res *res,
+ u64 start_time,
+ unsigned nr_done)
{
- struct btree *b = insert->iter->l[0].node;
/*
* Check if we have sufficient space in both the btree node and the
* journal reservation:
@@ -1014,7 +1012,7 @@ static enum btree_insert_ret extent_insert_should_stop(struct btree_insert_trans
* doing a lot of work under the btree node write lock - bail out if
* we've been running for too long and readers are waiting on the lock:
*/
- if (!bch_btree_node_insert_fits(insert->iter->c, b, insert->k->k.u64s))
+ if (!bch_btree_node_insert_fits(c, b, insert->k->k.u64s))
return BTREE_INSERT_BTREE_NODE_FULL;
else if (!journal_res_insert_fits(trans, insert, res))
return BTREE_INSERT_JOURNAL_RES_FULL; /* XXX worth tracing */
@@ -1041,9 +1039,10 @@ static void extent_do_insert(struct btree_iter *iter,
bset_bkey_last(i);
if (!(flags & BTREE_INSERT_NO_MARK_KEY))
- bch_add_sectors(iter, bkey_i_to_s_c(insert),
+ bch_add_sectors(iter->c, btree_iter_leaf(iter),
+ bkey_i_to_s_c(insert),
bkey_start_offset(&insert->k),
- insert->k.size, stats);
+ insert->k.size, stats);
bch_btree_journal_key(iter, insert, res);
@@ -1141,7 +1140,7 @@ extent_insert_advance_pos(struct btree_insert_trans *trans,
struct journal_res *res, unsigned flags,
struct bucket_stats_cache_set *stats)
{
- struct btree *b = insert->iter->l[0].node;
+ struct btree *b = btree_iter_leaf(insert->iter);
struct bpos next_pos =
bpos_min(insert->k->k.p, k.k ? k.k->p : b->key.k.p);
@@ -1234,7 +1233,7 @@ bch_insert_fixup_extent(struct btree_insert_trans *trans,
{
struct btree_iter *iter = insert->iter;
struct cache_set *c = iter->c;
- struct btree_iter_level *l = &iter->l[0];
+ struct btree_iter_level *l = &iter_s(iter)->l[0];
struct btree *b = l->node;
struct btree_node_iter *node_iter = &l->node_iter;
struct btree_node_iter node_insert_iter = *node_iter;
@@ -1260,7 +1259,7 @@ bch_insert_fixup_extent(struct btree_insert_trans *trans,
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
while (bkey_cmp(committed_pos, insert->k->k.p) < 0 &&
- (ret = extent_insert_should_stop(trans, insert, res,
+ (ret = extent_insert_should_stop(c, b, trans, insert, res,
start_time, nr_done)) == BTREE_INSERT_OK &&
(_k = bch_btree_node_iter_peek_all(node_iter, &b->keys))) {
struct bkey_s k = __bkey_disassemble(f, _k, &unpacked);
@@ -1283,8 +1282,8 @@ bch_insert_fixup_extent(struct btree_insert_trans *trans,
* bkey_start_pos(k.k) not monotonically increasing except for
* ancestors of a given snapshot with nonzero size:
*/
- if (!bch_snapshot_is_descendant(c, trans->snapshot,
- k.k->p.snapshot))
+ if (!bch_is_snapshot_ancestor(c, trans->snapshot,
+ k.k->p.snapshot))
continue;
if (bkey_cmp(bkey_start_pos(k.k), insert->k->k.p) >= 0)
@@ -1341,14 +1340,14 @@ bch_insert_fixup_extent(struct btree_insert_trans *trans,
switch (overlap) {
case BCH_EXTENT_OVERLAP_FRONT:
/* insert overlaps with start of k: */
- bch_cut_subtract_front(iter, insert->k->k.p, k, &stats);
+ bch_cut_subtract_front(c, b, insert->k->k.p, k, &stats);
BUG_ON(bkey_deleted(k.k));
extent_save(&b->keys, node_iter, _k, k.k);
break;
case BCH_EXTENT_OVERLAP_BACK:
/* insert overlaps with end of k: */
- bch_cut_subtract_back(iter,
+ bch_cut_subtract_back(c, b,
bkey_start_pos(&insert->k->k),
k, &stats);
BUG_ON(bkey_deleted(k.k));
@@ -1368,7 +1367,7 @@ bch_insert_fixup_extent(struct btree_insert_trans *trans,
if (!bkey_deleted(_k))
btree_keys_account_key_drop(&b->keys.nr, _k);
- bch_drop_subtract(iter, k, &stats);
+ bch_drop_subtract(c, b, k, &stats);
extent_save(&b->keys, node_iter, _k, k.k);
break;
@@ -1393,7 +1392,7 @@ bch_insert_fixup_extent(struct btree_insert_trans *trans,
BUG_ON(bkey_deleted(&split.k.k));
__bch_cut_front(bkey_start_pos(&insert->k->k), k);
- bch_cut_subtract_front(iter, insert->k->k.p, k, &stats);
+ bch_cut_subtract_front(c, b, insert->k->k.p, k, &stats);
BUG_ON(bkey_deleted(k.k));
extent_save(&b->keys, node_iter, _k, k.k);