author		Kent Overstreet <kent.overstreet@gmail.com>	2016-09-08 18:26:27 -0800
committer	Kent Overstreet <kent.overstreet@gmail.com>	2016-09-08 18:55:10 -0800
commit		19b114ebab825ca6ca11e1f58838582bfc7d1c1a (patch)
tree		478acfe0d74f5fce4cdaf0ae782401b19a29b839
parent		6e9e665255932b2dcc2985e3bbd603d7a673138d (diff)
bcache: don't walk nodes without pointers unnecessarily in gc
Only the extents btree keeps pointers in its leaf nodes, so gc can walk the
other btrees down to depth 1 and skip their leaves entirely. Will help mount
times.
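
In outline: the node-iteration helpers gain a starting depth, and gc passes
depth 1 for btrees whose leaves carry nothing to mark. A minimal sketch of the
resulting gc call pattern, with names taken from the diff below; the wrapper
function name is hypothetical, and error handling and the actual pointer
marking are elided:

/*
 * Illustration only, not part of the patch: the gc walk after this change.
 */
static void gc_walk_sketch(struct cache_set *c, enum btree_id btree_id)
{
	struct btree_iter iter;
	struct btree *b;
	struct range_checks r;
	/* only extents keep pointers in leaf keys; other btrees need only
	 * their interior nodes walked, so start at depth 1 */
	unsigned depth = btree_id == BTREE_ID_EXTENTS ? 0 : 1;

	btree_node_range_checks_init(&r, depth);

	for_each_btree_node(&iter, c, btree_id, POS_MIN, depth, b) {
		btree_node_range_checks(c, b, &r);
		/* ... mark the pointers in b, as bch_gc_btree() does ... */
	}
}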
-rw-r--r--	drivers/md/bcache/btree_gc.c	27
-rw-r--r--	drivers/md/bcache/btree_iter.c	28
-rw-r--r--	drivers/md/bcache/btree_iter.h	23
-rw-r--r--	drivers/md/bcache/debug.c	2
-rw-r--r--	drivers/md/bcache/fs.c	3
-rw-r--r--	drivers/md/bcache/migrate.c	2
6 files changed, 53 insertions, 32 deletions
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index 486c47372458..ea1e72fe14db 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -33,14 +33,16 @@ struct range_checks {
struct bpos min;
struct bpos max;
} l[BTREE_MAX_DEPTH];
+ unsigned depth;
};
-static void btree_node_range_checks_init(struct range_checks *r)
+static void btree_node_range_checks_init(struct range_checks *r, unsigned depth)
{
unsigned i;
for (i = 0; i < BTREE_MAX_DEPTH; i++)
r->l[i].min = r->l[i].max = POS_MIN;
+ r->depth = depth;
}
static void btree_node_range_checks(struct cache_set *c, struct btree *b,
@@ -62,7 +64,7 @@ static void btree_node_range_checks(struct cache_set *c, struct btree *b,
l->max = b->data->max_key;
- if (b->level) {
+ if (b->level > r->depth) {
l = &r->l[b->level - 1];
cache_set_inconsistent_on(bkey_cmp(b->data->min_key,
@@ -183,11 +185,18 @@ static int bch_gc_btree(struct cache_set *c, enum btree_id btree_id)
struct btree *b;
bool should_rewrite;
struct range_checks r;
+ unsigned depth = btree_id == BTREE_ID_EXTENTS ? 0 : 1;
int ret;
- btree_node_range_checks_init(&r);
+ /*
+ * if expensive_debug_checks is on, run range_checks on all leaf nodes:
+ */
+ if (expensive_debug_checks(c))
+ depth = 0;
- for_each_btree_node(&iter, c, btree_id, POS_MIN, b) {
+ btree_node_range_checks_init(&r, depth);
+
+ for_each_btree_node(&iter, c, btree_id, POS_MIN, depth, b) {
btree_node_range_checks(c, b, &r);
bch_verify_btree_nr_keys(&b->keys);
@@ -670,7 +679,7 @@ static int bch_coalesce_btree(struct cache_set *c, enum btree_id btree_id)
*/
memset(merge, 0, sizeof(merge));
- __for_each_btree_node(&iter, c, btree_id, POS_MIN, b, U8_MAX) {
+ __for_each_btree_node(&iter, c, btree_id, POS_MIN, 0, b, U8_MAX) {
memmove(merge + 1, merge,
sizeof(merge) - sizeof(merge[0]));
memmove(lock_seq + 1, lock_seq,
@@ -837,13 +846,17 @@ static void bch_initial_gc_btree(struct cache_set *c, enum btree_id id)
struct btree_iter iter;
struct btree *b;
struct range_checks r;
+ unsigned depth = id == BTREE_ID_EXTENTS ? 0 : 1;
+
+ if (expensive_debug_checks(c))
+ depth = 0;
- btree_node_range_checks_init(&r);
+ btree_node_range_checks_init(&r, depth);
if (!c->btree_roots[id].b)
return;
- for_each_btree_node(&iter, c, id, POS_MIN, b) {
+ for_each_btree_node(&iter, c, id, POS_MIN, depth, b) {
btree_node_range_checks(c, b, &r);
if (btree_node_has_ptrs(b)) {
diff --git a/drivers/md/bcache/btree_iter.c b/drivers/md/bcache/btree_iter.c
index 0c10780b74fb..6385ec5d75f4 100644
--- a/drivers/md/bcache/btree_iter.c
+++ b/drivers/md/bcache/btree_iter.c
@@ -510,7 +510,8 @@ static void btree_iter_verify_locking(struct btree_iter *iter, unsigned level)
#endif
}
-static inline void btree_iter_lock_root(struct btree_iter *iter, struct bpos pos)
+static inline void btree_iter_lock_root(struct btree_iter *iter, struct bpos pos,
+ unsigned l)
{
struct cache_set *c = iter->c;
@@ -519,9 +520,14 @@ static inline void btree_iter_lock_root(struct btree_iter *iter, struct bpos pos
memset(iter->nodes, 0, sizeof(iter->nodes));
while (1) {
- struct btree *b = c->btree_roots[iter->btree_id].b;
+ struct btree *b = READ_ONCE(c->btree_roots[iter->btree_id].b);
- iter->level = b->level;
+ iter->level = READ_ONCE(b->level);
+
+ if (iter->level < l) {
+ iter->level = l;
+ break;
+ }
btree_iter_verify_locking(iter, iter->level);
@@ -631,7 +637,7 @@ retry:
return ret;
}
} else {
- btree_iter_lock_root(iter, pos);
+ btree_iter_lock_root(iter, pos, l);
}
return 0;
@@ -657,13 +663,15 @@ struct btree *bch_btree_iter_peek_node(struct btree_iter *iter)
b = iter->nodes[iter->level];
- EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
- iter->pos = b->key.k.p;
+ if (b) {
+ EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
+ iter->pos = b->key.k.p;
+ }
return b;
}
-struct btree *bch_btree_iter_next_node(struct btree_iter *iter)
+struct btree *bch_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
{
struct btree *b;
int ret;
@@ -685,7 +693,7 @@ struct btree *bch_btree_iter_next_node(struct btree_iter *iter)
if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
struct bpos pos = bkey_successor(iter->pos);
- ret = __bch_btree_iter_traverse(iter, 0, pos);
+ ret = __bch_btree_iter_traverse(iter, depth, pos);
if (ret)
return NULL;
@@ -835,9 +843,9 @@ recheck:
void __bch_btree_iter_init(struct btree_iter *iter, struct cache_set *c,
enum btree_id btree_id, struct bpos pos,
- int locks_want)
+ int locks_want, unsigned depth)
{
- iter->level = 0;
+ iter->level = depth;
iter->is_extents = btree_id == BTREE_ID_EXTENTS;
iter->nodes_locked = 0;
iter->nodes_intent_locked = 0;
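
A subtlety the hunks above handle: if the requested depth is above the root's
level (say depth 1 on a btree whose root is still a leaf), btree_iter_lock_root()
now stops without descending and bch_btree_iter_peek_node() returns NULL, so a
node walk simply runs zero times. A sketch, for illustration only; the btree id
and the helper name are assumptions:

static void empty_walk_sketch(struct cache_set *c)
{
	struct btree_iter iter;
	struct btree *b;

	/* a btree whose root is a leaf (level 0), walked at depth 1:
	 * there are no interior nodes, so the body never runs */
	for_each_btree_node(&iter, c, BTREE_ID_INODES, POS_MIN, 1, b)
		pr_info("visited an interior node\n");
}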
diff --git a/drivers/md/bcache/btree_iter.h b/drivers/md/bcache/btree_iter.h
index c021a2633d2f..c0031d99b3dc 100644
--- a/drivers/md/bcache/btree_iter.h
+++ b/drivers/md/bcache/btree_iter.h
@@ -137,7 +137,7 @@ void bch_btree_iter_reinit_node(struct btree_iter *, struct btree *);
int __must_check bch_btree_iter_traverse(struct btree_iter *);
struct btree *bch_btree_iter_peek_node(struct btree_iter *);
-struct btree *bch_btree_iter_next_node(struct btree_iter *);
+struct btree *bch_btree_iter_next_node(struct btree_iter *, unsigned);
struct bkey_s_c bch_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch_btree_iter_peek_with_holes(struct btree_iter *);
@@ -147,14 +147,14 @@ void bch_btree_iter_advance_pos(struct btree_iter *);
void bch_btree_iter_rewind(struct btree_iter *, struct bpos);
void __bch_btree_iter_init(struct btree_iter *, struct cache_set *,
- enum btree_id, struct bpos, int);
+ enum btree_id, struct bpos, int, unsigned);
static inline void bch_btree_iter_init(struct btree_iter *iter,
struct cache_set *c,
enum btree_id btree_id,
struct bpos pos)
{
- __bch_btree_iter_init(iter, c, btree_id, pos, 0);
+ __bch_btree_iter_init(iter, c, btree_id, pos, 0, 0);
}
static inline void bch_btree_iter_init_intent(struct btree_iter *iter,
@@ -162,7 +162,7 @@ static inline void bch_btree_iter_init_intent(struct btree_iter *iter,
enum btree_id btree_id,
struct bpos pos)
{
- __bch_btree_iter_init(iter, c, btree_id, pos, 1);
+ __bch_btree_iter_init(iter, c, btree_id, pos, 1, 0);
}
int bch_btree_iter_unlock(struct btree_iter *);
@@ -191,21 +191,22 @@ static inline int btree_iter_cmp(const struct btree_iter *l,
return bkey_cmp(l->pos, r->pos);
}
-#define __for_each_btree_node(_iter, _c, _btree_id, _start, _b, _locks_want)\
+#define __for_each_btree_node(_iter, _c, _btree_id, _start, _depth, \
+ _b, _locks_want) \
for (__bch_btree_iter_init((_iter), (_c), (_btree_id), \
- _start, _locks_want), \
+ _start, _locks_want, _depth), \
(_iter)->is_extents = false, \
_b = bch_btree_iter_peek_node(_iter); \
(_b); \
- (_b) = bch_btree_iter_next_node(_iter))
+ (_b) = bch_btree_iter_next_node(_iter, _depth))
-#define for_each_btree_node(_iter, _c, _btree_id, _start, _b) \
- __for_each_btree_node(_iter, _c, _btree_id, _start, _b, 0)
+#define for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b) \
+ __for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b, 0)
#define __for_each_btree_key(_iter, _c, _btree_id, _start, \
_k, _locks_want) \
for (__bch_btree_iter_init((_iter), (_c), (_btree_id), \
- _start, _locks_want); \
+ _start, _locks_want, 0); \
((_k) = bch_btree_iter_peek(_iter)).k; \
bch_btree_iter_advance_pos(_iter))
@@ -218,7 +219,7 @@ static inline int btree_iter_cmp(const struct btree_iter *l,
#define __for_each_btree_key_with_holes(_iter, _c, _btree_id, \
_start, _k, _locks_want) \
for (__bch_btree_iter_init((_iter), (_c), (_btree_id), \
- _start, _locks_want); \
+ _start, _locks_want, 0); \
((_k) = bch_btree_iter_peek_with_holes(_iter)).k; \
bch_btree_iter_advance_pos(_iter))
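
Note that the key-iteration macros are unchanged in behaviour: they pass a
hard-coded depth of 0 to __bch_btree_iter_init(), so existing key walks still
start at the leaves. A sketch, for illustration only; the helper name is
hypothetical:

static void key_walk_sketch(struct cache_set *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	__for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, k, 0) {
		/* k is a leaf-level key, exactly as before this patch */
	}
}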
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 224286b99973..6114741c5785 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -313,7 +313,7 @@ static ssize_t bch_read_btree_formats(struct file *file, char __user *buf,
if (!i->size || !bkey_cmp(POS_MAX, i->from))
return i->ret;
- for_each_btree_node(&iter, i->c, i->id, i->from, b) {
+ for_each_btree_node(&iter, i->c, i->id, i->from, 0, b) {
const struct bkey_format *f = &b->keys.format;
struct bset_stats stats;
diff --git a/drivers/md/bcache/fs.c b/drivers/md/bcache/fs.c
index 1253c1543d90..fee985fcb27a 100644
--- a/drivers/md/bcache/fs.c
+++ b/drivers/md/bcache/fs.c
@@ -238,8 +238,7 @@ static int bch_vfs_dirent_create(struct cache_set *c, struct inode *dir,
{
int ret;
- ret = bch_dirent_create(dir, type, name,
- dst->i_ino);
+ ret = bch_dirent_create(dir, type, name, dst->i_ino);
if (unlikely(ret))
return ret;
diff --git a/drivers/md/bcache/migrate.c b/drivers/md/bcache/migrate.c
index c33606865eb2..9241cca3fc5d 100644
--- a/drivers/md/bcache/migrate.c
+++ b/drivers/md/bcache/migrate.c
@@ -253,7 +253,7 @@ static int bch_move_btree_off(struct cache *ca,
unsigned moved = 0, seen = 0;
int ret;
- for_each_btree_node(&iter, ca->set, id, POS_MIN, b) {
+ for_each_btree_node(&iter, ca->set, id, POS_MIN, 0, b) {
struct bkey_s_c_extent e =
bkey_i_to_s_c_extent(&b->key);
seen++;