diff options
author | Kent Overstreet <kent.overstreet@gmail.com> | 2018-05-15 21:26:13 -0400 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2018-05-22 00:44:18 -0400 |
commit | 6a43f09c97ad379cb46aea348acfbbec77cce5a3 (patch) | |
tree | 84c5f212a8cb835e4fd3004c762ab0f411da94d4 | |
parent | 3a8f5c1345ce86009ce2a820fa85fd5ebe60e52a (diff) |
bcachefs: fix integer promotion bug on 32 bit
-rw-r--r-- | fs/bcachefs/bset.c | 7 | ||||
-rw-r--r-- | fs/bcachefs/bset.h | 3 | ||||
-rw-r--r-- | fs/bcachefs/btree_io.c | 5 | ||||
-rw-r--r-- | fs/bcachefs/btree_update_interior.h | 71 |
4 files changed, 47 insertions, 39 deletions
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c index 92046ae4915c..5c9b016f7706 100644 --- a/fs/bcachefs/bset.c +++ b/fs/bcachefs/bset.c @@ -6,6 +6,7 @@ */ #include "bcachefs.h" +#include "btree_cache.h" #include "bset.h" #include "eytzinger.h" #include "util.h" @@ -964,10 +965,14 @@ void bch2_bset_init_first(struct btree *b, struct bset *i) set_btree_bset(b, t, i); } -void bch2_bset_init_next(struct btree *b, struct bset *i) +void bch2_bset_init_next(struct bch_fs *c, struct btree *b, + struct btree_node_entry *bne) { + struct bset *i = &bne->keys; struct bset_tree *t; + BUG_ON(bset_byte_offset(b, bne) >= btree_bytes(c)); + BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b))); BUG_ON(b->nsets >= MAX_BSETS); memset(i, 0, sizeof(*i));
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h index 1b67825ccef7..153e2b3f787f 100644 --- a/fs/bcachefs/bset.h +++ b/fs/bcachefs/bset.h @@ -339,7 +339,8 @@ int bch2_btree_keys_alloc(struct btree *, unsigned, gfp_t); void bch2_btree_keys_init(struct btree *, bool *); void bch2_bset_init_first(struct btree *, struct bset *); -void bch2_bset_init_next(struct btree *, struct bset *); +void bch2_bset_init_next(struct bch_fs *, struct btree *, + struct btree_node_entry *); void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool); void bch2_bset_fix_invalidated_key(struct btree *, struct bset_tree *, struct bkey_packed *);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index 07ad0cd7071c..74ffad4c38f3 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -878,7 +878,7 @@ void bch2_btree_init_next(struct bch_fs *c, struct btree *b, bne = want_new_bset(c, b); if (bne) - bch2_bset_init_next(b, &bne->keys); + bch2_bset_init_next(c, b, bne); bch2_btree_build_aux_trees(b); @@ -1743,6 +1743,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, BUG_ON((b->will_make_reachable != 0) != !b->written); BUG_ON(b->written >= c->opts.btree_node_size); + BUG_ON(b->written & (c->opts.block_size - 1)); BUG_ON(bset_written(b, btree_bset_last(b))); BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c)); BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format))); @@ -1973,7 +1974,7 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b) bne = want_new_bset(c, b); if (bne) - bch2_bset_init_next(b, &bne->keys); + bch2_bset_init_next(c, b, bne); bch2_btree_build_aux_trees(b);
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h index 3e66d69eda1b..25bfc7ab9ee0 100644 --- a/fs/bcachefs/btree_update_interior.h +++ b/fs/bcachefs/btree_update_interior.h @@ -226,11 +226,30 @@ static inline bool bset_unwritten(struct btree *b, struct bset *i) return (void *) i > write_block(b); } -static inline unsigned bset_end_sector(struct bch_fs *c, struct btree *b, - struct bset *i) +static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c, + struct btree *b, + void *end) { - return round_up(bset_byte_offset(b, vstruct_end(i)), - block_bytes(c)) >> 9; + ssize_t used = bset_byte_offset(b, end) / sizeof(u64) + + b->whiteout_u64s + + b->uncompacted_whiteout_u64s; + ssize_t total = c->opts.btree_node_size << 6; + + return total - used; +} + +static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c, + struct btree *b) +{ + ssize_t remaining = __bch_btree_u64s_remaining(c, b, + btree_bkey_last(b, bset_tree_last(b))); + + BUG_ON(remaining < 0); + + if (bset_written(b, btree_bset_last(b))) + return 0; + + return remaining; } static inline unsigned btree_write_set_buffer(struct btree *b) @@ -246,20 +265,19 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c, struct btree *b) { struct bset *i = btree_bset_last(b); - unsigned offset = max_t(unsigned, b->written << 9, - bset_byte_offset(b, vstruct_end(i))); - ssize_t remaining_space = (ssize_t) btree_bytes(c) - (ssize_t) - (offset + sizeof(struct btree_node_entry) + - b->whiteout_u64s * sizeof(u64) + - b->uncompacted_whiteout_u64s * sizeof(u64)); - - EBUG_ON(offset > btree_bytes(c)); - - if ((unlikely(bset_written(b, i)) && - remaining_space > block_bytes(c)) || - (unlikely(vstruct_bytes(i) > btree_write_set_buffer(b)) && - remaining_space > btree_write_set_buffer(b))) - return (void *) b->data + offset; + struct btree_node_entry *bne = max(write_block(b), + (void *) btree_bkey_last(b, bset_tree_last(b))); + ssize_t remaining_space = + __bch_btree_u64s_remaining(c, b, &bne->keys.start[0]); + + if (unlikely(bset_written(b, i))) { + if (remaining_space > (ssize_t) (block_bytes(c) >> 3)) + return bne; + } else { + if (unlikely(vstruct_bytes(i) > btree_write_set_buffer(b)) && + remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3)) + return bne; + } return NULL; } @@ -285,23 +303,6 @@ static inline void reserve_whiteout(struct btree *b, struct bset_tree *t, } } -static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c, - struct btree *b) -{ - struct bset *i = btree_bset_last(b); - unsigned used = bset_byte_offset(b, vstruct_end(i)) / sizeof(u64) + - b->whiteout_u64s + - b->uncompacted_whiteout_u64s; - unsigned total = c->opts.btree_node_size << 6; - - EBUG_ON(used > total); - - if (bset_written(b, i)) - return 0; - - return total - used; -} - /* * write lock must be held on @b (else the dirty bset that we were going to * insert into could be written out from under us)