author     Kent Overstreet <kent.overstreet@gmail.com>  2019-11-16 16:25:58 -0500
committer  Kent Overstreet <kent.overstreet@gmail.com>  2021-04-27 12:17:57 -0400
commit     1e665ca7f4fbc577d2f78e50ece449dec130341e (patch)
tree       769ddc7b29e20bbce904fe00fe490d39268ff7d4 /fs
parent     8a1e6e7f79e7232982dbd0101985eb5db5cd3767 (diff)

bcachefs: Reorganize extents.c

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Diffstat (limited to 'fs')
 -rw-r--r--  fs/bcachefs/extent_update.c |    2
 -rw-r--r--  fs/bcachefs/extents.c       | 1382
 -rw-r--r--  fs/bcachefs/extents.h       |  260
 -rw-r--r--  fs/bcachefs/fs-io.c         |    8
 -rw-r--r--  fs/bcachefs/io.c            |    4
 -rw-r--r--  fs/bcachefs/move.c          |    8
 -rw-r--r--  fs/bcachefs/recovery.c      |    4
 7 files changed, 811 insertions, 857 deletions
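
The hunks below mostly move helpers within extents.c/extents.h and rename a few of them at their call sites. A rough mapping of the renames, inferred from this diff (a sketch, not guaranteed exhaustive):

/*
 * Helper renames visible in this patch:
 *
 *   bch2_extent_is_compressed(k)    -> bch2_bkey_sectors_compressed(k)
 *   bch2_bkey_nr_dirty_ptrs(k)      -> bch2_bkey_nr_ptrs_allocated(k)
 *   bch2_bkey_nr_ptrs_allocated(k)  -> bch2_bkey_nr_ptrs_fully_allocated(k)
 *
 * bch2_bkey_nr_ptrs() itself is reimplemented in terms of
 * bch2_bkey_devs(k).nr rather than an open-coded pointer walk, and the
 * btree ptr / extent / reservation methods are grouped by key type.
 */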
diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c
index 21426e01c395..91ceb5d53f92 100644
--- a/fs/bcachefs/extent_update.c
+++ b/fs/bcachefs/extent_update.c
@@ -200,7 +200,7 @@ bch2_extent_can_insert(struct btree_trans *trans,
*u64s += _k->u64s;
if (overlap == BCH_EXTENT_OVERLAP_MIDDLE &&
- (sectors = bch2_extent_is_compressed(k))) {
+ (sectors = bch2_bkey_sectors_compressed(k))) {
int flags = trans->flags & BTREE_INSERT_NOFAIL
? BCH_DISK_RESERVATION_NOFAIL : 0;
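
The extent_update.c hunk above is a pure rename, but the reservation it guards is worth spelling out: overwriting the middle of a compressed extent splits it into two keys that both still reference the whole compressed allocation, so usable space can shrink even though no new data is written. A minimal standalone model of that accounting, with hypothetical sizes (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned live_sectors       = 128;	/* uncompressed extent size */
	unsigned compressed_sectors = 32;	/* what is actually on disk */

	/* before the middle overwrite: one key, one compressed allocation */
	unsigned disk_before = compressed_sectors;

	/*
	 * after the split: two keys, each still pointing at the full
	 * compressed allocation until rewritten -- roughly what the
	 * bch2_bkey_sectors_compressed() check above has to reserve for
	 */
	unsigned disk_after = 2 * compressed_sectors;

	printf("extent: %u sectors live, %u compressed on disk\n",
	       live_sectors, compressed_sectors);
	printf("extra reservation for the split: %u sectors\n",
	       disk_after - disk_before);
	return 0;
}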
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 2f1d4634ea09..6bcc178604b0 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -25,81 +25,15 @@
#include <trace/events/bcachefs.h>
-unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
-{
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
- unsigned nr_ptrs = 0;
-
- bkey_for_each_ptr(p, ptr)
- nr_ptrs++;
-
- return nr_ptrs;
-}
-
-unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c k)
-{
- unsigned nr_ptrs = 0;
-
- switch (k.k->type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_extent:
- case KEY_TYPE_reflink_v: {
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
-
- bkey_for_each_ptr(p, ptr)
- nr_ptrs += !ptr->cached;
- BUG_ON(!nr_ptrs);
- break;
- }
- case KEY_TYPE_reservation:
- nr_ptrs = bkey_s_c_to_reservation(k).v->nr_replicas;
- break;
- }
-
- return nr_ptrs;
-}
-
-static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
- struct extent_ptr_decoded p)
-{
- unsigned durability = 0;
- struct bch_dev *ca;
-
- if (p.ptr.cached)
- return 0;
-
- ca = bch_dev_bkey_exists(c, p.ptr.dev);
-
- if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
- durability = max_t(unsigned, durability, ca->mi.durability);
-
- if (p.has_ec) {
- struct stripe *s =
- genradix_ptr(&c->stripes[0], p.ec.idx);
-
- if (WARN_ON(!s))
- goto out;
-
- durability = max_t(unsigned, durability, s->nr_redundant);
- }
-out:
- return durability;
-}
-
-unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned durability = 0;
-
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- durability += bch2_extent_ptr_durability(c, p);
+static unsigned bch2_crc_field_size_max[] = {
+ [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
+ [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
+ [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
+};
- return durability;
-}
+static void bch2_extent_crc_pack(union bch_extent_crc *,
+ struct bch_extent_crc_unpacked,
+ enum bch_extent_entry_type);
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
unsigned dev)
@@ -219,172 +153,299 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
return ret;
}
-void bch2_bkey_append_ptr(struct bkey_i *k,
- struct bch_extent_ptr ptr)
+/* KEY_TYPE_btree_ptr: */
+
+const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
+ if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
+ return "value too big";
- switch (k->k.type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_extent:
- EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
+ return bch2_bkey_ptrs_invalid(c, k);
+}
- ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
+void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+ const char *err;
+ char buf[160];
+ struct bucket_mark mark;
+ struct bch_dev *ca;
- memcpy((void *) &k->v + bkey_val_bytes(&k->k),
- &ptr,
- sizeof(ptr));
- k->u64s++;
- break;
- default:
- BUG();
+ bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
+ !bch2_bkey_replicas_marked(c, k, false), c,
+ "btree key bad (replicas not marked in superblock):\n%s",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
+
+ if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
+ return;
+
+ bkey_for_each_ptr(ptrs, ptr) {
+ ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ mark = ptr_bucket_mark(ca, ptr);
+
+ err = "stale";
+ if (gen_after(mark.gen, ptr->gen))
+ goto err;
+
+ err = "inconsistent";
+ if (mark.data_type != BCH_DATA_BTREE ||
+ mark.dirty_sectors < c->opts.btree_node_size)
+ goto err;
}
+
+ return;
+err:
+ bch2_bkey_val_to_text(&PBUF(buf), c, k);
+ bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
+ err, buf, PTR_BUCKET_NR(ca, ptr),
+ mark.gen, (unsigned) mark.v.counter);
}
-void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
+void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
{
- struct bch_extent_ptr *ptr;
+ bch2_bkey_ptrs_to_text(out, c, k);
+}
- bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
+/* KEY_TYPE_extent: */
+
+const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
+{
+ return bch2_bkey_ptrs_invalid(c, k);
}
-const struct bch_extent_ptr *
-bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
+void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
+ struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ char buf[160];
- bkey_for_each_ptr(ptrs, ptr)
- if (ptr->dev == dev)
- return ptr;
+ /*
+ * XXX: we should be doing most/all of these checks at startup time,
+ * where we check bch2_bkey_invalid() in btree_node_read_done()
+ *
+ * But note that we can't check for stale pointers or incorrect gc marks
+ * until after journal replay is done (it might be an extent that's
+ * going to get overwritten during replay)
+ */
- return NULL;
-}
+ if (percpu_down_read_trylock(&c->mark_lock)) {
+ bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
+ !bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
+ "extent key bad (replicas not marked in superblock):\n%s",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
+ percpu_up_read(&c->mark_lock);
+ }
+ /*
+ * If journal replay hasn't finished, we might be seeing keys
+ * that will be overwritten by the time journal replay is done:
+ */
+ if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
+ return;
-bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
+ extent_for_each_ptr_decode(e, p, entry) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
+ unsigned stale = gen_after(mark.gen, p.ptr.gen);
+ unsigned disk_sectors = ptr_disk_sectors(p);
+ unsigned mark_sectors = p.ptr.cached
+ ? mark.cached_sectors
+ : mark.dirty_sectors;
- bkey_for_each_ptr(ptrs, ptr)
- if (bch2_dev_in_target(c, ptr->dev, target) &&
- (!ptr->cached ||
- !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
- return true;
+ bch2_fs_bug_on(stale && !p.ptr.cached, c,
+ "stale dirty pointer (ptr gen %u bucket %u",
+ p.ptr.gen, mark.gen);
- return false;
+ bch2_fs_bug_on(stale > 96, c, "key too stale: %i", stale);
+
+ bch2_fs_bug_on(!stale &&
+ (mark.data_type != BCH_DATA_USER ||
+ mark_sectors < disk_sectors), c,
+ "extent pointer not marked: %s:\n"
+ "type %u sectors %u < %u",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
+ mark.data_type,
+ mark_sectors, disk_sectors);
+ }
}
-/* extent specific utility code */
+void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ bch2_bkey_ptrs_to_text(out, c, k);
+}
-const struct bch_extent_ptr *
-bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
+enum merge_result bch2_extent_merge(struct bch_fs *c,
+ struct bkey_s _l, struct bkey_s _r)
{
- const struct bch_extent_ptr *ptr;
+ struct bkey_s_extent l = bkey_s_to_extent(_l);
+ struct bkey_s_extent r = bkey_s_to_extent(_r);
+ union bch_extent_entry *en_l = l.v->start;
+ union bch_extent_entry *en_r = r.v->start;
+ struct bch_extent_crc_unpacked crc_l, crc_r;
- extent_for_each_ptr(e, ptr)
- if (ptr->dev == dev)
- return ptr;
+ if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
+ return BCH_MERGE_NOMERGE;
- return NULL;
-}
+ crc_l = bch2_extent_crc_unpack(l.k, NULL);
-const struct bch_extent_ptr *
-bch2_extent_has_group(struct bch_fs *c, struct bkey_s_c_extent e, unsigned group)
-{
- const struct bch_extent_ptr *ptr;
+ extent_for_each_entry(l, en_l) {
+ en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
- extent_for_each_ptr(e, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ if (extent_entry_type(en_l) != extent_entry_type(en_r))
+ return BCH_MERGE_NOMERGE;
- if (ca->mi.group &&
- ca->mi.group - 1 == group)
- return ptr;
+ switch (extent_entry_type(en_l)) {
+ case BCH_EXTENT_ENTRY_ptr: {
+ const struct bch_extent_ptr *lp = &en_l->ptr;
+ const struct bch_extent_ptr *rp = &en_r->ptr;
+ struct bch_dev *ca;
+
+ if (lp->offset + crc_l.compressed_size != rp->offset ||
+ lp->dev != rp->dev ||
+ lp->gen != rp->gen)
+ return BCH_MERGE_NOMERGE;
+
+ /* We don't allow extents to straddle buckets: */
+ ca = bch_dev_bkey_exists(c, lp->dev);
+
+ if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
+ return BCH_MERGE_NOMERGE;
+
+ break;
+ }
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ if (en_l->stripe_ptr.block != en_r->stripe_ptr.block ||
+ en_l->stripe_ptr.idx != en_r->stripe_ptr.idx)
+ return BCH_MERGE_NOMERGE;
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ case BCH_EXTENT_ENTRY_crc64:
+ case BCH_EXTENT_ENTRY_crc128:
+ crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
+ crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
+
+ if (crc_l.csum_type != crc_r.csum_type ||
+ crc_l.compression_type != crc_r.compression_type ||
+ crc_l.nonce != crc_r.nonce)
+ return BCH_MERGE_NOMERGE;
+
+ if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
+ crc_r.offset)
+ return BCH_MERGE_NOMERGE;
+
+ if (!bch2_checksum_mergeable(crc_l.csum_type))
+ return BCH_MERGE_NOMERGE;
+
+ if (crc_l.compression_type)
+ return BCH_MERGE_NOMERGE;
+
+ if (crc_l.csum_type &&
+ crc_l.uncompressed_size +
+ crc_r.uncompressed_size > c->sb.encoded_extent_max)
+ return BCH_MERGE_NOMERGE;
+
+ if (crc_l.uncompressed_size + crc_r.uncompressed_size - 1 >
+ bch2_crc_field_size_max[extent_entry_type(en_l)])
+ return BCH_MERGE_NOMERGE;
+
+ break;
+ default:
+ return BCH_MERGE_NOMERGE;
+ }
}
- return NULL;
-}
+ extent_for_each_entry(l, en_l) {
+ struct bch_extent_crc_unpacked crc_l, crc_r;
-unsigned bch2_extent_is_compressed(struct bkey_s_c k)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- unsigned ret = 0;
+ en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (!p.ptr.cached &&
- p.crc.compression_type != BCH_COMPRESSION_NONE)
- ret += p.crc.compressed_size;
+ if (!extent_entry_is_crc(en_l))
+ continue;
- return ret;
-}
+ crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
+ crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
-bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
- struct bch_extent_ptr m, u64 offset)
-{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
+ crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
+ crc_l.csum,
+ crc_r.csum,
+ crc_r.uncompressed_size << 9);
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
- if (p.ptr.dev == m.dev &&
- p.ptr.gen == m.gen &&
- (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
- (s64) m.offset - offset)
- return true;
+ crc_l.uncompressed_size += crc_r.uncompressed_size;
+ crc_l.compressed_size += crc_r.compressed_size;
- return false;
+ bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
+ extent_entry_type(en_l));
+ }
+
+ bch2_key_resize(l.k, l.k->size + r.k->size);
+
+ return BCH_MERGE_MERGE;
}
-static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
- union bch_extent_entry *entry)
+/* KEY_TYPE_reservation: */
+
+const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
- union bch_extent_entry *i = ptrs.start;
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
- if (i == entry)
- return NULL;
+ if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
+ return "incorrect value size";
- while (extent_entry_next(i) != entry)
- i = extent_entry_next(i);
- return i;
+ if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
+ return "invalid nr_replicas";
+
+ return NULL;
}
-union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
- struct bch_extent_ptr *ptr)
+void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
- union bch_extent_entry *dst, *src, *prev;
- bool drop_crc = true;
-
- EBUG_ON(ptr < &ptrs.start->ptr ||
- ptr >= &ptrs.end->ptr);
- EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
- src = extent_entry_next(to_entry(ptr));
- if (src != ptrs.end &&
- !extent_entry_is_crc(src))
- drop_crc = false;
+ pr_buf(out, "generation %u replicas %u",
+ le32_to_cpu(r.v->generation),
+ r.v->nr_replicas);
+}
- dst = to_entry(ptr);
- while ((prev = extent_entry_prev(ptrs, dst))) {
- if (extent_entry_is_ptr(prev))
- break;
+enum merge_result bch2_reservation_merge(struct bch_fs *c,
+ struct bkey_s _l, struct bkey_s _r)
+{
+ struct bkey_s_reservation l = bkey_s_to_reservation(_l);
+ struct bkey_s_reservation r = bkey_s_to_reservation(_r);
- if (extent_entry_is_crc(prev)) {
- if (drop_crc)
- dst = prev;
- break;
- }
+ if (l.v->generation != r.v->generation ||
+ l.v->nr_replicas != r.v->nr_replicas)
+ return BCH_MERGE_NOMERGE;
- dst = prev;
+ if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
+ bch2_key_resize(l.k, KEY_SIZE_MAX);
+ bch2_cut_front_s(l.k->p, r.s);
+ return BCH_MERGE_PARTIAL;
}
- memmove_u64s_down(dst, src,
- (u64 *) ptrs.end - (u64 *) src);
- k.k->u64s -= (u64 *) src - (u64 *) dst;
+ bch2_key_resize(l.k, l.k->size + r.k->size);
- return dst;
+ return BCH_MERGE_MERGE;
+}
+
+/* Extent checksum entries: */
+
+/* returns true if not equal */
+static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
+ struct bch_extent_crc_unpacked r)
+{
+ return (l.csum_type != r.csum_type ||
+ l.compression_type != r.compression_type ||
+ l.compressed_size != r.compressed_size ||
+ l.uncompressed_size != r.uncompressed_size ||
+ l.offset != r.offset ||
+ l.live_size != r.live_size ||
+ l.nonce != r.nonce ||
+ bch2_crc_cmp(l.csum, r.csum));
}
static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
@@ -463,52 +524,404 @@ restart_narrow_pointers:
return ret;
}
-/* returns true if not equal */
-static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
- struct bch_extent_crc_unpacked r)
+static void bch2_extent_crc_pack(union bch_extent_crc *dst,
+ struct bch_extent_crc_unpacked src,
+ enum bch_extent_entry_type type)
{
- return (l.csum_type != r.csum_type ||
- l.compression_type != r.compression_type ||
- l.compressed_size != r.compressed_size ||
- l.uncompressed_size != r.uncompressed_size ||
- l.offset != r.offset ||
- l.live_size != r.live_size ||
- l.nonce != r.nonce ||
- bch2_crc_cmp(l.csum, r.csum));
+#define set_common_fields(_dst, _src) \
+ _dst.type = 1 << type; \
+ _dst.csum_type = _src.csum_type, \
+ _dst.compression_type = _src.compression_type, \
+ _dst._compressed_size = _src.compressed_size - 1, \
+ _dst._uncompressed_size = _src.uncompressed_size - 1, \
+ _dst.offset = _src.offset
+
+ switch (type) {
+ case BCH_EXTENT_ENTRY_crc32:
+ set_common_fields(dst->crc32, src);
+ dst->crc32.csum = *((__le32 *) &src.csum.lo);
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ set_common_fields(dst->crc64, src);
+ dst->crc64.nonce = src.nonce;
+ dst->crc64.csum_lo = src.csum.lo;
+ dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ set_common_fields(dst->crc128, src);
+ dst->crc128.nonce = src.nonce;
+ dst->crc128.csum = src.csum;
+ break;
+ default:
+ BUG();
+ }
+#undef set_common_fields
}
-void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
+void bch2_extent_crc_append(struct bkey_i *k,
+ struct bch_extent_crc_unpacked new)
{
- union bch_extent_entry *entry;
- u64 *d = (u64 *) bkeyp_val(f, k);
- unsigned i;
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ union bch_extent_crc *crc = (void *) ptrs.end;
+ enum bch_extent_entry_type type;
- for (i = 0; i < bkeyp_val_u64s(f, k); i++)
- d[i] = swab64(d[i]);
+ if (bch_crc_bytes[new.csum_type] <= 4 &&
+ new.uncompressed_size - 1 <= CRC32_SIZE_MAX &&
+ new.nonce <= CRC32_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc32;
+ else if (bch_crc_bytes[new.csum_type] <= 10 &&
+ new.uncompressed_size - 1 <= CRC64_SIZE_MAX &&
+ new.nonce <= CRC64_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc64;
+ else if (bch_crc_bytes[new.csum_type] <= 16 &&
+ new.uncompressed_size - 1 <= CRC128_SIZE_MAX &&
+ new.nonce <= CRC128_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc128;
+ else
+ BUG();
- for (entry = (union bch_extent_entry *) d;
- entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
- entry = extent_entry_next(entry)) {
- switch (extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr:
- break;
- case BCH_EXTENT_ENTRY_crc32:
- entry->crc32.csum = swab32(entry->crc32.csum);
+ bch2_extent_crc_pack(crc, new, type);
+
+ k->k.u64s += extent_entry_u64s(ptrs.end);
+
+ EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
+}
+
+/* Generic code for keys with pointers: */
+
+unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
+{
+ return bch2_bkey_devs(k).nr;
+}
+
+unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
+{
+ return k.k->type == KEY_TYPE_reservation
+ ? bkey_s_c_to_reservation(k).v->nr_replicas
+ : bch2_bkey_dirty_devs(k).nr;
+}
+
+unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
+{
+ unsigned ret = 0;
+
+ if (k.k->type == KEY_TYPE_reservation) {
+ ret = bkey_s_c_to_reservation(k).v->nr_replicas;
+ } else {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ ret += !p.ptr.cached &&
+ p.crc.compression_type == BCH_COMPRESSION_NONE;
+ }
+
+ return ret;
+}
+
+unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned ret = 0;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (!p.ptr.cached &&
+ p.crc.compression_type != BCH_COMPRESSION_NONE)
+ ret += p.crc.compressed_size;
+
+ return ret;
+}
+
+bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
+ unsigned nr_replicas)
+{
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ struct bpos end = pos;
+ struct bkey_s_c k;
+ bool ret = true;
+ int err;
+
+ end.offset += size;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
+ BTREE_ITER_SLOTS, k, err) {
+ if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
- case BCH_EXTENT_ENTRY_crc64:
- entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
- entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
+
+ if (nr_replicas > bch2_bkey_nr_ptrs_fully_allocated(k)) {
+ ret = false;
break;
- case BCH_EXTENT_ENTRY_crc128:
- entry->crc128.csum.hi = (__force __le64)
- swab64((__force u64) entry->crc128.csum.hi);
- entry->crc128.csum.lo = (__force __le64)
- swab64((__force u64) entry->crc128.csum.lo);
+ }
+ }
+ bch2_trans_exit(&trans);
+
+ return ret;
+}
+
+static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
+ struct extent_ptr_decoded p)
+{
+ unsigned durability = 0;
+ struct bch_dev *ca;
+
+ if (p.ptr.cached)
+ return 0;
+
+ ca = bch_dev_bkey_exists(c, p.ptr.dev);
+
+ if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
+ durability = max_t(unsigned, durability, ca->mi.durability);
+
+ if (p.has_ec) {
+ struct stripe *s =
+ genradix_ptr(&c->stripes[0], p.ec.idx);
+
+ if (WARN_ON(!s))
+ goto out;
+
+ durability = max_t(unsigned, durability, s->nr_redundant);
+ }
+out:
+ return durability;
+}
+
+unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned durability = 0;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ durability += bch2_extent_ptr_durability(c, p);
+
+ return durability;
+}
+
+void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
+ unsigned target,
+ unsigned nr_desired_replicas)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;
+
+ if (target && extra > 0)
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ int n = bch2_extent_ptr_durability(c, p);
+
+ if (n && n <= extra &&
+ !bch2_dev_in_target(c, p.ptr.dev, target)) {
+ entry->ptr.cached = true;
+ extra -= n;
+ }
+ }
+
+ if (extra > 0)
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ int n = bch2_extent_ptr_durability(c, p);
+
+ if (n && n <= extra) {
+ entry->ptr.cached = true;
+ extra -= n;
+ }
+ }
+}
+
+void bch2_bkey_append_ptr(struct bkey_i *k,
+ struct bch_extent_ptr ptr)
+{
+ EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
+
+ switch (k->k.type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_extent:
+ EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
+
+ ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
+
+ memcpy((void *) &k->v + bkey_val_bytes(&k->k),
+ &ptr,
+ sizeof(ptr));
+ k->u64s++;
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void __extent_entry_insert(struct bkey_i *k,
+ union bch_extent_entry *dst,
+ union bch_extent_entry *new)
+{
+ union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
+
+ memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
+ dst, (u64 *) end - (u64 *) dst);
+ k->k.u64s += extent_entry_u64s(new);
+ memcpy(dst, new, extent_entry_bytes(new));
+}
+
+void bch2_extent_ptr_decoded_append(struct bkey_i *k,
+ struct extent_ptr_decoded *p)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ struct bch_extent_crc_unpacked crc =
+ bch2_extent_crc_unpack(&k->k, NULL);
+ union bch_extent_entry *pos;
+
+ if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
+ pos = ptrs.start;
+ goto found;
+ }
+
+ bkey_for_each_crc(&k->k, ptrs, crc, pos)
+ if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
+ pos = extent_entry_next(pos);
+ goto found;
+ }
+
+ bch2_extent_crc_append(k, p->crc);
+ pos = bkey_val_end(bkey_i_to_s(k));
+found:
+ p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
+ __extent_entry_insert(k, pos, to_entry(&p->ptr));
+
+ if (p->has_ec) {
+ p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
+ __extent_entry_insert(k, pos, to_entry(&p->ec));
+ }
+}
+
+static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
+ union bch_extent_entry *entry)
+{
+ union bch_extent_entry *i = ptrs.start;
+
+ if (i == entry)
+ return NULL;
+
+ while (extent_entry_next(i) != entry)
+ i = extent_entry_next(i);
+ return i;
+}
+
+union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
+ struct bch_extent_ptr *ptr)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *dst, *src, *prev;
+ bool drop_crc = true;
+
+ EBUG_ON(ptr < &ptrs.start->ptr ||
+ ptr >= &ptrs.end->ptr);
+ EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
+
+ src = extent_entry_next(to_entry(ptr));
+ if (src != ptrs.end &&
+ !extent_entry_is_crc(src))
+ drop_crc = false;
+
+ dst = to_entry(ptr);
+ while ((prev = extent_entry_prev(ptrs, dst))) {
+ if (extent_entry_is_ptr(prev))
break;
- case BCH_EXTENT_ENTRY_stripe_ptr:
+
+ if (extent_entry_is_crc(prev)) {
+ if (drop_crc)
+ dst = prev;
break;
}
+
+ dst = prev;
}
+
+ memmove_u64s_down(dst, src,
+ (u64 *) ptrs.end - (u64 *) src);
+ k.k->u64s -= (u64 *) src - (u64 *) dst;
+
+ return dst;
+}
+
+void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
+{
+ struct bch_extent_ptr *ptr;
+
+ bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
+}
+
+const struct bch_extent_ptr *
+bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(ptrs, ptr)
+ if (ptr->dev == dev)
+ return ptr;
+
+ return NULL;
+}
+
+bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(ptrs, ptr)
+ if (bch2_dev_in_target(c, ptr->dev, target) &&
+ (!ptr->cached ||
+ !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
+ return true;
+
+ return false;
+}
+
+bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
+ struct bch_extent_ptr m, u64 offset)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (p.ptr.dev == m.dev &&
+ p.ptr.gen == m.gen &&
+ (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
+ (s64) m.offset - offset)
+ return true;
+
+ return false;
+}
+
+/*
+ * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
+ *
+ * Returns true if @k should be dropped entirely
+ *
+ * For existing keys, only called when btree nodes are being rewritten, not when
+ * they're merely being compacted/resorted in memory.
+ */
+bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
+{
+ struct bch_extent_ptr *ptr;
+
+ bch2_bkey_drop_ptrs(k, ptr,
+ ptr->cached &&
+ ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
+
+ /* will only happen if all pointers were cached: */
+ if (!bch2_bkey_nr_ptrs(k.s_c))
+ k.k->type = KEY_TYPE_discard;
+
+ return bkey_whiteout(k.k);
}
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
@@ -659,63 +1072,41 @@ const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
return NULL;
}
-/* Btree ptrs */
-
-const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
-{
- if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
- return "value too big";
-
- return bch2_bkey_ptrs_invalid(c, k);
-}
-
-void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
+void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
- const char *err;
- char buf[160];
- struct bucket_mark mark;
- struct bch_dev *ca;
-
- bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
- !bch2_bkey_replicas_marked(c, k, false), c,
- "btree key bad (replicas not marked in superblock):\n%s",
- (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
-
- if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
- return;
-
- bkey_for_each_ptr(ptrs, ptr) {
- ca = bch_dev_bkey_exists(c, ptr->dev);
-
- mark = ptr_bucket_mark(ca, ptr);
+ union bch_extent_entry *entry;
+ u64 *d = (u64 *) bkeyp_val(f, k);
+ unsigned i;
- err = "stale";
- if (gen_after(mark.gen, ptr->gen))
- goto err;
+ for (i = 0; i < bkeyp_val_u64s(f, k); i++)
+ d[i] = swab64(d[i]);
- err = "inconsistent";
- if (mark.data_type != BCH_DATA_BTREE ||
- mark.dirty_sectors < c->opts.btree_node_size)
- goto err;
+ for (entry = (union bch_extent_entry *) d;
+ entry < (union bch_extent_entry *) (d + bkeyp_val_u64s(f, k));
+ entry = extent_entry_next(entry)) {
+ switch (extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ entry->crc32.csum = swab32(entry->crc32.csum);
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
+ entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ entry->crc128.csum.hi = (__force __le64)
+ swab64((__force u64) entry->crc128.csum.hi);
+ entry->crc128.csum.lo = (__force __le64)
+ swab64((__force u64) entry->crc128.csum.lo);
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ break;
+ }
}
-
- return;
-err:
- bch2_bkey_val_to_text(&PBUF(buf), c, k);
- bch2_fs_bug(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
- err, buf, PTR_BUCKET_NR(ca, ptr),
- mark.gen, (unsigned) mark.v.counter);
}
-void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- bch2_bkey_ptrs_to_text(out, c, k);
-}
-
-/* Extents */
+/* Generic extent code: */
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
@@ -831,442 +1222,3 @@ int bch2_cut_back_s(struct bpos where, struct bkey_s k)
memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
return -val_u64s_delta;
}
-
-const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
-{
- return bch2_bkey_ptrs_invalid(c, k);
-}
-
-void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- char buf[160];
-
- /*
- * XXX: we should be doing most/all of these checks at startup time,
- * where we check bch2_bkey_invalid() in btree_node_read_done()
- *
- * But note that we can't check for stale pointers or incorrect gc marks
- * until after journal replay is done (it might be an extent that's
- * going to get overwritten during replay)
- */
-
- if (percpu_down_read_trylock(&c->mark_lock)) {
- bch2_fs_bug_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
- !bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
- "extent key bad (replicas not marked in superblock):\n%s",
- (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));
- percpu_up_read(&c->mark_lock);
- }
- /*
- * If journal replay hasn't finished, we might be seeing keys
- * that will be overwritten by the time journal replay is done:
- */
- if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
- return;
-
- extent_for_each_ptr_decode(e, p, entry) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
- struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
- unsigned stale = gen_after(mark.gen, p.ptr.gen);
- unsigned disk_sectors = ptr_disk_sectors(p);
- unsigned mark_sectors = p.ptr.cached
- ? mark.cached_sectors
- : mark.dirty_sectors;
-
- bch2_fs_bug_on(stale && !p.ptr.cached, c,
- "stale dirty pointer (ptr gen %u bucket %u",
- p.ptr.gen, mark.gen);
-
- bch2_fs_bug_on(stale > 96, c, "key too stale: %i", stale);
-
- bch2_fs_bug_on(!stale &&
- (mark.data_type != BCH_DATA_USER ||
- mark_sectors < disk_sectors), c,
- "extent pointer not marked: %s:\n"
- "type %u sectors %u < %u",
- (bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
- mark.data_type,
- mark_sectors, disk_sectors);
- }
-}
-
-void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- bch2_bkey_ptrs_to_text(out, c, k);
-}
-
-static unsigned bch2_crc_field_size_max[] = {
- [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
- [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
- [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
-};
-
-static void bch2_extent_crc_pack(union bch_extent_crc *dst,
- struct bch_extent_crc_unpacked src,
- enum bch_extent_entry_type type)
-{
-#define set_common_fields(_dst, _src) \
- _dst.type = 1 << type; \
- _dst.csum_type = _src.csum_type, \
- _dst.compression_type = _src.compression_type, \
- _dst._compressed_size = _src.compressed_size - 1, \
- _dst._uncompressed_size = _src.uncompressed_size - 1, \
- _dst.offset = _src.offset
-
- switch (type) {
- case BCH_EXTENT_ENTRY_crc32:
- set_common_fields(dst->crc32, src);
- dst->crc32.csum = *((__le32 *) &src.csum.lo);
- break;
- case BCH_EXTENT_ENTRY_crc64:
- set_common_fields(dst->crc64, src);
- dst->crc64.nonce = src.nonce;
- dst->crc64.csum_lo = src.csum.lo;
- dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
- break;
- case BCH_EXTENT_ENTRY_crc128:
- set_common_fields(dst->crc128, src);
- dst->crc128.nonce = src.nonce;
- dst->crc128.csum = src.csum;
- break;
- default:
- BUG();
- }
-#undef set_common_fields
-}
-
-void bch2_extent_crc_append(struct bkey_i *k,
- struct bch_extent_crc_unpacked new)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
- union bch_extent_crc *crc = (void *) ptrs.end;
- enum bch_extent_entry_type type;
-
- if (bch_crc_bytes[new.csum_type] <= 4 &&
- new.uncompressed_size - 1 <= CRC32_SIZE_MAX &&
- new.nonce <= CRC32_NONCE_MAX)
- type = BCH_EXTENT_ENTRY_crc32;
- else if (bch_crc_bytes[new.csum_type] <= 10 &&
- new.uncompressed_size - 1 <= CRC64_SIZE_MAX &&
- new.nonce <= CRC64_NONCE_MAX)
- type = BCH_EXTENT_ENTRY_crc64;
- else if (bch_crc_bytes[new.csum_type] <= 16 &&
- new.uncompressed_size - 1 <= CRC128_SIZE_MAX &&
- new.nonce <= CRC128_NONCE_MAX)
- type = BCH_EXTENT_ENTRY_crc128;
- else
- BUG();
-
- bch2_extent_crc_pack(crc, new, type);
-
- k->k.u64s += extent_entry_u64s(ptrs.end);
-
- EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
-}
-
-static inline void __extent_entry_insert(struct bkey_i *k,
- union bch_extent_entry *dst,
- union bch_extent_entry *new)
-{
- union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
-
- memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
- dst, (u64 *) end - (u64 *) dst);
- k->k.u64s += extent_entry_u64s(new);
- memcpy(dst, new, extent_entry_bytes(new));
-}
-
-void bch2_extent_ptr_decoded_append(struct bkey_i *k,
- struct extent_ptr_decoded *p)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
- struct bch_extent_crc_unpacked crc =
- bch2_extent_crc_unpack(&k->k, NULL);
- union bch_extent_entry *pos;
-
- if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
- pos = ptrs.start;
- goto found;
- }
-
- bkey_for_each_crc(&k->k, ptrs, crc, pos)
- if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
- pos = extent_entry_next(pos);
- goto found;
- }
-
- bch2_extent_crc_append(k, p->crc);
- pos = bkey_val_end(bkey_i_to_s(k));
-found:
- p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
- __extent_entry_insert(k, pos, to_entry(&p->ptr));
-
- if (p->has_ec) {
- p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
- __extent_entry_insert(k, pos, to_entry(&p->ec));
- }
-}
-
-/*
- * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
- *
- * Returns true if @k should be dropped entirely
- *
- * For existing keys, only called when btree nodes are being rewritten, not when
- * they're merely being compacted/resorted in memory.
- */
-bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
-{
- struct bch_extent_ptr *ptr;
-
- bch2_bkey_drop_ptrs(k, ptr,
- ptr->cached &&
- ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
-
- /* will only happen if all pointers were cached: */
- if (!bkey_val_u64s(k.k))
- k.k->type = KEY_TYPE_discard;
-
- return bkey_whiteout(k.k);
-}
-
-void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
- unsigned target,
- unsigned nr_desired_replicas)
-{
- struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
- union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
- int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;
-
- if (target && extra > 0)
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- int n = bch2_extent_ptr_durability(c, p);
-
- if (n && n <= extra &&
- !bch2_dev_in_target(c, p.ptr.dev, target)) {
- entry->ptr.cached = true;
- extra -= n;
- }
- }
-
- if (extra > 0)
- bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- int n = bch2_extent_ptr_durability(c, p);
-
- if (n && n <= extra) {
- entry->ptr.cached = true;
- extra -= n;
- }
- }
-}
-
-enum merge_result bch2_extent_merge(struct bch_fs *c,
- struct bkey_s _l, struct bkey_s _r)
-{
- struct bkey_s_extent l = bkey_s_to_extent(_l);
- struct bkey_s_extent r = bkey_s_to_extent(_r);
- union bch_extent_entry *en_l = l.v->start;
- union bch_extent_entry *en_r = r.v->start;
- struct bch_extent_crc_unpacked crc_l, crc_r;
-
- if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
- return BCH_MERGE_NOMERGE;
-
- crc_l = bch2_extent_crc_unpack(l.k, NULL);
-
- extent_for_each_entry(l, en_l) {
- en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
-
- if (extent_entry_type(en_l) != extent_entry_type(en_r))
- return BCH_MERGE_NOMERGE;
-
- switch (extent_entry_type(en_l)) {
- case BCH_EXTENT_ENTRY_ptr: {
- const struct bch_extent_ptr *lp = &en_l->ptr;
- const struct bch_extent_ptr *rp = &en_r->ptr;
- struct bch_dev *ca;
-
- if (lp->offset + crc_l.compressed_size != rp->offset ||
- lp->dev != rp->dev ||
- lp->gen != rp->gen)
- return BCH_MERGE_NOMERGE;
-
- /* We don't allow extents to straddle buckets: */
- ca = bch_dev_bkey_exists(c, lp->dev);
-
- if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
- return BCH_MERGE_NOMERGE;
-
- break;
- }
- case BCH_EXTENT_ENTRY_stripe_ptr:
- if (en_l->stripe_ptr.block != en_r->stripe_ptr.block ||
- en_l->stripe_ptr.idx != en_r->stripe_ptr.idx)
- return BCH_MERGE_NOMERGE;
- break;
- case BCH_EXTENT_ENTRY_crc32:
- case BCH_EXTENT_ENTRY_crc64:
- case BCH_EXTENT_ENTRY_crc128:
- crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
- crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
-
- if (crc_l.csum_type != crc_r.csum_type ||
- crc_l.compression_type != crc_r.compression_type ||
- crc_l.nonce != crc_r.nonce)
- return BCH_MERGE_NOMERGE;
-
- if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
- crc_r.offset)
- return BCH_MERGE_NOMERGE;
-
- if (!bch2_checksum_mergeable(crc_l.csum_type))
- return BCH_MERGE_NOMERGE;
-
- if (crc_l.compression_type)
- return BCH_MERGE_NOMERGE;
-
- if (crc_l.csum_type &&
- crc_l.uncompressed_size +
- crc_r.uncompressed_size > c->sb.encoded_extent_max)
- return BCH_MERGE_NOMERGE;
-
- if (crc_l.uncompressed_size + crc_r.uncompressed_size - 1 >
- bch2_crc_field_size_max[extent_entry_type(en_l)])
- return BCH_MERGE_NOMERGE;
-
- break;
- default:
- return BCH_MERGE_NOMERGE;
- }
- }
-
- extent_for_each_entry(l, en_l) {
- struct bch_extent_crc_unpacked crc_l, crc_r;
-
- en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);
-
- if (!extent_entry_is_crc(en_l))
- continue;
-
- crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
- crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
-
- crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
- crc_l.csum,
- crc_r.csum,
- crc_r.uncompressed_size << 9);
-
- crc_l.uncompressed_size += crc_r.uncompressed_size;
- crc_l.compressed_size += crc_r.compressed_size;
-
- bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
- extent_entry_type(en_l));
- }
-
- bch2_key_resize(l.k, l.k->size + r.k->size);
-
- return BCH_MERGE_MERGE;
-}
-
-bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
- unsigned nr_replicas)
-{
- struct btree_trans trans;
- struct btree_iter *iter;
- struct bpos end = pos;
- struct bkey_s_c k;
- bool ret = true;
- int err;
-
- end.offset += size;
-
- bch2_trans_init(&trans, c, 0, 0);
-
- for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
- BTREE_ITER_SLOTS, k, err) {
- if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
- break;
-
- if (nr_replicas > bch2_bkey_nr_ptrs_allocated(k)) {
- ret = false;
- break;
- }
- }
- bch2_trans_exit(&trans);
-
- return ret;
-}
-
-unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
-{
- unsigned ret = 0;
-
- switch (k.k->type) {
- case KEY_TYPE_extent: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- const union bch_extent_entry *entry;
- struct extent_ptr_decoded p;
-
- extent_for_each_ptr_decode(e, p, entry)
- ret += !p.ptr.cached &&
- p.crc.compression_type == BCH_COMPRESSION_NONE;
- break;
- }
- case KEY_TYPE_reservation:
- ret = bkey_s_c_to_reservation(k).v->nr_replicas;
- break;
- }
-
- return ret;
-}
-
-/* KEY_TYPE_reservation: */
-
-const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
-{
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
-
- if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
- return "incorrect value size";
-
- if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
- return "invalid nr_replicas";
-
- return NULL;
-}
-
-void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
- struct bkey_s_c k)
-{
- struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
-
- pr_buf(out, "generation %u replicas %u",
- le32_to_cpu(r.v->generation),
- r.v->nr_replicas);
-}
-
-enum merge_result bch2_reservation_merge(struct bch_fs *c,
- struct bkey_s _l, struct bkey_s _r)
-{
- struct bkey_s_reservation l = bkey_s_to_reservation(_l);
- struct bkey_s_reservation r = bkey_s_to_reservation(_r);
-
- if (l.v->generation != r.v->generation ||
- l.v->nr_replicas != r.v->nr_replicas)
- return BCH_MERGE_NOMERGE;
-
- if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
- bch2_key_resize(l.k, KEY_SIZE_MAX);
- bch2_cut_front_s(l.k->p, r.s);
- return BCH_MERGE_PARTIAL;
- }
-
- bch2_key_resize(l.k, l.k->size + r.k->size);
-
- return BCH_MERGE_MERGE;
-}
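
The extents.h hunk that follows moves extent_entry_next() to the top of the header; it advances through a bkey value laid out as a packed sequence of variable-size entries (pointers, crc32/64/128 fields, stripe pointers). A standalone sketch of that walk pattern, with a made-up entry layout (the real entries encode their size via the type field, so only the iteration idea carries over):

#include <stdio.h>

struct entry {
	unsigned char nr_u64s;	/* stand-in for the size encoded in the type */
	unsigned char type;
	/* payload follows */
};

static const struct entry *entry_next(const struct entry *e)
{
	return (const struct entry *)
		((const unsigned char *) e + e->nr_u64s * 8);
}

int main(void)
{
	/* two fake entries: a one-word "ptr" and a two-word "crc" */
	unsigned char buf[24] = { 1, 0, [8] = 2, [9] = 1 };
	const struct entry *e   = (const struct entry *) buf;
	const struct entry *end = (const struct entry *) (buf + sizeof(buf));

	for (; e < end; e = entry_next(e))
		printf("entry type %u, %u words\n", e->type, e->nr_u64s);
	return 0;
}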
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 35a66d4f4ea2..1140d01a42ab 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -40,6 +40,9 @@ struct btree_insert_entry;
(union bch_extent_entry *) (_entry)); \
})
+#define extent_entry_next(_entry) \
+ ((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))
+
static inline unsigned
__extent_entry_type(const union bch_extent_entry *e)
{
@@ -185,10 +188,52 @@ struct bkey_ptrs {
union bch_extent_entry *end;
};
-/* iterate over bkey ptrs */
+static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
+{
+ switch (k.k->type) {
+ case KEY_TYPE_btree_ptr: {
+ struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
+ return (struct bkey_ptrs_c) {
+ to_entry(&e.v->start[0]),
+ to_entry(extent_entry_last(e))
+ };
+ }
+ case KEY_TYPE_extent: {
+ struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ return (struct bkey_ptrs_c) {
+ e.v->start,
+ extent_entry_last(e)
+ };
+ }
+ case KEY_TYPE_stripe: {
+ struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+ return (struct bkey_ptrs_c) {
+ to_entry(&s.v->ptrs[0]),
+ to_entry(&s.v->ptrs[s.v->nr_blocks]),
+ };
+ }
+ case KEY_TYPE_reflink_v: {
+ struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);
-#define extent_entry_next(_entry) \
- ((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))
+ return (struct bkey_ptrs_c) {
+ r.v->start,
+ bkey_val_end(r),
+ };
+ }
+ default:
+ return (struct bkey_ptrs_c) { NULL, NULL };
+ }
+}
+
+static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
+{
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);
+
+ return (struct bkey_ptrs) {
+ (void *) p.start,
+ (void *) p.end
+ };
+}
#define __bkey_extent_entry_for_each_from(_start, _end, _entry) \
for ((_entry) = (_start); \
@@ -281,96 +326,26 @@ out: \
#define bkey_for_each_crc(_k, _p, _crc, _iter) \
__bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)
-/* utility code common to all keys with pointers: */
-
-static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
-{
- switch (k.k->type) {
- case KEY_TYPE_btree_ptr: {
- struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
- return (struct bkey_ptrs_c) {
- to_entry(&e.v->start[0]),
- to_entry(extent_entry_last(e))
- };
- }
- case KEY_TYPE_extent: {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
- return (struct bkey_ptrs_c) {
- e.v->start,
- extent_entry_last(e)
- };
- }
- case KEY_TYPE_stripe: {
- struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
- return (struct bkey_ptrs_c) {
- to_entry(&s.v->ptrs[0]),
- to_entry(&s.v->ptrs[s.v->nr_blocks]),
- };
- }
- case KEY_TYPE_reflink_v: {
- struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);
-
- return (struct bkey_ptrs_c) {
- r.v->start,
- bkey_val_end(r),
- };
- }
- default:
- return (struct bkey_ptrs_c) { NULL, NULL };
- }
-}
-
-static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
-{
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);
+/* Iterate over pointers in KEY_TYPE_extent: */
- return (struct bkey_ptrs) {
- (void *) p.start,
- (void *) p.end
- };
-}
-
-static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
-{
- struct bch_devs_list ret = (struct bch_devs_list) { 0 };
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
-
- bkey_for_each_ptr(p, ptr)
- ret.devs[ret.nr++] = ptr->dev;
-
- return ret;
-}
-
-static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
-{
- struct bch_devs_list ret = (struct bch_devs_list) { 0 };
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
-
- bkey_for_each_ptr(p, ptr)
- if (!ptr->cached)
- ret.devs[ret.nr++] = ptr->dev;
+#define extent_for_each_entry_from(_e, _entry, _start) \
+ __bkey_extent_entry_for_each_from(_start, \
+ extent_entry_last(_e),_entry)
- return ret;
-}
+#define extent_for_each_entry(_e, _entry) \
+ extent_for_each_entry_from(_e, _entry, (_e).v->start)
-static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
-{
- struct bch_devs_list ret = (struct bch_devs_list) { 0 };
- struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
- const struct bch_extent_ptr *ptr;
+#define extent_ptr_next(_e, _ptr) \
+ __bkey_ptr_next(_ptr, extent_entry_last(_e))
- bkey_for_each_ptr(p, ptr)
- if (ptr->cached)
- ret.devs[ret.nr++] = ptr->dev;
+#define extent_for_each_ptr(_e, _ptr) \
+ __bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)
- return ret;
-}
+#define extent_for_each_ptr_decode(_e, _ptr, _entry) \
+ __bkey_for_each_ptr_decode((_e).k, (_e).v->start, \
+ extent_entry_last(_e), _ptr, _entry)
-unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
-unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c);
-unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
+/* utility code common to all keys with pointers: */
void bch2_mark_io_failure(struct bch_io_failures *,
struct extent_ptr_decoded *);
@@ -378,22 +353,12 @@ int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
struct bch_io_failures *,
struct extent_ptr_decoded *);
-void bch2_bkey_append_ptr(struct bkey_i *, struct bch_extent_ptr);
-void bch2_bkey_drop_device(struct bkey_s, unsigned);
-const struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s_c, unsigned);
-bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
-
-void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
- struct bkey_s_c);
-const char *bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c);
-
-/* bch_btree_ptr: */
+/* KEY_TYPE_btree_ptr: */
const char *bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_debugcheck(struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
-void bch2_ptr_swab(const struct bkey_format *, struct bkey_packed *);
#define bch2_bkey_ops_btree_ptr (struct bkey_ops) { \
.key_invalid = bch2_btree_ptr_invalid, \
@@ -402,12 +367,11 @@ void bch2_ptr_swab(const struct bkey_format *, struct bkey_packed *);
.swab = bch2_ptr_swab, \
}
-/* bch_extent: */
+/* KEY_TYPE_extent: */
const char *bch2_extent_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_extent_debugcheck(struct bch_fs *, struct bkey_s_c);
void bch2_extent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
enum merge_result bch2_extent_merge(struct bch_fs *,
struct bkey_s, struct bkey_s);
@@ -420,7 +384,7 @@ enum merge_result bch2_extent_merge(struct bch_fs *,
.key_merge = bch2_extent_merge, \
}
-/* bch_reservation: */
+/* KEY_TYPE_reservation: */
const char *bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
@@ -433,13 +397,15 @@ enum merge_result bch2_reservation_merge(struct bch_fs *,
.key_merge = bch2_reservation_merge, \
}
-void bch2_bkey_mark_replicas_cached(struct bch_fs *, struct bkey_s,
- unsigned, unsigned);
+/* Extent checksum entries: */
-unsigned bch2_extent_is_compressed(struct bkey_s_c);
+bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
+ struct bch_extent_crc_unpacked);
+bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
+void bch2_extent_crc_append(struct bkey_i *,
+ struct bch_extent_crc_unpacked);
-bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
- struct bch_extent_ptr, u64);
+/* Generic code for keys with pointers: */
static inline bool bkey_extent_is_direct_data(const struct bkey *k)
{
@@ -477,34 +443,57 @@ static inline bool bkey_extent_is_allocation(const struct bkey *k)
}
}
-/* Extent entry iteration: */
+static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
+{
+ struct bch_devs_list ret = (struct bch_devs_list) { 0 };
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
-#define extent_for_each_entry_from(_e, _entry, _start) \
- __bkey_extent_entry_for_each_from(_start, \
- extent_entry_last(_e),_entry)
+ bkey_for_each_ptr(p, ptr)
+ ret.devs[ret.nr++] = ptr->dev;
-#define extent_for_each_entry(_e, _entry) \
- extent_for_each_entry_from(_e, _entry, (_e).v->start)
+ return ret;
+}
-#define extent_ptr_next(_e, _ptr) \
- __bkey_ptr_next(_ptr, extent_entry_last(_e))
+static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
+{
+ struct bch_devs_list ret = (struct bch_devs_list) { 0 };
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
-#define extent_for_each_ptr(_e, _ptr) \
- __bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)
+ bkey_for_each_ptr(p, ptr)
+ if (!ptr->cached)
+ ret.devs[ret.nr++] = ptr->dev;
-#define extent_for_each_ptr_decode(_e, _ptr, _entry) \
- __bkey_for_each_ptr_decode((_e).k, (_e).v->start, \
- extent_entry_last(_e), _ptr, _entry)
+ return ret;
+}
-void bch2_extent_crc_append(struct bkey_i *,
- struct bch_extent_crc_unpacked);
-void bch2_extent_ptr_decoded_append(struct bkey_i *,
- struct extent_ptr_decoded *);
+static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
+{
+ struct bch_devs_list ret = (struct bch_devs_list) { 0 };
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
-bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
- struct bch_extent_crc_unpacked);
-bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
+ bkey_for_each_ptr(p, ptr)
+ if (ptr->cached)
+ ret.devs[ret.nr++] = ptr->dev;
+ return ret;
+}
+
+unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
+unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
+unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
+unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
+bool bch2_check_range_allocated(struct bch_fs *, struct bpos, u64, unsigned);
+unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
+
+void bch2_bkey_mark_replicas_cached(struct bch_fs *, struct bkey_s,
+ unsigned, unsigned);
+
+void bch2_bkey_append_ptr(struct bkey_i *, struct bch_extent_ptr);
+void bch2_extent_ptr_decoded_append(struct bkey_i *,
+ struct extent_ptr_decoded *);
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
struct bch_extent_ptr *);
@@ -525,6 +514,22 @@ do { \
} \
} while (0)
+void bch2_bkey_drop_device(struct bkey_s, unsigned);
+const struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s_c, unsigned);
+bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
+
+bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
+ struct bch_extent_ptr, u64);
+
+bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
+void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
+ struct bkey_s_c);
+const char *bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c);
+
+void bch2_ptr_swab(const struct bkey_format *, struct bkey_packed *);
+
+/* Generic extent code: */
+
int bch2_cut_front_s(struct bpos, struct bkey_s);
int bch2_cut_back_s(struct bpos, struct bkey_s);
@@ -568,7 +573,4 @@ static inline void extent_save(struct btree *b, struct bkey_packed *dst,
BUG_ON(!bch2_bkey_pack_key(dst, src, f));
}
-bool bch2_check_range_allocated(struct bch_fs *, struct bpos, u64, unsigned);
-unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
-
#endif /* _BCACHEFS_EXTENTS_H */
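
For reference when reading the call-site changes below, this is how the counting helpers read after the reorganization (paraphrased from the extents.c hunks above, not authoritative):

/*
 *   bch2_bkey_nr_ptrs(k)                  all pointers: bch2_bkey_devs(k).nr
 *   bch2_bkey_nr_ptrs_allocated(k)        reservation: nr_replicas,
 *                                         otherwise: dirty (non-cached) pointers
 *   bch2_bkey_nr_ptrs_fully_allocated(k)  reservation: nr_replicas,
 *                                         otherwise: dirty pointers that are
 *                                         not compressed
 *   bch2_bkey_sectors_compressed(k)       sum of crc.compressed_size over
 *                                         dirty, compressed pointers
 */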
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index c278add8aa43..485d26b35e34 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -750,7 +750,7 @@ static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
struct bvec_iter iter;
struct bio_vec bv;
unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
- ? 0 : bch2_bkey_nr_ptrs_allocated(k);
+ ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
unsigned state = k.k->type == KEY_TYPE_reservation
? SECTOR_RESERVED
: SECTOR_ALLOCATED;
@@ -2589,7 +2589,7 @@ reassemble:
} else {
/* We might end up splitting compressed extents: */
unsigned nr_ptrs =
- bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(copy.k));
+ bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));
ret = bch2_disk_reservation_get(c, &disk_res,
copy.k->k.size, nr_ptrs,
@@ -2715,7 +2715,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
bch2_cut_back(end_pos, &reservation.k_i);
sectors = reservation.k.size;
- reservation.v.nr_replicas = bch2_bkey_nr_dirty_ptrs(k);
+ reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
if (!bkey_extent_is_allocation(k.k)) {
ret = bch2_quota_reservation_add(c, inode,
@@ -2726,7 +2726,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
}
if (reservation.v.nr_replicas < replicas ||
- bch2_extent_is_compressed(k)) {
+ bch2_bkey_sectors_compressed(k)) {
ret = bch2_disk_reservation_get(c, &disk_res, sectors,
replicas, 0);
if (unlikely(ret))
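
A compilable sketch of the fallocate decision above (the helper name and framing are assumptions, not bcachefs API): a disk reservation is taken when the existing key does not already account for enough replicas, or when the data is compressed and may need more space if split.

#include <stdbool.h>

/* hypothetical helper, not part of bcachefs */
bool fallocate_needs_disk_res(unsigned existing_replicas,
			      unsigned wanted_replicas,
			      unsigned compressed_sectors)
{
	return existing_replicas < wanted_replicas || compressed_sectors;
}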
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index a544ef7de31f..ca891b52706f 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -192,8 +192,8 @@ static int sum_sector_overwrites(struct btree_trans *trans,
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
if (!may_allocate &&
- bch2_bkey_nr_ptrs_allocated(old) <
- bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(new))) {
+ bch2_bkey_nr_ptrs_fully_allocated(old) <
+ bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new))) {
ret = -ENOSPC;
break;
}
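
A hedged reading of the sum_sector_overwrites() change above, as a compilable sketch (helper name is hypothetical): when the write is not allowed to allocate, every key being overwritten must have at least as many fully-allocated (dirty, uncompressed) pointers as the new key will have allocated (dirty) pointers, otherwise the write fails with -ENOSPC.

#include <stdbool.h>

/* hypothetical helper, not part of bcachefs */
bool overwrite_would_enospc(bool may_allocate,
			    unsigned old_fully_allocated_ptrs,
			    unsigned new_allocated_ptrs)
{
	return !may_allocate &&
	       old_fully_allocated_ptrs < new_allocated_ptrs;
}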
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 15547e149b3e..acdc1730e218 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -135,11 +135,11 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
* If we're not fully overwriting @k, and it's compressed, we
* need a reservation for all the pointers in @insert
*/
- nr = bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(insert)) -
+ nr = bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(insert)) -
m->nr_ptrs_reserved;
if (insert->k.size < k.k->size &&
- bch2_extent_is_compressed(k) &&
+ bch2_bkey_sectors_compressed(k) &&
nr > 0) {
ret = bch2_disk_reservation_add(c, &op->res,
keylist_sectors(keys) * nr, 0);
@@ -251,7 +251,7 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
*/
#if 0
int nr = (int) io_opts.data_replicas -
- bch2_bkey_nr_dirty_ptrs(k);
+ bch2_bkey_nr_ptrs_allocated(k);
#endif
int nr = (int) io_opts.data_replicas;
@@ -600,7 +600,7 @@ peek:
if (rate)
bch2_ratelimit_increment(rate, k.k->size);
next:
- atomic64_add(k.k->size * bch2_bkey_nr_dirty_ptrs(k),
+ atomic64_add(k.k->size * bch2_bkey_nr_ptrs_allocated(k),
&stats->sectors_seen);
next_nondata:
bch2_btree_iter_next(iter);
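
Worked numbers for the migrate reservation above (hypothetical values): if the replacement key ends up with 3 allocated pointers and 2 were reserved when the move started, nr = 1; when the original key is compressed and only partially overwritten, an extra keylist_sectors(keys) * nr — say 64 * 1 = 64 sectors — is reserved before the index update, since splitting the compressed original cannot free space immediately.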
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 9102a1ce1ec4..d4002b7fc917 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -254,7 +254,7 @@ static int bch2_extent_replay_key(struct bch_fs *c, enum btree_id btree_id,
* Some extents aren't equivalent - w.r.t. what the triggers do
* - if they're split:
*/
- bool remark_if_split = bch2_extent_is_compressed(bkey_i_to_s_c(k)) ||
+ bool remark_if_split = bch2_bkey_sectors_compressed(bkey_i_to_s_c(k)) ||
k->k.type == KEY_TYPE_reflink_p;
bool remark = false;
int ret;
@@ -289,7 +289,7 @@ retry:
bkey_cmp(atomic_end, k->k.p) < 0) {
ret = bch2_disk_reservation_add(c, &disk_res,
k->k.size *
- bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(k)),
+ bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(k)),
BCH_DISK_RESERVATION_NOFAIL);
BUG_ON(ret);
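
Worked numbers for the replay path above (hypothetical values): replaying a 128-sector compressed extent with 2 allocated pointers that has to be truncated at atomic_end reserves 128 * 2 = 256 sectors with BCH_DISK_RESERVATION_NOFAIL, since journal replay is not allowed to fail here (hence the BUG_ON(ret) that follows).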