Diffstat (limited to 'fs/bcachefs/bcachefs_format.h')
-rw-r--r--	fs/bcachefs/bcachefs_format.h	539
1 file changed, 413 insertions(+), 126 deletions(-)
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 5153f0e42054..7d1c0b1e3c54 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -78,6 +78,21 @@
#include <linux/uuid.h>
#include "vstructs.h"
+#define BITMASK(name, type, field, offset, end) \
+static const unsigned name##_OFFSET = offset; \
+static const unsigned name##_BITS = (end - offset); \
+ \
+static inline __u64 name(const type *k) \
+{ \
+ return (k->field >> offset) & ~(~0ULL << (end - offset)); \
+} \
+ \
+static inline void SET_##name(type *k, __u64 v) \
+{ \
+ k->field &= ~(~(~0ULL << (end - offset)) << offset); \
+ k->field |= (v & ~(~0ULL << (end - offset))) << offset; \
+}
+
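The new BITMASK() generates a getter and a SET_ helper that operate on the field directly, with no byte-order conversion (unlike the existing LE_BITMASK() variants, going by the name). As a sketch, and not part of the patch itself, this is roughly what it expands to for BCH_ALLOC_V4_NEED_DISCARD, defined further down over bits 0..1 of bch_alloc_v4.flags, with the (end - offset) arithmetic folded:

/* Approximate expansion of BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1) */
static const unsigned BCH_ALLOC_V4_NEED_DISCARD_OFFSET = 0;
static const unsigned BCH_ALLOC_V4_NEED_DISCARD_BITS   = 1;

static inline __u64 BCH_ALLOC_V4_NEED_DISCARD(const struct bch_alloc_v4 *k)
{
	return (k->flags >> 0) & ~(~0ULL << 1);
}

static inline void SET_BCH_ALLOC_V4_NEED_DISCARD(struct bch_alloc_v4 *k, __u64 v)
{
	k->flags &= ~(~(~0ULL << 1) << 0);
	k->flags |= (v & ~(~0ULL << 1)) << 0;
}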
#define LE_BITMASK(_bits, name, type, field, offset, end) \
static const unsigned name##_OFFSET = offset; \
static const unsigned name##_BITS = (end - offset); \
@@ -132,7 +147,7 @@ struct bpos {
#else
#error edit for your odd byteorder.
#endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
#define KEY_INODE_MAX ((__u64)~0ULL)
#define KEY_OFFSET_MAX ((__u64)~0ULL)
@@ -166,7 +181,7 @@ struct bversion {
__u32 hi;
__u64 lo;
#endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
struct bkey {
/* Size of combined key and value, in u64s */
@@ -199,7 +214,7 @@ struct bkey {
__u8 pad[1];
#endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bkey_packed {
__u64 _data[0];
@@ -233,7 +248,7 @@ struct bkey_packed {
* to the same size as struct bkey should hopefully be safest.
*/
__u8 pad[sizeof(struct bkey) - 3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BKEY_U64s (sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX U8_MAX
@@ -275,16 +290,8 @@ enum bch_bkey_fields {
struct bkey_i {
__u64 _data[0];
- union {
- struct {
- /* Size of combined key and value, in u64s */
- __u8 u64s;
- };
- struct {
- struct bkey k;
- struct bch_val v;
- };
- };
+ struct bkey k;
+ struct bch_val v;
};
#define KEY(_inode, _offset, _size) \
@@ -303,7 +310,7 @@ static inline void bkey_init(struct bkey *k)
#define bkey_bytes(_k) ((_k)->u64s * sizeof(__u64))
#define __BKEY_PADDED(key, pad) \
- struct { struct bkey_i key; __u64 key ## _pad[pad]; }
+ struct bkey_i key; __u64 key ## _pad[pad]
/*
* - DELETED keys are used internally to mark keys that should be ignored but
@@ -321,7 +328,7 @@ static inline void bkey_init(struct bkey *k)
* number.
*
* - WHITEOUT: for hash table btrees
-*/
+ */
#define BCH_BKEY_TYPES() \
x(deleted, 0) \
x(whiteout, 1) \
@@ -347,7 +354,13 @@ static inline void bkey_init(struct bkey *k)
x(subvolume, 21) \
x(snapshot, 22) \
x(inode_v2, 23) \
- x(alloc_v3, 24)
+ x(alloc_v3, 24) \
+ x(set, 25) \
+ x(lru, 26) \
+ x(alloc_v4, 27) \
+ x(backpointer, 28) \
+ x(inode_v3, 29) \
+ x(bucket_gens, 30)
enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name = nr,
@@ -377,6 +390,10 @@ struct bch_hash_whiteout {
struct bch_val v;
};
+struct bch_set {
+ struct bch_val v;
+};
+
/* Extents */
/*
@@ -454,7 +471,7 @@ struct bch_hash_whiteout {
struct bch_csum {
__le64 lo;
__le64 hi;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_EXTENT_ENTRY_TYPES() \
x(ptr, 0) \
@@ -491,7 +508,7 @@ struct bch_extent_crc32 {
_compressed_size:7,
type:2;
#endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define CRC32_SIZE_MAX (1U << 7)
#define CRC32_NONCE_MAX 0
@@ -517,7 +534,7 @@ struct bch_extent_crc64 {
type:3;
#endif
__u64 csum_lo;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define CRC64_SIZE_MAX (1U << 9)
#define CRC64_NONCE_MAX ((1U << 10) - 1)
@@ -541,7 +558,7 @@ struct bch_extent_crc128 {
type:4;
#endif
struct bch_csum csum;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define CRC128_SIZE_MAX (1U << 13)
#define CRC128_NONCE_MAX ((1U << 13) - 1)
@@ -554,7 +571,7 @@ struct bch_extent_ptr {
__u64 type:1,
cached:1,
unused:1,
- reservation:1,
+ unwritten:1,
offset:44, /* 8 petabytes */
dev:8,
gen:8;
@@ -562,12 +579,12 @@ struct bch_extent_ptr {
__u64 gen:8,
dev:8,
offset:44,
- reservation:1,
+ unwritten:1,
unused:1,
cached:1,
type:1;
#endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -617,9 +634,9 @@ union bch_extent_entry {
struct bch_btree_ptr {
struct bch_val v;
- struct bch_extent_ptr start[0];
__u64 _data[0];
-} __attribute__((packed, aligned(8)));
+ struct bch_extent_ptr start[];
+} __packed __aligned(8);
struct bch_btree_ptr_v2 {
struct bch_val v;
@@ -629,18 +646,18 @@ struct bch_btree_ptr_v2 {
__le16 sectors_written;
__le16 flags;
struct bpos min_key;
- struct bch_extent_ptr start[0];
__u64 _data[0];
-} __attribute__((packed, aligned(8)));
+ struct bch_extent_ptr start[];
+} __packed __aligned(8);
LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
struct bch_extent {
struct bch_val v;
- union bch_extent_entry start[0];
__u64 _data[0];
-} __attribute__((packed, aligned(8)));
+ union bch_extent_entry start[];
+} __packed __aligned(8);
struct bch_reservation {
struct bch_val v;
@@ -648,7 +665,7 @@ struct bch_reservation {
__le32 generation;
__u8 nr_replicas;
__u8 pad[3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX\
@@ -682,7 +699,7 @@ struct bch_inode {
__le32 bi_flags;
__le16 bi_mode;
__u8 fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_inode_v2 {
struct bch_val v;
@@ -692,20 +709,35 @@ struct bch_inode_v2 {
__le64 bi_flags;
__le16 bi_mode;
__u8 fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
+
+struct bch_inode_v3 {
+ struct bch_val v;
+
+ __le64 bi_journal_seq;
+ __le64 bi_hash_seed;
+ __le64 bi_flags;
+ __le64 bi_sectors;
+ __le64 bi_size;
+ __le64 bi_version;
+ __u8 fields[0];
+} __packed __aligned(8);
+
+#define INODEv3_FIELDS_START_INITIAL 6
+#define INODEv3_FIELDS_START_CUR (offsetof(struct bch_inode_v3, fields) / sizeof(u64))
struct bch_inode_generation {
struct bch_val v;
__le32 bi_generation;
__le32 pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/*
* bi_subvol and bi_parent_subvol are only set for subvolume roots:
*/
-#define BCH_INODE_FIELDS() \
+#define BCH_INODE_FIELDS_v2() \
x(bi_atime, 96) \
x(bi_ctime, 96) \
x(bi_mtime, 96) \
@@ -732,6 +764,32 @@ struct bch_inode_generation {
x(bi_subvol, 32) \
x(bi_parent_subvol, 32)
+#define BCH_INODE_FIELDS_v3() \
+ x(bi_atime, 96) \
+ x(bi_ctime, 96) \
+ x(bi_mtime, 96) \
+ x(bi_otime, 96) \
+ x(bi_uid, 32) \
+ x(bi_gid, 32) \
+ x(bi_nlink, 32) \
+ x(bi_generation, 32) \
+ x(bi_dev, 32) \
+ x(bi_data_checksum, 8) \
+ x(bi_compression, 8) \
+ x(bi_project, 32) \
+ x(bi_background_compression, 8) \
+ x(bi_data_replicas, 8) \
+ x(bi_promote_target, 16) \
+ x(bi_foreground_target, 16) \
+ x(bi_background_target, 16) \
+ x(bi_erasure_code, 16) \
+ x(bi_fields_set, 16) \
+ x(bi_dir, 64) \
+ x(bi_dir_offset, 64) \
+ x(bi_subvol, 32) \
+ x(bi_parent_subvol, 32) \
+ x(bi_nocow, 8)
+
/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS() \
x(data_checksum, 8) \
@@ -742,7 +800,8 @@ struct bch_inode_generation {
x(promote_target, 16) \
x(foreground_target, 16) \
x(background_target, 16) \
- x(erasure_code, 16)
+ x(erasure_code, 16) \
+ x(nocow, 8)
enum inode_opt_id {
#define x(name, ...) \
@@ -757,16 +816,16 @@ enum {
* User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
* flags)
*/
- __BCH_INODE_SYNC = 0,
- __BCH_INODE_IMMUTABLE = 1,
- __BCH_INODE_APPEND = 2,
- __BCH_INODE_NODUMP = 3,
- __BCH_INODE_NOATIME = 4,
+ __BCH_INODE_SYNC = 0,
+ __BCH_INODE_IMMUTABLE = 1,
+ __BCH_INODE_APPEND = 2,
+ __BCH_INODE_NODUMP = 3,
+ __BCH_INODE_NOATIME = 4,
- __BCH_INODE_I_SIZE_DIRTY= 5,
- __BCH_INODE_I_SECTORS_DIRTY= 6,
- __BCH_INODE_UNLINKED = 7,
- __BCH_INODE_BACKPTR_UNTRUSTED = 8,
+ __BCH_INODE_I_SIZE_DIRTY = 5,
+ __BCH_INODE_I_SECTORS_DIRTY = 6,
+ __BCH_INODE_UNLINKED = 7,
+ __BCH_INODE_BACKPTR_UNTRUSTED = 8,
/* bits 20+ reserved for packed fields below: */
};
@@ -788,6 +847,13 @@ LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32);
LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24);
LE64_BITMASK(INODEv2_NR_FIELDS, struct bch_inode_v2, bi_flags, 24, 31);
+LE64_BITMASK(INODEv3_STR_HASH, struct bch_inode_v3, bi_flags, 20, 24);
+LE64_BITMASK(INODEv3_NR_FIELDS, struct bch_inode_v3, bi_flags, 24, 31);
+
+LE64_BITMASK(INODEv3_FIELDS_START,
+ struct bch_inode_v3, bi_flags, 31, 36);
+LE64_BITMASK(INODEv3_MODE, struct bch_inode_v3, bi_flags, 36, 52);
+
/* Dirents */
/*
@@ -820,15 +886,14 @@ struct bch_dirent {
__u8 d_type;
__u8 d_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define DT_SUBVOL 16
#define BCH_DT_MAX 17
-#define BCH_NAME_MAX (U8_MAX * sizeof(u64) - \
+#define BCH_NAME_MAX ((unsigned) (U8_MAX * sizeof(u64) - \
sizeof(struct bkey) - \
- offsetof(struct bch_dirent, d_name))
-
+ offsetof(struct bch_dirent, d_name)))
/* Xattrs */
@@ -844,7 +909,7 @@ struct bch_xattr {
__u8 x_name_len;
__le16 x_val_len;
__u8 x_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Bucket/allocation information: */
@@ -853,7 +918,7 @@ struct bch_alloc {
__u8 fields;
__u8 gen;
__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_ALLOC_FIELDS_V1() \
x(read_time, 16) \
@@ -865,6 +930,12 @@ struct bch_alloc {
x(stripe, 32) \
x(stripe_redundancy, 8)
+enum {
+#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
+ BCH_ALLOC_FIELDS_V1()
+#undef x
+};
+
struct bch_alloc_v2 {
struct bch_val v;
__u8 nr_fields;
@@ -872,13 +943,13 @@ struct bch_alloc_v2 {
__u8 oldest_gen;
__u8 data_type;
__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_ALLOC_FIELDS_V2() \
x(read_time, 64) \
x(write_time, 64) \
- x(dirty_sectors, 16) \
- x(cached_sectors, 16) \
+ x(dirty_sectors, 32) \
+ x(cached_sectors, 32) \
x(stripe, 32) \
x(stripe_redundancy, 8)
@@ -891,14 +962,55 @@ struct bch_alloc_v3 {
__u8 oldest_gen;
__u8 data_type;
__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
-enum {
-#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
- BCH_ALLOC_FIELDS_V1()
-#undef x
- BCH_ALLOC_FIELD_NR
-};
+LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
+LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
+
+struct bch_alloc_v4 {
+ struct bch_val v;
+ __u64 journal_seq;
+ __u32 flags;
+ __u8 gen;
+ __u8 oldest_gen;
+ __u8 data_type;
+ __u8 stripe_redundancy;
+ __u32 dirty_sectors;
+ __u32 cached_sectors;
+ __u64 io_time[2];
+ __u32 stripe;
+ __u32 nr_external_backpointers;
+ __u64 fragmentation_lru;
+} __packed __aligned(8);
+
+#define BCH_ALLOC_V4_U64s_V0 6
+#define BCH_ALLOC_V4_U64s (sizeof(struct bch_alloc_v4) / sizeof(u64))
+
+BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1)
+BITMASK(BCH_ALLOC_V4_NEED_INC_GEN, struct bch_alloc_v4, flags, 1, 2)
+BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags, 2, 8)
+BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS, struct bch_alloc_v4, flags, 8, 14)
+
+#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX 40
+
+struct bch_backpointer {
+ struct bch_val v;
+ __u8 btree_id;
+ __u8 level;
+ __u8 data_type;
+ __u64 bucket_offset:40;
+ __u32 bucket_len;
+ struct bpos pos;
+} __packed __aligned(8);
+
+#define KEY_TYPE_BUCKET_GENS_BITS 8
+#define KEY_TYPE_BUCKET_GENS_NR (1U << KEY_TYPE_BUCKET_GENS_BITS)
+#define KEY_TYPE_BUCKET_GENS_MASK (KEY_TYPE_BUCKET_GENS_NR - 1)
+
+struct bch_bucket_gens {
+ struct bch_val v;
+ u8 gens[KEY_TYPE_BUCKET_GENS_NR];
+} __packed __aligned(8);
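For illustration, the BITMASK()-generated helpers above get used roughly like this (a usage sketch with a hypothetical caller, not code from this patch):

/* Usage sketch; the helper name example_mark_need_discard is illustrative */
static inline void example_mark_need_discard(struct bch_alloc_v4 *a)
{
	if (!BCH_ALLOC_V4_NEED_DISCARD(a))
		SET_BCH_ALLOC_V4_NEED_DISCARD(a, 1);
}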
/* Quotas: */
@@ -923,7 +1035,7 @@ struct bch_quota_counter {
struct bch_quota {
struct bch_val v;
struct bch_quota_counter c[Q_COUNTERS];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* Erasure coding */
@@ -938,8 +1050,8 @@ struct bch_stripe {
__u8 csum_type;
__u8 pad;
- struct bch_extent_ptr ptrs[0];
-} __attribute__((packed, aligned(8)));
+ struct bch_extent_ptr ptrs[];
+} __packed __aligned(8);
/* Reflink: */
@@ -956,14 +1068,14 @@ struct bch_reflink_p {
*/
__le32 front_pad;
__le32 back_pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_reflink_v {
struct bch_val v;
__le64 refcount;
union bch_extent_entry start[0];
__u64 _data[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct bch_indirect_inline_data {
struct bch_val v;
@@ -1015,6 +1127,15 @@ LE32_BITMASK(BCH_SNAPSHOT_DELETED, struct bch_snapshot, flags, 0, 1)
/* True if a subvolume points to this snapshot node: */
LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2)
+/* LRU btree: */
+
+struct bch_lru {
+ struct bch_val v;
+ __le64 idx;
+} __packed __aligned(8);
+
+#define LRU_ID_STRIPES (1U << 16)
+
/* Optional/variable size superblock sections: */
struct bch_sb_field {
@@ -1023,16 +1144,18 @@ struct bch_sb_field {
__le32 type;
};
-#define BCH_SB_FIELDS() \
- x(journal, 0) \
- x(members, 1) \
- x(crypt, 2) \
- x(replicas_v0, 3) \
- x(quota, 4) \
- x(disk_groups, 5) \
- x(clean, 6) \
- x(replicas, 7) \
- x(journal_seq_blacklist, 8)
+#define BCH_SB_FIELDS() \
+ x(journal, 0) \
+ x(members, 1) \
+ x(crypt, 2) \
+ x(replicas_v0, 3) \
+ x(quota, 4) \
+ x(disk_groups, 5) \
+ x(clean, 6) \
+ x(replicas, 7) \
+ x(journal_seq_blacklist, 8) \
+ x(journal_v2, 9) \
+ x(counters, 10)
enum bch_sb_field_type {
#define x(f, nr) BCH_SB_FIELD_##f = nr,
@@ -1041,6 +1164,14 @@ enum bch_sb_field_type {
BCH_SB_FIELD_NR
};
+/*
+ * Most superblock fields are replicated in all device's superblocks - a few are
+ * not:
+ */
+#define BCH_SINGLE_DEVICE_SB_FIELDS \
+ ((1U << BCH_SB_FIELD_journal)| \
+ (1U << BCH_SB_FIELD_journal_v2))
+
/* BCH_SB_FIELD_journal: */
struct bch_sb_field_journal {
@@ -1048,6 +1179,15 @@ struct bch_sb_field_journal {
__le64 buckets[0];
};
+struct bch_sb_field_journal_v2 {
+ struct bch_sb_field field;
+
+ struct bch_sb_field_journal_v2_entry {
+ __le64 start;
+ __le64 nr;
+ } d[0];
+};
+
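Each bch_sb_field_journal_v2_entry describes a run of journal buckets as (start, nr) rather than listing every bucket the way the older bch_sb_field_journal does. A rough sketch of expanding the entries follows; the helper name is illustrative, and the entry count, which the real code derives from the field's size, is taken as a parameter here:

static void example_journal_v2_buckets(struct bch_sb_field_journal_v2 *j,
				       unsigned nr_entries)
{
	for (unsigned i = 0; i < nr_entries; i++) {
		__u64 start = le64_to_cpu(j->d[i].start);
		__u64 nr    = le64_to_cpu(j->d[i].nr);

		for (__u64 b = start; b < start + nr; b++)
			;	/* bucket b is a journal bucket */
	}
}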
/* BCH_SB_FIELD_members: */
#define BCH_MIN_NR_NBUCKETS (1 << 6)
@@ -1069,6 +1209,8 @@ LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags[0], 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED, struct bch_member, flags[0], 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags[0], 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags[0], 28, 30)
+LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
+ struct bch_member, flags[0], 30, 31)
#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
@@ -1144,13 +1286,16 @@ LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
/* BCH_SB_FIELD_replicas: */
#define BCH_DATA_TYPES() \
- x(none, 0) \
+ x(free, 0) \
x(sb, 1) \
x(journal, 2) \
x(btree, 3) \
x(user, 4) \
x(cached, 5) \
- x(parity, 6)
+ x(parity, 6) \
+ x(stripe, 7) \
+ x(need_gc_gens, 8) \
+ x(need_discard, 9)
enum bch_data_type {
#define x(t, n) BCH_DATA_##t,
@@ -1159,31 +1304,54 @@ enum bch_data_type {
BCH_DATA_NR
};
+static inline bool data_type_is_empty(enum bch_data_type type)
+{
+ switch (type) {
+ case BCH_DATA_free:
+ case BCH_DATA_need_gc_gens:
+ case BCH_DATA_need_discard:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool data_type_is_hidden(enum bch_data_type type)
+{
+ switch (type) {
+ case BCH_DATA_sb:
+ case BCH_DATA_journal:
+ return true;
+ default:
+ return false;
+ }
+}
+
struct bch_replicas_entry_v0 {
__u8 data_type;
__u8 nr_devs;
- __u8 devs[0];
-} __attribute__((packed));
+ __u8 devs[];
+} __packed;
struct bch_sb_field_replicas_v0 {
struct bch_sb_field field;
- struct bch_replicas_entry_v0 entries[0];
-} __attribute__((packed, aligned(8)));
+ struct bch_replicas_entry_v0 entries[];
+} __packed __aligned(8);
struct bch_replicas_entry {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
- __u8 devs[0];
-} __attribute__((packed));
+ __u8 devs[];
+} __packed;
#define replicas_entry_bytes(_i) \
(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
struct bch_sb_field_replicas {
struct bch_sb_field field;
- struct bch_replicas_entry entries[0];
-} __attribute__((packed, aligned(8)));
+ struct bch_replicas_entry entries[];
+} __packed __aligned(8);
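Since bch_replicas_entry is variable length, walking the entries in bch_sb_field_replicas means advancing by replicas_entry_bytes(), defined just above. A sketch, assuming the caller passes in the end of the field (the in-tree iteration helpers may differ):

static void example_walk_replicas(struct bch_sb_field_replicas *r, void *end)
{
	struct bch_replicas_entry *e = r->entries;

	while ((void *) e < end) {
		/* e->data_type, e->nr_devs, e->nr_required, e->devs[] */
		e = (void *) e + replicas_entry_bytes(e);
	}
}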
/* BCH_SB_FIELD_quota: */
@@ -1200,7 +1368,7 @@ struct bch_sb_quota_type {
struct bch_sb_field_quota {
struct bch_sb_field field;
struct bch_sb_quota_type q[QTYP_NR];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/* BCH_SB_FIELD_disk_groups: */
@@ -1209,7 +1377,7 @@ struct bch_sb_field_quota {
struct bch_disk_group {
__u8 label[BCH_SB_LABEL_SIZE];
__le64 flags[2];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
@@ -1218,7 +1386,100 @@ LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
struct bch_sb_field_disk_groups {
struct bch_sb_field field;
struct bch_disk_group entries[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
+
+/* BCH_SB_FIELD_counters */
+
+#define BCH_PERSISTENT_COUNTERS() \
+ x(io_read, 0) \
+ x(io_write, 1) \
+ x(io_move, 2) \
+ x(bucket_invalidate, 3) \
+ x(bucket_discard, 4) \
+ x(bucket_alloc, 5) \
+ x(bucket_alloc_fail, 6) \
+ x(btree_cache_scan, 7) \
+ x(btree_cache_reap, 8) \
+ x(btree_cache_cannibalize, 9) \
+ x(btree_cache_cannibalize_lock, 10) \
+ x(btree_cache_cannibalize_lock_fail, 11) \
+ x(btree_cache_cannibalize_unlock, 12) \
+ x(btree_node_write, 13) \
+ x(btree_node_read, 14) \
+ x(btree_node_compact, 15) \
+ x(btree_node_merge, 16) \
+ x(btree_node_split, 17) \
+ x(btree_node_rewrite, 18) \
+ x(btree_node_alloc, 19) \
+ x(btree_node_free, 20) \
+ x(btree_node_set_root, 21) \
+ x(btree_path_relock_fail, 22) \
+ x(btree_path_upgrade_fail, 23) \
+ x(btree_reserve_get_fail, 24) \
+ x(journal_entry_full, 25) \
+ x(journal_full, 26) \
+ x(journal_reclaim_finish, 27) \
+ x(journal_reclaim_start, 28) \
+ x(journal_write, 29) \
+ x(read_promote, 30) \
+ x(read_bounce, 31) \
+ x(read_split, 33) \
+ x(read_retry, 32) \
+ x(read_reuse_race, 34) \
+ x(move_extent_read, 35) \
+ x(move_extent_write, 36) \
+ x(move_extent_finish, 37) \
+ x(move_extent_fail, 38) \
+ x(move_extent_alloc_mem_fail, 39) \
+ x(copygc, 40) \
+ x(copygc_wait, 41) \
+ x(gc_gens_end, 42) \
+ x(gc_gens_start, 43) \
+ x(trans_blocked_journal_reclaim, 44) \
+ x(trans_restart_btree_node_reused, 45) \
+ x(trans_restart_btree_node_split, 46) \
+ x(trans_restart_fault_inject, 47) \
+ x(trans_restart_iter_upgrade, 48) \
+ x(trans_restart_journal_preres_get, 49) \
+ x(trans_restart_journal_reclaim, 50) \
+ x(trans_restart_journal_res_get, 51) \
+ x(trans_restart_key_cache_key_realloced, 52) \
+ x(trans_restart_key_cache_raced, 53) \
+ x(trans_restart_mark_replicas, 54) \
+ x(trans_restart_mem_realloced, 55) \
+ x(trans_restart_memory_allocation_failure, 56) \
+ x(trans_restart_relock, 57) \
+ x(trans_restart_relock_after_fill, 58) \
+ x(trans_restart_relock_key_cache_fill, 59) \
+ x(trans_restart_relock_next_node, 60) \
+ x(trans_restart_relock_parent_for_fill, 61) \
+ x(trans_restart_relock_path, 62) \
+ x(trans_restart_relock_path_intent, 63) \
+ x(trans_restart_too_many_iters, 64) \
+ x(trans_restart_traverse, 65) \
+ x(trans_restart_upgrade, 66) \
+ x(trans_restart_would_deadlock, 67) \
+ x(trans_restart_would_deadlock_write, 68) \
+ x(trans_restart_injected, 69) \
+ x(trans_restart_key_cache_upgrade, 70) \
+ x(trans_traverse_all, 71) \
+ x(transaction_commit, 72) \
+ x(write_super, 73) \
+ x(trans_restart_would_deadlock_recursion_limit, 74) \
+ x(trans_restart_write_buffer_flush, 75) \
+ x(trans_restart_split_race, 76)
+
+enum bch_persistent_counters {
+#define x(t, n, ...) BCH_COUNTER_##t,
+ BCH_PERSISTENT_COUNTERS()
+#undef x
+ BCH_COUNTER_NR
+};
+
+struct bch_sb_field_counters {
+ struct bch_sb_field field;
+ __le64 d[0];
+};
/*
* On clean shutdown, store btree roots and current journal sequence number in
@@ -1275,19 +1536,33 @@ struct bch_sb_field_journal_seq_blacklist {
#define BCH_JSET_VERSION_OLD 2
#define BCH_BSET_VERSION_OLD 3
+#define BCH_METADATA_VERSIONS() \
+ x(bkey_renumber, 10) \
+ x(inode_btree_change, 11) \
+ x(snapshot, 12) \
+ x(inode_backpointers, 13) \
+ x(btree_ptr_sectors_written, 14) \
+ x(snapshot_2, 15) \
+ x(reflink_p_fix, 16) \
+ x(subvol_dirent, 17) \
+ x(inode_v2, 18) \
+ x(freespace, 19) \
+ x(alloc_v4, 20) \
+ x(new_data_types, 21) \
+ x(backpointers, 22) \
+ x(inode_v3, 23) \
+ x(unwritten_extents, 24) \
+ x(bucket_gens, 25) \
+ x(lru_v2, 26) \
+ x(fragmentation_lru, 27) \
+ x(no_bps_in_alloc_keys, 28)
+
enum bcachefs_metadata_version {
- bcachefs_metadata_version_min = 9,
- bcachefs_metadata_version_new_versioning = 10,
- bcachefs_metadata_version_bkey_renumber = 10,
- bcachefs_metadata_version_inode_btree_change = 11,
- bcachefs_metadata_version_snapshot = 12,
- bcachefs_metadata_version_inode_backpointers = 13,
- bcachefs_metadata_version_btree_ptr_sectors_written = 14,
- bcachefs_metadata_version_snapshot_2 = 15,
- bcachefs_metadata_version_reflink_p_fix = 16,
- bcachefs_metadata_version_subvol_dirent = 17,
- bcachefs_metadata_version_inode_v2 = 18,
- bcachefs_metadata_version_max = 19,
+ bcachefs_metadata_version_min = 9,
+#define x(t, n) bcachefs_metadata_version_##t = n,
+ BCH_METADATA_VERSIONS()
+#undef x
+ bcachefs_metadata_version_max
};
#define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1)
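With the x-macro in place, the enum keeps the old numbering for the retained entries and simply extends it; roughly, it now expands to:

/*
 * Expansion sketch of enum bcachefs_metadata_version:
 *
 *	bcachefs_metadata_version_min			= 9,
 *	bcachefs_metadata_version_bkey_renumber		= 10,
 *	bcachefs_metadata_version_inode_btree_change	= 11,
 *	  ...
 *	bcachefs_metadata_version_no_bps_in_alloc_keys	= 28,
 *	bcachefs_metadata_version_max			   (== 29)
 *
 * so bcachefs_metadata_version_current evaluates to 28 (no_bps_in_alloc_keys).
 */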
@@ -1302,7 +1577,7 @@ struct bch_sb_layout {
__u8 nr_superblocks;
__u8 pad[5];
__le64 sb_offset[61];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#define BCH_SB_LAYOUT_SECTOR 7
@@ -1312,7 +1587,7 @@ struct bch_sb_layout {
* @version_min - Oldest metadata version this filesystem contains; so we can
* safely drop compatibility code and refuse to mount filesystems
* we'd need it for
- * @magic - identifies as a bcachefs superblock (BCACHE_MAGIC)
+ * @magic - identifies as a bcachefs superblock (BCHFS_MAGIC)
* @seq - incremented each time superblock is written
* @uuid - used for generating various magic numbers and identifying
* member devices, never changes
@@ -1353,7 +1628,7 @@ struct bch_sb {
struct bch_sb_field start[0];
__le64 _data[0];
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/*
* Flags:
@@ -1428,6 +1703,8 @@ LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
+LE64_BITMASK(BCH_SB_NOCOW, struct bch_sb, flags[4], 33, 34);
+LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE, struct bch_sb, flags[4], 34, 54);
/*
* Features:
@@ -1626,6 +1903,9 @@ enum bch_compression_opts {
#define BCACHE_MAGIC \
UUID_LE(0xf67385c6, 0x1a4e, 0xca45, \
0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
+#define BCHFS_MAGIC \
+ UUID_LE(0xf67385c6, 0xce66, 0xa990, \
+ 0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
#define BCACHEFS_STATFS_MAGIC 0xca451a4e
@@ -1635,6 +1915,7 @@ enum bch_compression_opts {
static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
__le64 ret;
+
memcpy(&ret, &sb->uuid, sizeof(ret));
return ret;
}
@@ -1663,7 +1944,8 @@ static inline __u64 __bset_magic(struct bch_sb *sb)
x(data_usage, 6) \
x(clock, 7) \
x(dev_usage, 8) \
- x(log, 9)
+ x(log, 9) \
+ x(overwrite, 10)
enum {
#define x(f, nr) BCH_JSET_ENTRY_##f = nr,
@@ -1708,26 +1990,26 @@ enum {
struct jset_entry_usage {
struct jset_entry entry;
__le64 v;
-} __attribute__((packed));
+} __packed;
struct jset_entry_data_usage {
struct jset_entry entry;
__le64 v;
struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
struct jset_entry_clock {
struct jset_entry entry;
__u8 rw;
__u8 pad[7];
__le64 time;
-} __attribute__((packed));
+} __packed;
struct jset_entry_dev_usage_type {
__le64 buckets;
__le64 sectors;
__le64 fragmented;
-} __attribute__((packed));
+} __packed;
struct jset_entry_dev_usage {
struct jset_entry entry;
@@ -1735,10 +2017,10 @@ struct jset_entry_dev_usage {
__u32 pad;
__le64 buckets_ec;
- __le64 buckets_unavailable;
+ __le64 _buckets_unavailable; /* No longer used */
struct jset_entry_dev_usage_type d[];
-} __attribute__((packed));
+} __packed;
static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
@@ -1749,7 +2031,7 @@ static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage
struct jset_entry_log {
struct jset_entry entry;
u8 d[];
-} __attribute__((packed));
+} __packed;
/*
* On disk format for a journal entry:
@@ -1784,7 +2066,7 @@ struct jset {
struct jset_entry start[0];
__u64 _data[0];
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
@@ -1795,16 +2077,21 @@ LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6);
/* Btree: */
#define BCH_BTREE_IDS() \
- x(extents, 0) \
- x(inodes, 1) \
- x(dirents, 2) \
- x(xattrs, 3) \
- x(alloc, 4) \
- x(quotas, 5) \
- x(stripes, 6) \
- x(reflink, 7) \
- x(subvolumes, 8) \
- x(snapshots, 9)
+ x(extents, 0) \
+ x(inodes, 1) \
+ x(dirents, 2) \
+ x(xattrs, 3) \
+ x(alloc, 4) \
+ x(quotas, 5) \
+ x(stripes, 6) \
+ x(reflink, 7) \
+ x(subvolumes, 8) \
+ x(snapshots, 9) \
+ x(lru, 10) \
+ x(freespace, 11) \
+ x(need_discard, 12) \
+ x(backpointers, 13) \
+ x(bucket_gens, 14)
enum btree_id {
#define x(kwd, val) BTREE_ID_##kwd = val,
@@ -1843,7 +2130,7 @@ struct bset {
struct bkey_packed start[0];
__u64 _data[0];
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
@@ -1876,7 +2163,7 @@ struct btree_node {
};
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
LE64_BITMASK(BTREE_NODE_ID, struct btree_node, flags, 0, 4);
LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags, 4, 8);
@@ -1897,6 +2184,6 @@ struct btree_node_entry {
};
};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
#endif /* _BCACHEFS_FORMAT_H */