author     Kent Overstreet <kent.overstreet@gmail.com>   2018-05-09 21:41:24 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>   2018-05-22 00:44:18 -0400
commit     a4d6d5031ba12ffec9183c0d5910cac08362b8b1 (patch)
tree       86b64d97adee2ddf9110f92e0a8491697e90233f
parent     2a38aa9c1d89e8510fea3d9291397f8764d8f980 (diff)

bcachefs: fixes for 32 bit/big endian machines

-rw-r--r--  fs/bcachefs/bcachefs_format.h        | 28
-rw-r--r--  fs/bcachefs/bcachefs_ioctl.h         | 19
-rw-r--r--  fs/bcachefs/bkey.h                   | 22
-rw-r--r--  fs/bcachefs/bset.c                   |  8
-rw-r--r--  fs/bcachefs/buckets.c                | 10
-rw-r--r--  fs/bcachefs/buckets.h                | 10
-rw-r--r--  fs/bcachefs/buckets_types.h          |  2
-rw-r--r--  fs/bcachefs/chardev.c                | 17
-rw-r--r--  fs/bcachefs/checksum.c               |  2
-rw-r--r--  fs/bcachefs/extents.c                |  2
-rw-r--r--  fs/bcachefs/extents.h                | 30
-rw-r--r--  fs/bcachefs/fs-io.c                  |  2
-rw-r--r--  fs/bcachefs/fsck.c                   |  9
-rw-r--r--  fs/bcachefs/io.c                     |  2
-rw-r--r--  fs/bcachefs/journal_seq_blacklist.c  |  2
-rw-r--r--  fs/bcachefs/six.c                    | 11
-rw-r--r--  fs/bcachefs/super-io.c               |  2
-rw-r--r--  fs/bcachefs/util.h                   |  4
18 files changed, 116 insertions, 66 deletions
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index db941ed25c51..ab8b944634e8 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -13,7 +13,7 @@
* - journal
* - btree
*
- * The btree is the primary structure, most metadata exists as keys in the
+ * The btree is the primary structure; most metadata exists as keys in the
* various btrees. There are only a small number of btrees, they're not
* sharded - we have one btree for extents, another for inodes, et cetera.
*
@@ -118,11 +118,11 @@ struct bpos {
* structure, it has to be byte swabbed when reading in metadata that
* wasn't written in native endian order:
*/
-#if defined(__LITTLE_ENDIAN)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__u32 snapshot;
__u64 offset;
__u64 inode;
-#elif defined(__BIG_ENDIAN)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
__u64 inode;
__u64 offset; /* Points to end of extent - sectors */
__u32 snapshot;
@@ -156,10 +156,10 @@ struct bch_val {
};
struct bversion {
-#if defined(__LITTLE_ENDIAN)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__u64 lo;
__u32 hi;
-#elif defined(__BIG_ENDIAN)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
__u32 hi;
__u64 lo;
#endif
@@ -183,13 +183,13 @@ struct bkey {
/* Type of the value */
__u8 type;
-#if defined(__LITTLE_ENDIAN)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__u8 pad[1];
struct bversion version;
__u32 size; /* extent size, in sectors */
struct bpos p;
-#elif defined(__BIG_ENDIAN)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
struct bpos p;
__u32 size; /* extent size, in sectors */
struct bversion version;
@@ -348,10 +348,10 @@ BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE);
*
* If an extent is not checksummed or compressed, when the extent is trimmed we
* don't have to remember the extent we originally allocated and wrote: we can
- * merely adjust ptr->offset to point to the start of the start of the data that
- * is currently live. The size field in struct bkey records the current (live)
- * size of the extent, and is also used to mean "size of region on disk that we
- * point to" in this case.
+ * merely adjust ptr->offset to point to the start of the data that is currently
+ * live. The size field in struct bkey records the current (live) size of the
+ * extent, and is also used to mean "size of region on disk that we point to" in
+ * this case.
*
* Thus an extent that is not checksummed or compressed will consist only of a
* list of bch_extent_ptrs, with none of the fields in
@@ -519,11 +519,11 @@ struct bch_extent_crc128 {
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 compression_type:4,
csum_type:4,
- nonce:14,
+ nonce:13,
offset:13,
_uncompressed_size:13,
_compressed_size:13,
- type:3;
+ type:4;
#endif
struct bch_csum csum;
} __attribute__((packed, aligned(8)));
@@ -569,7 +569,7 @@ struct bch_extent_reservation {
};
union bch_extent_entry {
-#if defined(__LITTLE_ENDIAN) || __BITS_PER_LONG == 64
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
unsigned long type;
#elif __BITS_PER_LONG == 32
struct {
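The bcachefs_format.h hunks above replace checks of the form "#if defined(__LITTLE_ENDIAN)" with comparisons against the compiler-provided __BYTE_ORDER__ macro. A likely reason, not spelled out in the commit message: this header is also built in userspace by bcachefs-tools, and glibc's <endian.h> defines both __LITTLE_ENDIAN and __BIG_ENDIAN as numeric constants, so defined() tests match on every host and can silently pick the wrong struct layout on big endian machines. __BYTE_ORDER__, __ORDER_LITTLE_ENDIAN__ and __ORDER_BIG_ENDIAN__ are predefined by gcc and clang in both kernel and userspace builds. A minimal standalone sketch of the reliable check (hypothetical test program, not part of this patch):

#include <stdio.h>

/*
 * __BYTE_ORDER__ and the __ORDER_*_ENDIAN__ constants are compiler builtins,
 * so no <endian.h> or <asm/byteorder.h> include is needed and the result is
 * the same whether this is compiled as kernel or userspace code.
 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
static const char *compile_time_order = "little endian";
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
static const char *compile_time_order = "big endian";
#else
#error "unsupported byte order"
#endif

int main(void)
{
	unsigned int x = 1;
	const char *runtime_order =
		*(unsigned char *) &x == 1 ? "little endian" : "big endian";

	/* The two should always agree: */
	printf("compile time: %s, runtime: %s\n",
	       compile_time_order, runtime_order);
	return 0;
}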
diff --git a/fs/bcachefs/bcachefs_ioctl.h b/fs/bcachefs/bcachefs_ioctl.h
index ad5578488cc7..73e5d887ccd8 100644
--- a/fs/bcachefs/bcachefs_ioctl.h
+++ b/fs/bcachefs/bcachefs_ioctl.h
@@ -172,11 +172,9 @@ enum bch_data_ops {
* scrub, rereplicate, migrate).
*
* This ioctl kicks off a job in the background, and returns a file descriptor.
- * Reading from the file descriptor returns a struct bch_ioctl_data_progress,
+ * Reading from the file descriptor returns a struct bch_ioctl_data_event,
* indicating current progress, and closing the file descriptor will stop the
* job. The file descriptor is O_CLOEXEC.
- *
- * @start - position
*/
struct bch_ioctl_data {
__u32 op;
@@ -196,6 +194,12 @@ struct bch_ioctl_data {
};
} __attribute__((packed, aligned(8)));
+enum bch_data_event {
+ BCH_DATA_EVENT_PROGRESS = 0,
+ /* XXX: add an event for reporting errors */
+ BCH_DATA_EVENT_NR = 1,
+};
+
struct bch_ioctl_data_progress {
__u8 data_type;
__u8 btree_id;
@@ -206,6 +210,15 @@ struct bch_ioctl_data_progress {
__u64 sectors_total;
} __attribute__((packed, aligned(8)));
+struct bch_ioctl_data_event {
+ __u8 type;
+ __u8 pad[7];
+ union {
+ struct bch_ioctl_data_progress p;
+ __u64 pad2[15];
+ };
+} __attribute__((packed, aligned(8)));
+
struct bch_ioctl_dev_usage {
__u8 state;
__u8 alive;
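The new struct bch_ioctl_data_event wraps the existing progress report in a tagged union with a fixed-size pad, so the descriptor returned by BCH_IOCTL_DATA can later carry other event types (the XXX notes error reporting) without changing the record size userspace reads. A rough sketch of a consumer, assuming fd is the descriptor returned by the ioctl and that a userspace copy of this header is available (hypothetical code, not taken from bcachefs-tools):

#include <stdio.h>
#include <unistd.h>

#include "bcachefs_ioctl.h"	/* assumption: userspace copy of this header */

/*
 * Poll the data-job descriptor.  Everything used here is visible in this
 * patch: the event begins with a one-byte type tag, reads shorter than the
 * full event are rejected with -EINVAL (see the chardev.c hunk below), and
 * BCH_DATA_EVENT_PROGRESS is the only type defined so far.
 */
static int watch_data_job(int fd)
{
	struct bch_ioctl_data_event e;
	ssize_t r;

	while ((r = read(fd, &e, sizeof(e))) == sizeof(e)) {
		switch (e.type) {
		case BCH_DATA_EVENT_PROGRESS:
			printf("%llu/%llu sectors\n",
			       (unsigned long long) e.p.sectors_done,
			       (unsigned long long) e.p.sectors_total);
			break;
		default:
			/* unknown event types: skip, for forward compatibility */
			break;
		}
		sleep(1);
	}

	return r < 0 ? -1 : 0;
}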
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index 0a7b9327a288..2d6c8a230a73 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -588,25 +588,31 @@ BKEY_VAL_ACCESSORS(quota, BCH_QUOTA);
/* byte order helpers */
-#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
-#error edit for your odd byteorder.
-#endif
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#ifdef __LITTLE_ENDIAN
+static inline unsigned high_word_offset(const struct bkey_format *f)
+{
+ return f->key_u64s - 1;
+}
#define high_bit_offset 0
-#define __high_word(u64s, k) ((k)->_data + (u64s) - 1)
#define nth_word(p, n) ((p) - (n))
-#else
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+static inline unsigned high_word_offset(const struct bkey_format *f)
+{
+ return 0;
+}
#define high_bit_offset KEY_PACKED_BITS_START
-#define __high_word(u64s, k) ((k)->_data)
#define nth_word(p, n) ((p) + (n))
+#else
+#error edit for your odd byteorder.
#endif
-#define high_word(format, k) __high_word((format)->key_u64s, k)
+#define high_word(f, k) ((k)->_data + high_word_offset(f))
#define next_word(p) nth_word(p, 1)
#define prev_word(p) nth_word(p, -1)
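The rewritten helpers make the endian dependence of packed-key traversal explicit: the most significant word of a packed bkey lives at _data[key_u64s - 1] on little endian (and next_word() steps backwards), but at _data[0] on big endian (and next_word() steps forwards). A toy illustration of the traversal order these macros produce, using simplified stand-in types rather than the real struct bkey_packed:

#include <stdio.h>

struct fake_format { unsigned key_u64s; };
struct fake_packed_key { unsigned long long _data[4]; };

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define high_word_idx(f)	((f)->key_u64s - 1)	/* last u64, walk down */
#define next_word_step		(-1)
#else
#define high_word_idx(f)	0			/* first u64, walk up */
#define next_word_step		(+1)
#endif

int main(void)
{
	struct fake_format f = { .key_u64s = 4 };
	struct fake_packed_key k = {
		._data = { 0x1111, 0x2222, 0x3333, 0x4444 },
	};
	unsigned long long *p = k._data + high_word_idx(&f);
	unsigned i;

	/* Visit the key's words from most to least significant: */
	for (i = 0; i < f.key_u64s; i++, p += next_word_step)
		printf("word %u: 0x%llx\n", i, *p);
	return 0;
}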
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 5c9b016f7706..9a27477409ba 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -439,6 +439,10 @@ void bch2_btree_keys_free(struct btree *b)
b->aux_data = NULL;
}
+#ifndef PAGE_KERNEL_EXEC
+# define PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
+
int bch2_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
{
b->page_order = page_order;
@@ -673,7 +677,7 @@ static inline unsigned bkey_mantissa(const struct bkey_packed *k,
* (and then the bits we want are at the high end, so we shift them
* back down):
*/
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
v >>= f->exponent & 7;
#else
v >>= 64 - (f->exponent & 7) - (idx < BFLOAT_32BIT_NR ? 32 : 16);
@@ -762,7 +766,7 @@ static void make_bfloat(struct btree *b, struct bset_tree *t,
* Then we calculate the actual shift value, from the start of the key
* (k->_data), to get the key bits starting at exponent:
*/
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;
EBUG_ON(shift + bits > b->format.key_u64s * 64);
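The PAGE_KERNEL_EXEC fallback added at the top of this bset.c diff covers architectures that provide no separate executable kernel page protection; bch2_btree_keys_alloc() presumably needs it because bcachefs generates specialized bkey unpack code at runtime and stores it in the btree node's aux area. Where the macro is missing, plain PAGE_KERNEL is the only option. A sketch of the shape of such an allocation under that assumption (alloc_exec_area() is a hypothetical helper; the ~4.17-era __vmalloc() still accepted a pgprot_t):

#include <linux/vmalloc.h>
#include <linux/gfp.h>

#ifndef PAGE_KERNEL_EXEC
#define PAGE_KERNEL_EXEC PAGE_KERNEL	/* arch has no separate exec protection */
#endif

/* Hypothetical helper: allocate a buffer the kernel may execute from. */
static void *alloc_exec_area(size_t bytes, gfp_t gfp)
{
	return __vmalloc(bytes, gfp, PAGE_KERNEL_EXEC);
}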
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 03918e46b4d2..5dda22c73d43 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -555,9 +555,9 @@ static void bch2_mark_pointer(struct bch_fs *c,
return;
}
- v = READ_ONCE(g->_mark.counter);
+ v = atomic64_read(&g->_mark.v);
do {
- new.counter = old.counter = v;
+ new.v.counter = old.v.counter = v;
saturated = 0;
/*
@@ -600,9 +600,9 @@ static void bch2_mark_pointer(struct bch_fs *c,
g->_mark = new;
break;
}
- } while ((v = cmpxchg(&g->_mark.counter,
- old.counter,
- new.counter)) != old.counter);
+ } while ((v = atomic64_cmpxchg(&g->_mark.v,
+ old.v.counter,
+ new.v.counter)) != old.v.counter);
bch2_dev_usage_update(c, ca, old, new);
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 01f0b314c322..aefe602744b7 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -16,15 +16,15 @@
#define bucket_cmpxchg(g, new, expr) \
({ \
- u64 _v = READ_ONCE((g)->_mark.counter); \
+ u64 _v = atomic64_read(&(g)->_mark.v); \
struct bucket_mark _old; \
\
do { \
- (new).counter = _old.counter = _v; \
+ (new).v.counter = _old.v.counter = _v; \
expr; \
- } while ((_v = cmpxchg(&(g)->_mark.counter, \
- _old.counter, \
- (new).counter)) != _old.counter);\
+ } while ((_v = atomic64_cmpxchg(&(g)->_mark.v, \
+ _old.v.counter, \
+ (new).v.counter)) != _old.v.counter);\
_old; \
})
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 28bd2c596477..10f00861385e 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -6,7 +6,7 @@
struct bucket_mark {
union {
struct {
- u64 counter;
+ atomic64_t v;
};
struct {
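The buckets.c, buckets.h and buckets_types.h changes replace a bare u64 counter updated with raw cmpxchg() by an atomic64_t updated with atomic64_read()/atomic64_cmpxchg(). The 32-bit angle: a 64-bit cmpxchg() is not available on every 32-bit architecture, while atomic64_t always works because the kernel falls back to a generic spinlock-based implementation (CONFIG_GENERIC_ATOMIC64) where the hardware has no 64-bit compare-and-swap. Condensed form of the resulting update loop, with a hypothetical field update standing in for the macro's expr argument:

#include <linux/atomic.h>

#include "buckets_types.h"	/* struct bucket, struct bucket_mark */

static struct bucket_mark bucket_mark_update(struct bucket *g, u16 delta)
{
	struct bucket_mark old, new;
	u64 v = atomic64_read(&g->_mark.v);

	do {
		old.v.counter = new.v.counter = v;

		/* hypothetical update in place of the caller-supplied expr: */
		new.dirty_sectors += delta;
	} while ((v = atomic64_cmpxchg(&g->_mark.v,
				       old.v.counter,
				       new.v.counter)) != old.v.counter);

	/* callers get the pre-update mark back, as bucket_cmpxchg() does */
	return old;
}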
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 4600a2084c19..5593b9a1de27 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -300,18 +300,19 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
{
struct bch_data_ctx *ctx = file->private_data;
struct bch_fs *c = ctx->c;
- struct bch_ioctl_data_progress p = {
- .data_type = ctx->stats.data_type,
- .btree_id = ctx->stats.iter.btree_id,
- .pos = ctx->stats.iter.pos,
- .sectors_done = atomic64_read(&ctx->stats.sectors_seen),
- .sectors_total = bch2_fs_sectors_used(c, bch2_fs_usage_read(c)),
+ struct bch_ioctl_data_event e = {
+ .type = BCH_DATA_EVENT_PROGRESS,
+ .p.data_type = ctx->stats.data_type,
+ .p.btree_id = ctx->stats.iter.btree_id,
+ .p.pos = ctx->stats.iter.pos,
+ .p.sectors_done = atomic64_read(&ctx->stats.sectors_seen),
+ .p.sectors_total = bch2_fs_sectors_used(c, bch2_fs_usage_read(c)),
};
- if (len != sizeof(p))
+ if (len < sizeof(e))
return -EINVAL;
- return copy_to_user(buf, &p, sizeof(p)) ?: sizeof(p);
+ return copy_to_user(buf, &e, sizeof(e)) ?: sizeof(e);
}
static const struct file_operations bcachefs_data_ops = {
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index 6d8543eb6500..28d086bc0e61 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -421,7 +421,7 @@ static struct bch_csum bch2_checksum_merge(unsigned type,
BUG_ON(!bch2_checksum_mergeable(type));
while (b_len) {
- unsigned b = min(b_len, PAGE_SIZE);
+ unsigned b = min_t(unsigned, b_len, PAGE_SIZE);
a.lo = bch2_checksum_update(type, a.lo,
page_address(ZERO_PAGE(0)), b);
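The min() to min_t() switch here (and the max_t() conversions in the fsck.c and journal_seq_blacklist.c hunks further down) is a type-mismatch fix: PAGE_SIZE is an unsigned long, the kernel's min()/max() complain when their two operands have different types, and that presumably only shows up on 32 bit, where types like size_t stop being the same as unsigned long. min_t()/max_t() cast both operands to an explicitly chosen type. Illustrative example, not taken from this patch (bounded_chunk() and its size_t parameter are assumptions):

#include <linux/kernel.h>
#include <linux/mm.h>

static unsigned bounded_chunk(size_t b_len)
{
	/*
	 * min(b_len, PAGE_SIZE) compares size_t with unsigned long: the same
	 * type on 64 bit, different types on 32 bit, where min() then warns.
	 * min_t() names the comparison type and sidesteps the issue.
	 */
	return min_t(unsigned, b_len, PAGE_SIZE);
}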
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 035e6f9bd264..b85af711b9f9 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -730,7 +730,7 @@ err:
bch2_fs_bug(c, "%s btree pointer %s: bucket %zi "
"gen %i mark %08x",
err, buf, PTR_BUCKET_NR(ca, ptr),
- mark.gen, (unsigned) mark.counter);
+ mark.gen, (unsigned) mark.v.counter);
}
void bch2_btree_ptr_to_text(struct bch_fs *c, char *buf,
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 338e9e01cf5d..08ad96472406 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -278,24 +278,38 @@ bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
.uncompressed_size = k->size,
.live_size = k->size,
};
- case BCH_EXTENT_CRC32:
- return (struct bch_extent_crc_unpacked) {
+ case BCH_EXTENT_CRC32: {
+ struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
common_fields(crc->crc32),
- .csum.lo = (__force __le64) crc->crc32.csum,
};
- case BCH_EXTENT_CRC64:
- return (struct bch_extent_crc_unpacked) {
+
+ *((__le32 *) &ret.csum.lo) = crc->crc32.csum;
+
+ memcpy(&ret.csum.lo, &crc->crc32.csum,
+ sizeof(crc->crc32.csum));
+
+ return ret;
+ }
+ case BCH_EXTENT_CRC64: {
+ struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
common_fields(crc->crc64),
.nonce = crc->crc64.nonce,
.csum.lo = (__force __le64) crc->crc64.csum_lo,
- .csum.hi = (__force __le64) crc->crc64.csum_hi,
};
- case BCH_EXTENT_CRC128:
- return (struct bch_extent_crc_unpacked) {
+
+ *((__le16 *) &ret.csum.hi) = crc->crc64.csum_hi;
+
+ return ret;
+ }
+ case BCH_EXTENT_CRC128: {
+ struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
common_fields(crc->crc128),
.nonce = crc->crc128.nonce,
.csum = crc->crc128.csum,
};
+
+ return ret;
+ }
default:
BUG();
}
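Previously the 32- and 64-bit checksum variants were widened straight into the __le64 csum.lo/csum.hi fields with an integer cast. An integer assignment stores the value in host byte order, so the checksum bytes land in different positions inside the 128-bit csum field on big endian than on little endian; writing through a pointer of the narrow type (or memcpy of the raw bytes), as the new code does, places the same bytes in the same slots on both. A standalone demonstration of why the two approaches only happen to agree on little endian (hypothetical test, not bcachefs code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t csum32 = 0x11223344;
	uint64_t lo_by_assignment = 0, lo_by_bytes = 0;
	unsigned char *a = (unsigned char *) &lo_by_assignment;
	unsigned char *b = (unsigned char *) &lo_by_bytes;
	int i;

	/* Widening assignment: where the bytes end up depends on the host. */
	lo_by_assignment = (uint64_t) csum32;

	/* Copying the checksum's bytes into the leading bytes of the field,
	 * roughly what the *(__le32 *) store / memcpy in the hunk above do. */
	memcpy(&lo_by_bytes, &csum32, sizeof(csum32));

	for (i = 0; i < 8; i++)
		printf("byte %d: assignment %02x, byte copy %02x\n",
		       i, a[i], b[i]);

	/* On little endian the two columns match; on big endian they don't,
	 * which is the discrepancy the new code avoids. */
	return 0;
}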
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index b5e0a8c50eb7..1d9464af1db0 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -678,7 +678,7 @@ static void bch2_clear_page_bits(struct page *page)
if (!PagePrivate(page))
return;
- s = xchg(page_state(page), (struct bch_page_state) { .v = 0 });
+ s.v = xchg(&page_state(page)->v, 0);
ClearPagePrivate(page);
if (s.dirty_sectors)
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 2991a0dd3830..c554a987f3aa 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -610,9 +610,10 @@ static inline bool inode_bitmap_test(struct inode_bitmap *b, size_t nr)
static inline int inode_bitmap_set(struct inode_bitmap *b, size_t nr)
{
if (nr >= b->size) {
- size_t new_size = max(max(PAGE_SIZE * 8,
- b->size * 2),
- nr + 1);
+ size_t new_size = max_t(size_t, max_t(size_t,
+ PAGE_SIZE * 8,
+ b->size * 2),
+ nr + 1);
void *n;
new_size = roundup_pow_of_two(new_size);
@@ -642,7 +643,7 @@ struct pathbuf {
static int path_down(struct pathbuf *p, u64 inum)
{
if (p->nr == p->size) {
- size_t new_size = max(256UL, p->size * 2);
+ size_t new_size = max_t(size_t, 256UL, p->size * 2);
void *n = krealloc(p->entries,
new_size * sizeof(p->entries[0]),
GFP_KERNEL);
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index d2a2a6e2dd82..3762fb92b04f 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1493,7 +1493,7 @@ csum_err:
}
bch2_dev_io_error(ca,
- "data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)",
+ "data checksum error, inode %llu offset %llu: expected %0llx:%0llx got %0llx:%0llx (type %u)",
rbio->pos.inode, (u64) rbio->bvec_iter.bi_sector,
rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
csum.hi, csum.lo, crc.csum_type);
diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c
index b5301d96469e..567289e22ca0 100644
--- a/fs/bcachefs/journal_seq_blacklist.c
+++ b/fs/bcachefs/journal_seq_blacklist.c
@@ -247,7 +247,7 @@ int bch2_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b)
if (!bl->nr_entries ||
is_power_of_2(bl->nr_entries)) {
n = krealloc(bl->entries,
- max(bl->nr_entries * 2, 8UL) * sizeof(*n),
+ max_t(size_t, bl->nr_entries * 2, 8) * sizeof(*n),
GFP_KERNEL);
if (!n) {
ret = -ENOMEM;
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index f0ff8d41923c..afa59a476a70 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -146,6 +146,8 @@ struct six_lock_waiter {
/* This is probably up there with the more evil things I've done */
#define waitlist_bitnr(id) ilog2((((union six_lock_state) { .waiters = 1 << (id) }).l))
+#ifdef CONFIG_LOCK_SPIN_ON_OWNER
+
static inline int six_can_spin_on_owner(struct six_lock *lock)
{
struct task_struct *owner;
@@ -257,6 +259,15 @@ fail:
return false;
}
+#else /* CONFIG_LOCK_SPIN_ON_OWNER */
+
+static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
+{
+ return false;
+}
+
+#endif
+
noinline
static void __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type)
{
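six.c now compiles the optimistic-spinning helpers only when CONFIG_LOCK_SPIN_ON_OWNER is set (it is not on !SMP kernels and some architectures), and otherwise substitutes a six_optimistic_spin() stub returning false, so the slowpath callers stay free of #ifdefs. The same pattern in miniature (a sketch with made-up names, not the six lock code):

#include <linux/types.h>

struct mylock;	/* stand-in for struct six_lock */

#ifdef CONFIG_LOCK_SPIN_ON_OWNER
static bool mylock_optimistic_spin(struct mylock *lock)
{
	/* placeholder for the real loop that spins while the owner is running */
	return false;
}
#else
/* No owner spinning on this configuration: report failure, callers sleep. */
static inline bool mylock_optimistic_spin(struct mylock *lock)
{
	return false;
}
#endif

static void mylock_slowpath(struct mylock *lock)
{
	if (mylock_optimistic_spin(lock))
		return;		/* acquired while spinning */

	/* otherwise queue up and sleep; this path is compiled either way */
}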
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index a2b981a3c9c5..9772d5973078 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -624,7 +624,7 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
bio_set_dev(bio, ca->disk_sb.bdev);
bio->bi_iter.bi_sector = le64_to_cpu(sb->offset);
bio->bi_iter.bi_size =
- roundup(vstruct_bytes(sb),
+ roundup((size_t) vstruct_bytes(sb),
bdev_logical_block_size(ca->disk_sb.bdev));
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
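The (size_t) cast in write_one_super() is another 32-bit build fix: vstruct_bytes() evaluates to a u64, and roundup() divides by its second argument, so on 32 bit the macro expands to a 64-by-32 division that needs a libgcc helper the kernel does not link against, giving an undefined-reference error. Since a superblock is nowhere near 4GiB, narrowing first keeps the arithmetic native. A sketch of the two ways out (round_to_blocks*() and their parameters are made up for illustration):

#include <linux/kernel.h>
#include <linux/math64.h>

/* Use the explicit 64-bit division helper when the value really can be huge. */
static u64 round_to_blocks(u64 bytes, unsigned block_size)
{
	return div_u64(bytes + block_size - 1, block_size) * block_size;
}

/* Or, when the value is known to fit, narrow it first, as the patch does. */
static size_t round_to_blocks_small(u64 bytes, unsigned block_size)
{
	return roundup((size_t) bytes, block_size);
}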
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 49813c142aee..184915593e86 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -68,9 +68,9 @@ struct closure;
#define __flatten
#endif
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CPU_BIG_ENDIAN 0
-#else
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define CPU_BIG_ENDIAN 1
#endif