summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Kent Overstreet <kent.overstreet@linux.dev>    2023-05-22 14:39:44 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>    2023-06-20 22:55:57 -0400
commit  c41c3ce4a50822450f673e19f1dbb0bef3aa1a35 (patch)
tree    aa35113e4f91676407632e91f3a950a3e883fd21
parent  a8474b8e82753b79eb4a47c2c775b0e7303a0869 (diff)
bcachefs: Fix a buffer overrun in bch2_fs_usage_read()
We were copying the size of a struct bch_fs_usage_online to a struct
bch_fs_usage, which is 8 bytes smaller.

This adds some new helpers so we can do this correctly, and get rid of
some magic +1s too.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--    fs/bcachefs/buckets.c    16
-rw-r--r--    fs/bcachefs/buckets.h    18
2 files changed, 25 insertions, 9 deletions
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index bce42eef6f57..bd144182c1e1 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -137,17 +137,17 @@ u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
struct bch_fs_usage_online *ret;
- unsigned seq, i, v, u64s = fs_usage_u64s(c) + 1;
+ unsigned nr_replicas = READ_ONCE(c->replicas.nr);
+ unsigned seq, i;
retry:
- ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
+ ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_NOFS);
if (unlikely(!ret))
return NULL;
percpu_down_read(&c->mark_lock);
- v = fs_usage_u64s(c) + 1;
- if (unlikely(u64s != v)) {
- u64s = v;
+ if (nr_replicas != c->replicas.nr) {
+ nr_replicas = c->replicas.nr;
percpu_up_read(&c->mark_lock);
kfree(ret);
goto retry;
@@ -157,10 +157,12 @@ retry:
do {
seq = read_seqcount_begin(&c->usage_lock);
- unsafe_memcpy(&ret->u, c->usage_base, u64s * sizeof(u64),
+ unsafe_memcpy(&ret->u, c->usage_base,
+ __fs_usage_u64s(nr_replicas) * sizeof(u64),
"embedded variable length struct");
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
- acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
+ acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i],
+ __fs_usage_u64s(nr_replicas));
} while (read_seqcount_retry(&c->usage_lock, seq));
return ret;
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index d677b0225c52..bdf4fff9cb8a 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -207,10 +207,24 @@ static inline u64 dev_buckets_available(struct bch_dev *ca,
/* Filesystem usage: */
+static inline unsigned __fs_usage_u64s(unsigned nr_replicas)
+{
+ return sizeof(struct bch_fs_usage) / sizeof(u64) + nr_replicas;
+}
+
static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
- return sizeof(struct bch_fs_usage) / sizeof(u64) +
- READ_ONCE(c->replicas.nr);
+ return __fs_usage_u64s(READ_ONCE(c->replicas.nr));
+}
+
+static inline unsigned __fs_usage_online_u64s(unsigned nr_replicas)
+{
+ return sizeof(struct bch_fs_usage_online) / sizeof(u64) + nr_replicas;
+}
+
+static inline unsigned fs_usage_online_u64s(struct bch_fs *c)
+{
+ return __fs_usage_online_u64s(READ_ONCE(c->replicas.nr));
}
static inline unsigned dev_usage_u64s(void)