author    Kent Overstreet <kent.overstreet@gmail.com>    2022-01-10 18:54:57 -0500
committer Kent Overstreet <kent.overstreet@gmail.com>    2022-01-10 18:54:57 -0500
commit    1cb2aa75de60ac6a6ca7c2b2e12ff311fa8f34d9 (patch)
tree      71790126b5f0015029b7f864be8540c50116d974
parent    f624bc9c708170a7dad04250caa9b53a1f572d6c (diff)
Merge with 4f690b7c37 bcachefs: Don't use in-memory bucket array for alloc updates
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--  fs/bcachefs/acl.c                |  10
-rw-r--r--  fs/bcachefs/acl.h                |   4
-rw-r--r--  fs/bcachefs/alloc_background.c   |  92
-rw-r--r--  fs/bcachefs/bcachefs.h           |   2
-rw-r--r--  fs/bcachefs/btree_cache.c        |   3
-rw-r--r--  fs/bcachefs/btree_io.c           |   6
-rw-r--r--  fs/bcachefs/btree_io.h           |   2
-rw-r--r--  fs/bcachefs/btree_iter.c         |  19
-rw-r--r--  fs/bcachefs/btree_iter.h         |   2
-rw-r--r--  fs/bcachefs/btree_key_cache.c    |   2
-rw-r--r--  fs/bcachefs/btree_update.h       |  17
-rw-r--r--  fs/bcachefs/buckets.c            |  16
-rw-r--r--  fs/bcachefs/checksum.c           |  31
-rw-r--r--  fs/bcachefs/checksum.h           |   6
-rw-r--r--  fs/bcachefs/compress.c           |   2
-rw-r--r--  fs/bcachefs/ec.c                 |   2
-rw-r--r--  fs/bcachefs/fs-io.c              | 421
-rw-r--r--  fs/bcachefs/fs-io.h              |   7
-rw-r--r--  fs/bcachefs/fs-ioctl.c           |  29
-rw-r--r--  fs/bcachefs/fs.c                 |  95
-rw-r--r--  fs/bcachefs/fs.h                 |   3
-rw-r--r--  fs/bcachefs/fsck.c               |   4
-rw-r--r--  fs/bcachefs/io.c                 |   6
-rw-r--r--  fs/bcachefs/journal.h            |   2
-rw-r--r--  fs/bcachefs/journal_reclaim.c    |   1
-rw-r--r--  fs/bcachefs/move.c               |  12
-rw-r--r--  fs/bcachefs/move.h               |   9
-rw-r--r--  fs/bcachefs/str_hash.h           |   2
-rw-r--r--  fs/bcachefs/super-io.c           |   2
-rw-r--r--  fs/bcachefs/super.c              |  70
-rw-r--r--  fs/bcachefs/super.h              |   1
-rw-r--r--  fs/bcachefs/util.h               |  34
-rw-r--r--  fs/bcachefs/xattr.c              |   2
-rw-r--r--  include/trace/events/bcachefs.h  |   4
34 files changed, 277 insertions, 643 deletions
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 2588812c5066..5070caf8f349 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -212,7 +212,7 @@ bch2_acl_to_xattr(struct btree_trans *trans,
return xattr;
}
-struct posix_acl *bch2_get_acl(struct inode *vinode, int type)
+struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu)
{
struct bch_inode_info *inode = to_bch_ei(vinode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
@@ -224,6 +224,9 @@ struct posix_acl *bch2_get_acl(struct inode *vinode, int type)
struct bkey_s_c k;
int ret;
+ if (rcu)
+ return ERR_PTR(-ECHILD);
+
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
@@ -289,7 +292,8 @@ int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
return ret == -ENOENT ? 0 : ret;
}
-int bch2_set_acl(struct inode *vinode, struct posix_acl *_acl, int type)
+int bch2_set_acl(struct user_namespace *mnt_userns,
+ struct inode *vinode, struct posix_acl *_acl, int type)
{
struct bch_inode_info *inode = to_bch_ei(vinode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
@@ -314,7 +318,7 @@ retry:
mode = inode_u.bi_mode;
if (type == ACL_TYPE_ACCESS) {
- ret = posix_acl_update_mode(&inode->v, &mode, &acl);
+ ret = posix_acl_update_mode(mnt_userns, &inode->v, &mode, &acl);
if (ret)
goto btree_err;
}
diff --git a/fs/bcachefs/acl.h b/fs/bcachefs/acl.h
index 14cabbc91808..2d76a4897ba8 100644
--- a/fs/bcachefs/acl.h
+++ b/fs/bcachefs/acl.h
@@ -26,12 +26,12 @@ typedef struct {
__le32 a_version;
} bch_acl_header;
-struct posix_acl *bch2_get_acl(struct inode *, int);
+struct posix_acl *bch2_get_acl(struct inode *, int, bool);
int bch2_set_acl_trans(struct btree_trans *, subvol_inum,
struct bch_inode_unpacked *,
struct posix_acl *, int);
-int bch2_set_acl(struct inode *, struct posix_acl *, int);
+int bch2_set_acl(struct user_namespace *, struct inode *, struct posix_acl *, int);
int bch2_acl_chmod(struct btree_trans *, subvol_inum,
struct bch_inode_unpacked *,
umode_t, struct posix_acl **);
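
The two prototype changes above track VFS API updates in this kernel range: ->get_acl() grew a bool rcu argument for RCU-walk permission checks, and ->set_acl() now receives the mount's user namespace for idmapped mounts. A minimal sketch of the new ->get_acl() shape (myfs_get_acl() is hypothetical; bch2_get_acl() above does the same -ECHILD bailout before starting a btree transaction):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/posix_acl.h>

static struct posix_acl *myfs_get_acl(struct inode *inode, int type, bool rcu)
{
	/*
	 * Under RCU pathwalk we may not sleep or take locks, so ask the
	 * VFS to retry the lookup in ref-walk mode.
	 */
	if (rcu)
		return ERR_PTR(-ECHILD);

	/* ...read the ACL xattr and convert it to a struct posix_acl... */
	return NULL;	/* no ACL present */
}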
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index b3641d196517..21d0754e0105 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -464,19 +464,20 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
+ struct bkey_s_c k;
struct bkey_alloc_unpacked u;
u64 *time, now;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
BTREE_ITER_CACHED|
- BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
- ret = bch2_btree_iter_traverse(&iter);
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
if (ret)
goto out;
- u = alloc_mem_to_key(c, &iter);
+ u = bch2_alloc_unpack(k);
time = rw == READ ? &u.read_time : &u.write_time;
now = atomic64_read(&c->io_clock[rw].now);
@@ -667,33 +668,33 @@ static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
}
static int bucket_invalidate_btree(struct btree_trans *trans,
- struct bch_dev *ca, u64 b)
+ struct bch_dev *ca, u64 b,
+ struct bkey_alloc_unpacked *u)
{
struct bch_fs *c = trans->c;
- struct bkey_alloc_unpacked u;
struct btree_iter iter;
+ struct bkey_s_c k;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
POS(ca->dev_idx, b),
BTREE_ITER_CACHED|
- BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
- ret = bch2_btree_iter_traverse(&iter);
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
if (ret)
goto err;
- u = alloc_mem_to_key(c, &iter);
+ *u = bch2_alloc_unpack(k);
+ u->gen++;
+ u->data_type = 0;
+ u->dirty_sectors = 0;
+ u->cached_sectors = 0;
+ u->read_time = atomic64_read(&c->io_clock[READ].now);
+ u->write_time = atomic64_read(&c->io_clock[WRITE].now);
- u.gen++;
- u.data_type = 0;
- u.dirty_sectors = 0;
- u.cached_sectors = 0;
- u.read_time = atomic64_read(&c->io_clock[READ].now);
- u.write_time = atomic64_read(&c->io_clock[WRITE].now);
-
- ret = bch2_alloc_write(trans, &iter, &u,
+ ret = bch2_alloc_write(trans, &iter, u,
BTREE_TRIGGER_BUCKET_INVALIDATE);
err:
bch2_trans_iter_exit(trans, &iter);
@@ -703,21 +704,24 @@ err:
static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
u64 *journal_seq, unsigned flags)
{
- struct bucket *g;
- struct bucket_mark m;
+ struct bkey_alloc_unpacked u;
size_t b;
+ u64 commit_seq = 0;
int ret = 0;
+ /*
+ * If the read-only path is trying to shut down, we can't be generating
+ * new btree updates:
+ */
+ if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags))
+ return 1;
+
BUG_ON(!ca->alloc_heap.used ||
!ca->alloc_heap.data[0].nr);
b = ca->alloc_heap.data[0].bucket;
/* first, put on free_inc and mark as owned by allocator: */
percpu_down_read(&c->mark_lock);
- g = bucket(ca, b);
- m = READ_ONCE(g->mark);
-
- BUG_ON(m.dirty_sectors);
bch2_mark_alloc_bucket(c, ca, b, true);
@@ -726,38 +730,15 @@ static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
BUG_ON(!fifo_push(&ca->free_inc, b));
spin_unlock(&c->freelist_lock);
- /*
- * If we're not invalidating cached data, we only increment the bucket
- * gen in memory here, the incremented gen will be updated in the btree
- * by bch2_trans_mark_pointer():
- */
- if (!m.data_type &&
- !bch2_bucket_needs_journal_commit(c, c->journal.flushed_seq_ondisk,
- ca->dev_idx, b)) {
- bucket_cmpxchg(g, m, m.gen++);
- *bucket_gen(ca, b) = m.gen;
- percpu_up_read(&c->mark_lock);
- goto out;
- }
-
percpu_up_read(&c->mark_lock);
- /*
- * If the read-only path is trying to shut down, we can't be generating
- * new btree updates:
- */
- if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
- ret = 1;
- goto out;
- }
-
- ret = bch2_trans_do(c, NULL, journal_seq,
+ ret = bch2_trans_do(c, NULL, &commit_seq,
BTREE_INSERT_NOCHECK_RW|
BTREE_INSERT_NOFAIL|
BTREE_INSERT_JOURNAL_RESERVED|
flags,
- bucket_invalidate_btree(&trans, ca, b));
-out:
+ bucket_invalidate_btree(&trans, ca, b, &u));
+
if (!ret) {
/* remove from alloc_heap: */
struct alloc_heap_entry e, *top = ca->alloc_heap.data;
@@ -767,6 +748,19 @@ out:
if (!top->nr)
heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
+
+ /*
+ * If we invalidating cached data then we need to wait on the
+ * journal commit:
+ */
+ if (u.data_type)
+ *journal_seq = max(*journal_seq, commit_seq);
+
+ /*
+ * We already waiting on u.alloc_seq when we filtered out
+ * buckets that need journal commit:
+ */
+ BUG_ON(*journal_seq > u.journal_seq);
} else {
size_t b2;
@@ -985,7 +979,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
lockdep_assert_held(&c->state_lock);
for_each_online_member(ca, c, i) {
- struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
+ struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
ra_pages += bdi->ra_pages;
}
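
The alloc_background.c hunks are the core of the commit subject: instead of reading bucket state from the in-memory bucket array via alloc_mem_to_key(), updates now fetch the alloc key through the cached btree iterator. The new read-modify-write shape, sketched using only calls that appear in this patch (error handling trimmed; not a standalone compilable unit):

static int alloc_update_sketch(struct btree_trans *trans, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_alloc_unpacked u;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, pos,
			     BTREE_ITER_CACHED|BTREE_ITER_INTENT);

	/* peek_slot() replaces the old traverse + alloc_mem_to_key() pair */
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (!ret) {
		u = bch2_alloc_unpack(k);
		/* ...modify u, then bch2_alloc_write(trans, &iter, &u, flags)... */
	}

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}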
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index f1e4871a74c3..a28ddcd5d7b7 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -822,7 +822,7 @@ struct bch_fs {
ZSTD_parameters zstd_params;
struct crypto_shash *sha256;
- struct crypto_skcipher *chacha20;
+ struct crypto_sync_skcipher *chacha20;
struct crypto_shash *poly1305;
atomic64_t key_version;
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index cfede3344883..986d08d708cc 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -78,8 +78,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
if (!b->data)
return -ENOMEM;
#ifdef __KERNEL__
- b->aux_data = __vmalloc(btree_aux_data_bytes(b), gfp,
- PAGE_KERNEL_EXEC);
+ b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
#else
b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
PROT_READ|PROT_WRITE|PROT_EXEC,
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 4ff38c6395f3..a3651325a022 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -33,6 +33,8 @@ void bch2_btree_node_io_unlock(struct btree *b)
void bch2_btree_node_io_lock(struct btree *b)
{
+ BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+
wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
TASK_UNINTERRUPTIBLE);
}
@@ -51,12 +53,16 @@ void __bch2_btree_node_wait_on_write(struct btree *b)
void bch2_btree_node_wait_on_read(struct btree *b)
{
+ BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+
wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
TASK_UNINTERRUPTIBLE);
}
void bch2_btree_node_wait_on_write(struct btree *b)
{
+ BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+
wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
TASK_UNINTERRUPTIBLE);
}
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index f11a2e96227b..0f20224e2a77 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -122,7 +122,7 @@ static inline void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offse
bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
bytes);
- nonce = nonce_add(nonce, round_up(bytes, CHACHA20_BLOCK_SIZE));
+ nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
}
bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 05efcf7576b0..efe9b8cb9f1c 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -573,6 +573,8 @@ void bch2_trans_unlock(struct btree_trans *trans)
trans_for_each_path(trans, path)
__bch2_btree_path_unlock(path);
+
+ BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
}
/* Btree iterator: */
@@ -2192,6 +2194,23 @@ inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
return ret;
}
+static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
+ enum btree_id btree_id,
+ struct bpos pos)
+{
+ struct btree_insert_entry *i;
+
+ trans_for_each_update(trans, i)
+ if ((cmp_int(btree_id, i->btree_id) ?:
+ bpos_cmp(pos, i->k->k.p)) <= 0) {
+ if (btree_id == i->btree_id)
+ return i->k;
+ break;
+ }
+
+ return NULL;
+}
+
static noinline
struct bkey_i *__btree_trans_peek_journal(struct btree_trans *trans,
struct btree_path *path)
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 981817247dfe..5205d53ce8dc 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -136,7 +136,7 @@ int __must_check bch2_btree_path_traverse(struct btree_trans *,
struct btree_path *, unsigned);
struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
unsigned, unsigned, unsigned, unsigned long);
-struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
+inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 61a447dc578a..faed51e7f4b8 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -604,7 +604,7 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
do {
struct rhash_head *pos, *next;
- pos = *rht_bucket(tbl, bc->shrink_iter);
+ pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));
while (!rht_is_a_nulls(pos)) {
next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index 6b3d08406a81..5e5a1b5e750e 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -141,21 +141,4 @@ static inline int bch2_trans_commit(struct btree_trans *trans,
(_i) < (_trans)->updates + (_trans)->nr_updates; \
(_i)++)
-static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
- enum btree_id btree_id,
- struct bpos pos)
-{
- struct btree_insert_entry *i;
-
- trans_for_each_update(trans, i)
- if ((cmp_int(btree_id, i->btree_id) ?:
- bpos_cmp(pos, i->k->k.p)) <= 0) {
- if (btree_id == i->btree_id)
- return i->k;
- break;
- }
-
- return NULL;
-}
-
#endif /* _BCACHEFS_BTREE_UPDATE_H */
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index c5c6d9d50598..64bed7aa3eb9 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -1417,24 +1417,22 @@ static int bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bpos pos = POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
- struct bkey_i *update = btree_trans_peek_updates(trans, BTREE_ID_alloc, pos);
+ struct bkey_s_c k;
int ret;
- bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
+ bch2_trans_iter_init(trans, iter, BTREE_ID_alloc,
+ POS(ptr->dev, PTR_BUCKET_NR(ca, ptr)),
+ BTREE_ITER_WITH_UPDATES|
BTREE_ITER_CACHED|
- BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
- ret = bch2_btree_iter_traverse(iter);
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
if (ret) {
bch2_trans_iter_exit(trans, iter);
return ret;
}
- *u = update && !bpos_cmp(update->k.p, pos)
- ? bch2_alloc_unpack(bkey_i_to_s_c(update))
- : alloc_mem_to_key(c, iter);
-
+ *u = bch2_alloc_unpack(k);
return 0;
}
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index 95e80dbfed4c..a1d89923d361 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -11,7 +11,7 @@
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
-#include <crypto/chacha20.h>
+#include <crypto/chacha.h>
#include <crypto/hash.h>
#include <crypto/poly1305.h>
#include <crypto/skcipher.h>
@@ -93,21 +93,21 @@ static void bch2_checksum_update(struct bch2_checksum_state *state, const void *
}
}
-static inline void do_encrypt_sg(struct crypto_skcipher *tfm,
+static inline void do_encrypt_sg(struct crypto_sync_skcipher *tfm,
struct nonce nonce,
struct scatterlist *sg, size_t len)
{
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
int ret;
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
ret = crypto_skcipher_encrypt(req);
BUG_ON(ret);
}
-static inline void do_encrypt(struct crypto_skcipher *tfm,
+static inline void do_encrypt(struct crypto_sync_skcipher *tfm,
struct nonce nonce,
void *buf, size_t len)
{
@@ -120,8 +120,8 @@ static inline void do_encrypt(struct crypto_skcipher *tfm,
int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
void *buf, size_t len)
{
- struct crypto_skcipher *chacha20 =
- crypto_alloc_skcipher("chacha20", 0, 0);
+ struct crypto_sync_skcipher *chacha20 =
+ crypto_alloc_sync_skcipher("chacha20", 0, 0);
int ret;
if (!chacha20) {
@@ -129,7 +129,8 @@ int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
return PTR_ERR(chacha20);
}
- ret = crypto_skcipher_setkey(chacha20, (void *) key, sizeof(*key));
+ ret = crypto_skcipher_setkey(&chacha20->base,
+ (void *) key, sizeof(*key));
if (ret) {
pr_err("crypto_skcipher_setkey() error: %i", ret);
goto err;
@@ -137,7 +138,7 @@ int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
do_encrypt(chacha20, nonce, buf, len);
err:
- crypto_free_skcipher(chacha20);
+ crypto_free_sync_skcipher(chacha20);
return ret;
}
@@ -230,7 +231,7 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
kunmap_atomic(p);
}
#else
- __bio_for_each_contig_segment(bv, bio, *iter, *iter)
+ __bio_for_each_bvec(bv, bio, *iter, *iter)
bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
bv.bv_len);
#endif
@@ -253,7 +254,7 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
kunmap_atomic(p);
}
#else
- __bio_for_each_contig_segment(bv, bio, *iter, *iter)
+ __bio_for_each_bvec(bv, bio, *iter, *iter)
crypto_shash_update(desc,
page_address(bv.bv_page) + bv.bv_offset,
bv.bv_len);
@@ -499,7 +500,7 @@ err:
static int bch2_alloc_ciphers(struct bch_fs *c)
{
if (!c->chacha20)
- c->chacha20 = crypto_alloc_skcipher("chacha20", 0, 0);
+ c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
if (IS_ERR(c->chacha20)) {
bch_err(c, "error requesting chacha20 module: %li",
PTR_ERR(c->chacha20));
@@ -582,7 +583,7 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed)
goto err;
}
- ret = crypto_skcipher_setkey(c->chacha20,
+ ret = crypto_skcipher_setkey(&c->chacha20->base,
(void *) &key.key, sizeof(key.key));
if (ret)
goto err;
@@ -610,7 +611,7 @@ void bch2_fs_encryption_exit(struct bch_fs *c)
if (!IS_ERR_OR_NULL(c->poly1305))
crypto_free_shash(c->poly1305);
if (!IS_ERR_OR_NULL(c->chacha20))
- crypto_free_skcipher(c->chacha20);
+ crypto_free_sync_skcipher(c->chacha20);
if (!IS_ERR_OR_NULL(c->sha256))
crypto_free_shash(c->sha256);
}
@@ -642,7 +643,7 @@ int bch2_fs_encryption_init(struct bch_fs *c)
if (ret)
goto out;
- ret = crypto_skcipher_setkey(c->chacha20,
+ ret = crypto_skcipher_setkey(&c->chacha20->base,
(void *) &key.key, sizeof(key.key));
if (ret)
goto out;
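
checksum.c moves from struct crypto_skcipher to struct crypto_sync_skcipher, whose on-stack requests never complete asynchronously, so no callback handling is needed. The calling convention in isolation, as a minimal sketch (chacha20_encrypt_buf() is hypothetical; the iv is the caller's nonce/counter and buf must be a slab or linearly mapped buffer):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int chacha20_encrypt_buf(const u8 *key, unsigned int keylen,
				u8 *iv, void *buf, size_t len)
{
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_sync_skcipher("chacha20", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The sync wrapper embeds a regular skcipher as ->base: */
	ret = crypto_skcipher_setkey(&tfm->base, key, keylen);
	if (!ret) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		sg_init_one(&sg, buf, len);
		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		ret = crypto_skcipher_encrypt(req);
	}

	crypto_free_sync_skcipher(tfm);
	return ret;
}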
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
index 5e0e77ca71a9..f5c1a609c5c4 100644
--- a/fs/bcachefs/checksum.h
+++ b/fs/bcachefs/checksum.h
@@ -7,7 +7,7 @@
#include "super-io.h"
#include <linux/crc64.h>
-#include <crypto/chacha20.h>
+#include <crypto/chacha.h>
static inline bool bch2_checksum_mergeable(unsigned type)
{
@@ -140,9 +140,9 @@ static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r)
/* for skipping ahead and encrypting/decrypting at an offset: */
static inline struct nonce nonce_add(struct nonce nonce, unsigned offset)
{
- EBUG_ON(offset & (CHACHA20_BLOCK_SIZE - 1));
+ EBUG_ON(offset & (CHACHA_BLOCK_SIZE - 1));
- le32_add_cpu(&nonce.d[0], offset / CHACHA20_BLOCK_SIZE);
+ le32_add_cpu(&nonce.d[0], offset / CHACHA_BLOCK_SIZE);
return nonce;
}
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index 691009fc2431..8e4179d8dc27 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -45,7 +45,7 @@ static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
struct bvec_iter iter;
void *expected_start = NULL;
- __bio_for_each_segment(bv, bio, iter, start) {
+ __bio_for_each_bvec(bv, bio, iter, start) {
if (expected_start &&
expected_start != page_address(bv.bv_page) + bv.bv_offset)
return false;
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 220ced2f9a17..9b45640e75dc 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -395,7 +395,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);
while (offset < bytes) {
- unsigned nr_iovecs = min_t(size_t, BIO_MAX_PAGES,
+ unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
DIV_ROUND_UP(bytes, PAGE_SIZE));
unsigned b = min_t(size_t, bytes - offset,
nr_iovecs << PAGE_SHIFT);
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 8f0b2a745064..3b9b96e5a0a2 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -285,28 +285,13 @@ static inline struct bch_page_state *bch2_page_state(struct page *page)
/* for newly allocated pages: */
static void __bch2_page_state_release(struct page *page)
{
- struct bch_page_state *s = __bch2_page_state(page);
-
- if (!s)
- return;
-
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
- kfree(s);
+ kfree(detach_page_private(page));
}
static void bch2_page_state_release(struct page *page)
{
- struct bch_page_state *s = bch2_page_state(page);
-
- if (!s)
- return;
-
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
- kfree(s);
+ EBUG_ON(!PageLocked(page));
+ __bch2_page_state_release(page);
}
/* for newly allocated pages: */
@@ -320,13 +305,7 @@ static struct bch_page_state *__bch2_page_state_create(struct page *page,
return NULL;
spin_lock_init(&s->lock);
- /*
- * migrate_page_move_mapping() assumes that pages with private data
- * have their count elevated by 1.
- */
- get_page(page);
- set_page_private(page, (unsigned long) s);
- SetPagePrivate(page);
+ attach_page_private(page, s);
return s;
}
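
The open-coded get_page()/set_page_private()/SetPagePrivate() sequences above (and their inverse in the release paths) collapse into attach_page_private()/detach_page_private(), which manage the page reference and PG_private flag in one place. The same pattern in isolation (my_page_state is a stand-in for bch_page_state):

#include <linux/pagemap.h>
#include <linux/slab.h>

struct my_page_state { int dummy; };

static struct my_page_state *my_page_state_create(struct page *page, gfp_t gfp)
{
	struct my_page_state *s = kzalloc(sizeof(*s), gfp);

	if (!s)
		return NULL;
	/* Takes a page reference and sets PG_private for us. */
	attach_page_private(page, s);
	return s;
}

static void my_page_state_release(struct page *page)
{
	/* Clears PG_private and drops the reference; kfree(NULL) is a no-op. */
	kfree(detach_page_private(page));
}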
@@ -878,18 +857,12 @@ int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
EBUG_ON(!PageLocked(page));
EBUG_ON(!PageLocked(newpage));
- ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+ ret = migrate_page_move_mapping(mapping, newpage, page, 0);
if (ret != MIGRATEPAGE_SUCCESS)
return ret;
- if (PagePrivate(page)) {
- ClearPagePrivate(page);
- get_page(newpage);
- set_page_private(newpage, page_private(page));
- set_page_private(page, 0);
- put_page(page);
- SetPagePrivate(newpage);
- }
+ if (PagePrivate(page))
+ attach_page_private(newpage, detach_page_private(page));
if (mode != MIGRATE_SYNC_NO_COPY)
migrate_page_copy(newpage, page);
@@ -903,10 +876,10 @@ int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
static void bch2_readpages_end_io(struct bio *bio)
{
+ struct bvec_iter_all iter;
struct bio_vec *bv;
- unsigned i;
- bio_for_each_segment_all(bv, bio, i) {
+ bio_for_each_segment_all(bv, bio, iter) {
struct page *page = bv->bv_page;
if (!bio->bi_status) {
@@ -925,31 +898,29 @@ struct readpages_iter {
struct address_space *mapping;
struct page **pages;
unsigned nr_pages;
- unsigned nr_added;
unsigned idx;
pgoff_t offset;
};
static int readpages_iter_init(struct readpages_iter *iter,
- struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
+ struct readahead_control *ractl)
{
+ unsigned i, nr_pages = readahead_count(ractl);
+
memset(iter, 0, sizeof(*iter));
- iter->mapping = mapping;
- iter->offset = list_last_entry(pages, struct page, lru)->index;
+ iter->mapping = ractl->mapping;
+ iter->offset = readahead_index(ractl);
+ iter->nr_pages = nr_pages;
iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!iter->pages)
return -ENOMEM;
- while (!list_empty(pages)) {
- struct page *page = list_last_entry(pages, struct page, lru);
-
- __bch2_page_state_create(page, __GFP_NOFAIL);
-
- iter->pages[iter->nr_pages++] = page;
- list_del(&page->lru);
+ nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
+ for (i = 0; i < nr_pages; i++) {
+ __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
+ put_page(iter->pages[i]);
}
return 0;
@@ -957,41 +928,9 @@ static int readpages_iter_init(struct readpages_iter *iter,
static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
- struct page *page;
- unsigned i;
- int ret;
-
- BUG_ON(iter->idx > iter->nr_added);
- BUG_ON(iter->nr_added > iter->nr_pages);
-
- if (iter->idx < iter->nr_added)
- goto out;
-
- while (1) {
- if (iter->idx == iter->nr_pages)
- return NULL;
-
- ret = add_to_page_cache_lru_vec(iter->mapping,
- iter->pages + iter->nr_added,
- iter->nr_pages - iter->nr_added,
- iter->offset + iter->nr_added,
- GFP_NOFS);
- if (ret > 0)
- break;
-
- page = iter->pages[iter->nr_added];
- iter->idx++;
- iter->nr_added++;
-
- __bch2_page_state_release(page);
- put_page(page);
- }
-
- iter->nr_added += ret;
+ if (iter->idx >= iter->nr_pages)
+ return NULL;
- for (i = iter->idx; i < iter->nr_added; i++)
- put_page(iter->pages[i]);
-out:
EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
return iter->pages[iter->idx];
@@ -1029,11 +968,8 @@ static void readpage_bio_extend(struct readpages_iter *iter,
if (!get_more)
break;
- rcu_read_lock();
- page = radix_tree_lookup(&iter->mapping->i_pages, page_offset);
- rcu_read_unlock();
-
- if (page && !radix_tree_exceptional_entry(page))
+ page = xa_load(&iter->mapping->i_pages, page_offset);
+ if (page && !xa_is_value(page))
break;
page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
@@ -1169,10 +1105,9 @@ err:
bch2_bkey_buf_exit(&sk, c);
}
-int bch2_readpages(struct file *file, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
+void bch2_readahead(struct readahead_control *ractl)
{
- struct bch_inode_info *inode = to_bch_ei(mapping->host);
+ struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
struct btree_trans trans;
@@ -1180,7 +1115,7 @@ int bch2_readpages(struct file *file, struct address_space *mapping,
struct readpages_iter readpages_iter;
int ret;
- ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages);
+ ret = readpages_iter_init(&readpages_iter, ractl);
BUG_ON(ret);
bch2_trans_init(&trans, c, 0, 0);
@@ -1192,7 +1127,7 @@ int bch2_readpages(struct file *file, struct address_space *mapping,
unsigned n = min_t(unsigned,
readpages_iter.nr_pages -
readpages_iter.idx,
- BIO_MAX_PAGES);
+ BIO_MAX_VECS);
struct bch_read_bio *rbio =
rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
opts);
@@ -1212,8 +1147,6 @@ int bch2_readpages(struct file *file, struct address_space *mapping,
bch2_trans_exit(&trans);
kfree(readpages_iter.pages);
-
- return 0;
}
static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
@@ -1308,36 +1241,37 @@ static void bch2_writepage_io_done(struct closure *cl)
struct bch_writepage_io, cl);
struct bch_fs *c = io->op.c;
struct bio *bio = &io->op.wbio.bio;
+ struct bvec_iter_all iter;
struct bio_vec *bvec;
- unsigned i, j;
+ unsigned i;
up(&io->op.c->io_in_flight);
if (io->op.error) {
set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter) {
struct bch_page_state *s;
SetPageError(bvec->bv_page);
- mapping_set_error(io->inode->v.i_mapping, -EIO);
+ mapping_set_error(bvec->bv_page->mapping, -EIO);
s = __bch2_page_state(bvec->bv_page);
spin_lock(&s->lock);
- for (j = 0; j < PAGE_SECTORS; j++)
- s->s[j].nr_replicas = 0;
+ for (i = 0; i < PAGE_SECTORS; i++)
+ s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
}
if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter) {
struct bch_page_state *s;
s = __bch2_page_state(bvec->bv_page);
spin_lock(&s->lock);
- for (j = 0; j < PAGE_SECTORS; j++)
- s->s[j].nr_replicas = 0;
+ for (i = 0; i < PAGE_SECTORS; i++)
+ s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
}
@@ -1361,7 +1295,7 @@ static void bch2_writepage_io_done(struct closure *cl)
*/
i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter) {
struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
if (atomic_dec_and_test(&s->write_count))
@@ -1395,7 +1329,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
{
struct bch_write_op *op;
- w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES,
+ w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS,
&c->writepage_bioset),
struct bch_writepage_io, op.wbio.bio);
@@ -1515,9 +1449,9 @@ do_io:
if (w->io &&
(w->io->op.res.nr_replicas != nr_replicas_this_write ||
- bio_full(&w->io->op.wbio.bio) ||
+ bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
- (BIO_MAX_PAGES * PAGE_SIZE) ||
+ (BIO_MAX_VECS * PAGE_SIZE) ||
bio_end_sector(&w->io->op.wbio.bio) != sector))
bch2_writepage_do_io(w);
@@ -1793,8 +1727,8 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
unsigned pg_len = min_t(unsigned, len - copied,
PAGE_SIZE - pg_offset);
- unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
- iter, pg_offset, pg_len);
+ unsigned pg_copied = copy_page_from_iter_atomic(page,
+ pg_offset, pg_len,iter);
if (!pg_copied)
break;
@@ -1807,7 +1741,6 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
}
flush_dcache_page(page);
- iov_iter_advance(iter, pg_copied);
copied += pg_copied;
if (pg_copied != pg_len)
@@ -1925,18 +1858,6 @@ again:
/* O_DIRECT reads */
-static void bio_release_pages(struct bio *bio, bool mark_dirty)
-{
- struct bio_vec *bvec;
- unsigned i;
-
- bio_for_each_segment_all(bvec, bio, i) {
- if (mark_dirty && !PageCompound(bvec->bv_page))
- set_page_dirty_lock(bvec->bv_page);
- put_page(bvec->bv_page);
- }
-}
-
static void bio_check_or_release(struct bio *bio, bool check_dirty)
{
if (check_dirty) {
@@ -2000,7 +1921,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
iter->count -= shorten;
bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
+ iov_iter_npages(iter, BIO_MAX_VECS),
&c->dio_read_bioset);
bio->bi_end_io = bch2_direct_IO_read_endio;
@@ -2035,7 +1956,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
goto start;
while (iter->count) {
bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
+ iov_iter_npages(iter, BIO_MAX_VECS),
&c->bio_read);
bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
@@ -2168,8 +2089,9 @@ static long bch2_dio_write_loop(struct dio_write *dio)
struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bio *bio = &dio->op.wbio.bio;
+ struct bvec_iter_all iter;
struct bio_vec *bv;
- unsigned i, unaligned, iter_count;
+ unsigned unaligned, iter_count;
bool sync = dio->sync, dropped_locks;
long ret;
@@ -2182,7 +2104,7 @@ static long bch2_dio_write_loop(struct dio_write *dio)
iter_count = dio->iter.count;
if (kthread)
- use_mm(dio->mm);
+ kthread_use_mm(dio->mm);
BUG_ON(current->faults_disabled_mapping);
current->faults_disabled_mapping = mapping;
@@ -2192,7 +2114,7 @@ static long bch2_dio_write_loop(struct dio_write *dio)
current->faults_disabled_mapping = NULL;
if (kthread)
- unuse_mm(dio->mm);
+ kthread_unuse_mm(dio->mm);
/*
* If the fault handler returned an error but also signalled
@@ -2289,8 +2211,9 @@ loop:
i_size_write(&inode->v, req->ki_pos);
spin_unlock(&inode->v.i_lock);
- bio_for_each_segment_all(bv, bio, i)
- put_page(bv->bv_page);
+ if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
+ bio_for_each_segment_all(bv, bio, iter)
+ put_page(bv->bv_page);
bio->bi_vcnt = 0;
if (dio->op.error) {
@@ -2314,8 +2237,9 @@ err:
if (dio->free_iov)
kfree(dio->iter.iov);
- bio_for_each_segment_all(bv, bio, i)
- put_page(bv->bv_page);
+ if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
+ bio_for_each_segment_all(bv, bio, iter)
+ put_page(bv->bv_page);
bio_put(bio);
/* inode->i_dio_count is our ref on inode and thus bch_fs */
@@ -2382,7 +2306,9 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
}
bio = bio_alloc_bioset(GFP_KERNEL,
- iov_iter_npages(iter, BIO_MAX_PAGES),
+ iov_iter_is_bvec(iter)
+ ? 0
+ : iov_iter_npages(iter, BIO_MAX_VECS),
&c->dio_write_bioset);
dio = container_of(bio, struct dio_write, op.wbio.bio);
init_completion(&dio->done);
@@ -2669,7 +2595,7 @@ static int bch2_extend(struct user_namespace *mnt_userns,
truncate_setsize(&inode->v, iattr->ia_size);
- return bch2_setattr_nonsize(inode, iattr);
+ return bch2_setattr_nonsize(mnt_userns, inode, iattr);
}
static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
@@ -2789,7 +2715,7 @@ int bch2_truncate(struct user_namespace *mnt_userns,
ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
mutex_unlock(&inode->ei_update_lock);
- ret = bch2_setattr_nonsize(inode, iattr);
+ ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
err:
bch2_pagecache_block_put(&inode->ei_pagecache_lock);
return ret;
@@ -3230,235 +3156,6 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
return ret;
}
-static int generic_access_check_limits(struct file *file, loff_t pos,
- loff_t *count)
-{
- struct inode *inode = file->f_mapping->host;
- loff_t max_size = inode->i_sb->s_maxbytes;
-
- if (!(file->f_flags & O_LARGEFILE))
- max_size = MAX_NON_LFS;
-
- if (unlikely(pos >= max_size))
- return -EFBIG;
- *count = min(*count, max_size - pos);
- return 0;
-}
-
-static int generic_write_check_limits(struct file *file, loff_t pos,
- loff_t *count)
-{
- loff_t limit = rlimit(RLIMIT_FSIZE);
-
- if (limit != RLIM_INFINITY) {
- if (pos >= limit) {
- send_sig(SIGXFSZ, current, 0);
- return -EFBIG;
- }
- *count = min(*count, limit - pos);
- }
-
- return generic_access_check_limits(file, pos, count);
-}
-
-static int generic_remap_checks(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *req_count, unsigned int remap_flags)
-{
- struct inode *inode_in = file_in->f_mapping->host;
- struct inode *inode_out = file_out->f_mapping->host;
- uint64_t count = *req_count;
- uint64_t bcount;
- loff_t size_in, size_out;
- loff_t bs = inode_out->i_sb->s_blocksize;
- int ret;
-
- /* The start of both ranges must be aligned to an fs block. */
- if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
- return -EINVAL;
-
- /* Ensure offsets don't wrap. */
- if (pos_in + count < pos_in || pos_out + count < pos_out)
- return -EINVAL;
-
- size_in = i_size_read(inode_in);
- size_out = i_size_read(inode_out);
-
- /* Dedupe requires both ranges to be within EOF. */
- if ((remap_flags & REMAP_FILE_DEDUP) &&
- (pos_in >= size_in || pos_in + count > size_in ||
- pos_out >= size_out || pos_out + count > size_out))
- return -EINVAL;
-
- /* Ensure the infile range is within the infile. */
- if (pos_in >= size_in)
- return -EINVAL;
- count = min(count, size_in - (uint64_t)pos_in);
-
- ret = generic_access_check_limits(file_in, pos_in, &count);
- if (ret)
- return ret;
-
- ret = generic_write_check_limits(file_out, pos_out, &count);
- if (ret)
- return ret;
-
- /*
- * If the user wanted us to link to the infile's EOF, round up to the
- * next block boundary for this check.
- *
- * Otherwise, make sure the count is also block-aligned, having
- * already confirmed the starting offsets' block alignment.
- */
- if (pos_in + count == size_in) {
- bcount = ALIGN(size_in, bs) - pos_in;
- } else {
- if (!IS_ALIGNED(count, bs))
- count = ALIGN_DOWN(count, bs);
- bcount = count;
- }
-
- /* Don't allow overlapped cloning within the same file. */
- if (inode_in == inode_out &&
- pos_out + bcount > pos_in &&
- pos_out < pos_in + bcount)
- return -EINVAL;
-
- /*
- * We shortened the request but the caller can't deal with that, so
- * bounce the request back to userspace.
- */
- if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
- return -EINVAL;
-
- *req_count = count;
- return 0;
-}
-
-static int generic_remap_check_len(struct inode *inode_in,
- struct inode *inode_out,
- loff_t pos_out,
- loff_t *len,
- unsigned int remap_flags)
-{
- u64 blkmask = i_blocksize(inode_in) - 1;
- loff_t new_len = *len;
-
- if ((*len & blkmask) == 0)
- return 0;
-
- if ((remap_flags & REMAP_FILE_DEDUP) ||
- pos_out + *len < i_size_read(inode_out))
- new_len &= ~blkmask;
-
- if (new_len == *len)
- return 0;
-
- if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
- *len = new_len;
- return 0;
- }
-
- return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
-}
-
-static int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *len, unsigned int remap_flags)
-{
- struct inode *inode_in = file_inode(file_in);
- struct inode *inode_out = file_inode(file_out);
- bool same_inode = (inode_in == inode_out);
- int ret;
-
- /* Don't touch certain kinds of inodes */
- if (IS_IMMUTABLE(inode_out))
- return -EPERM;
-
- if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
- return -ETXTBSY;
-
- /* Don't reflink dirs, pipes, sockets... */
- if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
- return -EISDIR;
- if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
- return -EINVAL;
-
- /* Zero length dedupe exits immediately; reflink goes to EOF. */
- if (*len == 0) {
- loff_t isize = i_size_read(inode_in);
-
- if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
- return 0;
- if (pos_in > isize)
- return -EINVAL;
- *len = isize - pos_in;
- if (*len == 0)
- return 0;
- }
-
- /* Check that we don't violate system file offset limits. */
- ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
- remap_flags);
- if (ret)
- return ret;
-
- /* Wait for the completion of any pending IOs on both files */
- inode_dio_wait(inode_in);
- if (!same_inode)
- inode_dio_wait(inode_out);
-
- ret = filemap_write_and_wait_range(inode_in->i_mapping,
- pos_in, pos_in + *len - 1);
- if (ret)
- return ret;
-
- ret = filemap_write_and_wait_range(inode_out->i_mapping,
- pos_out, pos_out + *len - 1);
- if (ret)
- return ret;
-
- /*
- * Check that the extents are the same.
- */
- if (remap_flags & REMAP_FILE_DEDUP) {
- bool is_same = false;
-
- ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
- inode_out, pos_out, *len, &is_same);
- if (ret)
- return ret;
- if (!is_same)
- return -EBADE;
- }
-
- ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
- remap_flags);
- if (ret)
- return ret;
-
- /* If can't alter the file contents, we're done. */
- if (!(remap_flags & REMAP_FILE_DEDUP)) {
- /* Update the timestamps, since we can alter file contents. */
- if (!(file_out->f_mode & FMODE_NOCMTIME)) {
- ret = file_update_time(file_out);
- if (ret)
- return ret;
- }
-
- /*
- * Clear the security bits if the process is not being run by
- * root. This keeps people from modifying setuid and setgid
- * binaries.
- */
- ret = file_remove_privs(file_out);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
struct file *file_dst, loff_t pos_dst,
loff_t len, unsigned remap_flags)
diff --git a/fs/bcachefs/fs-io.h b/fs/bcachefs/fs-io.h
index f9e7f49b13c7..b24efeaf343e 100644
--- a/fs/bcachefs/fs-io.h
+++ b/fs/bcachefs/fs-io.h
@@ -19,8 +19,7 @@ int bch2_writepage(struct page *, struct writeback_control *);
int bch2_readpage(struct file *, struct page *);
int bch2_writepages(struct address_space *, struct writeback_control *);
-int bch2_readpages(struct file *, struct address_space *,
- struct list_head *, unsigned);
+void bch2_readahead(struct readahead_control *);
int bch2_write_begin(struct file *, struct address_space *, loff_t,
unsigned, unsigned, struct page **, void **);
@@ -36,10 +35,6 @@ int bch2_truncate(struct user_namespace *,
struct bch_inode_info *, struct iattr *);
long bch2_fallocate_dispatch(struct file *, int, loff_t, loff_t);
-#define REMAP_FILE_ADVISORY (0)
-#define REMAP_FILE_DEDUP (1 << 0)
-#define REMAP_FILE_CAN_SHORTEN (1 << 1)
-
loff_t bch2_remap_file_range(struct file *, loff_t, struct file *,
loff_t, loff_t, unsigned);
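
bch2_readpages() becomes bch2_readahead() to match the VFS switch from the ->readpages() address_space operation (handed a list of pages not yet in the page cache) to ->readahead(), whose pages are already locked and inserted and are handed out by the readahead_control helpers. The minimal shape of such an op, assuming a hypothetical start_read_page() that submits the read and unlocks the page on completion:

#include <linux/pagemap.h>

static void myfs_readahead(struct readahead_control *ractl)
{
	struct page *page;

	/* Each page comes back locked, with an extra reference held. */
	while ((page = readahead_page(ractl))) {
		start_read_page(ractl->mapping->host, page);	/* hypothetical */
		put_page(page);		/* drop the submission reference */
	}
}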
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index a76017386593..9f329a624c12 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -85,7 +85,7 @@ static int bch2_ioc_setflags(struct bch_fs *c,
return ret;
inode_lock(&inode->v);
- if (!inode_owner_or_capable(&inode->v)) {
+ if (!inode_owner_or_capable(file_mnt_user_ns(file), &inode->v)) {
ret = -EACCES;
goto setflags_out;
}
@@ -156,7 +156,7 @@ static int bch2_ioc_fssetxattr(struct bch_fs *c,
return ret;
inode_lock(&inode->v);
- if (!inode_owner_or_capable(&inode->v)) {
+ if (!inode_owner_or_capable(file_mnt_user_ns(file), &inode->v)) {
ret = -EACCES;
goto err;
}
@@ -268,22 +268,20 @@ static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg)
down_write(&c->vfs_sb->s_umount);
switch (flags) {
- case FSOP_GOING_FLAGS_DEFAULT: {
- struct super_block *sb = freeze_bdev(c->vfs_sb->s_bdev);
+ case FSOP_GOING_FLAGS_DEFAULT:
+ ret = freeze_bdev(c->vfs_sb->s_bdev);
if (ret)
goto err;
- if (sb && !IS_ERR(sb)) {
- bch2_journal_flush(&c->journal);
- c->vfs_sb->s_flags |= SB_RDONLY;
- bch2_fs_emergency_read_only(c);
- thaw_bdev(c->vfs_sb->s_bdev, sb);
- }
+ bch2_journal_flush(&c->journal);
+ c->vfs_sb->s_flags |= SB_RDONLY;
+ bch2_fs_emergency_read_only(c);
+ thaw_bdev(c->vfs_sb->s_bdev);
break;
- }
case FSOP_GOING_FLAGS_LOGFLUSH:
bch2_journal_flush(&c->journal);
+ fallthrough;
case FSOP_GOING_FLAGS_NOLOGFLUSH:
c->vfs_sb->s_flags |= SB_RDONLY;
@@ -379,7 +377,8 @@ retry:
goto err3;
}
- error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
+ error = inode_permission(file_mnt_user_ns(filp),
+ dir, MAY_WRITE | MAY_EXEC);
if (error)
goto err3;
@@ -394,7 +393,7 @@ retry:
!arg.src_ptr)
snapshot_src.subvol = to_bch_ei(dir)->ei_inode.bi_subvol;
- inode = __bch2_create(NULL, to_bch_ei(dir),
+ inode = __bch2_create(file_mnt_user_ns(filp), to_bch_ei(dir),
dst_dentry, arg.mode|S_IFDIR,
0, snapshot_src, create_flags);
error = PTR_ERR_OR_ZERO(inode);
@@ -443,8 +442,10 @@ static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
dir = path.dentry->d_parent->d_inode;
ret = __bch2_unlink(dir, path.dentry, true);
- if (!ret)
+ if (!ret) {
+ fsnotify_rmdir(dir, path.dentry);
d_delete(path.dentry);
+ }
path_put(&path);
return ret;
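
The FSOP_GOING_FLAGS_DEFAULT case is adjusted for the newer block-layer freeze API: freeze_bdev() returns 0 or -errno instead of a struct super_block pointer, and thaw_bdev() takes only the block device. The new calling convention in isolation (quiesce_and_work() is hypothetical):

#include <linux/blkdev.h>

static int quiesce_and_work(struct block_device *bdev)
{
	int ret = freeze_bdev(bdev);	/* 0 or -errno */

	if (ret)
		return ret;

	/* ...do work while writes to the device are held off... */

	return thaw_bdev(bdev);		/* single-argument form */
}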
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 60e6ba4918c4..472c03d2adb5 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -263,7 +263,6 @@ __bch2_create(struct user_namespace *mnt_userns,
unsigned flags)
{
struct bch_fs *c = dir->v.i_sb->s_fs_info;
- struct user_namespace *ns = dir->v.i_sb->s_user_ns;
struct btree_trans trans;
struct bch_inode_unpacked dir_u;
struct bch_inode_info *inode, *old;
@@ -304,8 +303,8 @@ retry:
inode_inum(dir), &dir_u, &inode_u,
!(flags & BCH_CREATE_TMPFILE)
? &dentry->d_name : NULL,
- from_kuid(ns, current_fsuid()),
- from_kgid(ns, current_fsgid()),
+ from_kuid(mnt_userns, current_fsuid()),
+ from_kgid(mnt_userns, current_fsgid()),
mode, rdev,
default_acl, acl, snapshot_src, flags) ?:
bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1,
@@ -409,11 +408,12 @@ static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
return d_splice_alias(vinode, dentry);
}
-static int bch2_mknod(struct inode *vdir, struct dentry *dentry,
+static int bch2_mknod(struct user_namespace *mnt_userns,
+ struct inode *vdir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
struct bch_inode_info *inode =
- __bch2_create(NULL, to_bch_ei(vdir), dentry, mode, rdev,
+ __bch2_create(mnt_userns, to_bch_ei(vdir), dentry, mode, rdev,
(subvol_inum) { 0 }, 0);
if (IS_ERR(inode))
@@ -423,10 +423,11 @@ static int bch2_mknod(struct inode *vdir, struct dentry *dentry,
return 0;
}
-static int bch2_create(struct inode *vdir, struct dentry *dentry,
+static int bch2_create(struct user_namespace *mnt_userns,
+ struct inode *vdir, struct dentry *dentry,
umode_t mode, bool excl)
{
- return bch2_mknod(vdir, dentry, mode|S_IFREG, 0);
+ return bch2_mknod(mnt_userns, vdir, dentry, mode|S_IFREG, 0);
}
static int __bch2_link(struct bch_fs *c,
@@ -515,14 +516,15 @@ static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
return __bch2_unlink(vdir, dentry, false);
}
-static int bch2_symlink(struct inode *vdir, struct dentry *dentry,
+static int bch2_symlink(struct user_namespace *mnt_userns,
+ struct inode *vdir, struct dentry *dentry,
const char *symname)
{
struct bch_fs *c = vdir->i_sb->s_fs_info;
struct bch_inode_info *dir = to_bch_ei(vdir), *inode;
int ret;
- inode = __bch2_create(NULL, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
+ inode = __bch2_create(mnt_userns, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
(subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
if (unlikely(IS_ERR(inode)))
return PTR_ERR(inode);
@@ -549,12 +551,14 @@ err:
return ret;
}
-static int bch2_mkdir(struct inode *vdir, struct dentry *dentry, umode_t mode)
+static int bch2_mkdir(struct user_namespace *mnt_userns,
+ struct inode *vdir, struct dentry *dentry, umode_t mode)
{
- return bch2_mknod(vdir, dentry, mode|S_IFDIR, 0);
+ return bch2_mknod(mnt_userns, vdir, dentry, mode|S_IFDIR, 0);
}
-static int bch2_rename2(struct inode *src_vdir, struct dentry *src_dentry,
+static int bch2_rename2(struct user_namespace *mnt_userns,
+ struct inode *src_vdir, struct dentry *src_dentry,
struct inode *dst_vdir, struct dentry *dst_dentry,
unsigned flags)
{
@@ -660,7 +664,8 @@ err:
return ret;
}
-static void bch2_setattr_copy(struct bch_inode_info *inode,
+static void bch2_setattr_copy(struct user_namespace *mnt_userns,
+ struct bch_inode_info *inode,
struct bch_inode_unpacked *bi,
struct iattr *attr)
{
@@ -668,9 +673,9 @@ static void bch2_setattr_copy(struct bch_inode_info *inode,
unsigned int ia_valid = attr->ia_valid;
if (ia_valid & ATTR_UID)
- bi->bi_uid = from_kuid(c->vfs_sb->s_user_ns, attr->ia_uid);
+ bi->bi_uid = from_kuid(mnt_userns, attr->ia_uid);
if (ia_valid & ATTR_GID)
- bi->bi_gid = from_kgid(c->vfs_sb->s_user_ns, attr->ia_gid);
+ bi->bi_gid = from_kgid(mnt_userns, attr->ia_gid);
if (ia_valid & ATTR_SIZE)
bi->bi_size = attr->ia_size;
@@ -689,13 +694,14 @@ static void bch2_setattr_copy(struct bch_inode_info *inode,
: inode->v.i_gid;
if (!in_group_p(gid) &&
- !capable_wrt_inode_uidgid(&inode->v, CAP_FSETID))
+ !capable_wrt_inode_uidgid(mnt_userns, &inode->v, CAP_FSETID))
mode &= ~S_ISGID;
bi->bi_mode = mode;
}
}
-int bch2_setattr_nonsize(struct bch_inode_info *inode,
+int bch2_setattr_nonsize(struct user_namespace *mnt_userns,
+ struct bch_inode_info *inode,
struct iattr *attr)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
@@ -732,7 +738,7 @@ retry:
if (ret)
goto btree_err;
- bch2_setattr_copy(inode, &inode_u, attr);
+ bch2_setattr_copy(mnt_userns, inode, &inode_u, attr);
if (attr->ia_valid & ATTR_MODE) {
ret = bch2_acl_chmod(&trans, inode_inum(inode), &inode_u,
@@ -764,7 +770,8 @@ err:
return ret;
}
-static int bch2_getattr(const struct path *path, struct kstat *stat,
+static int bch2_getattr(struct user_namespace *mnt_userns,
+ const struct path *path, struct kstat *stat,
u32 request_mask, unsigned query_flags)
{
struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry));
@@ -804,26 +811,28 @@ static int bch2_getattr(const struct path *path, struct kstat *stat,
return 0;
}
-static int bch2_setattr(struct dentry *dentry, struct iattr *iattr)
+static int bch2_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *iattr)
{
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
int ret;
lockdep_assert_held(&inode->v.i_rwsem);
- ret = setattr_prepare(dentry, iattr);
+ ret = setattr_prepare(mnt_userns, dentry, iattr);
if (ret)
return ret;
return iattr->ia_valid & ATTR_SIZE
- ? bch2_truncate(NULL, inode, iattr)
- : bch2_setattr_nonsize(inode, iattr);
+ ? bch2_truncate(mnt_userns, inode, iattr)
+ : bch2_setattr_nonsize(mnt_userns, inode, iattr);
}
-static int bch2_tmpfile(struct inode *vdir, struct dentry *dentry, umode_t mode)
+static int bch2_tmpfile(struct user_namespace *mnt_userns,
+ struct inode *vdir, struct dentry *dentry, umode_t mode)
{
struct bch_inode_info *inode =
- __bch2_create(NULL, to_bch_ei(vdir), dentry, mode, 0,
+ __bch2_create(mnt_userns, to_bch_ei(vdir), dentry, mode, 0,
(subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
if (IS_ERR(inode))
@@ -902,6 +911,10 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
u32 snapshot;
int ret = 0;
+ ret = fiemap_prep(&ei->v, info, start, &len, FIEMAP_FLAG_SYNC);
+ if (ret)
+ return ret;
+
if (start + len < start)
return -EINVAL;
@@ -1017,15 +1030,6 @@ static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
return bch2_readdir(c, inode_inum(inode), ctx);
}
-static int bch2_clone_file_range(struct file *file_src, loff_t pos_src,
- struct file *file_dst, loff_t pos_dst,
- u64 len)
-{
- return bch2_remap_file_range(file_src, pos_src,
- file_dst, pos_dst,
- len, 0);
-}
-
static const struct file_operations bch_file_operations = {
.llseek = bch2_llseek,
.read_iter = bch2_read_iter,
@@ -1040,7 +1044,7 @@ static const struct file_operations bch_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = bch2_compat_fs_ioctl,
#endif
- .clone_file_range = bch2_clone_file_range,
+ .remap_file_range = bch2_remap_file_range,
};
static const struct inode_operations bch_file_inode_operations = {
@@ -1110,7 +1114,7 @@ static const struct address_space_operations bch_address_space_operations = {
.writepage = bch2_writepage,
.readpage = bch2_readpage,
.writepages = bch2_writepages,
- .readpages = bch2_readpages,
+ .readahead = bch2_readahead,
.set_page_dirty = __set_page_dirty_nobuffers,
.write_begin = bch2_write_begin,
.write_end = bch2_write_end,
@@ -1567,13 +1571,14 @@ static int bch2_sync_fs(struct super_block *sb, int wait)
static struct bch_fs *bch2_path_to_fs(const char *path)
{
struct bch_fs *c;
- struct block_device *bdev = lookup_bdev(path);
+ dev_t dev;
+ int ret;
- if (IS_ERR(bdev))
- return ERR_CAST(bdev);
+ ret = lookup_bdev(path, &dev);
+ if (ret)
+ return ERR_PTR(ret);
- c = bch2_dev_to_fs(bdev->bd_dev);
- bdput(bdev);
+ c = bch2_dev_to_fs(dev);
if (c)
closure_put(&c->cl);
return c ?: ERR_PTR(-ENOENT);
@@ -1829,6 +1834,8 @@ got_sb:
sb->s_xattr = bch2_xattr_handlers;
sb->s_magic = BCACHEFS_STATFS_MAGIC;
sb->s_time_gran = c->sb.nsec_per_time_unit;
+ sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
+ sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec);
c->vfs_sb = sb;
strlcpy(sb->s_id, c->name, sizeof(sb->s_id));
@@ -1836,9 +1843,7 @@ got_sb:
if (ret)
goto err_put_super;
- sb->s_bdi->congested_fn = bch2_congested;
- sb->s_bdi->congested_data = c;
- sb->s_bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
+ sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
for_each_online_member(ca, c, i) {
struct block_device *bdev = ca->disk_sb.bdev;
@@ -1857,7 +1862,7 @@ got_sb:
sb->s_flags |= SB_POSIXACL;
#endif
- sb->s_shrink.seeks = 1;
+ sb->s_shrink.seeks = 0;
vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM);
if (IS_ERR(vinode)) {
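
Most of the fs.c churn above is mechanical plumbing for idmapped mounts: every inode operation now receives a struct user_namespace *mnt_userns, which is forwarded to setattr_prepare(), capable_wrt_inode_uidgid(), posix_acl_update_mode() and from_kuid()/from_kgid() in place of the superblock's s_user_ns. The general shape, with a hypothetical myfs_setattr():

#include <linux/fs.h>

static int myfs_setattr(struct user_namespace *mnt_userns,
			struct dentry *dentry, struct iattr *iattr)
{
	int ret;

	/* Permission checks are made relative to the mount's idmapping. */
	ret = setattr_prepare(mnt_userns, dentry, iattr);
	if (ret)
		return ret;

	/*
	 * On-disk ids are then derived with from_kuid(mnt_userns, iattr->ia_uid)
	 * and from_kgid(mnt_userns, iattr->ia_gid), as in the hunks above.
	 */
	/* ...apply the attributes and write back the inode... */
	return 0;
}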
diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h
index b5bc70afb100..b2211ec7f302 100644
--- a/fs/bcachefs/fs.h
+++ b/fs/bcachefs/fs.h
@@ -186,7 +186,8 @@ void bch2_inode_update_after_write(struct btree_trans *,
int __must_check bch2_write_inode(struct bch_fs *, struct bch_inode_info *,
inode_set_fn, void *, unsigned);
-int bch2_setattr_nonsize(struct bch_inode_info *,
+int bch2_setattr_nonsize(struct user_namespace *,
+ struct bch_inode_info *,
struct iattr *);
int __bch2_unlink(struct inode *, struct dentry *, bool);
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 69b4136364c3..43b6159be01b 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -2052,8 +2052,8 @@ static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
if (inum < range_start || inum >= range_end)
return;
- link = bsearch(&key, links->d, links->nr,
- sizeof(links->d[0]), nlink_cmp);
+ link = __inline_bsearch(&key, links->d, links->nr,
+ sizeof(links->d[0]), nlink_cmp);
if (!link)
return;
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 21e1e3956f10..73558cd00350 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -136,10 +136,10 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
+ struct bvec_iter_all iter;
struct bio_vec *bv;
- unsigned i;
- bio_for_each_segment_all(bv, bio, i)
+ bio_for_each_segment_all(bv, bio, iter)
if (bv->bv_page != ZERO_PAGE(0))
mempool_free(bv->bv_page, &c->bio_bounce_pages);
bio->bi_vcnt = 0;
@@ -699,7 +699,7 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
? ((unsigned long) buf & (PAGE_SIZE - 1))
: 0), PAGE_SIZE);
- pages = min_t(unsigned, pages, BIO_MAX_PAGES);
+ pages = min(pages, BIO_MAX_VECS);
bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
wbio = wbio_init(bio);
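
The bio_for_each_segment_all() call sites in io.c, fs-io.c and move.c change because the iterator's third argument is now a struct bvec_iter_all rather than an integer index (and BIO_MAX_PAGES is renamed BIO_MAX_VECS). In isolation:

#include <linux/bio.h>

/* Drop the page references held by a bio; illustrative only. */
static void put_bio_pages(struct bio *bio)
{
	struct bvec_iter_all iter;	/* was: unsigned i */
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter)
		put_page(bv->bv_page);
}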
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index 62f9aec4a427..b298873212d2 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -278,7 +278,7 @@ static inline void bch2_journal_res_put(struct journal *j,
if (!res->ref)
return;
- lock_release(&j->res_map, 0, _THIS_IP_);
+ lock_release(&j->res_map, _THIS_IP_);
while (res->u64s)
bch2_journal_add_entry(j, res,
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index b9bf381ef15e..52a3935cff53 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -11,7 +11,6 @@
#include <linux/kthread.h>
#include <linux/sched/mm.h>
-#include <linux/sched/task.h>
#include <trace/events/bcachefs.h>
/* Free space calculations: */
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 9e6db2917a19..3e3dcec327a0 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -427,12 +427,12 @@ static void move_free(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
struct moving_context *ctxt = io->write.ctxt;
+ struct bvec_iter_all iter;
struct bio_vec *bv;
- unsigned i;
bch2_disk_reservation_put(io->write.op.c, &io->write.op.res);
- bio_for_each_segment_all(bv, &io->write.op.wbio.bio, i)
+ bio_for_each_segment_all(bv, &io->write.op.wbio.bio, iter)
if (bv->bv_page)
__free_page(bv->bv_page);
@@ -785,6 +785,14 @@ out:
return ret;
}
+inline void bch_move_stats_init(struct bch_move_stats *stats, char *name)
+{
+ memset(stats, 0, sizeof(*stats));
+
+ scnprintf(stats->name, sizeof(stats->name),
+ "%s", name);
+}
+
static inline void progress_list_add(struct bch_fs *c,
struct bch_move_stats *stats)
{
diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h
index 98323ad93e7c..2a789a1158ca 100644
--- a/fs/bcachefs/move.h
+++ b/fs/bcachefs/move.h
@@ -66,13 +66,8 @@ int bch2_data_job(struct bch_fs *,
struct bch_move_stats *,
struct bch_ioctl_data);
-static inline void bch_move_stats_init(struct bch_move_stats *stats, char *name)
-{
- memset(stats, 0, sizeof(*stats));
-
- scnprintf(stats->name, sizeof(stats->name),
- "%s", name);
-}
+inline void bch_move_stats_init(struct bch_move_stats *stats,
+ char *name);
#endif /* _BCACHEFS_MOVE_H */
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 5de733b95aa4..57d636740d2f 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -13,7 +13,7 @@
#include <linux/crc32c.h>
#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
static inline enum bch_str_hash_type
bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 6e2e077f5f8d..49dafdad77cd 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -288,7 +288,7 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out)
block_size = le16_to_cpu(sb->block_size);
if (block_size > PAGE_SECTORS) {
- pr_buf(out, "Block size too big (got %u, max %lu)",
+ pr_buf(out, "Block size too big (got %u, max %u)",
block_size, PAGE_SECTORS);
return -EINVAL;
}
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index e706b203a030..586ba60d03ea 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -167,44 +167,6 @@ static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
&c->dev_usage_journal_res, u64s * nr);
}
-int bch2_congested(void *data, int bdi_bits)
-{
- struct bch_fs *c = data;
- struct backing_dev_info *bdi;
- struct bch_dev *ca;
- unsigned i;
- int ret = 0;
-
- rcu_read_lock();
- if (bdi_bits & (1 << WB_sync_congested)) {
- /* Reads - check all devices: */
- for_each_readable_member(ca, c, i) {
- bdi = ca->disk_sb.bdev->bd_bdi;
-
- if (bdi_congested(bdi, bdi_bits)) {
- ret = 1;
- break;
- }
- }
- } else {
- const struct bch_devs_mask *devs =
- bch2_target_to_mask(c, c->opts.foreground_target) ?:
- &c->rw_devs[BCH_DATA_user];
-
- for_each_member_device_rcu(ca, c, i, devs) {
- bdi = ca->disk_sb.bdev->bd_bdi;
-
- if (bdi_congested(bdi, bdi_bits)) {
- ret = 1;
- break;
- }
- }
- }
- rcu_read_unlock();
-
- return ret;
-}
-
/* Filesystem RO/RW: */
/*
@@ -577,8 +539,7 @@ void __bch2_fs_stop(struct bch_fs *c)
for_each_member_device(ca, c, i)
if (ca->kobj.state_in_sysfs &&
ca->disk_sb.bdev)
- sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,
- "bcachefs");
+ sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
if (c->kobj.state_in_sysfs)
kobject_del(&c->kobj);
@@ -1085,8 +1046,7 @@ static void bch2_dev_free(struct bch_dev *ca)
if (ca->kobj.state_in_sysfs &&
ca->disk_sb.bdev)
- sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,
- "bcachefs");
+ sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
if (ca->kobj.state_in_sysfs)
kobject_del(&ca->kobj);
@@ -1122,10 +1082,7 @@ static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
wait_for_completion(&ca->io_ref_completion);
if (ca->kobj.state_in_sysfs) {
- struct kobject *block =
- &part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;
-
- sysfs_remove_link(block, "bcachefs");
+ sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
sysfs_remove_link(&ca->kobj, "block");
}
@@ -1162,12 +1119,12 @@ static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
}
if (ca->disk_sb.bdev) {
- struct kobject *block =
- &part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;
+ struct kobject *block = bdev_kobj(ca->disk_sb.bdev);
ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
if (ret)
return ret;
+
ret = sysfs_create_link(&ca->kobj, block, "block");
if (ret)
return ret;
@@ -1917,20 +1874,23 @@ err:
/* return with ref on ca->ref: */
struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *path)
{
- struct block_device *bdev = lookup_bdev(path);
struct bch_dev *ca;
+ dev_t dev;
unsigned i;
+ int ret;
- if (IS_ERR(bdev))
- return ERR_CAST(bdev);
+ ret = lookup_bdev(path, &dev);
+ if (ret)
+ return ERR_PTR(ret);
- for_each_member_device(ca, c, i)
- if (ca->disk_sb.bdev == bdev)
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i, NULL)
+ if (ca->dev == dev)
goto found;
-
ca = ERR_PTR(-ENOENT);
found:
- bdput(bdev);
+ rcu_read_unlock();
+
return ca;
}
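
bch2_path_to_fs() and bch2_dev_lookup() switch to the newer helpers: lookup_bdev() now returns 0 or -errno and reports the device number through a dev_t pointer (so there is no block_device reference to drop with bdput()), and sysfs links are made against bdev_kobj() rather than the removed bd_part kobject. The lookup_bdev() convention in isolation (path_to_devt() is hypothetical):

#include <linux/blkdev.h>
#include <linux/kdev_t.h>
#include <linux/printk.h>

static int path_to_devt(const char *path, dev_t *devt)
{
	int ret = lookup_bdev(path, devt);	/* no block_device ref is taken */

	if (ret)
		return ret;

	pr_info("%s is device %u:%u\n", path, MAJOR(*devt), MINOR(*devt));
	return 0;
}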
diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h
index 7e118244abe6..3f24ca5a853d 100644
--- a/fs/bcachefs/super.h
+++ b/fs/bcachefs/super.h
@@ -217,7 +217,6 @@ static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
struct bch_fs *bch2_dev_to_fs(dev_t);
struct bch_fs *bch2_uuid_to_fs(uuid_le);
-int bch2_congested(void *, int);
bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
enum bch_member_state, int);
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index df8c5d52f98a..3196bc303182 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -18,9 +18,6 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
-#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - 9)
-#define PAGE_SECTORS (1UL << PAGE_SECTORS_SHIFT)
-
struct closure;
#ifdef CONFIG_BCACHEFS_DEBUG
@@ -88,7 +85,7 @@ static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
{
return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
get_order(size)) ?:
- __vmalloc(size, gfp_mask, PAGE_KERNEL);
+ __vmalloc(size, gfp_mask);
}
static inline void kvpfree(void *p, size_t size)
@@ -653,35 +650,6 @@ static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
memset(s + bytes, c, rem);
}
-static inline struct bio_vec next_contig_bvec(struct bio *bio,
- struct bvec_iter *iter)
-{
- struct bio_vec bv = bio_iter_iovec(bio, *iter);
-
- bio_advance_iter(bio, iter, bv.bv_len);
-#ifndef CONFIG_HIGHMEM
- while (iter->bi_size) {
- struct bio_vec next = bio_iter_iovec(bio, *iter);
-
- if (page_address(bv.bv_page) + bv.bv_offset + bv.bv_len !=
- page_address(next.bv_page) + next.bv_offset)
- break;
-
- bv.bv_len += next.bv_len;
- bio_advance_iter(bio, iter, next.bv_len);
- }
-#endif
- return bv;
-}
-
-#define __bio_for_each_contig_segment(bv, bio, iter, start) \
- for (iter = (start); \
- (iter).bi_size && \
- ((bv = next_contig_bvec((bio), &(iter))), 1);)
-
-#define bio_for_each_contig_segment(bv, bio, iter) \
- __bio_for_each_contig_segment(bv, bio, iter, (bio)->bi_iter)
-
void sort_cmp_size(void *base, size_t num, size_t size,
int (*cmp_func)(const void *, const void *, size_t),
void (*swap_func)(void *, void *, size_t));
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 7dec2f5e573e..4d7db64e3ef3 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -359,6 +359,7 @@ static int bch2_xattr_get_handler(const struct xattr_handler *handler,
}
static int bch2_xattr_set_handler(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *vinode,
const char *name, const void *value,
size_t size, int flags)
@@ -491,6 +492,7 @@ static int inode_opt_set_fn(struct bch_inode_info *inode,
}
static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler,
+ struct user_namespace *mnt_userns,
struct dentry *dentry, struct inode *vinode,
const char *name, const void *value,
size_t size, int flags)
diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h
index 1e82ff890a0c..8f10d13b27d5 100644
--- a/include/trace/events/bcachefs.h
+++ b/include/trace/events/bcachefs.h
@@ -71,10 +71,10 @@ DECLARE_EVENT_CLASS(bio,
),
TP_fast_assign(
- __entry->dev = bio->bi_disk ? bio_dev(bio) : 0;
+ __entry->dev = bio->bi_bdev ? bio_dev(bio) : 0;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
- blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
),
TP_printk("%d,%d %s %llu + %u",