Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/block-group.c       28
-rw-r--r--  fs/btrfs/free-space-cache.c   8
-rw-r--r--  fs/btrfs/fs.h                 7
-rw-r--r--  fs/btrfs/inode.c              7
-rw-r--r--  fs/btrfs/space-info.c        42
-rw-r--r--  fs/btrfs/space-info.h         2
-rw-r--r--  fs/btrfs/volumes.c            3
-rw-r--r--  fs/btrfs/zoned.c             45
-rw-r--r--  fs/crypto/keyring.c          23
-rw-r--r--  fs/gfs2/dentry.c             18
-rw-r--r--  fs/lockd/clnt4xdr.c           9
-rw-r--r--  fs/lockd/xdr4.c              13
-rw-r--r--  fs/nfs/Kconfig                2
-rw-r--r--  fs/nfs/dir.c                  2
-rw-r--r--  fs/nfs/read.c                 3
-rw-r--r--  fs/nfsd/Kconfig               2
-rw-r--r--  fs/nfsd/vfs.c                 9
-rw-r--r--  fs/super.c                   15
-rw-r--r--  fs/verity/enable.c           25
-rw-r--r--  fs/verity/verify.c           12
-rw-r--r--  fs/zonefs/file.c              4
21 files changed, 162 insertions, 117 deletions
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 0ef8b8926bfa..5fc670c27f86 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1175,14 +1175,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
< block_group->zone_unusable);
WARN_ON(block_group->space_info->disk_total
< block_group->length * factor);
- WARN_ON(test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
- &block_group->runtime_flags) &&
- block_group->space_info->active_total_bytes
- < block_group->length);
}
block_group->space_info->total_bytes -= block_group->length;
- if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
- block_group->space_info->active_total_bytes -= block_group->length;
block_group->space_info->bytes_readonly -=
(block_group->length - block_group->zone_unusable);
block_group->space_info->bytes_zone_unusable -=
@@ -3476,6 +3470,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&info->delalloc_root_lock);
while (total) {
+ struct btrfs_space_info *space_info;
bool reclaim = false;
cache = btrfs_lookup_block_group(info, bytenr);
@@ -3483,6 +3478,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
ret = -ENOENT;
break;
}
+ space_info = cache->space_info;
factor = btrfs_bg_type_to_factor(cache->flags);
/*
@@ -3497,7 +3493,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
byte_in_group = bytenr - cache->start;
WARN_ON(byte_in_group > cache->length);
- spin_lock(&cache->space_info->lock);
+ spin_lock(&space_info->lock);
spin_lock(&cache->lock);
if (btrfs_test_opt(info, SPACE_CACHE) &&
@@ -3510,24 +3506,24 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
old_val += num_bytes;
cache->used = old_val;
cache->reserved -= num_bytes;
- cache->space_info->bytes_reserved -= num_bytes;
- cache->space_info->bytes_used += num_bytes;
- cache->space_info->disk_used += num_bytes * factor;
+ space_info->bytes_reserved -= num_bytes;
+ space_info->bytes_used += num_bytes;
+ space_info->disk_used += num_bytes * factor;
spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
+ spin_unlock(&space_info->lock);
} else {
old_val -= num_bytes;
cache->used = old_val;
cache->pinned += num_bytes;
- btrfs_space_info_update_bytes_pinned(info,
- cache->space_info, num_bytes);
- cache->space_info->bytes_used -= num_bytes;
- cache->space_info->disk_used -= num_bytes * factor;
+ btrfs_space_info_update_bytes_pinned(info, space_info,
+ num_bytes);
+ space_info->bytes_used -= num_bytes;
+ space_info->disk_used -= num_bytes * factor;
reclaim = should_reclaim_block_group(cache, num_bytes);
spin_unlock(&cache->lock);
- spin_unlock(&cache->space_info->lock);
+ spin_unlock(&space_info->lock);
set_extent_dirty(&trans->transaction->pinned_extents,
bytenr, bytenr + num_bytes - 1,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 0d250d052487..d84cef89cdff 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2693,8 +2693,13 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
spin_lock(&ctl->tree_lock);
+ /* Count initial region as zone_unusable until it gets activated. */
if (!used)
to_free = size;
+ else if (initial &&
+ test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
+ (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
+ to_free = 0;
else if (initial)
to_free = block_group->zone_capacity;
else if (offset >= block_group->alloc_offset)
@@ -2722,7 +2727,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
reclaimable_unusable = block_group->zone_unusable -
(block_group->length - block_group->zone_capacity);
/* All the region is now unusable. Mark it as unused and reclaim */
- if (block_group->zone_unusable == block_group->length) {
+ if (block_group->zone_unusable == block_group->length &&
+ block_group->alloc_offset) {
btrfs_mark_bg_unused(block_group);
} else if (bg_reclaim_threshold &&
reclaimable_unusable >=
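
The to_free selection above reads as a small decision ladder. A minimal standalone sketch follows (simplified, hypothetical types standing in for the kernel structures; not the kernel code itself): the initial region of a metadata/system block group on a filesystem with active zone tracking stays accounted as zone_unusable (to_free = 0) until its zone is activated.

/*
 * Sketch of the to_free decision in __btrfs_add_free_space_zoned().
 * Fields are simplified stand-ins for the kernel's block group state.
 */
#include <stdbool.h>
#include <stdint.h>

struct bg_sketch {
	uint64_t used;           /* bytes already allocated */
	uint64_t zone_capacity;  /* writable capacity of the zone */
	uint64_t alloc_offset;   /* current write pointer */
	bool meta_or_sys;        /* METADATA or SYSTEM block group */
	bool zone_tracking;      /* BTRFS_FS_ACTIVE_ZONE_TRACKING set */
};

static uint64_t to_free_sketch(const struct bg_sketch *bg, bool initial,
			       uint64_t offset, uint64_t size)
{
	if (bg->used == 0)
		return size;
	if (initial && bg->zone_tracking && bg->meta_or_sys)
		return 0;  /* stays zone_unusable until activation */
	if (initial)
		return bg->zone_capacity;
	if (offset >= bg->alloc_offset)
		return size;
	/* Partial-overlap branches of the real function are elided here. */
	return 0;
}
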
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index 4c477eae6891..24cd49229408 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -120,11 +120,8 @@ enum {
/* Indicate that we want to commit the transaction. */
BTRFS_FS_NEED_TRANS_COMMIT,
- /*
- * Indicate metadata over-commit is disabled. This is set when active
- * zone tracking is needed.
- */
- BTRFS_FS_NO_OVERCOMMIT,
+ /* This is set when active zone tracking is needed. */
+ BTRFS_FS_ACTIVE_ZONE_TRACKING,
/*
* Indicate if we have some features changed, this is mostly for
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6c18dc9a1831..957e4d76a7b6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5421,8 +5421,13 @@ static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
return -ENOMEM;
ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
- if (ret)
+ if (ret < 0)
goto out;
+ /*
+ * fscrypt_setup_filename() should never return a positive value, but
+ * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
+ */
+ ASSERT(ret == 0);
/* This needs to handle no-key deletions later on */
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 69c09508afb5..3eecce86f63f 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -308,8 +308,6 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
ASSERT(found);
spin_lock(&found->lock);
found->total_bytes += block_group->length;
- if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
- found->active_total_bytes += block_group->length;
found->disk_total += block_group->length * factor;
found->bytes_used += block_group->used;
found->disk_used += block_group->used * factor;
@@ -379,22 +377,6 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
return avail;
}
-static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *space_info)
-{
- /*
- * On regular filesystem, all total_bytes are always writable. On zoned
- * filesystem, there may be a limitation imposed by max_active_zones.
- * For metadata allocation, we cannot finish an existing active block
- * group to avoid a deadlock. Thus, we need to consider only the active
- * groups to be writable for metadata space.
- */
- if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
- return space_info->total_bytes;
-
- return space_info->active_total_bytes;
-}
-
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush)
@@ -407,13 +389,13 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
return 0;
used = btrfs_space_info_used(space_info, true);
- if (test_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags) &&
+ if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
(space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
avail = 0;
else
avail = calc_available_free_space(fs_info, space_info, flush);
- if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
+ if (used + bytes < space_info->total_bytes + avail)
return 1;
return 0;
}
@@ -449,7 +431,7 @@ again:
ticket = list_first_entry(head, struct reserve_ticket, list);
/* Check and see if our ticket can be satisfied now. */
- if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
+ if ((used + ticket->bytes <= space_info->total_bytes) ||
btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
flush)) {
btrfs_space_info_update_bytes_may_use(fs_info,
@@ -829,7 +811,6 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
{
u64 used;
u64 avail;
- u64 total;
u64 to_reclaim = space_info->reclaim_size;
lockdep_assert_held(&space_info->lock);
@@ -844,9 +825,8 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
* space. If that's the case add in our overage so we make sure to put
* appropriate pressure on the flushing state machine.
*/
- total = writable_total_bytes(fs_info, space_info);
- if (total + avail < used)
- to_reclaim += used - (total + avail);
+ if (space_info->total_bytes + avail < used)
+ to_reclaim += used - (space_info->total_bytes + avail);
return to_reclaim;
}
@@ -856,11 +836,10 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
{
u64 global_rsv_size = fs_info->global_block_rsv.reserved;
u64 ordered, delalloc;
- u64 total = writable_total_bytes(fs_info, space_info);
u64 thresh;
u64 used;
- thresh = mult_perc(total, 90);
+ thresh = mult_perc(space_info->total_bytes, 90);
lockdep_assert_held(&space_info->lock);
@@ -923,8 +902,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
BTRFS_RESERVE_FLUSH_ALL);
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_readonly + global_rsv_size;
- if (used < total)
- thresh += total - used;
+ if (used < space_info->total_bytes)
+ thresh += space_info->total_bytes - used;
thresh >>= space_info->clamp;
used = space_info->bytes_pinned;
@@ -1651,7 +1630,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* can_overcommit() to ensure we can overcommit to continue.
*/
if (!pending_tickets &&
- ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
+ ((used + orig_bytes <= space_info->total_bytes) ||
btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
btrfs_space_info_update_bytes_may_use(fs_info, space_info,
orig_bytes);
@@ -1665,8 +1644,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
*/
if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
used = btrfs_space_info_used(space_info, false);
- if (used + orig_bytes <=
- writable_total_bytes(fs_info, space_info)) {
+ if (used + orig_bytes <= space_info->total_bytes) {
btrfs_space_info_update_bytes_may_use(fs_info, space_info,
orig_bytes);
ret = 0;
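
The net effect of removing writable_total_bytes() is easiest to see in the overcommit test: with active zone tracking, metadata reservations get no overcommit headroom at all. A minimal sketch of the check (plain scalars instead of the kernel structures):

/*
 * Sketch of the simplified btrfs_can_overcommit() logic. With active
 * zone tracking, metadata gets avail = 0, so the test reduces to
 * "does the reservation fit within total_bytes".
 */
#include <stdbool.h>
#include <stdint.h>

static int can_overcommit_sketch(uint64_t used, uint64_t bytes,
				 uint64_t total_bytes, uint64_t avail,
				 bool zone_tracking, bool is_metadata)
{
	if (zone_tracking && is_metadata)
		avail = 0;
	return used + bytes < total_bytes + avail;
}
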
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index fc99ea2b0c34..2033b71b18ce 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -96,8 +96,6 @@ struct btrfs_space_info {
u64 bytes_may_use; /* number of bytes that may be used for
delalloc/allocations */
u64 bytes_readonly; /* total bytes that are read only */
- /* Total bytes in the space, but only accounts active block groups. */
- u64 active_total_bytes;
u64 bytes_zone_unusable; /* total bytes that are unusable until
resetting the device zone */
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 7823168c08a6..6d0124b6e79e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6363,7 +6363,8 @@ int __btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
ASSERT(op != BTRFS_MAP_DISCARD);
em = btrfs_get_chunk_map(fs_info, logical, *length);
- ASSERT(!IS_ERR(em));
+ if (IS_ERR(em))
+ return PTR_ERR(em);
map = em->map_lookup;
data_stripes = nr_data_stripes(map);
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index f95b2c94d619..45d04092f2f8 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -524,8 +524,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
}
atomic_set(&zone_info->active_zones_left,
max_active_zones - nactive);
- /* Overcommit does not work well with active zone tacking. */
- set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags);
+ set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
}
/* Validate superblock log */
@@ -1581,9 +1580,19 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
return;
WARN_ON(cache->bytes_super != 0);
- unusable = (cache->alloc_offset - cache->used) +
- (cache->length - cache->zone_capacity);
- free = cache->zone_capacity - cache->alloc_offset;
+
+ /* Check for block groups never get activated */
+ if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
+ cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
+ !test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
+ cache->alloc_offset == 0) {
+ unusable = cache->length;
+ free = 0;
+ } else {
+ unusable = (cache->alloc_offset - cache->used) +
+ (cache->length - cache->zone_capacity);
+ free = cache->zone_capacity - cache->alloc_offset;
+ }
/* We only need ->free_space in ALLOC_SEQ block groups */
cache->cached = BTRFS_CACHE_FINISHED;
@@ -1902,7 +1911,11 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
/* Successfully activated all the zones */
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
- space_info->active_total_bytes += block_group->length;
+ WARN_ON(block_group->alloc_offset != 0);
+ if (block_group->zone_unusable == block_group->length) {
+ block_group->zone_unusable = block_group->length - block_group->zone_capacity;
+ space_info->bytes_zone_unusable -= block_group->zone_capacity;
+ }
spin_unlock(&block_group->lock);
btrfs_try_granting_tickets(fs_info, space_info);
spin_unlock(&space_info->lock);
@@ -2086,11 +2099,21 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
if (!device->bdev)
continue;
- if (!zinfo->max_active_zones ||
- atomic_read(&zinfo->active_zones_left)) {
+ if (!zinfo->max_active_zones) {
ret = true;
break;
}
+
+ switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ case 0: /* single */
+ ret = (atomic_read(&zinfo->active_zones_left) >= 1);
+ break;
+ case BTRFS_BLOCK_GROUP_DUP:
+ ret = (atomic_read(&zinfo->active_zones_left) >= 2);
+ break;
+ }
+ if (ret)
+ break;
}
mutex_unlock(&fs_info->chunk_mutex);
@@ -2256,7 +2279,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
u64 avail;
spin_lock(&block_group->lock);
- if (block_group->reserved ||
+ if (block_group->reserved || block_group->alloc_offset == 0 ||
(block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
spin_unlock(&block_group->lock);
continue;
@@ -2293,10 +2316,6 @@ int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
return 0;
- /* No more block groups to activate */
- if (space_info->active_total_bytes == space_info->total_bytes)
- return 0;
-
for (;;) {
int ret;
bool need_finish = false;
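
The activation-time accounting added to btrfs_zone_activate() can be checked with a short worked sketch (hypothetical numbers, simplified fields): a never-activated metadata block group starts fully zone_unusable, and activation releases exactly zone_capacity bytes back to usable space.

/*
 * Sketch of the activation accounting. Example with length = 256 MiB
 * and zone_capacity = 192 MiB:
 *   before: zone_unusable = 256 MiB (whole region)
 *   after:  zone_unusable = 256 - 192 = 64 MiB (capacity tail only),
 *           and the space_info counter drops by 192 MiB.
 */
#include <stdint.h>

struct zone_acct_sketch {
	uint64_t length;
	uint64_t zone_capacity;
	uint64_t zone_unusable;     /* per block group */
	uint64_t si_zone_unusable;  /* per space_info */
};

static void activate_sketch(struct zone_acct_sketch *a)
{
	if (a->zone_unusable == a->length) {  /* never activated */
		a->zone_unusable = a->length - a->zone_capacity;
		a->si_zone_unusable -= a->zone_capacity;
	}
}
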
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index 78086f8dbda5..13d336a6cc5d 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -92,6 +92,8 @@ void fscrypt_put_master_key_activeref(struct super_block *sb,
* destroying any subkeys embedded in it.
*/
+ if (WARN_ON(!sb->s_master_keys))
+ return;
spin_lock(&sb->s_master_keys->lock);
hlist_del_rcu(&mk->mk_node);
spin_unlock(&sb->s_master_keys->lock);
@@ -207,10 +209,11 @@ static int allocate_filesystem_keyring(struct super_block *sb)
* Release all encryption keys that have been added to the filesystem, along
* with the keyring that contains them.
*
- * This is called at unmount time. The filesystem's underlying block device(s)
- * are still available at this time; this is important because after user file
- * accesses have been allowed, this function may need to evict keys from the
- * keyslots of an inline crypto engine, which requires the block device(s).
+ * This is called at unmount time, after all potentially-encrypted inodes have
+ * been evicted. The filesystem's underlying block device(s) are still
+ * available at this time; this is important because after user file accesses
+ * have been allowed, this function may need to evict keys from the keyslots of
+ * an inline crypto engine, which requires the block device(s).
*/
void fscrypt_destroy_keyring(struct super_block *sb)
{
@@ -227,12 +230,12 @@ void fscrypt_destroy_keyring(struct super_block *sb)
hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) {
/*
- * Since all inodes were already evicted, every key
- * remaining in the keyring should have an empty inode
- * list, and should only still be in the keyring due to
- * the single active ref associated with ->mk_secret.
- * There should be no structural refs beyond the one
- * associated with the active ref.
+ * Since all potentially-encrypted inodes were already
+ * evicted, every key remaining in the keyring should
+ * have an empty inode list, and should only still be in
+ * the keyring due to the single active ref associated
+ * with ->mk_secret. There should be no structural refs
+ * beyond the one associated with the active ref.
*/
WARN_ON(refcount_read(&mk->mk_active_refs) != 1);
WARN_ON(refcount_read(&mk->mk_struct_refs) != 1);
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 6fe9ca253b70..2e215e8c3c88 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -83,8 +83,26 @@ static int gfs2_dhash(const struct dentry *dentry, struct qstr *str)
return 0;
}
+static int gfs2_dentry_delete(const struct dentry *dentry)
+{
+ struct gfs2_inode *ginode;
+
+ if (d_really_is_negative(dentry))
+ return 0;
+
+ ginode = GFS2_I(d_inode(dentry));
+ if (!gfs2_holder_initialized(&ginode->i_iopen_gh))
+ return 0;
+
+ if (test_bit(GLF_DEMOTE, &ginode->i_iopen_gh.gh_gl->gl_flags))
+ return 1;
+
+ return 0;
+}
+
const struct dentry_operations gfs2_dops = {
.d_revalidate = gfs2_drevalidate,
.d_hash = gfs2_dhash,
+ .d_delete = gfs2_dentry_delete,
};
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
index 7df6324ccb8a..8161667c976f 100644
--- a/fs/lockd/clnt4xdr.c
+++ b/fs/lockd/clnt4xdr.c
@@ -261,7 +261,6 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
u32 exclusive;
int error;
__be32 *p;
- s32 end;
memset(lock, 0, sizeof(*lock));
locks_init_lock(fl);
@@ -285,13 +284,7 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK;
p = xdr_decode_hyper(p, &l_offset);
xdr_decode_hyper(p, &l_len);
- end = l_offset + l_len - 1;
-
- fl->fl_start = (loff_t)l_offset;
- if (l_len == 0 || end < 0)
- fl->fl_end = OFFSET_MAX;
- else
- fl->fl_end = (loff_t)end;
+ nlm4svc_set_file_lock_range(fl, l_offset, l_len);
error = 0;
out:
return error;
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index 712fdfeb8ef0..5fcbf30cd275 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -33,6 +33,17 @@ loff_t_to_s64(loff_t offset)
return res;
}
+void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len)
+{
+ s64 end = off + len - 1;
+
+ fl->fl_start = off;
+ if (len == 0 || end < 0)
+ fl->fl_end = OFFSET_MAX;
+ else
+ fl->fl_end = end;
+}
+
/*
* NLM file handles are defined by specification to be a variable-length
* XDR opaque no longer than 1024 bytes. However, this implementation
@@ -80,7 +91,7 @@ svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
locks_init_lock(fl);
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK;
-
+ nlm4svc_set_file_lock_range(fl, lock->lock_start, lock->lock_len);
return true;
}
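
The clamping behavior of the new helper is worth a standalone illustration (a sketch with plain C99 types; SKETCH_OFFSET_MAX stands in for the kernel's OFFSET_MAX): a zero length, or an offset plus length that wraps past the signed 64-bit range, means "lock to end of file".

#include <stdint.h>
#include <stdio.h>

#define SKETCH_OFFSET_MAX INT64_MAX

static void set_lock_range_sketch(int64_t *start, int64_t *end,
				  uint64_t off, uint64_t len)
{
	/* On two's-complement targets this wraps negative on overflow. */
	int64_t e = (int64_t)(off + len - 1);

	*start = (int64_t)off;
	if (len == 0 || e < 0)
		*end = SKETCH_OFFSET_MAX;
	else
		*end = e;
}

int main(void)
{
	int64_t start, end;

	/* An overflowing range clamps to end-of-file. */
	set_lock_range_sketch(&start, &end, (uint64_t)INT64_MAX - 10, 100);
	printf("end == OFFSET_MAX? %d\n", end == SKETCH_OFFSET_MAX);
	return 0;
}
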
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 14a72224b657..450d6c3bc05e 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -75,7 +75,7 @@ config NFS_V3_ACL
config NFS_V4
tristate "NFS client support for NFS version 4"
depends on NFS_FS
- select SUNRPC_GSS
+ select RPCSEC_GSS_KRB5
select KEYS
help
This option enables support for version 4 of the NFS protocol
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index a41c3ee4549c..6fbcbb8d6587 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -3089,7 +3089,6 @@ static void nfs_access_add_rbtree(struct inode *inode,
else
goto found;
}
- set->timestamp = ktime_get_ns();
rb_link_node(&set->rb_node, parent, p);
rb_insert_color(&set->rb_node, root_node);
list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
@@ -3114,6 +3113,7 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set,
cache->fsgid = cred->fsgid;
cache->group_info = get_group_info(cred->group_info);
cache->mask = set->mask;
+ cache->timestamp = ktime_get_ns();
/* The above field assignments must be visible
* before this item appears on the lru. We cannot easily
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c380cff4108e..e90988591df4 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -15,6 +15,7 @@
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
@@ -337,6 +338,7 @@ int nfs_read_folio(struct file *file, struct folio *folio)
trace_nfs_aop_readpage(inode, folio);
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
+ task_io_account_read(folio_size(folio));
/*
* Try to flush any pending writes to the file..
@@ -393,6 +395,7 @@ void nfs_readahead(struct readahead_control *ractl)
trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
+ task_io_account_read(readahead_length(ractl));
ret = -ESTALE;
if (NFS_STALE(inode))
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 7c441f2bd444..43b88eaf0673 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -73,7 +73,7 @@ config NFSD_V4
bool "NFS server support for NFS version 4"
depends on NFSD && PROC_FS
select FS_POSIX_ACL
- select SUNRPC_GSS
+ select RPCSEC_GSS_KRB5
select CRYPTO
select CRYPTO_MD5
select CRYPTO_SHA256
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 502e1b7742db..5783209f17fc 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -941,8 +941,15 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct page *last_page;
last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
- for (page += offset / PAGE_SIZE; page <= last_page; page++)
+ for (page += offset / PAGE_SIZE; page <= last_page; page++) {
+ /*
+ * Skip page replacement when extending the contents
+ * of the current page.
+ */
+ if (page == *(rqstp->rq_next_page - 1))
+ continue;
svc_rqst_replace_page(rqstp, page);
+ }
if (rqstp->rq_res.page_len == 0) // first call
rqstp->rq_res.page_base = offset % PAGE_SIZE;
rqstp->rq_res.page_len += sd->len;
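
The page-span arithmetic driving that loop is simple enough to check by hand (a sketch with hypothetical numbers; PAGE_SIZE_SKETCH stands in for PAGE_SIZE): the skip keeps the server from replacing the response page it is still appending to.

#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096u

int main(void)
{
	unsigned int offset = 6000, len = 3000;
	unsigned int first = offset / PAGE_SIZE_SKETCH;
	unsigned int last = (offset + len - 1) / PAGE_SIZE_SKETCH;

	/* Prints "pages 1..2": the splice actor visits each page once. */
	printf("pages %u..%u\n", first, last);
	return 0;
}
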
diff --git a/fs/super.c b/fs/super.c
index 84332d5cb817..04bc62ab7dfe 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -475,13 +475,22 @@ void generic_shutdown_super(struct super_block *sb)
cgroup_writeback_umount();
- /* evict all inodes with zero refcount */
+ /* Evict all inodes with zero refcount. */
evict_inodes(sb);
- /* only nonzero refcount inodes can have marks */
+
+ /*
+ * Clean up and evict any inodes that still have references due
+ * to fsnotify or the security policy.
+ */
fsnotify_sb_delete(sb);
- fscrypt_destroy_keyring(sb);
security_sb_delete(sb);
+ /*
+ * Now that all potentially-encrypted inodes have been evicted,
+ * the fscrypt keyring can be destroyed.
+ */
+ fscrypt_destroy_keyring(sb);
+
if (sb->s_dio_done_wq) {
destroy_workqueue(sb->s_dio_done_wq);
sb->s_dio_done_wq = NULL;
diff --git a/fs/verity/enable.c b/fs/verity/enable.c
index e13db6507b38..7a0e3a84d370 100644
--- a/fs/verity/enable.c
+++ b/fs/verity/enable.c
@@ -8,7 +8,6 @@
#include "fsverity_private.h"
#include <linux/mount.h>
-#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
@@ -367,25 +366,27 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
goto out_drop_write;
err = enable_verity(filp, &arg);
- if (err)
- goto out_allow_write_access;
/*
- * Some pages of the file may have been evicted from pagecache after
- * being used in the Merkle tree construction, then read into pagecache
- * again by another process reading from the file concurrently. Since
- * these pages didn't undergo verification against the file digest which
- * fs-verity now claims to be enforcing, we have to wipe the pagecache
- * to ensure that all future reads are verified.
+ * We no longer drop the inode's pagecache after enabling verity. This
+ * used to be done to try to avoid a race condition where pages could be
+ * evicted after being used in the Merkle tree construction, then
+ * re-instantiated by a concurrent read. Such pages are unverified, and
+ * the backing storage could have filled them with different content, so
+ * they shouldn't be used to fulfill reads once verity is enabled.
+ *
+ * But, dropping the pagecache has a big performance impact, and it
+ * doesn't fully solve the race condition anyway. So for those reasons,
+ * and also because this race condition isn't very important relatively
+ * speaking (especially for small-ish files, where the chance of a page
+ * being used, evicted, *and* re-instantiated all while enabling verity
+ * is quite small), we no longer drop the inode's pagecache.
*/
- filemap_write_and_wait(inode->i_mapping);
- invalidate_inode_pages2(inode->i_mapping);
/*
* allow_write_access() is needed to pair with deny_write_access().
* Regardless, the filesystem won't allow writing to verity files.
*/
-out_allow_write_access:
allow_write_access(filp);
out_drop_write:
mnt_drop_write_file(filp);
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index f50e3b5b52c9..e2508222750b 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -387,15 +387,15 @@ EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
int __init fsverity_init_workqueue(void)
{
/*
- * Use an unbound workqueue to allow bios to be verified in parallel
- * even when they happen to complete on the same CPU. This sacrifices
- * locality, but it's worthwhile since hashing is CPU-intensive.
+ * Use a high-priority workqueue to prioritize verification work, which
+ * blocks reads from completing, over regular application tasks.
*
- * Also use a high-priority workqueue to prioritize verification work,
- * which blocks reads from completing, over regular application tasks.
+ * For performance reasons, don't use an unbound workqueue. Using an
+ * unbound workqueue for crypto operations causes excessive scheduler
+ * latency on ARM64.
*/
fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
- WQ_UNBOUND | WQ_HIGHPRI,
+ WQ_HIGHPRI,
num_online_cpus());
if (!fsverity_read_workqueue)
return -ENOMEM;
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index 738b0e28d74b..617e4f9db42e 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -383,7 +383,7 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
struct block_device *bdev = inode->i_sb->s_bdev;
unsigned int max = bdev_max_zone_append_sectors(bdev);
struct bio *bio;
- ssize_t size;
+ ssize_t size = 0;
int nr_pages;
ssize_t ret;
@@ -426,7 +426,7 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
if (bio->bi_iter.bi_sector != wpsector) {
zonefs_warn(inode->i_sb,
"Corrupted write pointer %llu for zone at %llu\n",
- wpsector, z->z_sector);
+ bio->bi_iter.bi_sector, z->z_sector);
ret = -EIO;
}
}