Diffstat (limited to 'fs/ext4/super.c')
-rw-r--r--  fs/ext4/super.c  232
1 file changed, 151 insertions, 81 deletions
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 52b0530c5d65..a9448db1cf7e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -38,7 +38,7 @@
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/cleancache.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
@@ -438,6 +438,9 @@ void __ext4_error(struct super_block *sb, const char *function,
struct va_format vaf;
va_list args;
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
+ return;
+
if (ext4_error_ratelimit(sb)) {
va_start(args, fmt);
vaf.fmt = fmt;
@@ -459,6 +462,9 @@ void __ext4_error_inode(struct inode *inode, const char *function,
struct va_format vaf;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ return;
+
es->s_last_error_ino = cpu_to_le32(inode->i_ino);
es->s_last_error_block = cpu_to_le64(block);
if (ext4_error_ratelimit(inode->i_sb)) {
@@ -491,6 +497,9 @@ void __ext4_error_file(struct file *file, const char *function,
struct inode *inode = file_inode(file);
char pathname[80], *path;
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ return;
+
es = EXT4_SB(inode->i_sb)->s_es;
es->s_last_error_ino = cpu_to_le32(inode->i_ino);
if (ext4_error_ratelimit(inode->i_sb)) {
@@ -567,6 +576,9 @@ void __ext4_std_error(struct super_block *sb, const char *function,
char nbuf[16];
const char *errstr;
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
+ return;
+
/* Special case: if the error is EROFS, and we're not already
* inside a transaction, then there's really no point in logging
* an error. */
@@ -600,6 +612,9 @@ void __ext4_abort(struct super_block *sb, const char *function,
struct va_format vaf;
va_list args;
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
+ return;
+
save_error_info(sb, function, line);
va_start(args, fmt);
vaf.fmt = fmt;
@@ -695,6 +710,9 @@ __acquires(bitlock)
va_list args;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
+ return;
+
es->s_last_error_ino = cpu_to_le32(ino);
es->s_last_error_block = cpu_to_le64(block);
__save_error_info(sb, function, line);
@@ -825,6 +843,7 @@ static void ext4_put_super(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
+ int aborted = 0;
int i, err;
ext4_unregister_li_request(sb);
@@ -834,9 +853,10 @@ static void ext4_put_super(struct super_block *sb)
destroy_workqueue(sbi->rsv_conversion_wq);
if (sbi->s_journal) {
+ aborted = is_journal_aborted(sbi->s_journal);
err = jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
- if (err < 0)
+ if ((err < 0) && !aborted)
ext4_abort(sb, "Couldn't clean up the journal");
}
@@ -847,7 +867,7 @@ static void ext4_put_super(struct super_block *sb)
ext4_mb_release(sb);
ext4_ext_release(sb);
- if (!(sb->s_flags & MS_RDONLY)) {
+ if (!(sb->s_flags & MS_RDONLY) && !aborted) {
ext4_clear_feature_journal_needs_recovery(sb);
es->s_state = cpu_to_le16(sbi->s_mount_state);
}
@@ -863,7 +883,6 @@ static void ext4_put_super(struct super_block *sb)
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
- brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
for (i = 0; i < EXT4_MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
@@ -895,6 +914,7 @@ static void ext4_put_super(struct super_block *sb)
}
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
+ brelse(sbi->s_sbh);
sb->s_fs_info = NULL;
/*
* Now that we are completely done shutting down the
@@ -1100,51 +1120,62 @@ static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
}
-static int ext4_key_prefix(struct inode *inode, u8 **key)
-{
- *key = EXT4_SB(inode->i_sb)->key_prefix;
- return EXT4_SB(inode->i_sb)->key_prefix_size;
-}
-
-static int ext4_prepare_context(struct inode *inode)
-{
- return ext4_convert_inline_data(inode);
-}
-
static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
void *fs_data)
{
- handle_t *handle;
- int res, res2;
+ handle_t *handle = fs_data;
+ int res, res2, retries = 0;
- /* fs_data is null when internally used. */
- if (fs_data) {
- res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
- len, 0);
+ res = ext4_convert_inline_data(inode);
+ if (res)
+ return res;
+
+ /*
+ * If a journal handle was specified, then the encryption context is
+ * being set on a new inode via inheritance and is part of a larger
+ * transaction to create the inode. Otherwise the encryption context is
+ * being set on an existing inode in its own transaction. Only in the
+ * latter case should the "retry on ENOSPC" logic be used.
+ */
+
+ if (handle) {
+ res = ext4_xattr_set_handle(handle, inode,
+ EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, 0);
if (!res) {
ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
ext4_clear_inode_state(inode,
EXT4_STATE_MAY_INLINE_DATA);
+ /*
+ * Update inode->i_flags - e.g. S_DAX may get disabled
+ */
+ ext4_set_inode_flags(inode);
}
return res;
}
+retry:
handle = ext4_journal_start(inode, EXT4_HT_MISC,
ext4_jbd2_credits_xattr(inode));
if (IS_ERR(handle))
return PTR_ERR(handle);
- res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
- len, 0);
+ res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, 0);
if (!res) {
ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+ /* Update inode->i_flags - e.g. S_DAX may get disabled */
+ ext4_set_inode_flags(inode);
res = ext4_mark_inode_dirty(handle, inode);
if (res)
EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
}
res2 = ext4_journal_stop(handle);
+
+ if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ goto retry;
if (!res)
res = res2;
return res;
@@ -1161,10 +1192,9 @@ static unsigned ext4_max_namelen(struct inode *inode)
EXT4_NAME_LEN;
}
-static struct fscrypt_operations ext4_cryptops = {
+static const struct fscrypt_operations ext4_cryptops = {
+ .key_prefix = "ext4:",
.get_context = ext4_get_context,
- .key_prefix = ext4_key_prefix,
- .prepare_context = ext4_prepare_context,
.set_context = ext4_set_context,
.dummy_context = ext4_dummy_context,
.is_encrypted = ext4_encrypted_inode,
@@ -1172,7 +1202,7 @@ static struct fscrypt_operations ext4_cryptops = {
.max_namelen = ext4_max_namelen,
};
#else
-static struct fscrypt_operations ext4_cryptops = {
+static const struct fscrypt_operations ext4_cryptops = {
.is_encrypted = ext4_encrypted_inode,
};
#endif
@@ -1187,7 +1217,7 @@ static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
- struct path *path);
+ const struct path *path);
static int ext4_quota_off(struct super_block *sb, int type);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
@@ -1272,7 +1302,7 @@ enum {
Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
- Opt_lazytime, Opt_nolazytime,
+ Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock,
@@ -1340,6 +1370,7 @@ static const match_table_t tokens = {
{Opt_delalloc, "delalloc"},
{Opt_lazytime, "lazytime"},
{Opt_nolazytime, "nolazytime"},
+ {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
{Opt_nodelalloc, "nodelalloc"},
{Opt_removed, "mblk_io_submit"},
{Opt_removed, "nomblk_io_submit"},
@@ -1545,6 +1576,7 @@ static const struct mount_opts {
#endif
{Opt_nouid32, EXT4_MOUNT_NO_UID32, MOPT_SET},
{Opt_debug, EXT4_MOUNT_DEBUG, MOPT_SET},
+ {Opt_debug_want_extra_isize, 0, MOPT_GTE0},
{Opt_quota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA, MOPT_SET | MOPT_Q},
{Opt_usrquota, EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA,
MOPT_SET | MOPT_Q},
@@ -1658,6 +1690,8 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
if (arg == 0)
arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
sbi->s_commit_interval = HZ * arg;
+ } else if (token == Opt_debug_want_extra_isize) {
+ sbi->s_want_extra_isize = arg;
} else if (token == Opt_max_batch_time) {
sbi->s_max_batch_time = arg;
} else if (token == Opt_min_batch_time) {
@@ -1883,12 +1917,6 @@ static int parse_options(char *options, struct super_block *sb,
return 0;
}
}
- if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
- test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
- ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit "
- "in data=ordered mode");
- return 0;
- }
return 1;
}
@@ -2330,7 +2358,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
struct ext4_super_block *es)
{
unsigned int s_flags = sb->s_flags;
- int nr_orphans = 0, nr_truncates = 0;
+ int ret, nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
int i;
#endif
@@ -2412,7 +2440,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
inode->i_ino, inode->i_size);
inode_lock(inode);
truncate_inode_pages(inode->i_mapping, inode->i_size);
- ext4_truncate(inode);
+ ret = ext4_truncate(inode);
+ if (ret)
+ ext4_std_error(inode->i_sb, ret);
inode_unlock(inode);
nr_truncates++;
} else {
@@ -2605,9 +2635,9 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
ret = sbi->s_stripe;
- else if (stripe_width <= sbi->s_blocks_per_group)
+ else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
ret = stripe_width;
- else if (stride <= sbi->s_blocks_per_group)
+ else if (stride && stride <= sbi->s_blocks_per_group)
ret = stride;
else
ret = 0;
@@ -3193,10 +3223,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
ext4_set_bit(s++, buf);
count++;
}
- for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
- ext4_set_bit(EXT4_B2C(sbi, s++), buf);
- count++;
+ j = ext4_bg_num_gdb(sb, grp);
+ if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
+ ext4_error(sb, "Invalid number of block group "
+ "descriptor blocks: %d", j);
+ j = EXT4_BLOCKS_PER_GROUP(sb) - s;
}
+ count += j;
+ for (; j > 0; j--)
+ ext4_set_bit(EXT4_B2C(sbi, s++), buf);
}
if (!count)
return 0;
@@ -3301,7 +3336,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
char *orig_data = kstrdup(data, GFP_KERNEL);
struct buffer_head *bh;
struct ext4_super_block *es = NULL;
- struct ext4_sb_info *sbi;
+ struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
ext4_fsblk_t block;
ext4_fsblk_t sb_block = get_sb_block(&data);
ext4_fsblk_t logical_sb_block;
@@ -3320,16 +3355,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
ext4_group_t first_not_zeroed;
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- goto out_free_orig;
+ if ((data && !orig_data) || !sbi)
+ goto out_free_base;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
- if (!sbi->s_blockgroup_lock) {
- kfree(sbi);
- goto out_free_orig;
- }
+ if (!sbi->s_blockgroup_lock)
+ goto out_free_base;
+
sb->s_fs_info = sbi;
sbi->s_sb = sb;
sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
@@ -3475,11 +3508,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
*/
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
- if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
- &journal_devnum, &journal_ioprio, 0)) {
- ext4_msg(sb, KERN_WARNING,
- "failed to parse options in superblock: %s",
- sbi->s_es->s_mount_opts);
+ if (sbi->s_es->s_mount_opts[0]) {
+ char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+ sizeof(sbi->s_es->s_mount_opts),
+ GFP_KERNEL);
+ if (!s_mount_opts)
+ goto failed_mount;
+ if (!parse_options(s_mount_opts, sb, &journal_devnum,
+ &journal_ioprio, 0)) {
+ ext4_msg(sb, KERN_WARNING,
+ "failed to parse options in superblock: %s",
+ s_mount_opts);
+ }
+ kfree(s_mount_opts);
}
sbi->s_def_mount_opt = sbi->s_mount_opt;
if (!parse_options((char *) data, sb, &journal_devnum,
@@ -3505,6 +3546,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"both data=journal and dax");
goto failed_mount;
}
+ if (ext4_has_feature_encrypt(sb)) {
+ ext4_msg(sb, KERN_WARNING,
+ "encrypted files will use data=ordered "
+ "instead of data journaling mode");
+ }
if (test_opt(sb, DELALLOC))
clear_opt(sb, DELALLOC);
} else {
@@ -3660,12 +3706,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
- if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
- goto cantfind_ext4;
sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
if (sbi->s_inodes_per_block == 0)
goto cantfind_ext4;
+ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+ sbi->s_inodes_per_group > blocksize * 8) {
+ ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
- sbi->s_inodes_per_group);
+ goto failed_mount;
+ }
sbi->s_itb_per_group = sbi->s_inodes_per_group /
sbi->s_inodes_per_block;
sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
@@ -3748,13 +3798,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
sbi->s_cluster_ratio = clustersize / blocksize;
- if (sbi->s_inodes_per_group > blocksize * 8) {
- ext4_msg(sb, KERN_ERR,
- "#inodes per group too big: %lu",
- sbi->s_inodes_per_group);
- goto failed_mount;
- }
-
/* Do we have standard group size of clustersize * 8 blocks ? */
if (sbi->s_blocks_per_group == clustersize << 3)
set_opt2(sb, STD_GROUP_SIZE);
@@ -3814,6 +3857,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
+ if (ext4_has_feature_meta_bg(sb)) {
+ if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
+ ext4_msg(sb, KERN_WARNING,
+ "first meta block group too large: %u "
+ "(group descriptor block count %u)",
+ le32_to_cpu(es->s_first_meta_bg), db_count);
+ goto failed_mount;
+ }
+ }
sbi->s_group_desc = ext4_kvmalloc(db_count *
sizeof(struct buffer_head *),
GFP_KERNEL);
@@ -3889,7 +3941,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
* root first: it may be modified in the journal!
*/
if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
- if (ext4_load_journal(sb, es, journal_devnum))
+ err = ext4_load_journal(sb, es, journal_devnum);
+ if (err)
goto failed_mount3a;
} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
ext4_has_feature_journal_needs_recovery(sb)) {
@@ -3967,6 +4020,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
default:
break;
}
+
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
+ test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "journal_async_commit in data=ordered mode");
+ goto failed_mount_wq;
+ }
+
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
@@ -4043,7 +4104,8 @@ no_journal:
sb->s_flags |= MS_RDONLY;
/* determine the minimum size of new large inodes, if present */
- if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
+ if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
+ sbi->s_want_extra_isize == 0) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
if (ext4_has_feature_extra_isize(sb)) {
@@ -4160,7 +4222,9 @@ no_journal:
if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
- "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+ "Opts: %.*s%s%s", descr,
+ (int) sizeof(sbi->s_es->s_mount_opts),
+ sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
if (es->s_error_count)
@@ -4172,11 +4236,6 @@ no_journal:
ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
kfree(orig_data);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- memcpy(sbi->key_prefix, EXT4_KEY_DESC_PREFIX,
- EXT4_KEY_DESC_PREFIX_SIZE);
- sbi->key_prefix_size = EXT4_KEY_DESC_PREFIX_SIZE;
-#endif
return 0;
cantfind_ext4:
@@ -4239,8 +4298,8 @@ failed_mount:
out_fail:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
+out_free_base:
kfree(sbi);
-out_free_orig:
kfree(orig_data);
return err ? err : ret;
}
@@ -4550,7 +4609,8 @@ static int ext4_commit_super(struct super_block *sb, int sync)
&EXT4_SB(sb)->s_freeinodes_counter));
BUFFER_TRACE(sbh, "marking dirty");
ext4_superblock_csum_set(sb);
- lock_buffer(sbh);
+ if (sync)
+ lock_buffer(sbh);
if (buffer_write_io_error(sbh)) {
/*
* Oh, dear. A previous attempt to write the
@@ -4566,10 +4626,10 @@ static int ext4_commit_super(struct super_block *sb, int sync)
set_buffer_uptodate(sbh);
}
mark_buffer_dirty(sbh);
- unlock_buffer(sbh);
if (sync) {
+ unlock_buffer(sbh);
error = __sync_dirty_buffer(sbh,
- test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
+ test_opt(sb, BARRIER) ? REQ_FUA : REQ_SYNC);
if (error)
return error;
@@ -4673,6 +4733,9 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
bool needs_barrier = false;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
+ return 0;
+
trace_ext4_sync_fs(sb, wait);
flush_workqueue(sbi->rsv_conversion_wq);
/*
@@ -4756,7 +4819,7 @@ out:
*/
static int ext4_unfreeze(struct super_block *sb)
{
- if (sb->s_flags & MS_RDONLY)
+ if ((sb->s_flags & MS_RDONLY) || ext4_forced_shutdown(EXT4_SB(sb)))
return 0;
if (EXT4_SB(sb)->s_journal) {
@@ -4857,6 +4920,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
err = -EINVAL;
goto restore_opts;
}
+ } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
+ if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "journal_async_commit in data=ordered mode");
+ err = -EINVAL;
+ goto restore_opts;
+ }
}
if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
@@ -5239,7 +5309,7 @@ static void lockdep_set_quota_inode(struct inode *inode, int subclass)
* Standard function to be called on quota_on
*/
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
- struct path *path)
+ const struct path *path)
{
int err;
@@ -5366,7 +5436,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
if (IS_ERR(handle))
goto out;
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_mtime = inode->i_ctime = current_time(inode);
ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);