From 6ca154c1b239d1e1fa9153d66272a1bf9e34c65e Mon Sep 17 00:00:00 2001
From: Qu Wenruo
Date: Thu, 22 Jun 2023 14:54:38 +0800
Subject: btrfs: add a test case to verify the write behavior of large RAID5
 data chunks

There is a recent regression in the v6.4 merge window: a u32 left shift
overflow can cause problems with large (over 4G) data chunks.

This is especially nasty for RAID56, where it can lead to an ASSERT()
during regular writes, or to memory corruption if CONFIG_BTRFS_ASSERT
is not enabled.

This is the regression test case for it.

Unlike btrfs/292, btrfs doesn't support trim inside RAID56 chunks, so
the workflow is simplified:

- Create a RAID5 or RAID6 data chunk during mkfs

- Fill the fs with 5G of data and sync
  On an unpatched kernel, the sync would crash.

- Make sure everything is fine

Signed-off-by: Qu Wenruo
Reviewed-by: Anand Jain
Signed-off-by: Zorro Lang
---
 tests/btrfs/294     | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 tests/btrfs/294.out |  2 ++
 2 files changed, 74 insertions(+)
 create mode 100755 tests/btrfs/294
 create mode 100644 tests/btrfs/294.out

diff --git a/tests/btrfs/294 b/tests/btrfs/294
new file mode 100755
index 00000000..61ce7d97
--- /dev/null
+++ b/tests/btrfs/294
@@ -0,0 +1,72 @@
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2023 SUSE Linux Products GmbH. All Rights Reserved.
+#
+# FS QA Test No. 294
+#
+# Test btrfs write behavior with large RAID56 chunks (size beyond 4G).
+#
+. ./common/preamble
+_begin_fstest auto raid volume
+
+# Import common functions.
+. ./common/filter
+
+# real QA test starts here
+
+# Modify as appropriate.
+_supported_fs btrfs
+_require_scratch_dev_pool 8
+_fixed_by_kernel_commit a7299a18a179 \
+	"btrfs: fix u32 overflows when left shifting @stripe_nr"
+_fixed_by_kernel_commit xxxxxxxxxxxx \
+	"btrfs: use a dedicated helper to convert stripe_nr to offset"
+
+_scratch_dev_pool_get 8
+
+datasize=$((5 * 1024 * 1024 * 1024))
+
+
+workload()
+{
+	local data_profile=$1
+
+	_scratch_pool_mkfs -m raid1 -d $data_profile >> $seqres.full 2>&1
+	_scratch_mount
+	$XFS_IO_PROG -f -c "pwrite -b 1m 0 $datasize" $SCRATCH_MNT/foobar > /dev/null
+
+	# An unpatched kernel would trigger an ASSERT() or crash at writeback.
+	sync
+
+	echo "=== With initial 5G data written ($data_profile) ===" >> $seqres.full
+	$BTRFS_UTIL_PROG filesystem df $SCRATCH_MNT >> $seqres.full
+	_scratch_unmount
+
+	# Make sure we haven't corrupted anything.
+	$BTRFS_UTIL_PROG check --check-data-csum $SCRATCH_DEV >> $seqres.full 2>&1
+	if [ $? -ne 0 ]; then
+		_scratch_dev_pool_put
+		_fail "data corruption detected after initial data filling"
+	fi
+}
+
+# Make sure each device has at least 2G.
+# Btrfs has a limit of 1G on per-device stripe length.
+# Double that so that we can ensure a 6G RAID6 data chunk.
+for i in $SCRATCH_DEV_POOL; do
+	devsize=$(blockdev --getsize64 "$i")
+	if [ $devsize -lt $((2 * 1024 * 1024 * 1024)) ]; then
+		_scratch_dev_pool_put
+		_notrun "device $i is too small, need at least 2G"
+	fi
+done
+
+workload raid5
+workload raid6
+
+_scratch_dev_pool_put
+echo "Silence is golden"
+
+# success, all done
+status=0
+exit
diff --git a/tests/btrfs/294.out b/tests/btrfs/294.out
new file mode 100644
index 00000000..c09531de
--- /dev/null
+++ b/tests/btrfs/294.out
@@ -0,0 +1,2 @@
+QA output created by 294
+Silence is golden
--
cgit v1.2.3
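
For readers who want to exercise the same failure outside the fstests harness, below is a minimal standalone sketch of the workflow described in the commit message. It is not part of the patch: the device names (/dev/sdb..sdi), the mount point (/mnt/scratch) and the RAID5-only run are assumptions, and the fstests case above remains the authoritative reproducer.

#!/bin/bash
# Standalone sketch of the btrfs/294 workflow (RAID5 shown; RAID6 is analogous).
# Assumes eight scratch devices of at least 2G each and an existing, empty
# /mnt/scratch mount point. Destroys any data on the listed devices.
devs="/dev/sdb /dev/sdc /dev/sdd /dev/sde /dev/sdf /dev/sdg /dev/sdh /dev/sdi"

# Metadata as RAID1, data as RAID5, so the RAID5 data chunk striped across
# all eight devices can grow well beyond 4G.
mkfs.btrfs -f -m raid1 -d raid5 $devs
mount /dev/sdb /mnt/scratch

# Write 5G of data and sync; an unpatched kernel would hit the ASSERT()
# or crash at writeback time.
xfs_io -f -c "pwrite -b 1m 0 $((5 * 1024 * 1024 * 1024))" /mnt/scratch/foobar
sync

umount /mnt/scratch
# Verify nothing was corrupted, including data checksums.
btrfs check --check-data-csum /dev/sdb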