summaryrefslogtreecommitdiff
path: root/tests/btrfs/179
blob: 2f17c9f9fb4adf5f53a857d1a0be2ec6d678f043 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2019 SUSE Linux Products GmbH. All Rights Reserved.
#
# FS QA Test 179
#
# Test if btrfs will lockup at subvolume deletion when qgroups are enabled.
#
# This bug is going to be fixed by a patch for the kernel titled
# "btrfs: qgroup: Don't trigger backref walk at delayed ref insert time".
#
. ./common/preamble
_begin_fstest auto qgroup dangerous

# Import common functions.
. ./common/filter

# real QA test starts here

# Modify as appropriate.
_supported_fs btrfs
_require_scratch

# default sleep interval
# Pacing knob (seconds) shared by the snapshot/delete workloads below.
sleep_time=1

# stress test runtime
# Total stress duration (seconds) before the workloads are torn down.
runtime=120

_scratch_mkfs > /dev/null 2>&1
_scratch_mount

# Layout: snapshots of "src" accumulate under "snapshots/" while qgroups
# track both; the concurrent snapshot+delete churn is what used to lock up.
mkdir -p "$SCRATCH_MNT/snapshots"
$BTRFS_UTIL_PROG subvolume create "$SCRATCH_MNT/src" > /dev/null
$BTRFS_UTIL_PROG quota enable "$SCRATCH_MNT" > /dev/null
# -w: wait for the initial rescan so the stress starts with settled counts.
$BTRFS_UTIL_PROG quota rescan -w "$SCRATCH_MNT" > /dev/null

# Continuously create small files in the source subvolume, occasionally
# deleting a random one, to keep extent/qgroup activity high.
# Globals:   SCRATCH_MNT (read)
# Runs until killed; traps SIGTERM so an in-flight write can finish.
fill_workload()
{
	trap "wait; exit" SIGTERM
	local i=0
	local victim

	while true; do
		# One regular (8K) and one inline-candidate (2K) file per loop.
		_pwrite_byte 0xcd 0 8K "$SCRATCH_MNT/src/large_$i" > /dev/null
		_pwrite_byte 0xcd 0 2K "$SCRATCH_MNT/src/inline_$i" > /dev/null

		# Randomly remove some files for every 5 loops.  Filenames are
		# all "large_N"/"inline_N", so parsing ls output is safe here.
		if [ $(( i % 5 )) -eq 0 ]; then
			victim=$(ls "$SCRATCH_MNT/src" | sort -R | head -n1)
			# Guard an empty pick so we never run rm on the bare
			# directory path, matching delete_workload()'s check.
			if [ -n "$victim" ]; then
				rm -- "$SCRATCH_MNT/src/$victim"
			fi
		fi
		i=$((i + 1))
	done
}

# Periodically snapshot the source subvolume into numbered snapshots.
# Globals:   SCRATCH_MNT, BTRFS_UTIL_PROG, sleep_time (all read)
# Runs until killed; traps SIGTERM so an in-flight snapshot can finish.
snapshot_workload()
{
	trap "wait; exit" SIGTERM
	local snap_id=0

	while :; do
		sleep "$sleep_time"
		$BTRFS_UTIL_PROG subvolume snapshot "$SCRATCH_MNT/src" \
			"$SCRATCH_MNT/snapshots/$snap_id" > /dev/null
		snap_id=$((snap_id + 1))
	done
}

# Periodically delete one randomly chosen snapshot, racing against
# snapshot_workload() to stress qgroup accounting at subvolume deletion.
# Globals:   SCRATCH_MNT, BTRFS_UTIL_PROG, sleep_time (all read)
# Runs until killed; traps SIGTERM so an in-flight deletion can finish.
delete_workload()
{
	trap "wait; exit" SIGTERM
	local victim

	while :; do
		sleep $((sleep_time * 2))
		victim=$(ls "$SCRATCH_MNT/snapshots" | sort -R | head -n1)
		# Nothing to delete yet?  Just go back to sleep and retry.
		if [ -n "$victim" ]; then
			$BTRFS_UTIL_PROG subvolume delete \
				"$SCRATCH_MNT/snapshots/$victim" > /dev/null
		fi
	done
}

# Start the filler first so snapshots have something to capture.
fill_workload &
fill_pid=$!

sleep $((sleep_time * 2))
snapshot_workload &
snapshot_pid=$!
delete_workload &
delete_pid=$!

# Let the three workloads race for the full stress runtime, then stop them
# one by one.  Each workload traps SIGTERM and finishes its in-flight
# command, so kill+wait gives a clean shutdown.
sleep "$runtime"
kill "$fill_pid"
wait "$fill_pid"
kill "$snapshot_pid"
wait "$snapshot_pid"
kill "$delete_pid"
wait "$delete_pid"

# By the async nature of qgroup tree scan and subvolume delete, the latest
# qgroup counts at the time of umount might not be up to date; if they
# aren't, the check will report the difference in count.  The difference in
# qgroup counts are anyway updated in the following mount, so it is not a
# real issue that this test case is trying to verify.  So make sure the
# qgroup counts are in sync before unmount happens.

$BTRFS_UTIL_PROG subvolume sync "$SCRATCH_MNT" >> "$seqres.full"

# success, all done
echo "Silence is golden"

status=0
exit