summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.bcachefs_revision2
-rw-r--r--.github/workflows/deb-buildd.yml156
-rw-r--r--.github/workflows/deb-orchestrator.yml15
-rw-r--r--.github/workflows/deb-publish.yml47
-rw-r--r--.github/workflows/deb-src.yml74
-rw-r--r--.gitignore6
-rw-r--r--Cargo.lock2
-rw-r--r--Cargo.toml2
-rw-r--r--Makefile17
-rw-r--r--debian/changelog6
-rw-r--r--debian/control8
-rw-r--r--libbcachefs/Makefile3
-rw-r--r--libbcachefs/bcachefs.h2
-rw-r--r--libbcachefs/closure.h5
-rw-r--r--libbcachefs/fs.c14
-rw-r--r--libbcachefs/nocow_locking.c3
-rw-r--r--libbcachefs/util.h2
-rw-r--r--libbcachefs/vendor/closure.c218
-rw-r--r--libbcachefs/vendor/closure.h490
19 files changed, 967 insertions, 105 deletions
diff --git a/.bcachefs_revision b/.bcachefs_revision
index 8c2da63f..7b6e85f0 100644
--- a/.bcachefs_revision
+++ b/.bcachefs_revision
@@ -1 +1 @@
-68d390aa7f67b4ffb92497e0774560fc9ee5d188
+0c67e96b72019e02c92d2aed58de4c8b98e07d16
diff --git a/.github/workflows/deb-buildd.yml b/.github/workflows/deb-buildd.yml
index 85dd5aea..affab1b2 100644
--- a/.github/workflows/deb-buildd.yml
+++ b/.github/workflows/deb-buildd.yml
@@ -10,7 +10,10 @@ on:
arch:
required: true
type: string
- dist:
+ dist-name:
+ required: true
+ type: string
+ dist-version:
required: true
type: string
secrets:
@@ -67,8 +70,8 @@ jobs:
apt update
apt full-upgrade
apt install \
+ ${{ inputs.dist-name }}-keyring \
curl \
- debian-keyring \
devscripts \
gpg \
iproute2 \
@@ -82,27 +85,6 @@ jobs:
apt clean
USER=`whoami`
sudo usermod --add-subuids 100000-165535 --add-subgids 100000-165535 $USER
- BUILD_DIR="$GITHUB_WORKSPACE/deb-bin/${{ inputs.dist }}/${{ inputs.arch }}"
- mkdir -p "$BUILD_DIR"
- tee ~/.sbuildrc > /dev/null <<EOT
- \$verbose = 0;
- \$build_dir = '$BUILD_DIR';
- \$distribution = '${{ inputs.dist }}';
- #\$host_arch = '${{ inputs.arch }}';
- \$chroot_mode = 'unshare';
- \$unshare_tmpdir_template = '/tmp/tmp.sbuild.XXXXXXXXXX';
- \$key_id = '${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}';
- EOT
- if [ "${{ inputs.dist }}" != "unstable" ] \
- && [ "${{ inputs.dist }}" != "testing" ];
- then
- tee -a ~/.sbuildrc > /dev/null <<EOT
- \$extra_repositories = [
- 'deb http://deb.debian.org/debian ${{ inputs.dist }}-updates main',
- 'deb http://deb.debian.org/debian ${{ inputs.dist }}-backports main'
- ];
- EOT
- fi
- name: Import GPG key
timeout-minutes: 1
id: gpg
@@ -118,11 +100,12 @@ jobs:
run: |
set -xe
gpg --output /etc/apt/trusted.gpg.d/apt.bcachefs.org.asc --armor --export ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
+ rm -f ~/.gnupg/trustedkeys.gpg
gpg --no-default-keyring --keyring ~/.gnupg/trustedkeys.gpg --import /etc/apt/trusted.gpg.d/apt.bcachefs.org.asc
tee -a ~/.gnupg/gpg.conf > /dev/null <<EOT
default-key ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
EOT
- tee -a ~/.gbp.conf > /dev/null <<EOT
+ tee -a ~/.gbp.conf > /dev/null <<EOT
[buildpackage]
sign-tags = True
keyid = ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
@@ -131,9 +114,82 @@ jobs:
DEBSIGN_KEYID=${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
EOT
tee -a ~/.sbuildrc > /dev/null <<EOT
- \$verbose = 1;
+ \$key_id = '${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}';
\$dpkg_buildpackage_user_options = ['--sign-keyid=${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}', '--force-sign'];
EOT
+ - name: Configure sbuild
+ timeout-minutes: 1
+ if: steps.gpg.conclusion != 'skipped'
+ run: |
+ set -xe
+ BUILD_DIR="$GITHUB_WORKSPACE/deb-bin/${{ inputs.dist-version }}/${{ inputs.arch }}"
+ mkdir -p "$BUILD_DIR"
+ tee -a ~/.sbuildrc > /dev/null <<EOT
+ \$verbose = 1;
+ \$build_dir = '$BUILD_DIR';
+ \$distribution = '${{ inputs.dist-version }}';
+ #\$host_arch = '${{ inputs.arch }}';
+ \$chroot_mode = 'unshare';
+ \$unshare_tmpdir_template = '/tmp/tmp.sbuild.XXXXXXXXXX';
+ EOT
+ tee -a ~/.sbuildrc > /dev/null <<EOT
+ push @{\$unshare_mmdebstrap_extra_args}, "*", [
+ '--variant=apt'
+ ];
+ EOT
+ if [ "${{ inputs.dist-name }}" = "debian" ];
+ then
+ tee -a ~/.sbuildrc > /dev/null <<EOT
+ push @{\$unshare_mmdebstrap_extra_args}, "*", [
+ 'http://deb.debian.org/debian',
+ '--keyring=/usr/share/keyrings/debian-keyring.gpg',
+ '--components=main'
+ ];
+ EOT
+ fi
+ if [ "${{ inputs.dist-name }}" = "debian" ] && [ "${{ inputs.dist-version }}" != "unstable" ];
+ then
+ tee -a ~/.sbuildrc > /dev/null <<EOT
+ \$extra_repositories = [
+ 'deb http://deb.debian.org/debian ${{ inputs.dist-version }}-updates main',
+ 'deb http://deb.debian.org/debian ${{ inputs.dist-version }}-backports main'
+ ];
+ EOT
+ fi
+ if [ "${{ inputs.dist-name }}" = "ubuntu" ];
+ then
+ tee -a ~/.sbuildrc > /dev/null <<EOT
+ push @{\$unshare_mmdebstrap_extra_args}, "*", [
+ '--keyring=/usr/share/keyrings/ubuntu-archive-keyring.gpg',
+ '--components=main,universe'
+ ];
+ EOT
+ fi
+ if [ "${{ inputs.dist-name }}" = "ubuntu" ] && [ "${{ inputs.arch }}" = "amd64" ];
+ then
+ tee -a ~/.sbuildrc > /dev/null <<EOT
+ push @{\$unshare_mmdebstrap_extra_args}, "*", [
+ 'http://archive.ubuntu.com/ubuntu',
+ ];
+ \$extra_repositories = [
+ 'deb http://archive.ubuntu.com/ubuntu ${{ inputs.dist-version }}-updates main universe',
+ 'deb http://archive.ubuntu.com/ubuntu ${{ inputs.dist-version }}-security main universe',
+ 'deb http://archive.ubuntu.com/ubuntu ${{ inputs.dist-version }}-backports main universe',
+ ];
+ EOT
+ elif [ "${{ inputs.dist-name }}" = "ubuntu" ] && [ "${{ inputs.arch }}" != "amd64" ];
+ then
+ tee -a ~/.sbuildrc > /dev/null <<EOT
+ push @{\$unshare_mmdebstrap_extra_args}, "*", [
+ 'http://ports.ubuntu.com/ubuntu-ports',
+ ];
+ \$extra_repositories = [
+ 'deb http://ports.ubuntu.com/ubuntu-ports ${{ inputs.dist-version }}-updates main universe',
+ 'deb http://ports.ubuntu.com/ubuntu-ports ${{ inputs.dist-version }}-security main universe',
+ 'deb http://ports.ubuntu.com/ubuntu-ports ${{ inputs.dist-version }}-backports main universe',
+ ];
+ EOT
+ fi
- name: Download source-only .deb to be built
timeout-minutes: 1
uses: actions/download-artifact@v5
@@ -146,37 +202,51 @@ jobs:
set -xe
cd "$GITHUB_WORKSPACE/deb-src"
tar -xf "$GITHUB_WORKSPACE/deb-src/artifact-src.tar"
- - name: Ensure that source package is signed
+ rm "$GITHUB_WORKSPACE/deb-src/artifact-src.tar"
+ - name: Ensure that all incoming source artifacts are signed
timeout-minutes: 1
if: steps.gpg.conclusion != 'skipped'
run: |
set -xe
- dscverify --verbose "$GITHUB_WORKSPACE/deb-src/"*.changes
+ cd "$GITHUB_WORKSPACE/deb-src"
+ find . -type f -not -iname '*.sig' -print0 | xargs --null -I'{}' sh -c "\
+ echo 'Processing {}' \
+ && ( \
+ gpg --verbose --no-default-keyring --keyring ~/.gnupg/trustedkeys.gpg --verify {} \
+ || gpg --verbose --no-default-keyring --keyring ~/.gnupg/trustedkeys.gpg --verify {}.sig \
+ || (echo 'Processing {}: NOT SIGNED!' && exit 1) \
+ ) \
+ && echo 'Processing {}: ok' \
+ "
- name: Build the package
timeout-minutes: 10
run: |
set -xe
- cd "$GITHUB_WORKSPACE/deb-bin/"
- sbuild --verbose "$GITHUB_WORKSPACE/deb-src/"*.dsc
- - name: Sign the .deb's
- timeout-minutes: 1
- if: steps.gpg.conclusion != 'skipped'
- run: |
- set -xe
- find "$GITHUB_WORKSPACE/deb-bin/" -type f -name '*.deb' -exec gpg --verbose --detach-sign {} ';'
- - name: Ensure that binary package is signed
+ cd "$GITHUB_WORKSPACE/deb-bin"
+ if [ "${{ inputs.dist-name }}" = "ubuntu" ];
+ then
+ export DEB_BUILD_OPTIONS=noautodbgsym
+ fi
+ sbuild --verbose --arch-any --arch-all "$GITHUB_WORKSPACE/deb-src/"*.dsc
+ - name: Ensure that all binary artifacts are signed, or sign them
timeout-minutes: 1
if: steps.gpg.conclusion != 'skipped'
run: |
set -xe
- cd "$GITHUB_WORKSPACE/deb-bin/"
- find -name '*.changes' -print0 | xargs -0 dscverify --verbose
+ cd "$GITHUB_WORKSPACE/deb-bin"
+ find . -type f -not -iname '*.sig' -print0 | xargs --null -I'{}' sh -c "\
+ echo 'Processing {}' && ( \
+ gpg --verbose --no-default-keyring --keyring ~/.gnupg/trustedkeys.gpg --verify {} \
+ || gpg --verbose --no-default-keyring --keyring ~/.gnupg/trustedkeys.gpg --verify {}.sig \
+ || gpg --verbose --detach-sign {} \
+ ) \
+ "
- name: Archive build artifacts
timeout-minutes: 1
run: |
set -xe
- cd "$GITHUB_WORKSPACE/deb-bin/"
- tar -cf "$GITHUB_WORKSPACE/deb-bin/artifact-bin-${{ inputs.dist }}-${{ inputs.arch }}.tar" *
+ cd "$GITHUB_WORKSPACE/deb-bin"
+ tar -cf "$GITHUB_WORKSPACE/deb-bin/artifact-bin-${{ inputs.dist-version }}-${{ inputs.arch }}.tar" *
- name: Attest build artifact
timeout-minutes: 1
if: github.event_name != 'pull_request'
@@ -188,8 +258,8 @@ jobs:
id: deb-bin-upload
uses: actions/upload-artifact@v4
with:
- name: artifact-bin-${{ inputs.dist }}-${{ inputs.arch }}.tar
- path: '${{ github.workspace }}/deb-bin/artifact-bin-${{ inputs.dist }}-${{ inputs.arch }}.tar'
+ name: artifact-bin-${{ inputs.dist-version }}-${{ inputs.arch }}.tar
+ path: '${{ github.workspace }}/deb-bin/artifact-bin-${{ inputs.dist-version }}-${{ inputs.arch }}.tar'
if-no-files-found: error
compression-level: 0
- name: Attest uploaded build artifact
@@ -197,5 +267,5 @@ jobs:
if: github.event_name != 'pull_request'
uses: actions/attest-build-provenance@v3
with:
- subject-name: artifact-bin-${{ inputs.dist }}-${{ inputs.arch }}.tar.zip
+ subject-name: artifact-bin-${{ inputs.dist-version }}-${{ inputs.arch }}.tar.zip
subject-digest: sha256:${{ steps.deb-bin-upload.outputs.artifact-digest }}
diff --git a/.github/workflows/deb-orchestrator.yml b/.github/workflows/deb-orchestrator.yml
index e43b5b46..9db55061 100644
--- a/.github/workflows/deb-orchestrator.yml
+++ b/.github/workflows/deb-orchestrator.yml
@@ -21,6 +21,10 @@ jobs:
contents: read
attestations: write
uses: ./.github/workflows/deb-src.yml
+ with:
+ runs-on: ubuntu-latest
+ dist-name: debian
+ dist-version: unstable
secrets:
GPG_SECRET_SUBKEYS: ${{ secrets.GPG_SECRET_SUBKEYS }}
GPG_SIGNING_SUBKEY_FINGERPRINT: ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
@@ -36,19 +40,24 @@ jobs:
stack:
- { runs-on: "ubuntu-latest", arch: "amd64" }
- { runs-on: "ubuntu-24.04-arm", arch: "arm64" }
- dist: [ unstable, forky, trixie ]
+ dist:
+ - { name: debian, version: unstable }
+ - { name: debian, version: forky } # Debian 14
+ - { name: debian, version: trixie } # Debian 13
+ - { name: ubuntu, version: questing } # Ubuntu 25.10 (Questing Quokka) Beta
+ - { name: ubuntu, version: plucky } # Ubuntu 25.04 (Plucky Puffin)
uses: ./.github/workflows/deb-buildd.yml
with:
deb-src-artifact-id: ${{ needs.source-only.outputs.deb-src-artifact-id }}
runs-on: ${{ matrix.stack.runs-on }}
arch: ${{ matrix.stack.arch }}
- dist: ${{ matrix.dist }}
+ dist-name: ${{ matrix.dist.name }}
+ dist-version: ${{ matrix.dist.version }}
secrets:
GPG_SECRET_SUBKEYS: ${{ secrets.GPG_SECRET_SUBKEYS }}
GPG_SIGNING_SUBKEY_FINGERPRINT: ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
publish:
needs: [ source-only, buildd ]
- if: github.event_name != 'pull_request' && (github.ref_type == 'tag' || (github.ref_type == 'branch' && github.ref_name == 'master'))
uses: ./.github/workflows/deb-publish.yml
secrets:
GPG_SECRET_SUBKEYS: ${{ secrets.GPG_SECRET_SUBKEYS }}
diff --git a/.github/workflows/deb-publish.yml b/.github/workflows/deb-publish.yml
index 5a341d8d..4aff5a52 100644
--- a/.github/workflows/deb-publish.yml
+++ b/.github/workflows/deb-publish.yml
@@ -67,7 +67,7 @@ jobs:
xz-utils \
zip
apt clean
- - name: Configure gpg-agent / ssh
+ - name: Pre-Configure gpg-agent / ssh
timeout-minutes: 1
run: |
set -xe
@@ -82,23 +82,31 @@ jobs:
echo "${{ secrets.SSH_SERVER_KEYS }}" >> /etc/ssh/ssh_known_hosts
- name: Import GPG key
timeout-minutes: 1
+ id: gpg
if: github.event_name != 'pull_request'
uses: crazy-max/ghaction-import-gpg@e89d40939c28e39f97cf32126055eeae86ba74ec # v6.3.0
with:
gpg_private_key: ${{ secrets.GPG_SECRET_SUBKEYS }}
fingerprint: ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
trust_level: 5
- - name: Configure GPG
+ - name: Finish configuring gpg-agent / ssh
timeout-minutes: 1
+ if: steps.gpg.conclusion != 'skipped'
run: |
set -xe
gpg-connect-agent 'keyattr ${{ secrets.GPG_AUTH_SUBKEY_KEYGRIP }} Use-for-ssh: true' /bye
+ - name: Configure GPG
+ timeout-minutes: 1
+ if: steps.gpg.conclusion != 'skipped'
+ run: |
+ set -xe
gpg --output /etc/apt/trusted.gpg.d/apt.bcachefs.org.asc --armor --export ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
+ rm -f ~/.gnupg/trustedkeys.gpg
gpg --no-default-keyring --keyring ~/.gnupg/trustedkeys.gpg --import /etc/apt/trusted.gpg.d/apt.bcachefs.org.asc
tee -a ~/.gnupg/gpg.conf > /dev/null <<EOT
default-key ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
EOT
- tee -a ~/.gbp.conf > /dev/null <<EOT
+ tee -a ~/.gbp.conf > /dev/null <<EOT
[buildpackage]
sign-tags = True
keyid = ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
@@ -106,10 +114,6 @@ jobs:
tee -a ~/.devscripts > /dev/null <<EOT
DEBSIGN_KEYID=${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
EOT
- tee -a ~/.sbuildrc > /dev/null <<EOT
- \$verbose = 1;
- \$dpkg_buildpackage_user_options = ['--sign-keyid=${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}', '--force-sign'];
- EOT
- name: Download all artifacts
timeout-minutes: 1
uses: actions/download-artifact@v5
@@ -119,13 +123,28 @@ jobs:
timeout-minutes: 1
run: |
set -xe
- SRC_DIR="$GITHUB_WORKSPACE/src-artifacts"
+ SRC_DIR="$GITHUB_WORKSPACE/incoming/src-artifacts"
mkdir -p "$SRC_DIR"
find "$GITHUB_WORKSPACE/packed-artifacts" -type f -name artifact-src.tar -exec tar -xf {} -C "$SRC_DIR" ';' -delete
- BIN_DIR="$GITHUB_WORKSPACE/bin-artifacts"
+ BIN_DIR="$GITHUB_WORKSPACE/incoming/bin-artifacts"
mkdir -p "$BIN_DIR"
find "$GITHUB_WORKSPACE/packed-artifacts" -type f -name '*.tar' -exec tar -xf {} -C "$BIN_DIR" ';' -delete
rm -rf "$GITHUB_WORKSPACE/packed-artifacts"
+ - name: Ensure that all incoming artifacts are signed
+ timeout-minutes: 1
+ if: steps.gpg.conclusion != 'skipped'
+ run: |
+ set -xe
+ cd "$GITHUB_WORKSPACE/incoming"
+ find . -type f -not -iname '*.sig' -print0 | xargs --null -I'{}' sh -c "\
+ echo 'Processing {}' \
+ && ( \
+ gpg --verbose --no-default-keyring --keyring ~/.gnupg/trustedkeys.gpg --verify {} \
+ || gpg --verbose --no-default-keyring --keyring ~/.gnupg/trustedkeys.gpg --verify {}.sig \
+ || (echo 'Processing {}: NOT SIGNED!' && exit 1) \
+ ) \
+ && echo 'Processing {}: ok' \
+ "
- name: Create and populate repos
timeout-minutes: 60
run: |
@@ -133,7 +152,9 @@ jobs:
ls -lahR
MOUNTPOINT="$GITHUB_WORKSPACE/remotefs"
mkdir -p "$MOUNTPOINT"
+ if [ -n "${{ secrets.SSH_HOST }}" ]; then
sshfs ${{ secrets.SSH_HOST }}/uploads "$MOUNTPOINT"
+ fi
REPO_ROOT="$MOUNTPOINT/public_html"
mkdir -p "$REPO_ROOT"
cp -f /etc/apt/trusted.gpg.d/apt.bcachefs.org.asc "$REPO_ROOT"
@@ -169,10 +190,10 @@ jobs:
Source, Debian tarballs and dsc files can be verified using https://github.com/sigstore/rekor.
EOR
- cd "$GITHUB_WORKSPACE/bin-artifacts"
+ cd "$GITHUB_WORKSPACE/incoming/bin-artifacts"
for DIST in *
do
- SRCDIR="$GITHUB_WORKSPACE/bin-artifacts/$DIST"
+ SRCDIR="$GITHUB_WORKSPACE/incoming/bin-artifacts/$DIST"
cd "$SRCDIR"
REPO="$REPO_ROOT/$DIST"
mkdir -p "$REPO/conf/distributions"
@@ -195,7 +216,7 @@ jobs:
verbose
ignore longkeyid
EOT
- reprepro --basedir "$REPO" --ignore=wrongdistribution include bcachefs-tools-$SUITE "$GITHUB_WORKSPACE/src-artifacts/"*.changes
+ reprepro --basedir "$REPO" --ignore=wrongdistribution include bcachefs-tools-$SUITE "$GITHUB_WORKSPACE/incoming/src-artifacts/"*.changes
for f in "$SRCDIR"/*/*.changes
do
reprepro --basedir "$REPO" --ignore=wrongdistribution include bcachefs-tools-$SUITE $f
@@ -203,4 +224,4 @@ jobs:
reprepro --basedir "$REPO" createsymlinks
reprepro --basedir "$REPO" export
done
- umount "$MOUNTPOINT"
+ umount "$MOUNTPOINT" || /bin/true
diff --git a/.github/workflows/deb-src.yml b/.github/workflows/deb-src.yml
index e33c47c3..917d907d 100644
--- a/.github/workflows/deb-src.yml
+++ b/.github/workflows/deb-src.yml
@@ -1,5 +1,15 @@
on:
workflow_call:
+ inputs:
+ runs-on:
+ required: true
+ type: string
+ dist-name:
+ required: true
+ type: string
+ dist-version:
+ required: true
+ type: string
outputs:
deb-src-artifact-id:
value: ${{ jobs.linux.outputs.deb-src-artifact-id }}
@@ -9,13 +19,11 @@ on:
jobs:
linux:
- runs-on: ubuntu-latest
+ runs-on: ${{ inputs.runs-on }}
container:
image: debian:unstable-slim
options: --cap-add=SYS_ADMIN --security-opt=apparmor:unconfined --tmpfs /tmp:exec --tmpfs /__w/${{ github.event.repository.name }}/${{ github.event.repository.name }}:exec
env:
- DIST: unstable
- ARCH: x86_64
RUST_VERSION: 1.89.0
DEBFULLNAME: apt.bcachefs.org CI bot
DEBEMAIL: linux-bcachefs@vger.kernel.org
@@ -57,6 +65,7 @@ jobs:
apt update
apt full-upgrade
apt install \
+ ${{ inputs.dist-name }}-keyring \
curl \
devscripts \
git \
@@ -73,12 +82,6 @@ jobs:
apt clean
USER=`whoami`
sudo usermod --add-subuids 100000-165535 --add-subgids 100000-165535 $USER
- tee ~/.sbuildrc > /dev/null <<EOT
- \$build_dir = '$GITHUB_WORKSPACE/deb-src';
- \$chroot_mode = 'unshare';
- \$unshare_tmpdir_template = '/tmp/tmp.sbuild.XXXXXXXXXX';
- \$key_id = '${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}';
- EOT
- name: Import GPG key
timeout-minutes: 1
id: gpg
@@ -94,11 +97,12 @@ jobs:
run: |
set -xe
gpg --output /etc/apt/trusted.gpg.d/apt.bcachefs.org.asc --armor --export ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
+ rm -f ~/.gnupg/trustedkeys.gpg
gpg --no-default-keyring --keyring ~/.gnupg/trustedkeys.gpg --import /etc/apt/trusted.gpg.d/apt.bcachefs.org.asc
tee -a ~/.gnupg/gpg.conf > /dev/null <<EOT
default-key ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
EOT
- tee -a ~/.gbp.conf > /dev/null <<EOT
+ tee -a ~/.gbp.conf > /dev/null <<EOT
[buildpackage]
sign-tags = True
keyid = ${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
@@ -107,9 +111,32 @@ jobs:
DEBSIGN_KEYID=${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}
EOT
tee -a ~/.sbuildrc > /dev/null <<EOT
- \$verbose = 1;
+ \$key_id = '${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}';
\$dpkg_buildpackage_user_options = ['--sign-keyid=${{ secrets.GPG_SIGNING_SUBKEY_FINGERPRINT }}', '--force-sign'];
EOT
+ - name: Configure sbuild
+ timeout-minutes: 1
+ if: steps.gpg.conclusion != 'skipped'
+ run: |
+ set -xe
+ BUILD_DIR="$GITHUB_WORKSPACE/deb-src"
+ mkdir -p "$BUILD_DIR"
+ tee -a ~/.sbuildrc > /dev/null <<EOT
+ \$verbose = 1;
+ \$build_dir = '$BUILD_DIR';
+ \$distribution = '${{ inputs.dist-version }}';
+ #\$host_arch = '${{ inputs.arch }}';
+ \$chroot_mode = 'unshare';
+ \$unshare_tmpdir_template = '/tmp/tmp.sbuild.XXXXXXXXXX';
+ EOT
+ tee -a ~/.sbuildrc > /dev/null <<EOT
+ push @{\$unshare_mmdebstrap_extra_args}, "*", [
+ 'http://deb.debian.org/debian',
+ '--variant=apt',
+ '--keyring=/usr/share/keyrings/debian-keyring.gpg',
+ '--components=main'
+ ];
+ EOT
- name: Install Rust / cargo
timeout-minutes: 1
run: |
@@ -129,7 +156,7 @@ jobs:
set -xe
git config --global user.email "${{ env.DEBFULLNAME }}"
git config --global user.name "${{ env.DEBEMAIL }}"
- cd "$GITHUB_WORKSPACE/bcachefs-tools/"
+ cd "$GITHUB_WORKSPACE/bcachefs-tools"
CURR_TAG="$(git describe --abbrev=0 --tags $(git rev-list HEAD --tags --skip=0 --max-count=1))"
PREV_TAG="$(git describe --abbrev=0 --tags $(git rev-list HEAD --tags --skip=1 --max-count=1))"
NEW_VERSION="${{ env.DEBPKG_EPOCH }}:$(echo $CURR_TAG | sed 's/^v//')"
@@ -145,27 +172,28 @@ jobs:
run: |
set -xe
. "$HOME/.cargo/env"
- cd "$GITHUB_WORKSPACE/bcachefs-tools/"
+ cd "$GITHUB_WORKSPACE/bcachefs-tools"
mkdir -p "$GITHUB_WORKSPACE/deb-src"
# FIXME: pubkey is not avaliable in chroot, .dsc signature verification fails
- gbp buildpackage --git-verbose --git-ignore-branch --no-clean --git-dist=${{ env.DIST }} --git-builder=sbuild --source --source-only-changes --no-arch-all --no-arch-any
- - name: Sign the source tarball
- timeout-minutes: 1
- if: steps.gpg.conclusion != 'skipped'
- run: |
- set -xe
- find "$GITHUB_WORKSPACE/deb-src/" -type f -name '*.tar.*' -exec gpg --verbose --detach-sign {} ';'
- - name: Ensure that source package is signed
+ gbp buildpackage --git-verbose --git-ignore-branch --no-clean --git-dist=${{ inputs.dist-version }} --git-builder=sbuild --source --source-only-changes --no-arch-all --no-arch-any
+ - name: Ensure that all source artifacts are signed, or sign them
timeout-minutes: 1
if: steps.gpg.conclusion != 'skipped'
run: |
set -xe
- dscverify --verbose "$GITHUB_WORKSPACE/deb-src/"*.changes
+ cd "$GITHUB_WORKSPACE/deb-src"
+ find . -type f -not -iname '*.sig' -print0 | xargs --null -I'{}' sh -c "\
+ echo 'Processing {}' && ( \
+ gpg --verbose --no-default-keyring --keyring ~/.gnupg/trustedkeys.gpg --verify {} \
+ || gpg --verbose --no-default-keyring --keyring ~/.gnupg/trustedkeys.gpg --verify {}.sig \
+ || gpg --verbose --detach-sign {} \
+ ) \
+ "
- name: Archive source build artifacts
timeout-minutes: 1
run: |
set -xe
- cd "$GITHUB_WORKSPACE/deb-src/"
+ cd "$GITHUB_WORKSPACE/deb-src"
tar -cf "$GITHUB_WORKSPACE/deb-src/artifact-src.tar" *
- name: Attest the source-only .deb package artifact
timeout-minutes: 1
diff --git a/.gitignore b/.gitignore
index fda984bc..97693a32 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,9 +24,9 @@ bcachefs-principles-of-operation.*
bch_bindgen/Cargo.lock
# will have compiled files and executables
-debug/
-target/
-vendor/
+/debug/
+/target/
+/vendor/
# These are backup files generated by rustfmt
**/*.rs.bk
diff --git a/Cargo.lock b/Cargo.lock
index 78dc598c..f771baa6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -68,7 +68,7 @@ checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
[[package]]
name = "bcachefs-tools"
-version = "1.31.1"
+version = "1.31.2"
dependencies = [
"anyhow",
"bch_bindgen",
diff --git a/Cargo.toml b/Cargo.toml
index a7011aa8..6c9af387 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -4,7 +4,7 @@ default-members = [".", "bch_bindgen"]
[package]
name = "bcachefs-tools"
-version = "1.31.1"
+version = "1.31.2"
authors = ["Yuxuan Shui <yshuiv7@gmail.com>", "Kayla Firestack <dev@kaylafire.me>", "Kent Overstreet <kent.overstreet@linux.dev>" ]
edition = "2021"
rust-version = "1.77.0"
diff --git a/Makefile b/Makefile
index f230598b..fd553832 100644
--- a/Makefile
+++ b/Makefile
@@ -189,10 +189,11 @@ install: all install_dkms
.PHONY: install_dkms
install_dkms: dkms/dkms.conf
- $(INSTALL) -m0644 -D dkms/Makefile -t $(DESTDIR)$(DKMSDIR)
- $(INSTALL) -m0644 -D dkms/dkms.conf -t $(DESTDIR)$(DKMSDIR)
- $(INSTALL) -m0644 -D libbcachefs/Makefile -t $(DESTDIR)$(DKMSDIR)/src/fs/bcachefs
- $(INSTALL) -m0644 -D libbcachefs/*.[ch] -t $(DESTDIR)$(DKMSDIR)/src/fs/bcachefs
+ $(INSTALL) -m0644 -D dkms/Makefile -t $(DESTDIR)$(DKMSDIR)
+ $(INSTALL) -m0644 -D dkms/dkms.conf -t $(DESTDIR)$(DKMSDIR)
+ $(INSTALL) -m0644 -D libbcachefs/Makefile -t $(DESTDIR)$(DKMSDIR)/src/fs/bcachefs
+ $(INSTALL) -m0644 -D libbcachefs/*.[ch] -t $(DESTDIR)$(DKMSDIR)/src/fs/bcachefs
+ $(INSTALL) -m0644 -D libbcachefs/vendor/*.[ch] -t $(DESTDIR)$(DKMSDIR)/src/fs/bcachefs/vendor
sed -i "s|^#define TRACE_INCLUDE_PATH \\.\\./\\.\\./fs/bcachefs$$|#define TRACE_INCLUDE_PATH $(DKMSDIR)/src/fs/bcachefs|" \
$(DESTDIR)$(DKMSDIR)/src/fs/bcachefs/trace.h
@@ -225,16 +226,14 @@ cargo-update-msrv:
.PHONY: update-bcachefs-sources
update-bcachefs-sources:
git rm -rf --ignore-unmatch libbcachefs
- test -d libbcachefs || mkdir libbcachefs
+ mkdir -p libbcachefs/vendor
cp $(LINUX_DIR)/fs/bcachefs/*.[ch] libbcachefs/
+ cp $(LINUX_DIR)/fs/bcachefs/vendor/*.[ch] libbcachefs/vendor/
cp $(LINUX_DIR)/fs/bcachefs/Makefile libbcachefs/
git add libbcachefs/*.[ch]
+ git add libbcachefs/vendor/*.[ch]
git add libbcachefs/Makefile
git rm -f libbcachefs/mean_and_variance_test.c
- cp $(LINUX_DIR)/include/linux/closure.h include/linux/
- git add include/linux/closure.h
- cp $(LINUX_DIR)/lib/closure.c linux/
- git add linux/closure.c
cp $(LINUX_DIR)/include/linux/xxhash.h include/linux/
git add include/linux/xxhash.h
cp $(LINUX_DIR)/lib/xxhash.c linux/
diff --git a/debian/changelog b/debian/changelog
index 01295893..694c7f70 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+bcachefs-tools (1:1.31.2) UNRELEASED; urgency=medium
+
+ * DKMS now works on 6.16
+
+ -- Kent Overstreet <kent.overstreet@linux.dev> Fri, 19 Sep 2025 02:30:19 -0400
+
bcachefs-tools (1:1.31.1~1) unstable; urgency=medium
* Split DKMS into separate binary package
diff --git a/debian/control b/debian/control
index 4afd99f0..917b87f2 100644
--- a/debian/control
+++ b/debian/control
@@ -5,8 +5,8 @@ Priority: optional
Standards-Version: 4.7.0
Rules-Requires-Root: no
Build-Depends: debhelper-compat (= 13),
- dh-dkms,
cargo,
+ dh-dkms,
jq,
libaio-dev,
libblkid-dev,
@@ -17,11 +17,12 @@ Build-Depends: debhelper-compat (= 13),
libscrypt-dev,
libsodium-dev,
libudev-dev,
- liburcu-dev,
+ liburcu-dev (>= 0.15),
libzstd-dev,
pkgconf,
python3-docutils,
python3:native,
+ rustc (>= 1.77),
systemd-dev,
uuid-dev,
zlib1g-dev,
@@ -46,7 +47,8 @@ Description: bcachefs userspace tools
Package: bcachefs-kernel-dkms
Architecture: linux-any
-Depends: ${shlibs:Depends}, ${misc:Depends},
+Section: kernel
+Depends: ${misc:Depends},
initramfs-tools | linux-initramfs-tool,
linux-headers-amd64 [amd64],
linux-headers-arm64 [arm64],
diff --git a/libbcachefs/Makefile b/libbcachefs/Makefile
index a4258615..1e87eee9 100644
--- a/libbcachefs/Makefile
+++ b/libbcachefs/Makefile
@@ -98,7 +98,8 @@ bcachefs-y := \
two_state_shared_lock.o \
util.o \
varint.o \
- xattr.o
+ xattr.o \
+ vendor/closure.o
obj-$(CONFIG_MEAN_AND_VARIANCE_UNIT_TEST) += mean_and_variance_test.o
diff --git a/libbcachefs/bcachefs.h b/libbcachefs/bcachefs.h
index 83d6ab9c..3ccca855 100644
--- a/libbcachefs/bcachefs.h
+++ b/libbcachefs/bcachefs.h
@@ -196,7 +196,6 @@
#include <linux/backing-dev-defs.h>
#include <linux/bug.h>
#include <linux/bio.h>
-#include <linux/closure.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/math64.h>
@@ -217,6 +216,7 @@
#include "bcachefs_format.h"
#include "btree_journal_iter_types.h"
+#include "closure.h"
#include "disk_accounting_types.h"
#include "errcode.h"
#include "fast_list.h"
diff --git a/libbcachefs/closure.h b/libbcachefs/closure.h
new file mode 100644
index 00000000..d8d4c709
--- /dev/null
+++ b/libbcachefs/closure.h
@@ -0,0 +1,5 @@
+#include "vendor/closure.h"
+
+#define closure_wait bch2_closure_wait
+#define closure_return_sync bch2_closure_return_sync
+#define __closure_wake_up __bch2_closure_wake_up
diff --git a/libbcachefs/fs.c b/libbcachefs/fs.c
index 958849c3..f1849eb8 100644
--- a/libbcachefs/fs.c
+++ b/libbcachefs/fs.c
@@ -1521,6 +1521,7 @@ static const struct vm_operations_struct bch_vm_ops = {
.page_mkwrite = bch2_page_mkwrite,
};
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6,17,0)
static int bch2_mmap_prepare(struct vm_area_desc *desc)
{
file_accessed(desc->file);
@@ -1528,6 +1529,15 @@ static int bch2_mmap_prepare(struct vm_area_desc *desc)
desc->vm_ops = &bch_vm_ops;
return 0;
}
+#else
+static int bch2_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ file_accessed(file);
+
+ vma->vm_ops = &bch_vm_ops;
+ return 0;
+}
+#endif
/* Directories: */
@@ -1719,7 +1729,11 @@ static const struct file_operations bch_file_operations = {
.llseek = bch2_llseek,
.read_iter = bch2_read_iter,
.write_iter = bch2_write_iter,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6,17,0)
.mmap_prepare = bch2_mmap_prepare,
+#else
+ .mmap = bch2_mmap,
+#endif
.get_unmapped_area = thp_get_unmapped_area,
.fsync = bch2_fsync,
.splice_read = filemap_splice_read,
diff --git a/libbcachefs/nocow_locking.c b/libbcachefs/nocow_locking.c
index 58cfd540..71b17f18 100644
--- a/libbcachefs/nocow_locking.c
+++ b/libbcachefs/nocow_locking.c
@@ -2,11 +2,10 @@
#include "bcachefs.h"
#include "bkey_methods.h"
+#include "closure.h"
#include "nocow_locking.h"
#include "util.h"
-#include <linux/closure.h>
-
bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *t, struct bpos bucket)
{
u64 dev_bucket = bucket_to_u64(bucket);
diff --git a/libbcachefs/util.h b/libbcachefs/util.h
index 52ac8230..555e0d8f 100644
--- a/libbcachefs/util.h
+++ b/libbcachefs/util.h
@@ -4,7 +4,6 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
-#include <linux/closure.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
@@ -21,6 +20,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
+#include "closure.h"
#include "mean_and_variance.h"
#include "darray.h"
diff --git a/libbcachefs/vendor/closure.c b/libbcachefs/vendor/closure.c
new file mode 100644
index 00000000..bdafd3a5
--- /dev/null
+++ b/libbcachefs/vendor/closure.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Asynchronous refcounty things
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "closure.h"
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/sched/debug.h>
+
+static void closure_val_checks(struct closure *cl, unsigned new, int d)
+{
+ unsigned count = new & CLOSURE_REMAINING_MASK;
+
+ if (WARN(new & CLOSURE_GUARD_MASK,
+ "closure %ps has guard bits set: %x (%u), delta %i",
+ cl->fn,
+ new, (unsigned) __fls(new & CLOSURE_GUARD_MASK), d))
+ new &= ~CLOSURE_GUARD_MASK;
+
+ WARN(!count && (new & ~(CLOSURE_DESTRUCTOR|CLOSURE_SLEEPING)),
+ "closure %ps ref hit 0 with incorrect flags set: %x (%u)",
+ cl->fn,
+ new, (unsigned) __fls(new));
+}
+
+enum new_closure_state {
+ CLOSURE_normal_put,
+ CLOSURE_requeue,
+ CLOSURE_done,
+};
+
+/* For clearing flags with the same atomic op as a put */
+void bch2_closure_sub(struct closure *cl, int v)
+{
+ enum new_closure_state s;
+ struct task_struct *sleeper;
+
+ /* rcu_read_lock, atomic_read_acquire() are both for cl->sleeper: */
+ guard(rcu)();
+
+ int old = atomic_read_acquire(&cl->remaining), new;
+ do {
+ new = old - v;
+
+ if (new & CLOSURE_REMAINING_MASK) {
+ s = CLOSURE_normal_put;
+ } else {
+ if ((cl->fn || (new & CLOSURE_SLEEPING)) &&
+ !(new & CLOSURE_DESTRUCTOR)) {
+ s = CLOSURE_requeue;
+ new += CLOSURE_REMAINING_INITIALIZER;
+ } else
+ s = CLOSURE_done;
+
+ sleeper = new & CLOSURE_SLEEPING ? cl->sleeper : NULL;
+ new &= ~CLOSURE_SLEEPING;
+ }
+
+ closure_val_checks(cl, new, -v);
+ } while (!atomic_try_cmpxchg_release(&cl->remaining, &old, new));
+
+ if (s == CLOSURE_normal_put)
+ return;
+
+ if (sleeper) {
+ smp_mb();
+ wake_up_process(sleeper);
+ return;
+ }
+
+ if (s == CLOSURE_requeue) {
+ closure_queue(cl);
+ } else {
+ struct closure *parent = cl->parent;
+ closure_fn *destructor = cl->fn;
+
+ closure_debug_destroy(cl);
+
+ if (destructor)
+ destructor(&cl->work);
+
+ if (parent)
+ closure_put(parent);
+ }
+}
+
+/*
+ * closure_wake_up - wake up all closures on a wait list, without memory barrier
+ */
+void __bch2_closure_wake_up(struct closure_waitlist *wait_list)
+{
+ struct llist_node *list;
+ struct closure *cl, *t;
+ struct llist_node *reverse = NULL;
+
+ list = llist_del_all(&wait_list->list);
+
+ /* We first reverse the list to preserve FIFO ordering and fairness */
+ reverse = llist_reverse_order(list);
+
+ /* Then do the wakeups */
+ llist_for_each_entry_safe(cl, t, reverse, list) {
+ closure_set_waiting(cl, 0);
+ bch2_closure_sub(cl, CLOSURE_WAITING + 1);
+ }
+}
+
+/**
+ * closure_wait - add a closure to a waitlist
+ * @waitlist: will own a ref on @cl, which will be released when
+ * closure_wake_up() is called on @waitlist.
+ * @cl: closure pointer.
+ *
+ */
+bool bch2_closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
+{
+ if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
+ return false;
+
+ closure_set_waiting(cl, _RET_IP_);
+ unsigned r = atomic_add_return(CLOSURE_WAITING + 1, &cl->remaining);
+ closure_val_checks(cl, r, CLOSURE_WAITING + 1);
+
+ llist_add(&cl->list, &waitlist->list);
+
+ return true;
+}
+
+void __sched __bch2_closure_sync(struct closure *cl)
+{
+ cl->sleeper = current;
+ bch2_closure_sub(cl,
+ CLOSURE_REMAINING_INITIALIZER -
+ CLOSURE_SLEEPING);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
+ break;
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+}
+
+/*
+ * closure_return_sync - finish running a closure, synchronously (i.e. waiting
+ * for outstanding get()s to finish) and returning once closure refcount is 0.
+ *
+ * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
+ * closure_get_not_zero() calls will fail.
+ */
+void __sched bch2_closure_return_sync(struct closure *cl)
+{
+ cl->sleeper = current;
+ bch2_closure_sub(cl,
+ CLOSURE_REMAINING_INITIALIZER -
+ CLOSURE_DESTRUCTOR -
+ CLOSURE_SLEEPING);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
+ break;
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+
+ if (cl->parent)
+ closure_put(cl->parent);
+}
+
+int __sched __bch2_closure_sync_timeout(struct closure *cl, unsigned long timeout)
+{
+ int ret = 0;
+
+ cl->sleeper = current;
+ bch2_closure_sub(cl,
+ CLOSURE_REMAINING_INITIALIZER -
+ CLOSURE_SLEEPING);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ /*
+ * Carefully undo the continue_at() - but only if it
+ * hasn't completed, i.e. the final closure_put() hasn't
+ * happened yet:
+ */
+ unsigned old = atomic_read(&cl->remaining), new;
+ if (!(old & CLOSURE_SLEEPING))
+ goto success;
+
+ if (!timeout) {
+ do {
+ if (!(old & CLOSURE_SLEEPING))
+ goto success;
+
+ new = old + CLOSURE_REMAINING_INITIALIZER - CLOSURE_SLEEPING;
+ closure_val_checks(cl, new, CLOSURE_REMAINING_INITIALIZER - CLOSURE_SLEEPING);
+ } while (!atomic_try_cmpxchg(&cl->remaining, &old, new));
+
+ ret = -ETIME;
+ break;
+ }
+
+ timeout = schedule_timeout(timeout);
+ }
+success:
+ __set_current_state(TASK_RUNNING);
+ return ret;
+}
diff --git a/libbcachefs/vendor/closure.h b/libbcachefs/vendor/closure.h
new file mode 100644
index 00000000..79112efe
--- /dev/null
+++ b/libbcachefs/vendor/closure.h
@@ -0,0 +1,490 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLOSURE_H
+#define _LINUX_CLOSURE_H
+
+#include <linux/llist.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/workqueue.h>
+
+/*
+ * Closure is perhaps the most overused and abused term in computer science, but
+ * since I've been unable to come up with anything better you're stuck with it
+ * again.
+ *
+ * What are closures?
+ *
+ * They embed a refcount. The basic idea is they count "things that are in
+ * progress" - in flight bios, some other thread that's doing something else -
+ * anything you might want to wait on.
+ *
+ * The refcount may be manipulated with closure_get() and closure_put().
+ * closure_put() is where many of the interesting things happen, when it causes
+ * the refcount to go to 0.
+ *
+ * Closures can be used to wait on things both synchronously and asynchronously,
+ * and synchronous and asynchronous use can be mixed without restriction. To
+ * wait synchronously, use closure_sync() - you will sleep until your closure's
+ * refcount hits 1.
+ *
+ * To wait asynchronously, use
+ * continue_at(cl, next_function, workqueue);
+ *
+ * passing it, as you might expect, the function to run when nothing is pending
+ * and the workqueue to run that function out of.
+ *
+ * continue_at() also, critically, requires a 'return' immediately following the
+ * location where this macro is referenced, to return to the calling function.
+ * There's good reason for this.
+ *
+ * To use closures safely asynchronously, they must always have a refcount while
+ * they are running owned by the thread that is running them. Otherwise, suppose
+ * you submit some bios and wish to have a function run when they all complete:
+ *
+ * foo_endio(struct bio *bio)
+ * {
+ * closure_put(cl);
+ * }
+ *
+ * closure_init(cl);
+ *
+ * do_stuff();
+ * closure_get(cl);
+ * bio1->bi_endio = foo_endio;
+ * bio_submit(bio1);
+ *
+ * do_more_stuff();
+ * closure_get(cl);
+ * bio2->bi_endio = foo_endio;
+ * bio_submit(bio2);
+ *
+ * continue_at(cl, complete_some_read, system_wq);
+ *
+ * If closure's refcount started at 0, complete_some_read() could run before the
+ * second bio was submitted - which is almost always not what you want! More
+ * importantly, it wouldn't be possible to say whether the original thread or
+ * complete_some_read()'s thread owned the closure - and whatever state it was
+ * associated with!
+ *
+ * So, closure_init() initializes a closure's refcount to 1 - and when a
+ * closure_fn is run, the refcount will be reset to 1 first.
+ *
+ * Then, the rule is - if you got the refcount with closure_get(), release it
+ * with closure_put() (i.e, in a bio->bi_endio function). If you have a refcount
+ * on a closure because you called closure_init() or you were run out of a
+ * closure - _always_ use continue_at(). Doing so consistently will help
+ * eliminate an entire class of particularly pernicious races.
+ *
+ * Lastly, you might have a wait list dedicated to a specific event, and have no
+ * need for specifying the condition - you just want to wait until someone runs
+ * closure_wake_up() on the appropriate wait list. In that case, just use
+ * closure_wait(). It will return either true or false, depending on whether the
+ * closure was already on a wait list or not - a closure can only be on one wait
+ * list at a time.
+ *
+ * Parents:
+ *
+ * closure_init() takes two arguments - it takes the closure to initialize, and
+ * a (possibly null) parent.
+ *
+ * If parent is non null, the new closure will have a refcount for its lifetime;
+ * a closure is considered to be "finished" when its refcount hits 0 and the
+ * function to run is null. Hence
+ *
+ * continue_at(cl, NULL, NULL);
+ *
+ * returns up the (spaghetti) stack of closures, precisely like normal return
+ * returns up the C stack. continue_at() with non null fn is better thought of
+ * as doing a tail call.
+ *
+ * All this implies that a closure should typically be embedded in a particular
+ * struct (which its refcount will normally control the lifetime of), and that
+ * struct can very much be thought of as a stack frame.
+ */
+
+struct closure;
+struct closure_syncer;
+/* Closure functions have the signature of workqueue items; see closure_queue() */
+typedef void (closure_fn) (struct work_struct *);
+extern struct dentry *bcache_debug;
+
+/* A list of closures waiting on an event; see bch2_closure_wait() */
+struct closure_waitlist {
+	struct llist_head	list;
+};
+
+enum closure_state {
+	/*
+	 * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
+	 * the thread that owns the closure, and cleared by the thread that's
+	 * waking up the closure.
+	 *
+	 * The rest are for debugging and don't affect behaviour:
+	 *
+	 * CLOSURE_RUNNING: Set when a closure is running (i.e. by
+	 * closure_init() and when closure_put() runs the next function), and
+	 * must be cleared before remaining hits 0. Primarily to help guard
+	 * against incorrect usage and accidentally transferring references.
+	 * continue_at() and closure_return() clear it for you, if you're doing
+	 * something unusual you can use closure_set_dead() which also helps
+	 * annotate where references are being transferred.
+	 */
+
+	CLOSURE_BITS_START	= (1U << 24),
+	CLOSURE_DESTRUCTOR	= (1U << 24),
+	CLOSURE_SLEEPING	= (1U << 26),
+	CLOSURE_WAITING		= (1U << 28),
+	CLOSURE_RUNNING		= (1U << 30),
+};
+
+/*
+ * The unused bit above each flag, plus the bit just below CLOSURE_BITS_START:
+ * if refcount arithmetic ever carries into one of these, a flag was over- or
+ * underflowed - presumably checked by closure_val_checks() (see closure.c).
+ */
+#define CLOSURE_GUARD_MASK					\
+	(((CLOSURE_DESTRUCTOR|CLOSURE_SLEEPING|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)|(CLOSURE_BITS_START >> 1))
+
+/* The low 24 bits of ->remaining hold the refcount proper */
+#define CLOSURE_REMAINING_MASK		(CLOSURE_BITS_START - 1)
+/* Initial state: refcount of 1, owned by the initializing thread */
+#define CLOSURE_REMAINING_INITIALIZER	(1|CLOSURE_RUNNING)
+
+struct closure {
+	/*
+	 * The anonymous struct overlays a work_struct; closure_queue() has a
+	 * BUILD_BUG_ON asserting that ->fn aliases work.func, so the closure
+	 * can be submitted to a workqueue directly.
+	 */
+	union {
+		struct {
+			struct workqueue_struct *wq;
+			struct task_struct	*sleeper;
+			struct llist_node	list;
+			closure_fn		*fn;
+		};
+		struct work_struct	work;
+	};
+
+	/* Holds a ref on @parent for this closure's lifetime; may be NULL */
+	struct closure		*parent;
+
+	/* Refcount in the low bits, closure_state flags in the high bits */
+	atomic_t		remaining;
+
+#ifdef CONFIG_DEBUG_CLOSURES
+#define CLOSURE_MAGIC_DEAD	0xc054dead
+#define CLOSURE_MAGIC_ALIVE	0xc054a11e
+#define CLOSURE_MAGIC_STACK	0xc05451cc
+
+	unsigned int		magic;
+	struct list_head	all;
+	unsigned long		ip;
+	unsigned long		waiting_on;
+#endif
+};
+
+/* Core primitives, implemented in vendor/closure.c */
+void bch2_closure_sub(struct closure *cl, int v);
+void __bch2_closure_wake_up(struct closure_waitlist *list);
+bool bch2_closure_wait(struct closure_waitlist *list, struct closure *cl);
+void __bch2_closure_sync(struct closure *cl);
+
+/*
+ * closure_put - decrement a closure's refcount
+ */
+static inline void closure_put(struct closure *cl)
+{
+	bch2_closure_sub(cl, 1);
+}
+
+/* Current refcount, with the state flag bits masked off */
+static inline unsigned closure_nr_remaining(struct closure *cl)
+{
+	return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
+}
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+	if (closure_nr_remaining(cl) > 1)
+		__bch2_closure_sync(cl);
+}
+
+int __bch2_closure_sync_timeout(struct closure *cl, unsigned long timeout);
+
+/*
+ * Like closure_sync(), but gives up after @timeout jiffies: returns 0 on
+ * completion, -ETIME on timeout (see __bch2_closure_sync_timeout()).
+ */
+static inline int closure_sync_timeout(struct closure *cl, unsigned long timeout)
+{
+	return closure_nr_remaining(cl) > 1
+		? __bch2_closure_sync_timeout(cl, timeout)
+		: 0;
+}
+
+//#ifdef CONFIG_DEBUG_CLOSURES
+#if 0
+
+void bch2_closure_debug_create(struct closure *cl);
+void closure_debug_destroy(struct closure *cl);
+
+#else
+
+/* Closure debug tracking is compiled out in this vendored copy (#if 0 above) */
+static inline void bch2_closure_debug_create(struct closure *cl) {}
+static inline void closure_debug_destroy(struct closure *cl) {}
+
+#endif
+
+/* Record the call site for debugging; no-op unless CONFIG_DEBUG_CLOSURES */
+static inline void closure_set_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+	cl->ip = _THIS_IP_;
+#endif
+}
+
+/* As closure_set_ip(), but records the caller's return address instead */
+static inline void closure_set_ret_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+	cl->ip = _RET_IP_;
+#endif
+}
+
+/* Record what the closure is waiting on (debug builds only) */
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+	cl->waiting_on = f;
+#endif
+}
+
+/* Clear CLOSURE_RUNNING; per enum closure_state, must happen before the
+ * refcount reaches 0 */
+static inline void closure_set_stopped(struct closure *cl)
+{
+	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
+
+/* Set the function (and workqueue) to run when the refcount next hits 0 */
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+				  struct workqueue_struct *wq)
+{
+	closure_set_ip(cl);
+	cl->fn = fn;
+	cl->wq = wq;
+}
+
+/* Run cl->fn - via cl->wq if one was set, else directly in this context */
+static inline void closure_queue(struct closure *cl)
+{
+	struct workqueue_struct *wq = cl->wq;
+	/**
+	 * Changes made to closure, work_struct, or a couple of other structs
+	 * may cause work.func to no longer point at the right location.
+	 */
+	BUILD_BUG_ON(offsetof(struct closure, fn)
+		     != offsetof(struct work_struct, func));
+
+	if (wq) {
+		/* cl->work.func aliases cl->fn (union + BUILD_BUG_ON above) */
+		INIT_WORK(&cl->work, cl->work.func);
+		BUG_ON(!queue_work(wq, &cl->work));
+	} else
+		cl->fn(&cl->work);
+}
+
+/**
+ * closure_get - increment a closure's refcount
+ *
+ * The caller must already hold a ref (debug builds BUG if the count was 0).
+ */
+static inline void closure_get(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+	BUG_ON((atomic_inc_return(&cl->remaining) &
+		CLOSURE_REMAINING_MASK) <= 1);
+#else
+	atomic_inc(&cl->remaining);
+#endif
+}
+
+/**
+ * closure_get_not_zero - take a ref on a closure, unless its refcount is zero
+ *
+ * Returns false (taking no ref) if the refcount was already 0 - e.g. after
+ * closure_return_sync() - true on success.
+ */
+static inline bool closure_get_not_zero(struct closure *cl)
+{
+	unsigned old = atomic_read(&cl->remaining);
+	do {
+		if (!(old & CLOSURE_REMAINING_MASK))
+			return false;
+
+	} while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
+
+	return true;
+}
+
+/**
+ * closure_init - Initialize a closure, setting the refcount to 1
+ * @cl: closure to initialize
+ * @parent: parent of the new closure. cl will take a refcount on it for its
+ * lifetime; may be NULL.
+ */
+static inline void closure_init(struct closure *cl, struct closure *parent)
+{
+	cl->fn = NULL;
+	cl->parent = parent;
+	if (parent)
+		closure_get(parent);
+
+	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+
+	bch2_closure_debug_create(cl);
+	closure_set_ip(cl);
+}
+
+/* Initialize an on-stack closure: zeroed, refcount 1, no parent */
+static inline void closure_init_stack(struct closure *cl)
+{
+	memset(cl, 0, sizeof(struct closure));
+	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+	cl->magic = CLOSURE_MAGIC_STACK;
+#endif
+}
+
+/*
+ * As closure_init_stack(), but the final atomic_set has release ordering -
+ * presumably so the memset is visible before other threads can observe the
+ * initialized refcount; confirm against call sites.
+ */
+static inline void closure_init_stack_release(struct closure *cl)
+{
+	memset(cl, 0, sizeof(struct closure));
+	atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+	cl->magic = CLOSURE_MAGIC_STACK;
+#endif
+}
+
+/**
+ * closure_wake_up - wake up all closures on a wait list,
+ * with memory barrier
+ */
+static inline void closure_wake_up(struct closure_waitlist *list)
+{
+	/* Memory barrier for the wait list */
+	smp_mb();
+	__bch2_closure_wake_up(list);
+}
+
+/*
+ * Boilerplate for declaring a closure callback and recovering the containing
+ * object from the work_struct argument.
+ */
+#define CLOSURE_CALLBACK(name)	void name(struct work_struct *ws)
+#define closure_type(name, type, member)				\
+	struct closure *cl = container_of(ws, struct closure, work);	\
+	type *name = container_of(cl, type, member)
+
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
+ */
+#define continue_at(_cl, _fn, _wq)					\
+do {									\
+	set_closure_fn(_cl, _fn, _wq);					\
+	bch2_closure_sub(_cl, CLOSURE_RUNNING + 1);			\
+} while (0)
+
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
+#define closure_return(_cl)	continue_at((_cl), NULL, NULL)
+
+/* Synchronous closure_return(); see the comment in vendor/closure.c */
+void bch2_closure_return_sync(struct closure *cl);
+
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
+#define continue_at_nobarrier(_cl, _fn, _wq)				\
+do {									\
+	set_closure_fn(_cl, _fn, _wq);					\
+	closure_queue(_cl);						\
+} while (0)
+
+/**
+ * closure_return_with_destructor - finish execution of a closure,
+ * with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
+#define closure_return_with_destructor(_cl, _destructor)		\
+do {									\
+	set_closure_fn(_cl, _destructor, NULL);				\
+	bch2_closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);\
+} while (0)
+
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
+static inline void closure_call(struct closure *cl, closure_fn fn,
+				struct workqueue_struct *wq,
+				struct closure *parent)
+{
+	closure_init(cl, parent);
+	/* The new closure's initial ref is handed straight to @fn */
+	continue_at_nobarrier(cl, fn, wq);
+}
+
+/*
+ * __closure_wait_event - block on @waitlist until @_cond becomes true, using
+ * an on-stack closure; @_cond is re-evaluated each time the closure is woken.
+ */
+#define __closure_wait_event(waitlist, _cond)				\
+do {									\
+	struct closure cl;						\
+									\
+	closure_init_stack(&cl);					\
+									\
+	while (1) {							\
+		bch2_closure_wait(waitlist, &cl);			\
+		if (_cond)						\
+			break;						\
+		closure_sync(&cl);					\
+	}								\
+	closure_wake_up(waitlist);					\
+	closure_sync(&cl);						\
+} while (0)
+
+/* Fast path: skip the closure machinery entirely if @_cond already holds */
+#define closure_wait_event(waitlist, _cond)				\
+do {									\
+	if (!(_cond))							\
+		__closure_wait_event(waitlist, _cond);			\
+} while (0)
+
+/*
+ * As __closure_wait_event(), but gives up once jiffies reaches @_until;
+ * evaluates to the remaining jiffies (at least 1) on success, 0 on timeout.
+ */
+#define __closure_wait_event_timeout(waitlist, _cond, _until)		\
+({									\
+	struct closure cl;						\
+	long _t;							\
+									\
+	closure_init_stack(&cl);					\
+									\
+	while (1) {							\
+		bch2_closure_wait(waitlist, &cl);			\
+		if (_cond) {						\
+			_t = max_t(long, 1L, _until - jiffies);		\
+			break;						\
+		}							\
+		_t = max_t(long, 0L, _until - jiffies);			\
+		if (!_t)						\
+			break;						\
+		closure_sync_timeout(&cl, _t);				\
+	}								\
+	closure_wake_up(waitlist);					\
+	closure_sync(&cl);						\
+	_t;								\
+})
+
+/*
+ * Returns 0 if timeout expired, remaining time in jiffies (at least 1) if
+ * condition became true
+ */
+#define closure_wait_event_timeout(waitlist, _cond, _timeout)		\
+({									\
+	unsigned long _until = jiffies + _timeout;			\
+	(_cond)								\
+		? max_t(long, 1L, _until - jiffies)			\
+		: __closure_wait_event_timeout(waitlist, _cond, _until);\
+})
+
+#endif /* _LINUX_CLOSURE_H */