-rw-r--r--  Documentation/locking/seqlock.rst  52
-rw-r--r--  arch/ia64/include/asm/smp.h  35
-rw-r--r--  arch/ia64/include/asm/xtp.h  46
-rw-r--r--  arch/ia64/kernel/iosapic.c  1
-rw-r--r--  arch/ia64/kernel/irq.c  1
-rw-r--r--  arch/ia64/kernel/process.c  1
-rw-r--r--  arch/ia64/kernel/sal.c  1
-rw-r--r--  arch/ia64/kernel/setup.c  1
-rw-r--r--  arch/ia64/kernel/smp.c  1
-rw-r--r--  arch/parisc/include/asm/timex.h  1
-rw-r--r--  arch/sh/include/asm/io.h  1
-rw-r--r--  arch/sh/kernel/machvec.c  1
-rw-r--r--  arch/sparc/include/asm/timer_64.h  1
-rw-r--r--  arch/sparc/include/asm/vvar.h  3
-rw-r--r--  arch/sparc/kernel/vdso.c  1
-rw-r--r--  arch/x86/include/asm/fixmap.h  2
-rw-r--r--  arch/x86/include/asm/smp.h  10
-rw-r--r--  arch/x86/include/asm/tsc.h  1
-rw-r--r--  arch/x86/kernel/apic/apic.c  1
-rw-r--r--  arch/x86/kernel/apic/apic_noop.c  1
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c  1
-rw-r--r--  arch/x86/kernel/apic/hw_nmi.c  1
-rw-r--r--  arch/x86/kernel/apic/ipi.c  1
-rw-r--r--  arch/x86/kernel/apic/local.h  1
-rw-r--r--  arch/x86/kernel/apic/probe_32.c  1
-rw-r--r--  arch/x86/kernel/apic/probe_64.c  1
-rw-r--r--  arch/x86/kernel/cpu/amd.c  1
-rw-r--r--  arch/x86/kernel/cpu/common.c  1
-rw-r--r--  arch/x86/kernel/cpu/hygon.c  1
-rw-r--r--  arch/x86/kernel/cpu/intel.c  1
-rw-r--r--  arch/x86/kernel/devicetree.c  1
-rw-r--r--  arch/x86/kernel/irqinit.c  2
-rw-r--r--  arch/x86/kernel/jailhouse.c  2
-rw-r--r--  arch/x86/kernel/mpparse.c  2
-rw-r--r--  arch/x86/kernel/setup.c  1
-rw-r--r--  arch/x86/kernel/topology.c  1
-rw-r--r--  arch/x86/kernel/tsc_msr.c  1
-rw-r--r--  arch/x86/mm/init_32.c  1
-rw-r--r--  arch/x86/xen/apic.c  2
-rw-r--r--  arch/x86/xen/enlighten_hvm.c  1
-rw-r--r--  arch/x86/xen/smp_hvm.c  1
-rw-r--r--  arch/x86/xen/smp_pv.c  1
-rw-r--r--  arch/x86/xen/suspend_pv.c  4
-rw-r--r--  block/blk-iocost.c  5
-rw-r--r--  drivers/dma-buf/dma-resv.c  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  2
-rw-r--r--  drivers/iommu/intel/irq_remapping.c  1
-rw-r--r--  drivers/md/raid5.c  2
-rw-r--r--  drivers/md/raid5.h  2
-rw-r--r--  fs/dcache.c  2
-rw-r--r--  fs/fs_struct.c  4
-rw-r--r--  fs/nfs/nfs4_fs.h  2
-rw-r--r--  fs/nfs/nfs4state.c  2
-rw-r--r--  fs/userfaultfd.c  4
-rw-r--r--  include/linux/dcache.h  2
-rw-r--r--  include/linux/dma-resv.h  4
-rw-r--r--  include/linux/dynamic_queue_limits.h  2
-rw-r--r--  include/linux/fs_struct.h  2
-rw-r--r--  include/linux/hrtimer.h  3
-rw-r--r--  include/linux/ktime.h  1
-rw-r--r--  include/linux/kvm_irqfd.h  2
-rw-r--r--  include/linux/lockdep.h  1
-rw-r--r--  include/linux/mutex.h  11
-rw-r--r--  include/linux/sched.h  3
-rw-r--r--  include/linux/seqlock.h  368
-rw-r--r--  include/linux/time.h  1
-rw-r--r--  include/linux/videodev2.h  1
-rw-r--r--  include/linux/ww_mutex.h  8
-rw-r--r--  include/net/netfilter/nf_conntrack.h  2
-rw-r--r--  init/init_task.c  3
-rw-r--r--  kernel/fork.c  2
-rw-r--r--  kernel/locking/lockdep_proc.c  2
-rw-r--r--  kernel/time/hrtimer.c  13
-rw-r--r--  kernel/time/timekeeping.c  19
-rw-r--r--  net/netfilter/nf_conntrack_core.c  5
-rw-r--r--  net/netfilter/nft_set_rbtree.c  4
-rw-r--r--  net/xfrm/xfrm_policy.c  10
-rw-r--r--  virt/kvm/eventfd.c  2
78 files changed, 501 insertions, 203 deletions
diff --git a/Documentation/locking/seqlock.rst b/Documentation/locking/seqlock.rst
index 366dd368d90a..62c5ad98c11c 100644
--- a/Documentation/locking/seqlock.rst
+++ b/Documentation/locking/seqlock.rst
@@ -87,6 +87,58 @@ Read path::
} while (read_seqcount_retry(&foo_seqcount, seq));
+.. _seqcount_locktype_t:
+
+Sequence counters with associated locks (``seqcount_LOCKTYPE_t``)
+-----------------------------------------------------------------
+
+As discussed at :ref:`seqcount_t`, sequence count write side critical
+sections must be serialized and non-preemptible. This variant of
+sequence counters associates the lock used for writer serialization at
+initialization time, which enables lockdep to validate that the write
+side critical sections are properly serialized.
+
+This lock association is a NOOP if lockdep is disabled and has neither
+storage nor runtime overhead. If lockdep is enabled, the lock pointer is
+stored in the seqcount_LOCKTYPE_t struct and lockdep's "lock is held"
+assertions are injected at the beginning of the write side critical
+section to validate that it is properly protected.
+
+For lock types which do not implicitly disable preemption, preemption
+protection is enforced in the write side function.
+
+The following sequence counters with associated locks are defined:
+
+ - ``seqcount_spinlock_t``
+ - ``seqcount_raw_spinlock_t``
+ - ``seqcount_rwlock_t``
+ - ``seqcount_mutex_t``
+ - ``seqcount_ww_mutex_t``
+
+The plain seqcount read and write APIs branch out to the specific
+seqcount_LOCKTYPE_t implementation at compile-time. This avoids a
+kernel API explosion for each new seqcount LOCKTYPE.
+
+Initialization (replace "LOCKTYPE" with one of the supported locks)::
+
+ /* dynamic */
+ seqcount_LOCKTYPE_t foo_seqcount;
+ seqcount_LOCKTYPE_init(&foo_seqcount, &lock);
+
+ /* static */
+ static seqcount_LOCKTYPE_t foo_seqcount =
+ SEQCNT_LOCKTYPE_ZERO(foo_seqcount, &lock);
+
+	/* C99 designated initializer, for an embedded counter */
+	struct foo {
+		seqcount_LOCKTYPE_t seq;
+	} foo = {
+		.seq = SEQCNT_LOCKTYPE_ZERO(foo.seq, &lock),
+	};
+
+Write path: same as in :ref:`seqcount_t`, while running from a context
+with the associated LOCKTYPE lock acquired.
+
+Read path: same as in :ref:`seqcount_t`.
+
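For illustration, a minimal end-to-end sketch using the spinlock variant
(the ``foo`` structure and helpers are hypothetical; the API is the one
introduced above)::

	struct foo {
		spinlock_t		lock;
		seqcount_spinlock_t	seq;
		u64			a, b;	/* the protected data */
	};

	static void foo_init(struct foo *f)
	{
		spin_lock_init(&f->lock);
		seqcount_spinlock_init(&f->seq, &f->lock);
	}

	static void foo_update(struct foo *f, u64 a, u64 b)
	{
		spin_lock(&f->lock);
		write_seqcount_begin(&f->seq);	/* lockdep checks f->lock is held */
		f->a = a;
		f->b = b;
		write_seqcount_end(&f->seq);
		spin_unlock(&f->lock);
	}

	static u64 foo_read_sum(struct foo *f)
	{
		unsigned int seq;
		u64 sum;

		do {
			seq = read_seqcount_begin(&f->seq);
			sum = f->a + f->b;
		} while (read_seqcount_retry(&f->seq, seq));

		return sum;
	}
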
.. _seqlock_t:
Sequential locks (``seqlock_t``)
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index 7847ae40a181..aa92234c0142 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -18,7 +18,6 @@
#include <linux/bitops.h>
#include <linux/irqreturn.h>
-#include <asm/io.h>
#include <asm/param.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
@@ -44,11 +43,6 @@ ia64_get_lid (void)
#ifdef CONFIG_SMP
-#define XTP_OFFSET 0x1e0008
-
-#define SMP_IRQ_REDIRECTION (1 << 0)
-#define SMP_IPI_REDIRECTION (1 << 1)
-
#define raw_smp_processor_id() (current_thread_info()->cpu)
extern struct smp_boot_data {
@@ -62,7 +56,6 @@ extern cpumask_t cpu_core_map[NR_CPUS];
DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
extern int smp_num_siblings;
extern void __iomem *ipi_base_addr;
-extern unsigned char smp_int_redirect;
extern volatile int ia64_cpu_to_sapicid[];
#define cpu_physical_id(i) ia64_cpu_to_sapicid[i]
@@ -84,34 +77,6 @@ cpu_logical_id (int cpuid)
return i;
}
-/*
- * XTP control functions:
- * min_xtp : route all interrupts to this CPU
- * normal_xtp: nominal XTP value
- * max_xtp : never deliver interrupts to this CPU.
- */
-
-static inline void
-min_xtp (void)
-{
- if (smp_int_redirect & SMP_IRQ_REDIRECTION)
- writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
-}
-
-static inline void
-normal_xtp (void)
-{
- if (smp_int_redirect & SMP_IRQ_REDIRECTION)
- writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
-}
-
-static inline void
-max_xtp (void)
-{
- if (smp_int_redirect & SMP_IRQ_REDIRECTION)
- writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
-}
-
/* Upping and downing of CPUs */
extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
diff --git a/arch/ia64/include/asm/xtp.h b/arch/ia64/include/asm/xtp.h
new file mode 100644
index 000000000000..5bf1d70ad860
--- /dev/null
+++ b/arch/ia64/include/asm/xtp.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_XTP_H
+#define _ASM_IA64_XTP_H
+
+#include <asm/io.h>
+
+#ifdef CONFIG_SMP
+
+#define XTP_OFFSET 0x1e0008
+
+#define SMP_IRQ_REDIRECTION (1 << 0)
+#define SMP_IPI_REDIRECTION (1 << 1)
+
+extern unsigned char smp_int_redirect;
+
+/*
+ * XTP control functions:
+ * min_xtp : route all interrupts to this CPU
+ * normal_xtp: nominal XTP value
+ * max_xtp : never deliver interrupts to this CPU.
+ */
+
+static inline void
+min_xtp (void)
+{
+ if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+ writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */
+}
+
+static inline void
+normal_xtp (void)
+{
+ if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+ writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */
+}
+
+static inline void
+max_xtp (void)
+{
+ if (smp_int_redirect & SMP_IRQ_REDIRECTION)
+ writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
+}
+
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_IA64_XTP_H */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index fad4db20ce65..35adcf89035a 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -95,6 +95,7 @@
#include <asm/iosapic.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
+#include <asm/xtp.h>
#undef DEBUG_INTERRUPT_ROUTING
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 0a8e5e585edc..ecef17c7c35b 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -25,6 +25,7 @@
#include <linux/kernel_stat.h>
#include <asm/mca.h>
+#include <asm/xtp.h>
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 36eba4ba6543..f19cb97c0098 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -47,6 +47,7 @@
#include <linux/uaccess.h>
#include <asm/unwind.h>
#include <asm/user.h>
+#include <asm/xtp.h>
#include "entry.h"
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index c455ece977ad..e4f0705c0282 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -18,6 +18,7 @@
#include <asm/page.h>
#include <asm/sal.h>
#include <asm/pal.h>
+#include <asm/xtp.h>
__cacheline_aligned DEFINE_SPINLOCK(sal_lock);
unsigned long sal_platform_features;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index d2d440fe855b..dd595fbd8006 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -65,6 +65,7 @@
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/uv/uv.h>
+#include <asm/xtp.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 0e2742003121..7b7b64eb3129 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -45,6 +45,7 @@
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>
+#include <asm/xtp.h>
/*
* Note: alignment of 4 entries/cacheline was empirically determined
diff --git a/arch/parisc/include/asm/timex.h b/arch/parisc/include/asm/timex.h
index 45537cd4d1d3..06b510f8172e 100644
--- a/arch/parisc/include/asm/timex.h
+++ b/arch/parisc/include/asm/timex.h
@@ -7,6 +7,7 @@
#ifndef _ASMPARISC_TIMEX_H
#define _ASMPARISC_TIMEX_H
+#include <asm/special_insns.h>
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 26f0f9b4658b..ec587b583822 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -17,6 +17,7 @@
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
+#include <asm/page.h>
#include <linux/pgtable.h>
#include <asm-generic/iomap.h>
diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c
index beadbbdb4486..76bd8955d4fe 100644
--- a/arch/sh/kernel/machvec.c
+++ b/arch/sh/kernel/machvec.c
@@ -15,6 +15,7 @@
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/irq.h>
+#include <asm/processor.h>
#define MV_NAME_SIZE 32
diff --git a/arch/sparc/include/asm/timer_64.h b/arch/sparc/include/asm/timer_64.h
index c7e4fb601a57..dcfad4613e18 100644
--- a/arch/sparc/include/asm/timer_64.h
+++ b/arch/sparc/include/asm/timer_64.h
@@ -7,6 +7,7 @@
#ifndef _SPARC64_TIMER_H
#define _SPARC64_TIMER_H
+#include <uapi/asm/asi.h>
#include <linux/types.h>
#include <linux/init.h>
diff --git a/arch/sparc/include/asm/vvar.h b/arch/sparc/include/asm/vvar.h
index 0289503d1cb0..6eaf5cfcaae1 100644
--- a/arch/sparc/include/asm/vvar.h
+++ b/arch/sparc/include/asm/vvar.h
@@ -6,7 +6,8 @@
#define _ASM_SPARC_VVAR_DATA_H
#include <asm/clocksource.h>
-#include <linux/seqlock.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
#include <linux/time.h>
#include <linux/types.h>
diff --git a/arch/sparc/kernel/vdso.c b/arch/sparc/kernel/vdso.c
index 58880662b271..0e27437eb97b 100644
--- a/arch/sparc/kernel/vdso.c
+++ b/arch/sparc/kernel/vdso.c
@@ -7,7 +7,6 @@
* a different vsyscall implementation for Linux/IA32 and for the name.
*/
-#include <linux/seqlock.h>
#include <linux/time.h>
#include <linux/timekeeper_internal.h>
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index b9527a54db99..0f0dd645b594 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -26,9 +26,9 @@
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
-#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
+#include <asm/pgtable_types.h>
#ifdef CONFIG_X86_32
#include <linux/threads.h>
#include <asm/kmap_types.h>
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index e15f364efbcc..c0538f82c9a2 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -5,16 +5,6 @@
#include <linux/cpumask.h>
#include <asm/percpu.h>
-/*
- * We need the APIC definitions automatically as part of 'smp.h'
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-# include <asm/mpspec.h>
-# include <asm/apic.h>
-# ifdef CONFIG_X86_IO_APIC
-# include <asm/io_apic.h>
-# endif
-#endif
#include <asm/thread_info.h>
#include <asm/cpumask.h>
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index b7b2624fba86..01a300a9700b 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -6,6 +6,7 @@
#define _ASM_X86_TSC_H
#include <asm/processor.h>
+#include <asm/cpufeature.h>
/*
* Standard way to access the cycle counter.
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index ccf726cc87b7..5f943b938167 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -46,6 +46,7 @@
#include <asm/proto.h>
#include <asm/traps.h>
#include <asm/apic.h>
+#include <asm/acpi.h>
#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 98c9bb75d185..780c702969b7 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -10,6 +10,7 @@
* like self-ipi, etc...
*/
#include <linux/cpumask.h>
+#include <linux/thread_info.h>
#include <asm/apic.h>
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 38b5b51d42f6..98d015a4405a 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -9,6 +9,7 @@
#include <linux/smp.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
#include "local.h"
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index d1fc62a67320..34a992e275ef 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -9,6 +9,7 @@
* Bits copied from original nmi.c file
*
*/
+#include <linux/thread_info.h>
#include <asm/apic.h>
#include <asm/nmi.h>
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 6ca0f91372fd..387154e39e08 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -2,6 +2,7 @@
#include <linux/cpumask.h>
#include <linux/smp.h>
+#include <asm/io_apic.h>
#include "local.h"
diff --git a/arch/x86/kernel/apic/local.h b/arch/x86/kernel/apic/local.h
index 04797f05ce94..a997d849509a 100644
--- a/arch/x86/kernel/apic/local.h
+++ b/arch/x86/kernel/apic/local.h
@@ -10,6 +10,7 @@
#include <linux/jump_label.h>
+#include <asm/irq_vectors.h>
#include <asm/apic.h>
/* APIC flat 64 */
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 67b33d67002f..7bda71def557 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/smp.h>
+#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/acpi.h>
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 29f0e0984557..bd3835d6b535 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -8,6 +8,7 @@
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
+#include <linux/thread_info.h>
#include <asm/apic.h>
#include "local.h"
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index d4806eac9325..dcc3d943c68f 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -15,6 +15,7 @@
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
+#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 965474d78cef..c5d6f17d9b9d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -45,6 +45,7 @@
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
+#include <asm/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index 4e28c1fc8749..ac6c30e5801d 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -10,6 +10,7 @@
#include <asm/cpu.h>
#include <asm/smp.h>
+#include <asm/numa.h>
#include <asm/cacheinfo.h>
#include <asm/spec-ctrl.h>
#include <asm/delay.h>
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b6b7b38dff5f..59a1e3ce3f14 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -23,6 +23,7 @@
#include <asm/cmdline.h>
#include <asm/traps.h>
#include <asm/resctrl.h>
+#include <asm/numa.h>
#ifdef CONFIG_X86_64
#include <linux/topology.h>
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 8d85e00bb40a..a0e8fc7d85f1 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -20,6 +20,7 @@
#include <asm/irqdomain.h>
#include <asm/hpet.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
#include <asm/pci_x86.h>
#include <asm/setup.h>
#include <asm/i8259.h>
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index dd73135d7cee..beb1bada1b0a 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -22,6 +22,8 @@
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
+#include <asm/io_apic.h>
+#include <asm/acpi.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/i8259.h>
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
index 6eb8b50ea07e..4eb8f2d19a87 100644
--- a/arch/x86/kernel/jailhouse.c
+++ b/arch/x86/kernel/jailhouse.c
@@ -13,6 +13,8 @@
#include <linux/reboot.h>
#include <linux/serial_8250.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/acpi.h>
#include <asm/cpu.h>
#include <asm/hypervisor.h>
#include <asm/i8259.h>
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index c27b82b62c8b..411af4aa7b51 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -19,6 +19,8 @@
#include <linux/smp.h>
#include <linux/pci.h>
+#include <asm/io_apic.h>
+#include <asm/acpi.h>
#include <asm/irqdomain.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b9a68d8e06d8..3511736fbc74 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -25,6 +25,7 @@
#include <xen/xen.h>
#include <asm/apic.h>
+#include <asm/numa.h>
#include <asm/bios_ebda.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index b8810ebbc8ae..0a2ec801b63f 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -31,6 +31,7 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/irq.h>
+#include <asm/io_apic.h>
#include <asm/cpu.h>
static DEFINE_PER_CPU(struct x86_cpu, cpu_devices);
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 4fec6f3a1858..46c72f2ec32f 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
+#include <linux/thread_info.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4cb958419fb0..7c055259de3a 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -52,6 +52,7 @@
#include <asm/cpu_entry_area.h>
#include <asm/init.h>
#include <asm/pgtable_areas.h>
+#include <asm/numa.h>
#include "mm_internal.h"
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 5e53bfbe5823..1aff4ae65655 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
+#include <linux/thread_info.h>
#include <asm/x86_init.h>
#include <asm/apic.h>
+#include <asm/io_apic.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index 3e89b0067ff0..9e87ab010c82 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -11,6 +11,7 @@
#include <asm/cpu.h>
#include <asm/smp.h>
+#include <asm/io_apic.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/idtentry.h>
diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
index f8d39440b292..f5e7db4f82ab 100644
--- a/arch/x86/xen/smp_hvm.c
+++ b/arch/x86/xen/smp_hvm.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/thread_info.h>
#include <asm/smp.h>
#include <xen/events.h>
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 9ea598dcc132..47c8f4b444c9 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -29,6 +29,7 @@
#include <asm/idtentry.h>
#include <asm/desc.h>
#include <asm/cpu.h>
+#include <asm/io_apic.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
diff --git a/arch/x86/xen/suspend_pv.c b/arch/x86/xen/suspend_pv.c
index 8303b58c79a9..cae9660f4c67 100644
--- a/arch/x86/xen/suspend_pv.c
+++ b/arch/x86/xen/suspend_pv.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
-#include <asm/fixmap.h>
-
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
+#include <asm/fixmap.h>
+
#include "xen-ops.h"
void xen_pv_pre_suspend(void)
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 521c29b8ae29..413e0b5c8e6b 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -406,7 +406,7 @@ struct ioc {
enum ioc_running running;
atomic64_t vtime_rate;
- seqcount_t period_seqcount;
+ seqcount_spinlock_t period_seqcount;
u32 period_at; /* wallclock starttime */
u64 period_at_vtime; /* vtime starttime */
@@ -873,7 +873,6 @@ static void ioc_now(struct ioc *ioc, struct ioc_now *now)
static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
{
- lockdep_assert_held(&ioc->lock);
WARN_ON_ONCE(ioc->running != IOC_RUNNING);
write_seqcount_begin(&ioc->period_seqcount);
@@ -2001,7 +2000,7 @@ static int blk_iocost_init(struct request_queue *q)
ioc->running = IOC_IDLE;
atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
- seqcount_init(&ioc->period_seqcount);
+ seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
ioc->period_at = ktime_to_us(ktime_get());
atomic64_set(&ioc->cur_period, 0);
atomic_set(&ioc->hweight_gen, 0);
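
The lockdep_assert_held() dropped from ioc_start_period() above is not
lost: with period_seqcount converted to a seqcount_spinlock_t associated
with ioc->lock, write_seqcount_begin() injects the same assertion itself.
A rough sketch of the effective behaviour (simplified, assuming
CONFIG_LOCKDEP=y):

	write_seqcount_begin(&ioc->period_seqcount);
	/* ... behaves roughly like ... */
	lockdep_assert_held(&ioc->lock);	/* __seqcount_assert_lock_held() */
	/* spinlock_t writers are non-preemptible: no preempt_disable() */
	write_seqcount_t_begin(&ioc->period_seqcount.seqcount);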
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 07f5273207e7..434a3314fb0e 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -52,12 +52,6 @@
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
-struct lock_class_key reservation_seqcount_class;
-EXPORT_SYMBOL(reservation_seqcount_class);
-
-const char reservation_seqcount_string[] = "reservation_seqcount";
-EXPORT_SYMBOL(reservation_seqcount_string);
-
/**
* dma_resv_list_alloc - allocate fence list
* @shared_max: number of fences we need space for
@@ -143,9 +137,8 @@ subsys_initcall(dma_resv_lockdep);
void dma_resv_init(struct dma_resv *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
+ seqcount_ww_mutex_init(&obj->seq, &obj->lock);
- __seqcount_init(&obj->seq, reservation_seqcount_string,
- &reservation_seqcount_class);
RCU_INIT_POINTER(obj->fence, NULL);
RCU_INIT_POINTER(obj->fence_excl, NULL);
}
@@ -275,7 +268,6 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
fobj = dma_resv_get_list(obj);
count = fobj->shared_count;
- preempt_disable();
write_seqcount_begin(&obj->seq);
for (i = 0; i < count; ++i) {
@@ -297,7 +289,6 @@ replace:
smp_store_mb(fobj->shared_count, count);
write_seqcount_end(&obj->seq);
- preempt_enable();
dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
@@ -324,14 +315,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
if (fence)
dma_fence_get(fence);
- preempt_disable();
write_seqcount_begin(&obj->seq);
/* write_seqcount_begin provides the necessary memory barrier */
RCU_INIT_POINTER(obj->fence_excl, fence);
if (old)
old->shared_count = 0;
write_seqcount_end(&obj->seq);
- preempt_enable();
/* inplace update, no shared fences */
while (i--)
@@ -409,13 +398,11 @@ retry:
src_list = dma_resv_get_list(dst);
old = dma_resv_get_excl(dst);
- preempt_disable();
write_seqcount_begin(&dst->seq);
/* write_seqcount_begin provides the necessary memory barrier */
RCU_INIT_POINTER(dst->fence_excl, new);
RCU_INIT_POINTER(dst->fence, dst_list);
write_seqcount_end(&dst->seq);
- preempt_enable();
dma_resv_list_free(src_list);
dma_fence_put(old);
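
The preempt_disable()/preempt_enable() pairs above can be dropped because
obj->seq is now a seqcount_ww_mutex_t and ww_mutex is a preemptible lock
type: write_seqcount_begin() handles preemption itself. A sketch of what
the macros (added to seqlock.h later in this diff) effectively do here:

	write_seqcount_begin(&obj->seq);
	/* ... behaves roughly like ... */
	lockdep_assert_held(&obj->lock.base);	/* the associated ww_mutex */
	preempt_disable();	/* __seqcount_lock_preemptible() is true */
	write_seqcount_t_begin(&obj->seq.seqcount);

	write_seqcount_end(&obj->seq);
	/* ... behaves roughly like ... */
	write_seqcount_t_end(&obj->seq.seqcount);
	preempt_enable();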
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e5a5ba869eb4..a58af513c952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -258,11 +258,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
new->shared_count = k;
/* Install the new fence list, seqcount provides the barriers */
- preempt_disable();
write_seqcount_begin(&resv->seq);
RCU_INIT_POINTER(resv->fence, new);
write_seqcount_end(&resv->seq);
- preempt_enable();
/* Drop the references to the removed fences or move them to ef_list */
for (i = j, k = 0; i < old->shared_count; ++i) {
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index aa096b333a99..23583b0e66a5 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -15,6 +15,7 @@
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
#include <asm/io_apic.h>
+#include <asm/apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index fb8d1fb14088..ef0fd4830803 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7019,7 +7019,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
} else
goto abort;
spin_lock_init(&conf->device_lock);
- seqcount_init(&conf->gen_lock);
+ seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock);
mutex_init(&conf->cache_size_mutex);
init_waitqueue_head(&conf->wait_for_quiescent);
init_waitqueue_head(&conf->wait_for_stripe);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 7fb3b26a181a..16fc29472f5c 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -582,7 +582,7 @@ struct r5conf {
int prev_chunk_sectors;
int prev_algo;
short generation; /* increments with every reshape */
- seqcount_t gen_lock; /* lock against generation changes */
+ seqcount_spinlock_t gen_lock; /* lock against generation changes */
unsigned long reshape_checkpoint; /* Time we last updated
* metadata */
long long min_offset_diff; /* minimum difference between
diff --git a/fs/dcache.c b/fs/dcache.c
index 361ea7ab30ea..ea0485861d93 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1746,7 +1746,7 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
dentry->d_lockref.count = 1;
dentry->d_flags = 0;
spin_lock_init(&dentry->d_lock);
- seqcount_init(&dentry->d_seq);
+ seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
dentry->d_inode = NULL;
dentry->d_parent = dentry;
dentry->d_sb = sb;
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index ca639ed967b7..04b3f5b9c629 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -117,7 +117,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
fs->users = 1;
fs->in_exec = 0;
spin_lock_init(&fs->lock);
- seqcount_init(&fs->seq);
+ seqcount_spinlock_init(&fs->seq, &fs->lock);
fs->umask = old->umask;
spin_lock(&old->lock);
@@ -163,6 +163,6 @@ EXPORT_SYMBOL(current_umask);
struct fs_struct init_fs = {
.users = 1,
.lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
- .seq = SEQCNT_ZERO(init_fs.seq),
+ .seq = SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock),
.umask = 0022,
};
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 2b7f6dcd2eb8..210e590e1f71 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -117,7 +117,7 @@ struct nfs4_state_owner {
unsigned long so_flags;
struct list_head so_states;
struct nfs_seqid_counter so_seqid;
- seqcount_t so_reclaim_seqcount;
+ seqcount_spinlock_t so_reclaim_seqcount;
struct mutex so_delegreturn_mutex;
};
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index a8dc25ce48bb..b1dba24918f8 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -509,7 +509,7 @@ nfs4_alloc_state_owner(struct nfs_server *server,
nfs4_init_seqid_counter(&sp->so_seqid);
atomic_set(&sp->so_count, 1);
INIT_LIST_HEAD(&sp->so_lru);
- seqcount_init(&sp->so_reclaim_seqcount);
+ seqcount_spinlock_init(&sp->so_reclaim_seqcount, &sp->so_lock);
mutex_init(&sp->so_delegreturn_mutex);
return sp;
}
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 6e264dded46e..0e4a3837da52 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -61,7 +61,7 @@ struct userfaultfd_ctx {
/* waitqueue head for events */
wait_queue_head_t event_wqh;
/* a refile sequence protected by fault_pending_wqh lock */
- struct seqcount refile_seq;
+ seqcount_spinlock_t refile_seq;
/* pseudo fd refcounting */
refcount_t refcount;
/* userfaultfd syscall flags */
@@ -1961,7 +1961,7 @@ static void init_once_userfaultfd_ctx(void *mem)
init_waitqueue_head(&ctx->fault_wqh);
init_waitqueue_head(&ctx->event_wqh);
init_waitqueue_head(&ctx->fd_wqh);
- seqcount_init(&ctx->refile_seq);
+ seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
}
SYSCALL_DEFINE1(userfaultfd, int, flags)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index a81f0c3cf352..65d975bf9390 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -89,7 +89,7 @@ extern struct dentry_stat_t dentry_stat;
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
- seqcount_t d_seq; /* per dentry seqlock */
+ seqcount_spinlock_t d_seq; /* per dentry seqlock */
struct hlist_bl_node d_hash; /* lookup hash list */
struct dentry *d_parent; /* parent directory */
struct qstr d_name;
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index ee50d10f052b..d44a77e8a7e3 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -46,8 +46,6 @@
#include <linux/rcupdate.h>
extern struct ww_class reservation_ww_class;
-extern struct lock_class_key reservation_seqcount_class;
-extern const char reservation_seqcount_string[];
/**
* struct dma_resv_list - a list of shared fences
@@ -71,7 +69,7 @@ struct dma_resv_list {
*/
struct dma_resv {
struct ww_mutex lock;
- seqcount_t seq;
+ seqcount_ww_mutex_t seq;
struct dma_fence __rcu *fence_excl;
struct dma_resv_list __rcu *fence;
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 99fc06f0afc1..407c2f281b64 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -38,6 +38,8 @@
#ifdef __KERNEL__
+#include <asm/bug.h>
+
struct dql {
/* Fields accessed in enqueue path (dql_queued) */
unsigned int num_queued; /* Total ever queued */
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index cf1015abfbf2..783b48dedb72 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -9,7 +9,7 @@
struct fs_struct {
int users;
spinlock_t lock;
- seqcount_t seq;
+ seqcount_spinlock_t seq;
int umask;
int in_exec;
struct path root, pwd;
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 15c8ac313678..107cedd7019a 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
+#include <linux/seqlock.h>
#include <linux/timer.h>
#include <linux/timerqueue.h>
@@ -159,7 +160,7 @@ struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
unsigned int index;
clockid_t clockid;
- seqcount_t seq;
+ seqcount_raw_spinlock_t seq;
struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 42d2e6ac35f2..a12b5523cc18 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -23,6 +23,7 @@
#include <linux/time.h>
#include <linux/jiffies.h>
+#include <asm/bug.h>
/* Nanosecond scalar representation for kernel time values */
typedef s64 ktime_t;
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index dc1da020305b..dac047abdba7 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -42,7 +42,7 @@ struct kvm_kernel_irqfd {
wait_queue_entry_t wait;
/* Update side is protected by irqfds.lock */
struct kvm_kernel_irq_routing_entry irq_entry;
- seqcount_t irq_entry_sc;
+ seqcount_spinlock_t irq_entry_sc;
/* Used for level IRQ fast-path */
int gsi;
struct work_struct inject;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 39a35699d0d6..62a382d1845b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -11,6 +11,7 @@
#define __LINUX_LOCKDEP_H
#include <linux/lockdep_types.h>
+#include <linux/smp.h>
#include <asm/percpu.h>
struct task_struct;
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ae197cc00cc8..dcd185cbfe79 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -65,6 +65,17 @@ struct mutex {
#endif
};
+struct ww_class;
+struct ww_acquire_ctx;
+
+struct ww_mutex {
+ struct mutex base;
+ struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+ struct ww_class *ww_class;
+#endif
+};
+
/*
* This is the control structure for tasks blocked on mutex,
* which resides on the blocked task's kernel stack:
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 52bcc9f48e17..53ddc02e2e79 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -32,6 +32,7 @@
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
+#include <linux/seqlock.h>
#include <linux/kcsan.h>
/* task_struct member predeclarations (sorted alphabetically): */
@@ -1049,7 +1050,7 @@ struct task_struct {
/* Protected by ->alloc_lock: */
nodemask_t mems_allowed;
/* Seqence number to catch updates: */
- seqcount_t mems_allowed_seq;
+ seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 54bc20496392..962d9768945f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -10,13 +10,16 @@
*
* Copyrights:
* - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
+ * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
*/
-#include <linux/spinlock.h>
-#include <linux/preempt.h>
-#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
#include <asm/processor.h>
/*
@@ -48,6 +51,10 @@
* This mechanism can't be used if the protected data contains pointers,
* as the writer can invalidate a pointer that a reader is following.
*
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKTYPE_t) instead.
+ *
* If it's desired to automatically handle the sequence counter writer
* serialization and non-preemptibility requirements, use a sequential
* lock (seqlock_t) instead.
@@ -72,17 +79,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name,
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SEQCOUNT_DEP_MAP_INIT(lockname) \
- .dep_map = { .name = #lockname } \
+
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+ .dep_map = { .name = #lockname }
/**
* seqcount_init() - runtime initializer for seqcount_t
* @s: Pointer to the seqcount_t instance
*/
-# define seqcount_init(s) \
- do { \
- static struct lock_class_key __key; \
- __seqcount_init((s), #s, &__key); \
+# define seqcount_init(s) \
+ do { \
+ static struct lock_class_key __key; \
+ __seqcount_init((s), #s, &__key); \
} while (0)
static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
@@ -108,9 +116,143 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*/
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
+/*
+ * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
+ *
+ * A sequence counter which associates the lock used for writer
+ * serialization at initialization time. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ *
+ * For associated locks which do not implicitly disable preemption,
+ * preemption protection is enforced in the write side function.
+ *
+ * Lockdep is never used in any of the raw write variants.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+
+#ifdef CONFIG_LOCKDEP
+#define __SEQ_LOCK(expr) expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
+
+/**
+ * typedef seqcount_LOCKNAME_t - sequence counter with an associated LOCKTYPE
+ * @seqcount: The real sequence counter
+ * @lock: Pointer to the associated spinlock
+ *
+ * A plain sequence counter with external writer synchronization by a
+ * spinlock. The spinlock is associated to the sequence count in the
+ * static initializer or init function. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ */
+
+/**
+ * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
+ * @s: Pointer to the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated LOCKTYPE
+ */
+
+/*
+ * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
+ * @locktype: actual typename
+ * @lockname: name
+ * @preemptible: preemptibility of above locktype
+ * @lockmember: argument for lockdep_assert_held()
+ */
+#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \
+typedef struct seqcount_##lockname { \
+ seqcount_t seqcount; \
+ __SEQ_LOCK(locktype *lock); \
+} seqcount_##lockname##_t; \
+ \
+static __always_inline void \
+seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
+{ \
+ seqcount_init(&s->seqcount); \
+ __SEQ_LOCK(s->lock = lock); \
+} \
+ \
+static __always_inline seqcount_t * \
+__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
+{ \
+ return &s->seqcount; \
+} \
+ \
+static __always_inline bool \
+__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \
+{ \
+ return preemptible; \
+} \
+ \
+static __always_inline void \
+__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \
+{ \
+ __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+}
+
+/*
+ * __seqprop() for seqcount_t
+ */
+
+static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
+{
+ return s;
+}
+
+static inline bool __seqcount_preemptible(seqcount_t *s)
+{
+ return false;
+}
+
+static inline void __seqcount_assert(seqcount_t *s)
+{
+ lockdep_assert_preemption_disabled();
+}
+
+SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock)
+SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
+
+/**
+ * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
+ * @name: Name of the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated LOCKTYPE
+ */
+
+#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \
+ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
+ __SEQ_LOCK(.lock = (assoc_lock)) \
+}
+
+#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+
+
+#define __seqprop_case(s, lockname, prop) \
+ seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
+
+#define __seqprop(s, prop) _Generic(*(s), \
+ seqcount_t: __seqcount_##prop((void *)(s)), \
+ __seqprop_case((s), raw_spinlock, prop), \
+ __seqprop_case((s), spinlock, prop), \
+ __seqprop_case((s), rwlock, prop), \
+ __seqprop_case((s), mutex, prop), \
+ __seqprop_case((s), ww_mutex, prop))
+
+#define __seqcount_ptr(s) __seqprop(s, ptr)
+#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
+#define __seqcount_assert_lock_held(s) __seqprop(s, assert)
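
The _Generic() dispatch above is what lets a single read/write API cover
seqcount_t and every seqcount_LOCKNAME_t at compile time. A tiny
self-contained userspace analogue of the same idea (a sketch, not kernel
code; builds with cc -std=c11):

	#include <stdio.h>

	typedef struct { unsigned sequence; } seqcount_t;
	typedef struct { seqcount_t seqcount; void *lock; } seqcount_spinlock_t;

	static seqcount_t *plain_ptr(seqcount_t *s)             { return s; }
	static seqcount_t *spinlock_ptr(seqcount_spinlock_t *s) { return &s->seqcount; }

	/* pick the accessor from the static type of *s, like __seqprop() */
	#define seq_ptr(s) _Generic(*(s),			\
		seqcount_t:          plain_ptr,			\
		seqcount_spinlock_t: spinlock_ptr)(s)

	int main(void)
	{
		seqcount_t a = { 1 };
		seqcount_spinlock_t b = { { 2 }, NULL };

		printf("%u %u\n", seq_ptr(&a)->sequence, seq_ptr(&b)->sequence);
		return 0;
	}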
+
/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -122,7 +264,10 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*
* Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned __read_seqcount_begin(const seqcount_t *s)
+#define __read_seqcount_begin(s) \
+ __read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
{
unsigned ret;
@@ -138,32 +283,38 @@ repeat:
/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
+#define raw_read_seqcount_begin(s) \
+ raw_read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
{
- unsigned ret = __read_seqcount_begin(s);
+ unsigned ret = __read_seqcount_t_begin(s);
smp_rmb();
return ret;
}
/**
* read_seqcount_begin() - begin a seqcount_t read critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
+#define read_seqcount_begin(s) \
+ read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
{
seqcount_lockdep_reader_access(s);
- return raw_read_seqcount_begin(s);
+ return raw_read_seqcount_t_begin(s);
}
/**
* raw_read_seqcount() - read the raw seqcount_t counter value
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* raw_read_seqcount opens a read critical section of the given
* seqcount_t, without any lockdep checking, and without checking or
@@ -172,7 +323,10 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
*
* Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_read_seqcount(const seqcount_t *s)
+#define raw_read_seqcount(s) \
+ raw_read_seqcount_t(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
@@ -183,7 +337,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
* lockdep and w/o counter stabilization
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* raw_seqcount_begin opens a read critical section of the given
* seqcount_t. Unlike read_seqcount_begin(), this function will not wait
@@ -197,18 +351,21 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
*
* Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_seqcount_begin(const seqcount_t *s)
+#define raw_seqcount_begin(s) \
+ raw_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
{
/*
* If the counter is odd, let read_seqcount_retry() fail
* by decrementing the counter.
*/
- return raw_read_seqcount(s) & ~1;
+ return raw_read_seqcount_t(s) & ~1;
}
/**
* __read_seqcount_retry() - end a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
* @start: count, from read_seqcount_begin()
*
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
@@ -221,7 +378,10 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
*
* Return: true if a read section retry is required, else false
*/
-static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define __read_seqcount_retry(s, start) \
+ __read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
kcsan_atomic_next(0);
return unlikely(READ_ONCE(s->sequence) != start);
@@ -229,7 +389,7 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
/**
* read_seqcount_retry() - end a seqcount_t read critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
* @start: count, from read_seqcount_begin()
*
* read_seqcount_retry closes the read critical section of given
@@ -238,17 +398,28 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
*
* Return: true if a read section retry is required, else false
*/
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define read_seqcount_retry(s, start) \
+ read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
- return __read_seqcount_retry(s, start);
+ return __read_seqcount_t_retry(s, start);
}
/**
* raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*/
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+#define raw_write_seqcount_begin(s) \
+do { \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_disable(); \
+ \
+ raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
+} while (0)
+
+static inline void raw_write_seqcount_t_begin(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
@@ -257,49 +428,50 @@ static inline void raw_write_seqcount_begin(seqcount_t *s)
/**
* raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*/
-static inline void raw_write_seqcount_end(seqcount_t *s)
+#define raw_write_seqcount_end(s) \
+do { \
+ raw_write_seqcount_t_end(__seqcount_ptr(s)); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_enable(); \
+} while (0)
+
+static inline void raw_write_seqcount_t_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
kcsan_nestable_atomic_end();
}
-static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass)
-{
- raw_write_seqcount_begin(s);
- seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
-}
-
/**
* write_seqcount_begin_nested() - start a seqcount_t write section with
* custom lockdep nesting level
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
* @subclass: lockdep nesting level
*
* See Documentation/locking/lockdep-design.rst
*/
-static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
+#define write_seqcount_begin_nested(s, subclass) \
+do { \
+ __seqcount_assert_lock_held(s); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_disable(); \
+ \
+ write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
+} while (0)
+
+static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
{
- lockdep_assert_preemption_disabled();
- __write_seqcount_begin_nested(s, subclass);
-}
-
-/*
- * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks.
- *
- * Use for internal seqlock.h code where it's known that preemption is
- * already disabled. For example, seqlock_t write side functions.
- */
-static inline void __write_seqcount_begin(seqcount_t *s)
-{
- __write_seqcount_begin_nested(s, 0);
+ raw_write_seqcount_t_begin(s);
+ seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
/**
* write_seqcount_begin() - start a seqcount_t write side critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* write_seqcount_begin opens a write side critical section of the given
* seqcount_t.
@@ -308,26 +480,44 @@ static inline void __write_seqcount_begin(seqcount_t *s)
* non-preemptible. If readers can be invoked from hardirq or softirq
* context, interrupts or bottom halves must be respectively disabled.
*/
-static inline void write_seqcount_begin(seqcount_t *s)
+#define write_seqcount_begin(s) \
+do { \
+ __seqcount_assert_lock_held(s); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_disable(); \
+ \
+ write_seqcount_t_begin(__seqcount_ptr(s)); \
+} while (0)
+
+static inline void write_seqcount_t_begin(seqcount_t *s)
{
- write_seqcount_begin_nested(s, 0);
+ write_seqcount_t_begin_nested(s, 0);
}
/**
* write_seqcount_end() - end a seqcount_t write side critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* The write section must've been opened with write_seqcount_begin().
*/
-static inline void write_seqcount_end(seqcount_t *s)
+#define write_seqcount_end(s) \
+do { \
+ write_seqcount_t_end(__seqcount_ptr(s)); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_enable(); \
+} while (0)
+
+static inline void write_seqcount_t_end(seqcount_t *s)
{
seqcount_release(&s->dep_map, _RET_IP_);
- raw_write_seqcount_end(s);
+ raw_write_seqcount_t_end(s);
}
/**
* raw_write_seqcount_barrier() - do a seqcount_t write barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* This can be used to provide an ordering guarantee instead of the usual
* consistency guarantee. It is one wmb cheaper, because it can collapse
@@ -366,7 +556,10 @@ static inline void write_seqcount_end(seqcount_t *s)
* WRITE_ONCE(X, false);
* }
*/
-static inline void raw_write_seqcount_barrier(seqcount_t *s)
+#define raw_write_seqcount_barrier(s) \
+ raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
@@ -378,12 +571,15 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
/**
* write_seqcount_invalidate() - invalidate in-progress seqcount_t read
* side operations
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* After write_seqcount_invalidate, no seqcount_t read side operations
* will complete successfully and see data older than this.
*/
-static inline void write_seqcount_invalidate(seqcount_t *s)
+#define write_seqcount_invalidate(s) \
+ write_seqcount_t_invalidate(__seqcount_ptr(s))
+
+static inline void write_seqcount_t_invalidate(seqcount_t *s)
{
smp_wmb();
kcsan_nestable_atomic_begin();
@@ -393,7 +589,7 @@ static inline void write_seqcount_invalidate(seqcount_t *s)
/**
* raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* Use seqcount_t latching to switch between two storage places protected
* by a sequence counter. Doing so allows having interruptible, preemptible,
@@ -406,7 +602,10 @@ static inline void write_seqcount_invalidate(seqcount_t *s)
* picking which data copy to read. The full counter value must then be
* checked with read_seqcount_retry().
*/
-static inline int raw_read_seqcount_latch(seqcount_t *s)
+#define raw_read_seqcount_latch(s) \
+ raw_read_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline int raw_read_seqcount_t_latch(seqcount_t *s)
{
/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
int seq = READ_ONCE(s->sequence); /* ^^^ */
@@ -415,7 +614,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
/**
* raw_write_seqcount_latch() - redirect readers to even/odd copy
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
@@ -494,7 +693,10 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
* When data is a dynamic data structure; one should use regular RCU
* patterns to manage the lifetimes of the objects within.
*/
-static inline void raw_write_seqcount_latch(seqcount_t *s)
+#define raw_write_seqcount_latch(s) \
+ raw_write_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_latch(seqcount_t *s)
{
smp_wmb(); /* prior stores before incrementing "sequence" */
s->sequence++;
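
The kernel-doc elided by the hunk context above documents the classic
latch pattern: keep two copies of the data, update them one at a time,
and let readers pick a copy via the counter's low bit. A condensed sketch
(hypothetical struct data; paraphrasing the full example in seqlock.h):

	struct data { int val; };

	struct latch_struct {
		seqcount_t	seq;
		struct data	data[2];
	};

	/* writer side, serialized by the caller */
	static void latch_modify(struct latch_struct *latch, int val)
	{
		raw_write_seqcount_latch(&latch->seq);
		latch->data[0].val = val;	/* update the first copy */
		raw_write_seqcount_latch(&latch->seq);
		latch->data[1].val = val;	/* then the second */
	}

	/* reader side, usable even from NMI context */
	static int latch_query(struct latch_struct *latch)
	{
		unsigned int seq, idx;
		int val;

		do {
			seq = raw_read_seqcount_latch(&latch->seq);
			idx = seq & 0x01;	/* odd count: read copy 1 */
			val = latch->data[idx].val;
		} while (read_seqcount_retry(&latch->seq, seq));

		return val;
	}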
@@ -516,20 +718,20 @@ typedef struct {
spinlock_t lock;
} seqlock_t;
-#define __SEQLOCK_UNLOCKED(lockname) \
- { \
- .seqcount = SEQCNT_ZERO(lockname), \
- .lock = __SPIN_LOCK_UNLOCKED(lockname) \
+#define __SEQLOCK_UNLOCKED(lockname) \
+ { \
+ .seqcount = SEQCNT_ZERO(lockname), \
+ .lock = __SPIN_LOCK_UNLOCKED(lockname) \
}
/**
* seqlock_init() - dynamic initializer for seqlock_t
* @sl: Pointer to the seqlock_t instance
*/
-#define seqlock_init(sl) \
- do { \
- seqcount_init(&(sl)->seqcount); \
- spin_lock_init(&(sl)->lock); \
+#define seqlock_init(sl) \
+ do { \
+ seqcount_init(&(sl)->seqcount); \
+ spin_lock_init(&(sl)->lock); \
} while (0)
/**
@@ -592,7 +794,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
}
/**
@@ -604,7 +806,7 @@ static inline void write_seqlock(seqlock_t *sl)
*/
static inline void write_sequnlock(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock(&sl->lock);
}
@@ -618,7 +820,7 @@ static inline void write_sequnlock(seqlock_t *sl)
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
}
/**
@@ -631,7 +833,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock_bh(&sl->lock);
}
@@ -645,7 +847,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
}
/**
@@ -657,7 +859,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock_irq(&sl->lock);
}
@@ -666,7 +868,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
return flags;
}
@@ -695,13 +897,13 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
/**
* read_seqlock_excl() - begin a seqlock_t locking reader section
- * @sl: Pointer to seqlock_t
+ * @sl: Pointer to seqlock_t
*
* read_seqlock_excl opens a seqlock_t locking reader critical section. A
* locking reader exclusively locks out *both* other writers *and* other
diff --git a/include/linux/time.h b/include/linux/time.h
index 4c325bf44ce0..b142cb5f5a53 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -3,7 +3,6 @@
#define _LINUX_TIME_H
# include <linux/cache.h>
-# include <linux/seqlock.h>
# include <linux/math64.h>
# include <linux/time64.h>
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 16c0ed6c50a7..219037f4c08d 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -57,6 +57,7 @@
#define __LINUX_VIDEODEV2_H
#include <linux/time.h> /* need struct timeval */
+#include <linux/kernel.h>
#include <uapi/linux/videodev2.h>
#endif /* __LINUX_VIDEODEV2_H */
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index d7554252404c..850424e5d030 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -48,14 +48,6 @@ struct ww_acquire_ctx {
#endif
};
-struct ww_mutex {
- struct mutex base;
- struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
- struct ww_class *ww_class;
-#endif
-};
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
, .ww_class = class
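
struct ww_mutex itself moves to mutex.h (the +11 lines there in the
diffstat), presumably so seqlock.h can reach the type for its
ww_mutex-associated seqcount variant without an include cycle. The ww_mutex
API is untouched; a minimal init sketch with a hypothetical class::

    static DEFINE_WW_CLASS(my_ww_class);    /* hypothetical */
    static struct ww_mutex my_ww_lock;

    static int __init my_setup(void)
    {
            ww_mutex_init(&my_ww_lock, &my_ww_class);
            return 0;
    }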
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index c7bfddfc65b0..439379ca9ffa 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -298,7 +298,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize);
extern struct hlist_nulls_head *nf_conntrack_hash;
extern unsigned int nf_conntrack_htable_size;
-extern seqcount_t nf_conntrack_generation;
+extern seqcount_spinlock_t nf_conntrack_generation;
extern unsigned int nf_conntrack_max;
/* must be called with rcu read lock held */
diff --git a/init/init_task.c b/init/init_task.c
index a3eb3847e1f4..89024e8c4e95 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -154,7 +154,8 @@ struct task_struct init_task
.trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
#endif
#ifdef CONFIG_CPUSETS
- .mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq),
+ .mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
+ &init_task.alloc_lock),
#endif
#ifdef CONFIG_RT_MUTEXES
.pi_waiters = RB_ROOT_CACHED,
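
SEQCNT_SPINLOCK_ZERO() is the static counterpart of
seqcount_spinlock_init(): it records the associated lock at initializer time
so lockdep can later assert that writers hold it. A standalone sketch with
hypothetical names::

    static DEFINE_SPINLOCK(my_lock);
    static seqcount_spinlock_t my_seq =
            SEQCNT_SPINLOCK_ZERO(my_seq, &my_lock);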
diff --git a/kernel/fork.c b/kernel/fork.c
index 35e9894d394c..4d32190861bd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2011,7 +2011,7 @@ static __latent_entropy struct task_struct *copy_process(
#ifdef CONFIG_CPUSETS
p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
- seqcount_init(&p->mems_allowed_seq);
+ seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
memset(&p->irqtrace, 0, sizeof(p->irqtrace));
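
With the association in place, writers to mems_allowed_seq are expected to
hold p->alloc_lock, which task_lock() takes. A write-side sketch patterned on
the cpuset update path (simplified; new_mems is a hypothetical local)::

    task_lock(p);                           /* takes p->alloc_lock */
    write_seqcount_begin(&p->mems_allowed_seq);
    p->mems_allowed = *new_mems;
    write_seqcount_end(&p->mems_allowed_seq);
    task_unlock(p);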
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 5525cd3ba0c8..02ef87f50df2 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -423,7 +423,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
seq_time(m, lt->min);
seq_time(m, lt->max);
seq_time(m, lt->total);
- seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
+ seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
}
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
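
The div64_u64() switch matters on 32-bit: div_s64() accepts only a 32-bit
divisor, while div64_u64() divides two full 64-bit values, and neither emits
the libgcc 64-bit division. A hedged illustration of the distinction::

    #include <linux/math64.h>

    static u64 avg_lock_time(u64 total, u64 nr)
    {
            /* div_s64(total, nr) would silently truncate nr to s32 */
            return nr ? div64_u64(total, nr) : 0;
    }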
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index d89da1c7e005..c4038511d5c9 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -135,7 +135,11 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
* timer->base->cpu_base
*/
static struct hrtimer_cpu_base migration_cpu_base = {
- .clock_base = { { .cpu_base = &migration_cpu_base, }, },
+ .clock_base = { {
+ .cpu_base = &migration_cpu_base,
+ .seq = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
+ &migration_cpu_base.lock),
+ }, },
};
#define migration_base migration_cpu_base.clock_base[0]
@@ -1998,8 +2002,11 @@ int hrtimers_prepare_cpu(unsigned int cpu)
int i;
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- cpu_base->clock_base[i].cpu_base = cpu_base;
- timerqueue_init_head(&cpu_base->clock_base[i].active);
+ struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];
+
+ clock_b->cpu_base = cpu_base;
+ seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
+ timerqueue_init_head(&clock_b->active);
}
cpu_base->cpu = cpu;
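
Each clock base's seq is now a seqcount_raw_spinlock_t tied to the per-CPU
base lock. The general write pattern for this variant, with hypothetical
names::

    static DEFINE_RAW_SPINLOCK(my_lock);    /* hypothetical */
    static seqcount_raw_spinlock_t my_seq =
            SEQCNT_RAW_SPINLOCK_ZERO(my_seq, &my_lock);

    static void my_update(void)
    {
            raw_spin_lock(&my_lock);
            write_seqcount_begin(&my_seq);  /* lockdep: is my_lock held? */
            /* ... update the protected data ... */
            write_seqcount_end(&my_seq);
            raw_spin_unlock(&my_lock);
    }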
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 63a632f9896c..406306b33452 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -39,18 +39,19 @@ enum timekeeping_adv_mode {
TK_ADV_FREQ
};
+static DEFINE_RAW_SPINLOCK(timekeeper_lock);
+
/*
* The most important data for readout fits into a single 64 byte
* cache line.
*/
static struct {
- seqcount_t seq;
+ seqcount_raw_spinlock_t seq;
struct timekeeper timekeeper;
} tk_core ____cacheline_aligned = {
- .seq = SEQCNT_ZERO(tk_core.seq),
+ .seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
};
-static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;
/**
@@ -63,7 +64,7 @@ static struct timekeeper shadow_timekeeper;
* See @update_fast_timekeeper() below.
*/
struct tk_fast {
- seqcount_t seq;
+ seqcount_raw_spinlock_t seq;
struct tk_read_base base[2];
};
@@ -80,11 +81,13 @@ static struct clocksource dummy_clock = {
};
static struct tk_fast tk_fast_mono ____cacheline_aligned = {
+ .seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_mono.seq, &timekeeper_lock),
.base[0] = { .clock = &dummy_clock, },
.base[1] = { .clock = &dummy_clock, },
};
static struct tk_fast tk_fast_raw ____cacheline_aligned = {
+ .seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_raw.seq, &timekeeper_lock),
.base[0] = { .clock = &dummy_clock, },
.base[1] = { .clock = &dummy_clock, },
};
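
The read side is unaffected by the type change; read_seqcount_begin() and
read_seqcount_retry() accept the lock-associated types directly. Roughly how
ktime_get() consumes tk_core.seq (a simplified sketch, not the verbatim
function)::

    struct timekeeper *tk = &tk_core.timekeeper;
    unsigned int seq;
    ktime_t base;
    u64 nsecs;

    do {
            seq = read_seqcount_begin(&tk_core.seq);
            base = tk->tkr_mono.base;
            nsecs = timekeeping_get_ns(&tk->tkr_mono);
    } while (read_seqcount_retry(&tk_core.seq, seq));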
@@ -157,7 +160,7 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
* tk_clock_read - atomic clocksource read() helper
*
* This helper is necessary to use in the read paths because, while the
- * seqlock ensures we don't return a bad value while structures are updated,
+ * seqcount ensures we don't return a bad value while structures are updated,
* it doesn't protect from potential crashes. There is the possibility that
* the tkr's clocksource may change between the read reference, and the
* clock reference passed to the read function. This can cause crashes if
@@ -222,10 +225,10 @@ static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
unsigned int seq;
/*
- * Since we're called holding a seqlock, the data may shift
+ * Since we're called holding a seqcount, the data may shift
* under us while we're doing the calculation. This can cause
* false positives, since we'd note a problem but throw the
- * results away. So nest another seqlock here to atomically
+ * results away. So nest another seqcount here to atomically
* grab the points we are checking with.
*/
do {
@@ -486,7 +489,7 @@ EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
*
* To keep it NMI safe since we're accessing from tracing, we're not using a
* separate timekeeper with updates to monotonic clock and boot offset
- * protected with seqlocks. This has the following minor side effects:
+ * protected with seqcounts. This has the following minor side effects:
*
* (1) It's possible that a timestamp is taken after the boot offset is updated
* but before the timekeeper is updated. If this happens, the new boot offset
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e38b60fc183e..5b97d233f89b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
-seqcount_t nf_conntrack_generation __read_mostly;
+seqcount_spinlock_t nf_conntrack_generation __read_mostly;
static unsigned int nf_conntrack_hash_rnd __read_mostly;
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
@@ -2588,7 +2588,8 @@ int nf_conntrack_init_start(void)
/* struct nf_ct_ext uses u8 to store offsets/size */
BUILD_BUG_ON(total_extension_size() > 255u);
- seqcount_init(&nf_conntrack_generation);
+ seqcount_spinlock_init(&nf_conntrack_generation,
+ &nf_conntrack_locks_all_lock);
for (i = 0; i < CONNTRACK_LOCKS; i++)
spin_lock_init(&nf_conntrack_locks[i]);
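
Associating nf_conntrack_generation with nf_conntrack_locks_all_lock lets
lockdep check that hash resizers hold the global conntrack lock. The write
side then has this shape (a sketch patterned on nf_conntrack_hash_resize(),
simplified)::

    nf_conntrack_all_lock();        /* takes nf_conntrack_locks_all_lock */
    write_seqcount_begin(&nf_conntrack_generation);
    /* ... publish the resized hash table ... */
    write_seqcount_end(&nf_conntrack_generation);
    nf_conntrack_all_unlock();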
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index b6aad3fc46c3..4b2834fd17b2 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -18,7 +18,7 @@
struct nft_rbtree {
struct rb_root root;
rwlock_t lock;
- seqcount_t count;
+ seqcount_rwlock_t count;
struct delayed_work gc_work;
};
@@ -523,7 +523,7 @@ static int nft_rbtree_init(const struct nft_set *set,
struct nft_rbtree *priv = nft_set_priv(set);
rwlock_init(&priv->lock);
- seqcount_init(&priv->count);
+ seqcount_rwlock_init(&priv->count, &priv->lock);
priv->root = RB_ROOT;
INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
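
The rbtree set walks the tree locklessly under priv->count and retries under
priv->lock on contention, which is exactly the reader/lock pairing that
seqcount_rwlock_t encodes. A simplified sketch of that lookup shape
(my_walk() is hypothetical)::

    unsigned int seq;
    bool found;

    seq = read_seqcount_begin(&priv->count);
    found = my_walk(priv);                  /* lockless fast path */
    if (!read_seqcount_retry(&priv->count, seq))
            return found;

    read_lock_bh(&priv->lock);              /* slow path, stable view */
    found = my_walk(priv);
    read_unlock_bh(&priv->lock);
    return found;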
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 042ea9b40c7b..d5280fd6f9c1 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -122,7 +122,7 @@ struct xfrm_pol_inexact_bin {
/* list containing '*:*' policies */
struct hlist_head hhead;
- seqcount_t count;
+ seqcount_spinlock_t count;
/* tree sorted by daddr/prefix */
struct rb_root root_d;
@@ -155,7 +155,7 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
__read_mostly;
static struct kmem_cache *xfrm_dst_cache __ro_after_init;
-static __read_mostly seqcount_t xfrm_policy_hash_generation;
+static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation;
static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;
@@ -719,7 +719,7 @@ xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
INIT_HLIST_HEAD(&bin->hhead);
bin->root_d = RB_ROOT;
bin->root_s = RB_ROOT;
- seqcount_init(&bin->count);
+ seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
&bin->k, &bin->head,
@@ -1899,7 +1899,7 @@ static int xfrm_policy_match(const struct xfrm_policy *pol,
static struct xfrm_pol_inexact_node *
xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
- seqcount_t *count,
+ seqcount_spinlock_t *count,
const xfrm_address_t *addr, u16 family)
{
const struct rb_node *parent;
@@ -4157,7 +4157,7 @@ void __init xfrm_init(void)
{
register_pernet_subsys(&xfrm_net_ops);
xfrm_dev_init();
- seqcount_init(&xfrm_policy_hash_generation);
+ seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex);
xfrm_input_init();
#ifdef CONFIG_XFRM_ESPINTCP
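
seqcount_mutex_t is the sleeping-lock variant: write_seqcount_begin() asserts
that hash_resize_mutex is held and disables preemption only around the write
section itself. A sketch of the resize writer (patterned on
xfrm_hash_resize(), simplified)::

    mutex_lock(&hash_resize_mutex);
    write_seqcount_begin(&xfrm_policy_hash_generation);
    /* ... rebuild the policy hash tables ... */
    write_seqcount_end(&xfrm_policy_hash_generation);
    mutex_unlock(&hash_resize_mutex);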
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index ef7ed916ad4a..d6408bb497dc 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -303,7 +303,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
INIT_LIST_HEAD(&irqfd->list);
INIT_WORK(&irqfd->inject, irqfd_inject);
INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
- seqcount_init(&irqfd->irq_entry_sc);
+ seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);
f = fdget(args->fd);
if (!f.file) {