From d7196d87a1559db9ece24852e6167061b199d58d Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Fri, 6 Oct 2023 17:18:43 +0200 Subject: kasan: unify printk prefixes Unify prefixes for printk messages in mm/kasan/. Link: https://lkml.kernel.org/r/35589629806cf0840e5f01ec9d8011a7bad648df.1696605143.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: kernel test robot Signed-off-by: Andrew Morton --- mm/kasan/kasan_test.c | 2 +- mm/kasan/kasan_test_module.c | 2 +- mm/kasan/quarantine.c | 4 +++- mm/kasan/report_generic.c | 6 +++--- 4 files changed, 8 insertions(+), 6 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index b61cc6a42541..c707d6c6e019 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -5,7 +5,7 @@ * Author: Andrey Ryabinin */ -#define pr_fmt(fmt) "kasan_test: " fmt +#define pr_fmt(fmt) "kasan: test: " fmt #include #include diff --git a/mm/kasan/kasan_test_module.c b/mm/kasan/kasan_test_module.c index 7be7bed456ef..8b7b3ea2c74e 100644 --- a/mm/kasan/kasan_test_module.c +++ b/mm/kasan/kasan_test_module.c @@ -5,7 +5,7 @@ * Author: Andrey Ryabinin */ -#define pr_fmt(fmt) "kasan test: %s " fmt, __func__ +#define pr_fmt(fmt) "kasan: test: " fmt #include #include diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index 152dca73f398..ca4529156735 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c @@ -8,6 +8,8 @@ * Based on code by Dmitry Chernenkov. */ +#define pr_fmt(fmt) "kasan: " fmt + #include #include #include @@ -414,7 +416,7 @@ static int __init kasan_cpu_quarantine_init(void) ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online", kasan_cpu_online, kasan_cpu_offline); if (ret < 0) - pr_err("kasan cpu quarantine register failed [%d]\n", ret); + pr_err("cpu quarantine register failed [%d]\n", ret); return ret; } late_initcall(kasan_cpu_quarantine_init); diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c index 51a1e8a8877f..99cbcd73cff7 100644 --- a/mm/kasan/report_generic.c +++ b/mm/kasan/report_generic.c @@ -220,7 +220,7 @@ static bool __must_check tokenize_frame_descr(const char **frame_descr, const size_t tok_len = sep - *frame_descr; if (tok_len + 1 > max_tok_len) { - pr_err("KASAN internal error: frame description too long: %s\n", + pr_err("internal error: frame description too long: %s\n", *frame_descr); return false; } @@ -233,7 +233,7 @@ static bool __must_check tokenize_frame_descr(const char **frame_descr, *frame_descr = sep + 1; if (value != NULL && kstrtoul(token, 10, value)) { - pr_err("KASAN internal error: not a valid number: %s\n", token); + pr_err("internal error: not a valid number: %s\n", token); return false; } @@ -323,7 +323,7 @@ static bool __must_check get_address_stack_frame_info(const void *addr, frame = (const unsigned long *)(mem_ptr + KASAN_GRANULE_SIZE); if (frame[0] != KASAN_CURRENT_STACK_FRAME_MAGIC) { - pr_err("KASAN internal error: frame info validation failed; invalid marker: %lu\n", + pr_err("internal error: frame has invalid marker: %lu\n", frame[0]); return false; } -- cgit v1.2.3 From 01a5ad81637672940844052404678678a0ec8854 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Fri, 6 Oct 2023 17:18:44 +0200 Subject: kasan: use unchecked __memset internally KASAN code is supposed to use the unchecked __memset implementation when accessing its metadata. Change uses of memset to __memset in mm/kasan/. 
Link: https://lkml.kernel.org/r/6f621966c6f52241b5aaa7220c348be90c075371.1696605143.git.andreyknvl@google.com Fixes: 59e6e098d1c1 ("kasan: introduce kasan_complete_mode_report_info") Fixes: 3c5c3cfb9ef4 ("kasan: support backing vmalloc space with real shadow memory") Signed-off-by: Andrey Konovalov Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: kernel test robot Signed-off-by: Andrew Morton --- mm/kasan/report.c | 4 ++-- mm/kasan/shadow.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 6e3cb118d20e..e77facb62900 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -538,7 +538,7 @@ void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_ty start_report(&flags, true); - memset(&info, 0, sizeof(info)); + __memset(&info, 0, sizeof(info)); info.type = type; info.access_addr = ptr; info.access_size = 0; @@ -576,7 +576,7 @@ bool kasan_report(const void *addr, size_t size, bool is_write, start_report(&irq_flags, true); - memset(&info, 0, sizeof(info)); + __memset(&info, 0, sizeof(info)); info.type = KASAN_REPORT_ACCESS; info.access_addr = addr; info.access_size = size; diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index dd772f9d0f08..d687f09a7ae3 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -324,7 +324,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr, if (!page) return -ENOMEM; - memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE); + __memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE); pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL); spin_lock(&init_mm.page_table_lock); -- cgit v1.2.3 From ff093a9632d9de9ff66dfd9db211008138520e13 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Fri, 6 Oct 2023 17:18:45 +0200 Subject: kasan: fix and update KUNIT_EXPECT_KASAN_FAIL comment Update the comment for KUNIT_EXPECT_KASAN_FAIL to describe the parameters this macro accepts. Also drop the mention of the "kasan_status" KUnit resource, as it no longer exists. Link: https://lkml.kernel.org/r/6fad6661e72c407450ae4b385c71bc4a7e1579cd.1696605143.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202308171757.7V5YUcje-lkp@intel.com/ Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Dmitry Vyukov Signed-off-by: Andrew Morton --- mm/kasan/kasan_test.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'mm/kasan') diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index c707d6c6e019..2030c7ff7de9 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -91,10 +91,11 @@ static void kasan_test_exit(struct kunit *test) } /** - * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a - * KASAN report; causes a test failure otherwise. This relies on a KUnit - * resource named "kasan_status". Do not use this name for KUnit resources - * outside of KASAN tests. + * KUNIT_EXPECT_KASAN_FAIL - check that the executed expression produces a + * KASAN report; causes a KUnit test failure otherwise. + * + * @test: Currently executing KUnit test. + * @expression: Expression that must produce a KASAN report. * * For hardware tag-based KASAN, when a synchronous tag fault happens, tag * checking is auto-disabled. 
When this happens, this test handler reenables -- cgit v1.2.3 From 247dbcdbf790c52fc76cf8e327cd0a5778e41e66 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 4 Oct 2023 17:53:07 +0100 Subject: bitops: add xor_unlock_is_negative_byte() Replace clear_bit_and_unlock_is_negative_byte() with xor_unlock_is_negative_byte(). We have a few places that like to lock a folio, set a flag and unlock it again. Allow for the possibility of combining the latter two operations for efficiency. We are guaranteed that the caller holds the lock, so it is safe to unlock it with the xor. The caller must guarantee that nobody else will set the flag without holding the lock; it is not safe to do this with the PG_dirty flag, for example. Link: https://lkml.kernel.org/r/20231004165317.1061855-8-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Cc: Albert Ou Cc: Alexander Gordeev Cc: Andreas Dilger Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Geert Uytterhoeven Cc: Heiko Carstens Cc: Ivan Kokshaysky Cc: Matt Turner Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Richard Henderson Cc: Sven Schnelle Cc: "Theodore Ts'o" Cc: Thomas Bogendoerfer Cc: Vasily Gorbik Signed-off-by: Andrew Morton --- arch/powerpc/include/asm/bitops.h | 17 +++++----------- arch/x86/include/asm/bitops.h | 11 +++++------ include/asm-generic/bitops/instrumented-lock.h | 27 ++++++++++++++------------ include/asm-generic/bitops/lock.h | 21 +++++--------------- kernel/kcsan/kcsan_test.c | 8 ++++---- kernel/kcsan/selftest.c | 8 ++++---- mm/filemap.c | 5 +++++ mm/kasan/kasan_test.c | 7 ++++--- 8 files changed, 47 insertions(+), 57 deletions(-) (limited to 'mm/kasan') diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 7e0f0322912b..40cc3ded60cb 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -234,32 +234,25 @@ static inline int arch_test_and_change_bit(unsigned long nr, } #ifdef CONFIG_PPC64 -static inline unsigned long -clear_bit_unlock_return_word(int nr, volatile unsigned long *addr) +static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, + volatile unsigned long *p) { unsigned long old, t; - unsigned long *p = (unsigned long *)addr + BIT_WORD(nr); - unsigned long mask = BIT_MASK(nr); __asm__ __volatile__ ( PPC_RELEASE_BARRIER "1:" PPC_LLARX "%0,0,%3,0\n" - "andc %1,%0,%2\n" + "xor %1,%0,%2\n" PPC_STLCX "%1,0,%3\n" "bne- 1b\n" : "=&r" (old), "=&r" (t) : "r" (mask), "r" (p) : "cc", "memory"); - return old; + return (old & BIT_MASK(7)) != 0; } -/* - * This is a special function for mm/filemap.c - * Bit 7 corresponds to PG_waiters. 
- */ -#define arch_clear_bit_unlock_is_negative_byte(nr, addr) \ - (clear_bit_unlock_return_word(nr, addr) & BIT_MASK(7)) +#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte #endif /* CONFIG_PPC64 */ diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 2edf68475fec..f03c0a50ec3a 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -94,18 +94,17 @@ arch___clear_bit(unsigned long nr, volatile unsigned long *addr) asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory"); } -static __always_inline bool -arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) +static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, + volatile unsigned long *addr) { bool negative; - asm volatile(LOCK_PREFIX "andb %2,%1" + asm volatile(LOCK_PREFIX "xorb %2,%1" CC_SET(s) : CC_OUT(s) (negative), WBYTE_ADDR(addr) - : "ir" ((char) ~(1 << nr)) : "memory"); + : "iq" ((char)mask) : "memory"); return negative; } -#define arch_clear_bit_unlock_is_negative_byte \ - arch_clear_bit_unlock_is_negative_byte +#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte static __always_inline void arch___clear_bit_unlock(long nr, volatile unsigned long *addr) diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h index eb64bd4f11f3..e8ea3aeda9a9 100644 --- a/include/asm-generic/bitops/instrumented-lock.h +++ b/include/asm-generic/bitops/instrumented-lock.h @@ -58,27 +58,30 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) return arch_test_and_set_bit_lock(nr, addr); } -#if defined(arch_clear_bit_unlock_is_negative_byte) +#if defined(arch_xor_unlock_is_negative_byte) /** - * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom - * byte is negative, for unlock. - * @nr: the bit to clear - * @addr: the address to start counting from + * xor_unlock_is_negative_byte - XOR a single byte in memory and test if + * it is negative, for unlock. + * @mask: Change the bits which are set in this mask. + * @addr: The address of the word containing the byte to change. * + * Changes some of bits 0-6 in the word pointed to by @addr. * This operation is atomic and provides release barrier semantics. + * Used to optimise some folio operations which are commonly paired + * with an unlock or end of writeback. Bit 7 is used as PG_waiters to + * indicate whether anybody is waiting for the unlock. * - * This is a bit of a one-trick-pony for the filemap code, which clears - * PG_locked and tests PG_waiters, + * Return: Whether the top bit of the byte is set. */ -static inline bool -clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) +static inline bool xor_unlock_is_negative_byte(unsigned long mask, + volatile unsigned long *addr) { kcsan_release(); - instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); - return arch_clear_bit_unlock_is_negative_byte(nr, addr); + instrument_atomic_write(addr, sizeof(long)); + return arch_xor_unlock_is_negative_byte(mask, addr); } /* Let everybody know we have it. 
*/ -#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte +#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte #endif #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */ diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h index 40913516e654..6a638e89d130 100644 --- a/include/asm-generic/bitops/lock.h +++ b/include/asm-generic/bitops/lock.h @@ -66,27 +66,16 @@ arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p) raw_atomic_long_set_release((atomic_long_t *)p, old); } -/** - * arch_clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom - * byte is negative, for unlock. - * @nr: the bit to clear - * @addr: the address to start counting from - * - * This is a bit of a one-trick-pony for the filemap code, which clears - * PG_locked and tests PG_waiters, - */ -#ifndef arch_clear_bit_unlock_is_negative_byte -static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr, - volatile unsigned long *p) +#ifndef arch_xor_unlock_is_negative_byte +static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, + volatile unsigned long *p) { long old; - unsigned long mask = BIT_MASK(nr); - p += BIT_WORD(nr); - old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); + old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p); return !!(old & BIT(7)); } -#define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte +#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte #endif #include diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index 0ddbdab5903d..1333d23ac4ef 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c @@ -700,10 +700,10 @@ static void test_barrier_nothreads(struct kunit *test) KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false); KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true); -#ifdef clear_bit_unlock_is_negative_byte - KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); - KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); - KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true); +#ifdef xor_unlock_is_negative_byte + KCSAN_EXPECT_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); + KCSAN_EXPECT_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); + KCSAN_EXPECT_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); #endif kcsan_nestable_atomic_end(); } diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c index 8679322450f2..619be7417420 100644 --- a/kernel/kcsan/selftest.c +++ b/kernel/kcsan/selftest.c @@ -228,10 +228,10 @@ static bool __init test_barrier(void) spin_lock(&test_spinlock); KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock)); -#ifdef clear_bit_unlock_is_negative_byte - KCSAN_CHECK_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); - KCSAN_CHECK_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); - KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var)); +#ifdef xor_unlock_is_negative_byte + KCSAN_CHECK_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); + KCSAN_CHECK_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); + KCSAN_CHECK_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); #endif kcsan_nestable_atomic_end(); diff --git a/mm/filemap.c b/mm/filemap.c index 1ce78c619294..c637863f4643 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1482,6 +1482,11 @@ void 
folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter) } EXPORT_SYMBOL_GPL(folio_add_wait_queue); +#ifdef xor_unlock_is_negative_byte +#define clear_bit_unlock_is_negative_byte(nr, p) \ + xor_unlock_is_negative_byte(1 << nr, p) +#endif + #ifndef clear_bit_unlock_is_negative_byte /* diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index 2030c7ff7de9..821c9ea0473a 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -1099,9 +1099,10 @@ static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr) KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr)); KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr)); -#if defined(clear_bit_unlock_is_negative_byte) - KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = - clear_bit_unlock_is_negative_byte(nr, addr)); +#if defined(xor_unlock_is_negative_byte) + if (nr < 7) + KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = + xor_unlock_is_negative_byte(1 << nr, addr)); #endif } -- cgit v1.2.3 From f12fb73b74fd23ca33e3f95fb996f295eeae1da7 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 4 Oct 2023 17:53:14 +0100 Subject: mm: delete checks for xor_unlock_is_negative_byte() Architectures which don't define their own use the one in asm-generic/bitops/lock.h. Get rid of all the ifdefs around "maybe we don't have it". Link: https://lkml.kernel.org/r/20231004165317.1061855-15-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Geert Uytterhoeven Cc: Albert Ou Cc: Alexander Gordeev Cc: Andreas Dilger Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Heiko Carstens Cc: Ivan Kokshaysky Cc: Matt Turner Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Richard Henderson Cc: Sven Schnelle Cc: "Theodore Ts'o" Cc: Thomas Bogendoerfer Cc: Vasily Gorbik Signed-off-by: Andrew Morton --- arch/alpha/include/asm/bitops.h | 1 - arch/m68k/include/asm/bitops.h | 1 - arch/mips/include/asm/bitops.h | 1 - arch/riscv/include/asm/bitops.h | 1 - include/asm-generic/bitops/instrumented-lock.h | 5 ----- include/asm-generic/bitops/lock.h | 1 - kernel/kcsan/kcsan_test.c | 3 --- kernel/kcsan/selftest.c | 3 --- mm/filemap.c | 30 +------------------------- mm/kasan/kasan_test.c | 3 --- 10 files changed, 1 insertion(+), 48 deletions(-) (limited to 'mm/kasan') diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h index b50ad6b83e85..3e33621922c3 100644 --- a/arch/alpha/include/asm/bitops.h +++ b/arch/alpha/include/asm/bitops.h @@ -305,7 +305,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask, return (old & BIT(7)) != 0; } -#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte /* * ffz = Find First Zero in word. 
Undefined if no zero exists, diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index 80ee36095905..14c64a6f1217 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -339,7 +339,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask, return result; #endif } -#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte /* * The true 68020 and more advanced processors support the "bfffo" diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index d98a05c478f4..89f73d1a4ea4 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -301,7 +301,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask, return res; } -#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte #undef __bit_op #undef __test_bit_op diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h index 15e3044298a2..65f6eee4ab8d 100644 --- a/arch/riscv/include/asm/bitops.h +++ b/arch/riscv/include/asm/bitops.h @@ -202,7 +202,6 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask, : "memory"); return (res & BIT(7)) != 0; } -#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte #undef __test_and_op_bit #undef __op_bit diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h index e8ea3aeda9a9..542d3727ee4e 100644 --- a/include/asm-generic/bitops/instrumented-lock.h +++ b/include/asm-generic/bitops/instrumented-lock.h @@ -58,7 +58,6 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) return arch_test_and_set_bit_lock(nr, addr); } -#if defined(arch_xor_unlock_is_negative_byte) /** * xor_unlock_is_negative_byte - XOR a single byte in memory and test if * it is negative, for unlock. @@ -80,8 +79,4 @@ static inline bool xor_unlock_is_negative_byte(unsigned long mask, instrument_atomic_write(addr, sizeof(long)); return arch_xor_unlock_is_negative_byte(mask, addr); } -/* Let everybody know we have it. 
*/ -#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte -#endif - #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */ diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h index 6a638e89d130..14d4ec8c5152 100644 --- a/include/asm-generic/bitops/lock.h +++ b/include/asm-generic/bitops/lock.h @@ -75,7 +75,6 @@ static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask, old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p); return !!(old & BIT(7)); } -#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte #endif #include diff --git a/kernel/kcsan/kcsan_test.c b/kernel/kcsan/kcsan_test.c index 1333d23ac4ef..015586217875 100644 --- a/kernel/kcsan/kcsan_test.c +++ b/kernel/kcsan/kcsan_test.c @@ -699,12 +699,9 @@ static void test_barrier_nothreads(struct kunit *test) KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true); KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false); KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true); - -#ifdef xor_unlock_is_negative_byte KCSAN_EXPECT_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); KCSAN_EXPECT_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); KCSAN_EXPECT_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var), true); -#endif kcsan_nestable_atomic_end(); } diff --git a/kernel/kcsan/selftest.c b/kernel/kcsan/selftest.c index 619be7417420..84a1200271af 100644 --- a/kernel/kcsan/selftest.c +++ b/kernel/kcsan/selftest.c @@ -227,12 +227,9 @@ static bool __init test_barrier(void) KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock)); spin_lock(&test_spinlock); KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock)); - -#ifdef xor_unlock_is_negative_byte KCSAN_CHECK_RW_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); KCSAN_CHECK_READ_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); KCSAN_CHECK_WRITE_BARRIER(xor_unlock_is_negative_byte(1, &test_var)); -#endif kcsan_nestable_atomic_end(); return ret; diff --git a/mm/filemap.c b/mm/filemap.c index c637863f4643..458377e9a184 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1482,34 +1482,6 @@ void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter) } EXPORT_SYMBOL_GPL(folio_add_wait_queue); -#ifdef xor_unlock_is_negative_byte -#define clear_bit_unlock_is_negative_byte(nr, p) \ - xor_unlock_is_negative_byte(1 << nr, p) -#endif - -#ifndef clear_bit_unlock_is_negative_byte - -/* - * PG_waiters is the high bit in the same byte as PG_lock. - * - * On x86 (and on many other architectures), we can clear PG_lock and - * test the sign bit at the same time. But if the architecture does - * not support that special operation, we just do this all by hand - * instead. - * - * The read of PG_waiters has to be after (or concurrently with) PG_locked - * being cleared, but a memory barrier should be unnecessary since it is - * in the same byte as PG_locked. - */ -static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem) -{ - clear_bit_unlock(nr, mem); - /* smp_mb__after_atomic(); */ - return test_bit(PG_waiters, mem); -} - -#endif - /** * folio_unlock - Unlock a locked folio. * @folio: The folio. 
@@ -1525,7 +1497,7 @@ void folio_unlock(struct folio *folio) BUILD_BUG_ON(PG_waiters != 7); BUILD_BUG_ON(PG_locked > 7); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); - if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0))) + if (xor_unlock_is_negative_byte(1 << PG_locked, folio_flags(folio, 0))) folio_wake_bit(folio, PG_locked); } EXPORT_SYMBOL(folio_unlock); diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c index 821c9ea0473a..8281eb42464b 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test.c @@ -1098,12 +1098,9 @@ static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr) KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr)); KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr)); KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr)); - -#if defined(xor_unlock_is_negative_byte) if (nr < 7) KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = xor_unlock_is_negative_byte(1 << nr, addr)); -#endif } static void kasan_bitops_generic(struct kunit *test) -- cgit v1.2.3
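A note on the pr_fmt() mechanism the first patch relies on: the kernel's pr_err() and friends wrap the format string in pr_fmt() at compile time, which is why the prefix-unification patch can move the "kasan: " text out of every call site and into a single #define per file. Below is a minimal userspace model of that mechanism, assuming a stderr-backed pr_err() stand-in (the real one expands to printk(KERN_ERR pr_fmt(fmt), ...)):

#include <stdio.h>

/* Must be defined before the printk helpers, as in mm/kasan/quarantine.c. */
#define pr_fmt(fmt) "kasan: " fmt

/* Illustrative stand-in for the kernel's pr_err(). */
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	int ret = -12;	/* e.g. -ENOMEM from cpuhp_setup_state() */

	/*
	 * String-literal concatenation happens at compile time, so this
	 * prints "kasan: cpu quarantine register failed [-12]".
	 */
	pr_err("cpu quarantine register failed [%d]\n", ret);
	return 0;
}

Because the prefix is applied by the macro, the patched call sites can drop the repeated "kasan"/"KASAN" text without changing what is logged.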
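The __memset patch depends on the split between instrumented string ops (memset, which KASAN checks against shadow metadata) and their double-underscore variants (__memset, which bypass the checks). The sketch below models that split in userspace under loudly stated assumptions: is_metadata() and the flat metadata buffer are illustrative stand-ins for the kernel's shadow lookup, not its implementation.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * Stand-in for KASAN-internal metadata that ordinary (checked) accesses
 * must not touch; the real kernel consults shadow memory instead.
 */
static char metadata[32];

static bool is_metadata(const void *addr)
{
	const char *p = addr;

	return p >= metadata && p < metadata + sizeof(metadata);
}

/*
 * Model of an instrumented memset: the compiler-inserted KASAN hooks
 * check the range before writing and would report on metadata.
 */
static void *checked_memset(void *s, int c, size_t n)
{
	if (is_metadata(s)) {
		fprintf(stderr, "kasan: invalid write of %zu bytes\n", n);
		return s;
	}
	return memset(s, c, n);
}

/* Model of the unchecked __memset: plain memset with no check in front. */
#define __memset_model memset

int main(void)
{
	checked_memset(metadata, 0, sizeof(metadata));	/* trips the check */
	__memset_model(metadata, 0, sizeof(metadata));	/* bypasses it */
	return 0;
}

This is why mm/kasan/report.c and shadow.c must use __memset when zeroing their own structures: the checked variant could fire on (or recurse into) the very code producing a report.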
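Finally, the generic arch_xor_unlock_is_negative_byte() fallback added above boils down to an atomic fetch-XOR with release ordering plus a test of bit 7 of the old value (PG_waiters in the filemap code). XOR is safe here only because the caller is guaranteed to hold the lock bit, so flipping it always clears it. A self-contained C11 model of that behaviour follows; the _model suffix and the main() driver are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BIT(nr)		(1UL << (nr))
#define PG_locked	0
#define PG_waiters	7

static bool xor_unlock_is_negative_byte_model(unsigned long mask,
					      _Atomic unsigned long *p)
{
	/* fetch-XOR with release semantics, like
	 * raw_atomic_long_fetch_xor_release() in the generic fallback. */
	unsigned long old = atomic_fetch_xor_explicit(p, mask,
						      memory_order_release);

	return (old & BIT(PG_waiters)) != 0;
}

int main(void)
{
	_Atomic unsigned long flags = BIT(PG_locked) | BIT(PG_waiters);

	/* Clearing PG_locked via XOR also reports that a waiter is queued. */
	if (xor_unlock_is_negative_byte_model(BIT(PG_locked), &flags))
		printf("unlocked; wake waiters\n");

	/* No waiter this time: the XOR still unlocks, but reports false. */
	atomic_store(&flags, BIT(PG_locked));
	if (!xor_unlock_is_negative_byte_model(BIT(PG_locked), &flags))
		printf("unlocked; nobody waiting\n");

	return 0;
}

This also shows why kasan_bitops_test_and_modify() now guards the call with nr < 7: the mask may only change bits 0-6, since XOR-ing bit 7 itself would flip PG_waiters rather than merely observe it.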