author     Ingo Molnar <mingo@kernel.org>  2025-04-11 07:40:20 +0200
committer  Ingo Molnar <mingo@kernel.org>  2025-04-11 11:01:33 +0200
commit     a5c832e0476e461af46a0aa9bda43a573adbe63f (patch)
tree       2de4d28cd39c5e4805c6f21bdc72cc785a3f414c
parent     5236b6a0fe921f5de53b8eeea2d8fdd6d643dd7f (diff)
x86/alternatives: Rename 'poking_mm' to 'text_poke_mm'
Put it into the text_poke_* namespace of <asm/text-patching.h>.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250411054105.2341982-9-mingo@kernel.org
-rw-r--r--  arch/x86/include/asm/text-patching.h |  2
-rw-r--r--  arch/x86/kernel/alternative.c        | 18
-rw-r--r--  arch/x86/mm/init.c                   |  8
3 files changed, 14 insertions, 14 deletions
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 93a6b7bc78bd..7a95c0820b3e 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -128,7 +128,7 @@ void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
 }
 
 extern int after_bootmem;
-extern __ro_after_init struct mm_struct *poking_mm;
+extern __ro_after_init struct mm_struct *text_poke_mm;
 extern __ro_after_init unsigned long poking_addr;
 
 #ifndef CONFIG_UML_X86
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index d2cd0d815130..8ce0d469e32f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -2191,7 +2191,7 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
 	return temp_state;
 }
 
-__ro_after_init struct mm_struct *poking_mm;
+__ro_after_init struct mm_struct *text_poke_mm;
 __ro_after_init unsigned long poking_addr;
 
 static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
@@ -2201,7 +2201,7 @@ static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
 	switch_mm_irqs_off(NULL, prev_state.mm, current);
 
 	/* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
-	cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm));
+	cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(text_poke_mm));
 
 	/*
 	 * Restore the breakpoints if they were disabled before the temporary mm
@@ -2266,7 +2266,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
 	/*
 	 * The lock is not really needed, but this allows to avoid open-coding.
 	 */
-	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+	ptep = get_locked_pte(text_poke_mm, poking_addr, &ptl);
 
 	/*
 	 * This must not fail; preallocated in poking_init().
@@ -2276,18 +2276,18 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
 	local_irq_save(flags);
 
 	pte = mk_pte(pages[0], pgprot);
-	set_pte_at(poking_mm, poking_addr, ptep, pte);
+	set_pte_at(text_poke_mm, poking_addr, ptep, pte);
 
 	if (cross_page_boundary) {
 		pte = mk_pte(pages[1], pgprot);
-		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
+		set_pte_at(text_poke_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
 	}
 
 	/*
 	 * Loading the temporary mm behaves as a compiler barrier, which
 	 * guarantees that the PTE will be set at the time memcpy() is done.
 	 */
-	prev = use_temporary_mm(poking_mm);
+	prev = use_temporary_mm(text_poke_mm);
 
 	kasan_disable_current();
 	func((u8 *)poking_addr + offset_in_page(addr), src, len);
@@ -2299,9 +2299,9 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
 	 */
 	barrier();
 
-	pte_clear(poking_mm, poking_addr, ptep);
+	pte_clear(text_poke_mm, poking_addr, ptep);
 	if (cross_page_boundary)
-		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
+		pte_clear(text_poke_mm, poking_addr + PAGE_SIZE, ptep + 1);
 
 	/*
 	 * Loading the previous page-table hierarchy requires a serializing
@@ -2314,7 +2314,7 @@ static void *__text_poke(text_poke_f func, void *addr, const void *src, size_t l
 	 * Flushing the TLB might involve IPIs, which would require enabled
 	 * IRQs, but not if the mm is not used, as it is in this point.
 	 */
-	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
+	flush_tlb_mm_range(text_poke_mm, poking_addr, poking_addr +
 			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
 			   PAGE_SHIFT, false);
 
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index bfa444a7dbb0..84b52a1ebd48 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -824,11 +824,11 @@ void __init poking_init(void)
 	spinlock_t *ptl;
 	pte_t *ptep;
 
-	poking_mm = mm_alloc();
-	BUG_ON(!poking_mm);
+	text_poke_mm = mm_alloc();
+	BUG_ON(!text_poke_mm);
 
 	/* Xen PV guests need the PGD to be pinned. */
-	paravirt_enter_mmap(poking_mm);
+	paravirt_enter_mmap(text_poke_mm);
 
 	/*
 	 * Randomize the poking address, but make sure that the following page
@@ -848,7 +848,7 @@ void __init poking_init(void)
 	 * needed for poking now. Later, poking may be performed in an atomic
 	 * section, which might cause allocation to fail.
 	 */
-	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+	ptep = get_locked_pte(text_poke_mm, poking_addr, &ptl);
 	BUG_ON(!ptep);
 	pte_unmap_unlock(ptep, ptl);
 }
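For quick orientation, the hunks above all sit on the same code path: __text_poke() briefly installs the target page(s) into the dedicated patching mm and switches to it around the write. The following is a condensed sketch of that sequence, not the exact kernel code (single-page case only; KASAN, breakpoint handling and error checks omitted; names as they appear in the diff):

	/* Map the page to be patched at poking_addr in the patching mm. */
	ptep = get_locked_pte(text_poke_mm, poking_addr, &ptl);
	local_irq_save(flags);
	set_pte_at(text_poke_mm, poking_addr, ptep, mk_pte(pages[0], pgprot));

	/* Switch to text_poke_mm, perform the write, then tear down. */
	prev = use_temporary_mm(text_poke_mm);
	func((u8 *)poking_addr + offset_in_page(addr), src, len);
	barrier();
	pte_clear(text_poke_mm, poking_addr, ptep);
	unuse_temporary_mm(prev);
	flush_tlb_mm_range(text_poke_mm, poking_addr, poking_addr + PAGE_SIZE,
			   PAGE_SHIFT, false);
	local_irq_restore(flags);
	pte_unmap_unlock(ptep, ptl);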