Diffstat (limited to 'arch/riscv/include')
-rw-r--r--  arch/riscv/include/asm/asm.h             |  10
-rw-r--r--  arch/riscv/include/asm/bitops.h          | 138
-rw-r--r--  arch/riscv/include/asm/compat.h          |  19
-rw-r--r--  arch/riscv/include/asm/cpufeature.h      |  31
-rw-r--r--  arch/riscv/include/asm/elf.h             |  11
-rw-r--r--  arch/riscv/include/asm/errata_list.h     |  13
-rw-r--r--  arch/riscv/include/asm/hwcap.h           |   1
-rw-r--r--  arch/riscv/include/asm/membarrier.h      |  50
-rw-r--r--  arch/riscv/include/asm/pgalloc.h         |  53
-rw-r--r--  arch/riscv/include/asm/pgtable.h         |  36
-rw-r--r--  arch/riscv/include/asm/processor.h       |  31
-rw-r--r--  arch/riscv/include/asm/simd.h            |   4
-rw-r--r--  arch/riscv/include/asm/suspend.h         |   3
-rw-r--r--  arch/riscv/include/asm/sync_core.h       |  29
-rw-r--r--  arch/riscv/include/asm/tlb.h             |  18
-rw-r--r--  arch/riscv/include/asm/vector.h          |  11
-rw-r--r--  arch/riscv/include/asm/vendorid_list.h   |   2
-rw-r--r--  arch/riscv/include/asm/vmalloc.h         |  61
18 files changed, 273 insertions, 248 deletions
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index b0487b39e674..776354895b81 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -183,6 +183,16 @@
 	REG_L x31, PT_T6(sp)
 	.endm
 
+/* Annotate a function as being unsuitable for kprobes. */
+#ifdef CONFIG_KPROBES
+#define ASM_NOKPROBE(name)				\
+	.pushsection "_kprobe_blacklist", "aw";		\
+	RISCV_PTR name;					\
+	.popsection
+#else
+#define ASM_NOKPROBE(name)
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_RISCV_ASM_H */
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 9ffc35537024..c4c2173dfe99 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -22,6 +22,16 @@
 #include <asm-generic/bitops/fls.h>
 
 #else
+#define __HAVE_ARCH___FFS
+#define __HAVE_ARCH___FLS
+#define __HAVE_ARCH_FFS
+#define __HAVE_ARCH_FLS
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+
 #include <asm/alternative-macros.h>
 #include <asm/hwcap.h>
 
@@ -37,8 +47,6 @@
 static __always_inline unsigned long variable__ffs(unsigned long word)
 {
-	int num;
-
 	asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
				      RISCV_ISA_EXT_ZBB, 1)
			  : : : : legacy);
 
@@ -52,32 +60,7 @@ static __always_inline unsigned long variable__ffs(unsigned long word)
 	return word;
 
 legacy:
-	num = 0;
-#if BITS_PER_LONG == 64
-	if ((word & 0xffffffff) == 0) {
-		num += 32;
-		word >>= 32;
-	}
-#endif
-	if ((word & 0xffff) == 0) {
-		num += 16;
-		word >>= 16;
-	}
-	if ((word & 0xff) == 0) {
-		num += 8;
-		word >>= 8;
-	}
-	if ((word & 0xf) == 0) {
-		num += 4;
-		word >>= 4;
-	}
-	if ((word & 0x3) == 0) {
-		num += 2;
-		word >>= 2;
-	}
-	if ((word & 0x1) == 0)
-		num += 1;
-	return num;
+	return generic___ffs(word);
 }
 
 /**
@@ -93,8 +76,6 @@
 static __always_inline unsigned long variable__fls(unsigned long word)
 {
-	int num;
-
 	asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
				      RISCV_ISA_EXT_ZBB, 1)
			  : : : : legacy);
 
@@ -108,32 +89,7 @@ static __always_inline unsigned long variable__fls(unsigned long word)
 	return BITS_PER_LONG - 1 - word;
 
 legacy:
-	num = BITS_PER_LONG - 1;
-#if BITS_PER_LONG == 64
-	if (!(word & (~0ul << 32))) {
-		num -= 32;
-		word <<= 32;
-	}
-#endif
-	if (!(word & (~0ul << (BITS_PER_LONG - 16)))) {
-		num -= 16;
-		word <<= 16;
-	}
-	if (!(word & (~0ul << (BITS_PER_LONG - 8)))) {
-		num -= 8;
-		word <<= 8;
-	}
-	if (!(word & (~0ul << (BITS_PER_LONG - 4)))) {
-		num -= 4;
-		word <<= 4;
-	}
-	if (!(word & (~0ul << (BITS_PER_LONG - 2)))) {
-		num -= 2;
-		word <<= 2;
-	}
-	if (!(word & (~0ul << (BITS_PER_LONG - 1))))
-		num -= 1;
-	return num;
+	return generic___fls(word);
 }
 
 /**
@@ -149,46 +105,23 @@
 static __always_inline int variable_ffs(int x)
 {
-	int r;
-
-	if (!x)
-		return 0;
-
 	asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
				      RISCV_ISA_EXT_ZBB, 1)
			  : : : : legacy);
 
+	if (!x)
+		return 0;
+
 	asm volatile (".option push\n"
		      ".option arch,+zbb\n"
		      CTZW "%0, %1\n"
		      ".option pop\n"
-		      : "=r" (r) : "r" (x) :);
+		      : "=r" (x) : "r" (x) :);
 
-	return r + 1;
+	return x + 1;
 
 legacy:
-	r = 1;
-	if (!(x & 0xffff)) {
-		x >>= 16;
-		r += 16;
-	}
-	if (!(x & 0xff)) {
-		x >>= 8;
-		r += 8;
-	}
-	if (!(x & 0xf)) {
-		x >>= 4;
-		r += 4;
-	}
-	if (!(x & 3)) {
-		x >>= 2;
-		r += 2;
-	}
-	if (!(x & 1)) {
-		x >>= 1;
-		r += 1;
-	}
-	return r;
+	return generic_ffs(x);
 }
 
 /**
@@ -204,46 +137,23 @@
 static __always_inline int variable_fls(unsigned int x)
 {
-	int r;
-
-	if (!x)
-		return 0;
-
 	asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
				      RISCV_ISA_EXT_ZBB, 1)
			  : : : : legacy);
 
+	if (!x)
+		return 0;
+
 	asm volatile (".option push\n"
		      ".option arch,+zbb\n"
		      CLZW "%0, %1\n"
		      ".option pop\n"
-		      : "=r" (r) : "r" (x) :);
+		      : "=r" (x) : "r" (x) :);
 
-	return 32 - r;
+	return 32 - x;
 
 legacy:
-	r = 32;
-	if (!(x & 0xffff0000u)) {
-		x <<= 16;
-		r -= 16;
-	}
-	if (!(x & 0xff000000u)) {
-		x <<= 8;
-		r -= 8;
-	}
-	if (!(x & 0xf0000000u)) {
-		x <<= 4;
-		r -= 4;
-	}
-	if (!(x & 0xc0000000u)) {
-		x <<= 2;
-		r -= 2;
-	}
-	if (!(x & 0x80000000u)) {
-		x <<= 1;
-		r -= 1;
-	}
-	return r;
+	return generic_fls(x);
 }
 
 /**
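The bitops rewrite above keeps the Zbb fast paths and delegates the pre-Zbb fallbacks to the generic_*() helpers. For reference, a userspace sketch (my illustration using compiler builtins, not kernel code) of the contracts both paths must agree on:

#include <assert.h>

/* __ffs()/__fls() return bit indices and are undefined for 0;
 * ffs()/fls() are 1-based and return 0 when no bit is set. */
static unsigned long sketch___ffs(unsigned long w) { return __builtin_ctzl(w); }
static unsigned long sketch___fls(unsigned long w) { return 63 - __builtin_clzl(w); } /* assumes 64-bit long */
static int sketch_ffs(int x) { return x ? __builtin_ctz(x) + 1 : 0; }
static int sketch_fls(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }

int main(void)
{
	assert(sketch___ffs(0x10) == 4);   /* index of lowest set bit */
	assert(sketch___fls(0x10) == 4);   /* index of highest set bit */
	assert(sketch_ffs(0x10) == 5);     /* 1-based position */
	assert(sketch_fls(0x10) == 5);
	return 0;
}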
diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
index 2ac955b51148..aa103530a5c8 100644
--- a/arch/riscv/include/asm/compat.h
+++ b/arch/riscv/include/asm/compat.h
@@ -14,9 +14,28 @@
 static inline int is_compat_task(void)
 {
+	if (!IS_ENABLED(CONFIG_COMPAT))
+		return 0;
+
 	return test_thread_flag(TIF_32BIT);
 }
 
+static inline int is_compat_thread(struct thread_info *thread)
+{
+	if (!IS_ENABLED(CONFIG_COMPAT))
+		return 0;
+
+	return test_ti_thread_flag(thread, TIF_32BIT);
+}
+
+static inline void set_compat_task(bool is_compat)
+{
+	if (is_compat)
+		set_thread_flag(TIF_32BIT);
+	else
+		clear_thread_flag(TIF_32BIT);
+}
+
 struct compat_user_regs_struct {
 	compat_ulong_t pc;
 	compat_ulong_t ra;
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 5a626ed2c47a..46061f5e9764 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright 2022-2023 Rivos, Inc
+ * Copyright 2022-2024 Rivos, Inc
 */
 
 #ifndef _ASM_CPUFEATURE_H
@@ -28,29 +28,38 @@ struct riscv_isainfo {
 
 DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
 
-DECLARE_PER_CPU(long, misaligned_access_speed);
-
 /* Per-cpu ISA extensions. */
 extern struct riscv_isainfo hart_isa[NR_CPUS];
 
 void riscv_user_isa_enable(void);
 
-#ifdef CONFIG_RISCV_MISALIGNED
-bool unaligned_ctl_available(void);
-bool check_unaligned_access_emulated(int cpu);
+#if defined(CONFIG_RISCV_MISALIGNED)
+bool check_unaligned_access_emulated_all_cpus(void);
 void unaligned_emulation_finish(void);
+bool unaligned_ctl_available(void);
+DECLARE_PER_CPU(long, misaligned_access_speed);
 #else
 static inline bool unaligned_ctl_available(void)
 {
 	return false;
 }
+#endif
+
+#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
+DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);
 
-static inline bool check_unaligned_access_emulated(int cpu)
+static __always_inline bool has_fast_unaligned_accesses(void)
 {
-	return false;
+	return static_branch_likely(&fast_unaligned_access_speed_key);
+}
+#else
+static __always_inline bool has_fast_unaligned_accesses(void)
+{
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+		return true;
+	else
+		return false;
 }
-
-static inline void unaligned_emulation_finish(void) {}
 #endif
 
 unsigned long riscv_get_elf_hwcap(void);
@@ -135,6 +144,4 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
 	return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
 }
 
-DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
-
 #endif
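has_fast_unaligned_accesses() compiles down to a static branch when the kernel probes access speed at boot (CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) and to a compile-time constant otherwise. A hypothetical caller, just to show the intended use — the two copy variants are my placeholders, not part of this diff:

/* Sketch: pick a copy routine based on unaligned-access speed. */
static void *pick_memcpy(void *dst, const void *src, size_t len)
{
	if (has_fast_unaligned_accesses())
		return memcpy_allow_unaligned(dst, src, len);	/* hypothetical */

	return memcpy_aligned_chunks(dst, src, len);		/* hypothetical */
}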
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index 06c236bfab53..c7aea7886d22 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -53,13 +53,9 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
 #define ELF_ET_DYN_BASE		((DEFAULT_MAP_WINDOW / 3) * 2)
 
 #ifdef CONFIG_64BIT
-#ifdef CONFIG_COMPAT
-#define STACK_RND_MASK		(test_thread_flag(TIF_32BIT) ? \
+#define STACK_RND_MASK		(is_compat_task() ? \
				 0x7ff >> (PAGE_SHIFT - 12) : \
				 0x3ffff >> (PAGE_SHIFT - 12))
-#else
-#define STACK_RND_MASK		(0x3ffff >> (PAGE_SHIFT - 12))
-#endif
 #endif
 
 /*
@@ -139,10 +135,7 @@ do {							\
 #ifdef CONFIG_COMPAT
 
 #define SET_PERSONALITY(ex)					\
-do {	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\
-		set_thread_flag(TIF_32BIT);			\
-	else							\
-		clear_thread_flag(TIF_32BIT);			\
+do {	set_compat_task((ex).e_ident[EI_CLASS] == ELFCLASS32);	\
	if (personality(current->personality) != PER_LINUX32)	\
		set_personality(PER_LINUX |			\
			(current->personality & (~PER_MASK)));	\
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index ea33288f8a25..1f2dbfb8a8bf 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -12,8 +12,8 @@
 #include <asm/vendorid_list.h>
 
 #ifdef CONFIG_ERRATA_ANDES
-#define ERRATA_ANDESTECH_NO_IOCP	0
-#define ERRATA_ANDESTECH_NUMBER		1
+#define ERRATA_ANDES_NO_IOCP	0
+#define ERRATA_ANDES_NUMBER	1
 #endif
 
 #ifdef CONFIG_ERRATA_SIFIVE
@@ -112,15 +112,6 @@ asm volatile(ALTERNATIVE(						\
 #define THEAD_C9XX_RV_IRQ_PMU			17
 #define THEAD_C9XX_CSR_SCOUNTEROF		0x5c5
 
-#define ALT_SBI_PMU_OVERFLOW(__ovl)					\
-asm volatile(ALTERNATIVE(						\
-	"csrr %0, " __stringify(CSR_SSCOUNTOVF),			\
-	"csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF),		\
-		THEAD_VENDOR_ID, ERRATA_THEAD_PMU,			\
-		CONFIG_ERRATA_THEAD_PMU)				\
-	: "=r" (__ovl) :						\
-	: "memory")
-
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 5340f818746b..bae7eac76c18 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -80,6 +80,7 @@
 #define RISCV_ISA_EXT_ZFA		71
 #define RISCV_ISA_EXT_ZTSO		72
 #define RISCV_ISA_EXT_ZACAS		73
+#define RISCV_ISA_EXT_XANDESPMU		74
 
 #define RISCV_ISA_EXT_MAX		128
 #define RISCV_ISA_EXT_INVALID		U32_MAX
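The STACK_RND_MASK values are counts of pages; the kernel shifts the chosen random value left by PAGE_SHIFT. A userspace sketch of the resulting stack-ASLR spans with 4 KiB pages (PAGE_SHIFT == 12), my arithmetic only:

#include <stdio.h>

int main(void)
{
	unsigned long compat = (0x7ffUL >> (12 - 12)) << 12;	/* pages -> bytes */
	unsigned long native = (0x3ffffUL >> (12 - 12)) << 12;

	printf("compat stack ASLR span: %lu MiB\n", (compat + 4096) >> 20); /* 8 MiB */
	printf("native stack ASLR span: %lu MiB\n", (native + 4096) >> 20); /* 1024 MiB */
	return 0;
}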
diff --git a/arch/riscv/include/asm/membarrier.h b/arch/riscv/include/asm/membarrier.h
new file mode 100644
index 000000000000..47b240d0d596
--- /dev/null
+++ b/arch/riscv/include/asm/membarrier.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_RISCV_MEMBARRIER_H
+#define _ASM_RISCV_MEMBARRIER_H
+
+static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
+					     struct mm_struct *next,
+					     struct task_struct *tsk)
+{
+	/*
+	 * Only need the full barrier when switching between processes.
+	 * Barrier when switching from kernel to userspace is not
+	 * required here, given that it is implied by mmdrop(). Barrier
+	 * when switching from userspace to kernel is not needed after
+	 * store to rq->curr.
+	 */
+	if (IS_ENABLED(CONFIG_SMP) &&
+	    likely(!(atomic_read(&next->membarrier_state) &
+		     (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
+		      MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
+		return;
+
+	/*
+	 * The membarrier system call requires a full memory barrier
+	 * after storing to rq->curr, before going back to user-space.
+	 *
+	 * This barrier is also needed for the SYNC_CORE command when
+	 * switching between processes; in particular, on a transition
+	 * from a thread belonging to another mm to a thread belonging
+	 * to the mm for which a membarrier SYNC_CORE is done on CPU0:
+	 *
+	 *   - [CPU0] sets all bits in the mm icache_stale_mask (in
+	 *     prepare_sync_core_cmd());
+	 *
+	 *   - [CPU1] stores to rq->curr (by the scheduler);
+	 *
+	 *   - [CPU0] loads rq->curr within membarrier and observes
+	 *     cpu_rq(1)->curr->mm != mm, so the IPI is skipped on
+	 *     CPU1; this means membarrier relies on switch_mm() to
+	 *     issue the sync-core;
+	 *
+	 *   - [CPU1] switch_mm() loads icache_stale_mask; if the bit
+	 *     is zero, switch_mm() may incorrectly skip the sync-core.
+	 *
+	 * Matches a full barrier in the proximity of the membarrier
+	 * system call entry.
+	 */
+	smp_mb();
+}
+
+#endif /* _ASM_RISCV_MEMBARRIER_H */
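The smp_mb() above pairs with a barrier on the membarrier() system call path; the icache_stale_mask handshake described in the comment is what makes the SYNC_CORE command safe. A userspace sketch of that command (error handling omitted; the raw syscall wrapper is mine):

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

static long membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	/* Register once, then request that every thread of this process
	 * executes a core-serializing instruction before returning to
	 * user space. */
	membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);
	membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);
	return 0;
}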
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index d169a4f41a2e..deaf971253a2 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -95,7 +95,19 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
		__pud_free(mm, pud);
 }
 
-#define __pud_free_tlb(tlb, pud, addr)	pud_free((tlb)->mm, pud)
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+				  unsigned long addr)
+{
+	if (pgtable_l4_enabled) {
+		struct ptdesc *ptdesc = virt_to_ptdesc(pud);
+
+		pagetable_pud_dtor(ptdesc);
+		if (riscv_use_ipi_for_rfence())
+			tlb_remove_page_ptdesc(tlb, ptdesc);
+		else
+			tlb_remove_ptdesc(tlb, ptdesc);
+	}
+}
 
 #define p4d_alloc_one p4d_alloc_one
 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -124,7 +136,16 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
		__p4d_free(mm, p4d);
 }
 
-#define __p4d_free_tlb(tlb, p4d, addr)	p4d_free((tlb)->mm, p4d)
+static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
+				  unsigned long addr)
+{
+	if (pgtable_l5_enabled) {
+		if (riscv_use_ipi_for_rfence())
+			tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d));
+		else
+			tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
+	}
+}
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 static inline void sync_kernel_mappings(pgd_t *pgd)
@@ -149,15 +170,31 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 #ifndef __PAGETABLE_PMD_FOLDED
 
-#define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+				  unsigned long addr)
+{
+	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
+
+	pagetable_pmd_dtor(ptdesc);
+	if (riscv_use_ipi_for_rfence())
+		tlb_remove_page_ptdesc(tlb, ptdesc);
+	else
+		tlb_remove_ptdesc(tlb, ptdesc);
+}
 
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-#define __pte_free_tlb(tlb, pte, buf)			\
-do {							\
-	pagetable_pte_dtor(page_ptdesc(pte));		\
-	tlb_remove_page_ptdesc((tlb), page_ptdesc(pte));\
-} while (0)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+				  unsigned long addr)
+{
+	struct ptdesc *ptdesc = page_ptdesc(pte);
+
+	pagetable_pte_dtor(ptdesc);
+	if (riscv_use_ipi_for_rfence())
+		tlb_remove_page_ptdesc(tlb, ptdesc);
+	else
+		tlb_remove_ptdesc(tlb, ptdesc);
+}
 #endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_PGALLOC_H */
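All four __p*_free_tlb() helpers above repeat one decision; a sketch of it factored out (the helper name is mine, not part of the diff). With IPI-based remote fences, lockless page-table walkers are synchronized against at IPI time, so the table page can be freed immediately; with SBI-based fences they are not, so the page must take the RCU-deferred path:

static inline void sketch_remove_table(struct mmu_gather *tlb,
				       struct ptdesc *ptdesc)
{
	if (riscv_use_ipi_for_rfence())
		tlb_remove_page_ptdesc(tlb, ptdesc);	/* immediate free */
	else
		tlb_remove_ptdesc(tlb, ptdesc);		/* RCU-deferred free */
}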
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 0c94260b5d0c..b2e6965748f2 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -127,17 +127,11 @@
 #define VA_USER_SV48	(UL(1) << (VA_BITS_SV48 - 1))
 #define VA_USER_SV57	(UL(1) << (VA_BITS_SV57 - 1))
 
-#ifdef CONFIG_COMPAT
 #define MMAP_VA_BITS_64		((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
 #define MMAP_MIN_VA_BITS_64	(VA_BITS_SV39)
 #define MMAP_VA_BITS		(is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
 #define MMAP_MIN_VA_BITS	(is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
 #else
-#define MMAP_VA_BITS		((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
-#define MMAP_MIN_VA_BITS	(VA_BITS_SV39)
-#endif /* CONFIG_COMPAT */
-
-#else
 #include <asm/pgtable-32.h>
 #endif /* CONFIG_64BIT */
 
@@ -439,6 +433,12 @@ static inline pte_t pte_mkhuge(pte_t pte)
	return pte;
 }
 
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#define pte_leaf_size(pte)	(pte_napot(pte) ?				\
+					napot_cont_size(napot_cont_order(pte)) :\
+					PAGE_SIZE)
+#endif
+
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * See the comment in include/asm-generic/pgtable.h
@@ -513,12 +513,12 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
	WRITE_ONCE(*ptep, pteval);
 }
 
-void flush_icache_pte(pte_t pte);
+void flush_icache_pte(struct mm_struct *mm, pte_t pte);
 
-static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
+static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
 {
	if (pte_present(pteval) && pte_exec(pteval))
-		flush_icache_pte(pteval);
+		flush_icache_pte(mm, pteval);
 
	set_pte(ptep, pteval);
 }
@@ -529,7 +529,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
	page_table_check_ptes_set(mm, ptep, pteval, nr);
 
	for (;;) {
-		__set_pte_at(ptep, pteval);
+		__set_pte_at(mm, ptep, pteval);
		if (--nr == 0)
			break;
		ptep++;
@@ -541,7 +541,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
 static inline void pte_clear(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
 {
-	__set_pte_at(ptep, __pte(0));
+	__set_pte_at(mm, ptep, __pte(0));
 }
 
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS	/* defined in mm/pgtable.c */
@@ -656,6 +656,12 @@ static inline int pmd_write(pmd_t pmd)
	return pte_write(pmd_pte(pmd));
 }
 
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+	return pte_write(pud_pte(pud));
+}
+
 #define pmd_dirty pmd_dirty
 static inline int pmd_dirty(pmd_t pmd)
 {
@@ -707,14 +713,14 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
 {
	page_table_check_pmd_set(mm, pmdp, pmd);
-	return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
+	return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
 }
 
 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
 {
	page_table_check_pud_set(mm, pudp, pud);
-	return __set_pte_at((pte_t *)pudp, pud_pte(pud));
+	return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
 }
 
 #ifdef CONFIG_PAGE_TABLE_CHECK
@@ -865,8 +871,8 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 #define TASK_SIZE_MIN	(PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
 
 #ifdef CONFIG_COMPAT
-#define TASK_SIZE_32	(_AC(0x80000000, UL))
-#define TASK_SIZE	(test_thread_flag(TIF_32BIT) ? \
+#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
+#define TASK_SIZE	(is_compat_task() ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
 #else
 #define TASK_SIZE	TASK_SIZE_64
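A worked check of the TASK_SIZE_32 change (my userspace arithmetic, 4 KiB pages assumed): reserving one page keeps the top of compat user space strictly below the 2 GiB boundary.

#include <assert.h>

int main(void)
{
	unsigned long task_size_32 = 0x80000000UL - 4096;

	assert(task_size_32 == 0x7ffff000UL);
	/* The last mappable compat page now starts at 0x7fffe000 and
	 * ends at 0x7ffff000, so no mapping can touch 0x80000000. */
	return 0;
}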
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index a8509cc31ab2..0faf5f161f1e 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -14,22 +14,21 @@
 
 #include <asm/ptrace.h>
 
-#ifdef CONFIG_64BIT
-#define DEFAULT_MAP_WINDOW	(UL(1) << (MMAP_VA_BITS - 1))
-#define STACK_TOP_MAX		TASK_SIZE
-
+/*
+ * addr is a hint to the maximum userspace address that mmap should provide, so
+ * this macro needs to return the largest address space available so that
+ * mmap_end < addr, being mmap_end the top of that address space.
+ * See Documentation/arch/riscv/vm-layout.rst for more details.
+ */
 #define arch_get_mmap_end(addr, len, flags)			\
 ({								\
	unsigned long mmap_end;					\
	typeof(addr) _addr = (addr);				\
-	if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
-		mmap_end = STACK_TOP_MAX;			\
-	else if ((_addr) >= VA_USER_SV57)			\
+	if ((_addr) == 0 || is_compat_task() ||			\
+	    ((_addr + len) > BIT(VA_BITS - 1)))			\
		mmap_end = STACK_TOP_MAX;			\
-	else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
-		mmap_end = VA_USER_SV48;			\
	else							\
-		mmap_end = VA_USER_SV39;			\
+		mmap_end = (_addr + len);			\
	mmap_end;						\
 })
 
@@ -39,17 +38,17 @@
	typeof(addr) _addr = (addr);				\
	typeof(base) _base = (base);				\
	unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base);	\
-	if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+	if ((_addr) == 0 || is_compat_task() ||			\
+	    ((_addr + len) > BIT(VA_BITS - 1)))			\
		mmap_base = (_base);				\
-	else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \
-		mmap_base = VA_USER_SV57 - rnd_gap;		\
-	else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
-		mmap_base = VA_USER_SV48 - rnd_gap;		\
	else							\
-		mmap_base = VA_USER_SV39 - rnd_gap;		\
+		mmap_base = (_addr + len) - rnd_gap;		\
	mmap_base;						\
 })
 
+#ifdef CONFIG_64BIT
+#define DEFAULT_MAP_WINDOW	(UL(1) << (MMAP_VA_BITS - 1))
+#define STACK_TOP_MAX		TASK_SIZE_64
 #else
 #define DEFAULT_MAP_WINDOW	TASK_SIZE
 #define STACK_TOP_MAX		TASK_SIZE
diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h
index 54efbf523d49..adb50f3ec205 100644
--- a/arch/riscv/include/asm/simd.h
+++ b/arch/riscv/include/asm/simd.h
@@ -34,9 +34,9 @@ static __must_check inline bool may_use_simd(void)
		return false;
 
	/*
-	 * Nesting is acheived in preempt_v by spreading the control for
+	 * Nesting is achieved in preempt_v by spreading the control for
	 * preemptible and non-preemptible kernel-mode Vector into two fields.
-	 * Always try to match with prempt_v if kernel V-context exists. Then,
+	 * Always try to match with preempt_v if kernel V-context exists. Then,
	 * fallback to check non preempt_v if nesting happens, or if the config
	 * is not set.
	 */
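A userspace sketch of the reworked mmap hint handling (assumes sv48/sv57 hardware running a kernel with this change; the addresses are illustrative). A non-zero hint whose end fits below BIT(VA_BITS - 1) caps the search at addr + len; a NULL or very high hint opens the full window:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *capped = mmap((void *)(1UL << 30), 4096, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *full = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("hinted:  %p (at or below the hint's end)\n", capped);
	printf("default: %p (anywhere up to STACK_TOP_MAX)\n", full);
	return 0;
}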
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
index 02f87867389a..076f8a9437cf 100644
--- a/arch/riscv/include/asm/suspend.h
+++ b/arch/riscv/include/asm/suspend.h
@@ -55,4 +55,7 @@ int hibernate_resume_nonboot_cpu_disable(void);
 asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
					unsigned long cpu_resume);
 asmlinkage int hibernate_core_restore_code(void);
+bool riscv_sbi_hsm_is_supported(void);
+bool riscv_sbi_suspend_state_is_valid(u32 state);
+int riscv_sbi_hart_suspend(u32 state);
 #endif
diff --git a/arch/riscv/include/asm/sync_core.h b/arch/riscv/include/asm/sync_core.h
new file mode 100644
index 000000000000..9153016da8f1
--- /dev/null
+++ b/arch/riscv/include/asm/sync_core.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_SYNC_CORE_H
+#define _ASM_RISCV_SYNC_CORE_H
+
+/*
+ * RISC-V implements return to user-space through an xRET instruction,
+ * which is not core serializing.
+ */
+static inline void sync_core_before_usermode(void)
+{
+	asm volatile ("fence.i" ::: "memory");
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Ensure the next switch_mm() on every CPU issues a core serializing
+ * instruction for the given @mm.
+ */
+static inline void prepare_sync_core_cmd(struct mm_struct *mm)
+{
+	cpumask_setall(&mm->context.icache_stale_mask);
+}
+#else
+static inline void prepare_sync_core_cmd(struct mm_struct *mm)
+{
+}
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_RISCV_SYNC_CORE_H */
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 1eb5682b2af6..a0b8b853503f 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -10,6 +10,24 @@ struct mmu_gather;
 
 static void tlb_flush(struct mmu_gather *tlb);
 
+#ifdef CONFIG_MMU
+#include <linux/swap.h>
+
+/*
+ * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
+ * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
+ * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
+ * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
+ * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
+ * for more details.
+ */
+static inline void __tlb_remove_table(void *table)
+{
+	free_page_and_swap_cache(table);
+}
+
+#endif /* CONFIG_MMU */
+
 #define tlb_flush tlb_flush
 #include <asm-generic/tlb.h>
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 0cd6f0a027d1..731dcd0ed4de 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -284,4 +284,15 @@ static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
 
 #endif /* CONFIG_RISCV_ISA_V */
 
+/*
+ * Return the implementation's vlen value.
+ *
+ * riscv_v_vsize contains the value of "32 vector registers with vlenb length"
+ * so rebuild the vlen value in bits from it.
+ */
+static inline int riscv_vector_vlen(void)
+{
+	return riscv_v_vsize / 32 * 8;
+}
+
 #endif /* ! __ASM_RISCV_VECTOR_H */
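A quick check of the riscv_vector_vlen() arithmetic (my userspace sketch): with VLEN = 128 bits, vlenb is 16 bytes, the saved state is 32 * 16 = 512 bytes, and 512 / 32 * 8 recovers 128.

#include <assert.h>

int main(void)
{
	int vlenb = 16;			/* bytes per vector register */
	int vsize = 32 * vlenb;		/* what riscv_v_vsize holds */

	assert(vsize / 32 * 8 == 128);	/* riscv_vector_vlen() in bits */
	return 0;
}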
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index e55407ace0c3..2f2bb0c84f9a 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -5,7 +5,7 @@
 #ifndef ASM_VENDOR_LIST_H
 #define ASM_VENDOR_LIST_H
 
-#define ANDESTECH_VENDOR_ID	0x31e
+#define ANDES_VENDOR_ID		0x31e
 #define SIFIVE_VENDOR_ID	0x489
 #define THEAD_VENDOR_ID		0x5b7
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index 924d01b56c9a..51f6dfe19745 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -19,65 +19,6 @@ static inline bool arch_vmap_pmd_supported(pgprot_t prot)
	return true;
 }
 
-#ifdef CONFIG_RISCV_ISA_SVNAPOT
-#include <linux/pgtable.h>
+#endif
 
-#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
-static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
-							 u64 pfn, unsigned int max_page_shift)
-{
-	unsigned long map_size = PAGE_SIZE;
-	unsigned long size, order;
-
-	if (!has_svnapot())
-		return map_size;
-
-	for_each_napot_order_rev(order) {
-		if (napot_cont_shift(order) > max_page_shift)
-			continue;
-
-		size = napot_cont_size(order);
-		if (end - addr < size)
-			continue;
-
-		if (!IS_ALIGNED(addr, size))
-			continue;
-
-		if (!IS_ALIGNED(PFN_PHYS(pfn), size))
-			continue;
-
-		map_size = size;
-		break;
-	}
-
-	return map_size;
-}
-
-#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
-static inline int arch_vmap_pte_supported_shift(unsigned long size)
-{
-	int shift = PAGE_SHIFT;
-	unsigned long order;
-
-	if (!has_svnapot())
-		return shift;
-
-	WARN_ON_ONCE(size >= PMD_SIZE);
-
-	for_each_napot_order_rev(order) {
-		if (napot_cont_size(order) > size)
-			continue;
-
-		if (!IS_ALIGNED(size, napot_cont_size(order)))
-			continue;
-
-		shift = napot_cont_shift(order);
-		break;
-	}
-
-	return shift;
-}
-
-#endif /* CONFIG_RISCV_ISA_SVNAPOT */
-#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
 #endif /* _ASM_RISCV_VMALLOC_H */