author     Linus Torvalds <torvalds@linux-foundation.org>   2022-12-14 15:23:49 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-12-14 15:23:49 -0800
commit     eb67d239f3aa1711afb0a42eab50459d9f3d672e (patch)
tree       4c762c7836cb8c5dbd3d4372b091e1659bcf6396 /arch/riscv
parent     94a855111ed9106971ca2617c5d075269e6aefde (diff)
parent     6e66e96e31b81fb08075d18a3e2c201f1e2171da (diff)
Merge tag 'riscv-for-linus-6.2-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Palmer Dabbelt:

 - Support for the T-Head PMU via the perf subsystem

 - ftrace support for rv32

 - Support for non-volatile memory devices

 - Various fixes and cleanups

* tag 'riscv-for-linus-6.2-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (52 commits)
  Documentation: RISC-V: patch-acceptance: s/implementor/implementer
  Documentation: RISC-V: Mention the UEFI Standards
  Documentation: RISC-V: Allow patches for non-standard behavior
  Documentation: RISC-V: Fix a typo in patch-acceptance
  riscv: Fixup compile error with !MMU
  riscv: Fix P4D_SHIFT definition for 3-level page table mode
  riscv: Apply a static assert to riscv_isa_ext_id
  RISC-V: Add some comments about the shadow and overflow stacks
  RISC-V: Align the shadow stack
  RISC-V: Ensure Zicbom has a valid block size
  RISC-V: Introduce riscv_isa_extension_check
  RISC-V: Improve use of isa2hwcap[]
  riscv: Don't duplicate _ALTERNATIVE_CFG* macros
  riscv: alternatives: Drop the underscores from the assembly macro names
  riscv: alternatives: Don't name unused macro parameters
  riscv: Don't duplicate __ALTERNATIVE_CFG in __ALTERNATIVE_CFG_2
  riscv: mm: call best_map_size many times during linear-mapping
  riscv: Move cast inside kernel_mapping_[pv]a_to_[vp]a
  riscv: Fix crash during early errata patching
  riscv: boot: add zstd support
  ...
Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kconfig | 19
-rw-r--r--  arch/riscv/Kconfig.erratas | 13
-rw-r--r--  arch/riscv/Kconfig.socs | 5
-rw-r--r--  arch/riscv/boot/Makefile | 3
-rw-r--r--  arch/riscv/configs/defconfig | 3
-rw-r--r--  arch/riscv/errata/thead/errata.c | 19
-rw-r--r--  arch/riscv/include/asm/alternative-macros.h | 99
-rw-r--r--  arch/riscv/include/asm/cacheflush.h | 7
-rw-r--r--  arch/riscv/include/asm/errata_list.h | 16
-rw-r--r--  arch/riscv/include/asm/hugetlb.h | 6
-rw-r--r--  arch/riscv/include/asm/hwcap.h | 3
-rw-r--r--  arch/riscv/include/asm/io.h | 5
-rw-r--r--  arch/riscv/include/asm/kexec.h | 5
-rw-r--r--  arch/riscv/include/asm/kprobes.h | 2
-rw-r--r--  arch/riscv/include/asm/mmu.h | 2
-rw-r--r--  arch/riscv/include/asm/page.h | 18
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h | 6
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 5
-rw-r--r--  arch/riscv/include/asm/sbi.h | 5
-rw-r--r--  arch/riscv/include/asm/tlbflush.h | 18
-rw-r--r--  arch/riscv/include/asm/vdso.h | 2
-rw-r--r--  arch/riscv/include/asm/vmalloc.h | 18
-rw-r--r--  arch/riscv/include/uapi/asm/ucontext.h | 12
-rw-r--r--  arch/riscv/kernel/Makefile | 1
-rw-r--r--  arch/riscv/kernel/cpu.c | 30
-rw-r--r--  arch/riscv/kernel/cpufeature.c | 43
-rw-r--r--  arch/riscv/kernel/crash_core.c | 21
-rw-r--r--  arch/riscv/kernel/elf_kexec.c | 14
-rw-r--r--  arch/riscv/kernel/entry.S | 21
-rw-r--r--  arch/riscv/kernel/mcount.S | 44
-rw-r--r--  arch/riscv/kernel/probes/Makefile | 2
-rw-r--r--  arch/riscv/kernel/probes/kprobes.c | 13
-rw-r--r--  arch/riscv/kernel/probes/rethook.c | 27
-rw-r--r--  arch/riscv/kernel/probes/rethook.h | 8
-rw-r--r--  arch/riscv/kernel/probes/rethook_trampoline.S (renamed from arch/riscv/kernel/probes/kprobes_trampoline.S) | 6
-rw-r--r--  arch/riscv/kernel/signal.c | 34
-rw-r--r--  arch/riscv/kernel/stacktrace.c | 11
-rw-r--r--  arch/riscv/kernel/traps.c | 22
-rw-r--r--  arch/riscv/mm/Makefile | 2
-rw-r--r--  arch/riscv/mm/cacheflush.c | 7
-rw-r--r--  arch/riscv/mm/context.c | 10
-rw-r--r--  arch/riscv/mm/init.c | 25
-rw-r--r--  arch/riscv/mm/pgtable.c | 83
-rw-r--r--  arch/riscv/mm/physaddr.c | 2
-rw-r--r--  arch/riscv/mm/pmem.c | 21
-rw-r--r--  arch/riscv/mm/tlbflush.c | 28
46 files changed, 549 insertions, 217 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 593cf09264d8..e2b656043abf 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -25,6 +25,7 @@ config RISCV
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
select ARCH_HAS_MMIOWB
+ select ARCH_HAS_PMEM_API
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_DIRECT_MAP if MMU
select ARCH_HAS_SET_MEMORY if MMU
@@ -72,6 +73,8 @@ config RISCV
select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
+ select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT
@@ -99,6 +102,7 @@ config RISCV
select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
select HAVE_KRETPROBES if !XIP_KERNEL
+ select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_MOVE_PMD
select HAVE_MOVE_PUD
select HAVE_PCI
@@ -123,12 +127,18 @@ config RISCV
select PCI_MSI if PCI
select RISCV_INTC
select RISCV_TIMER if RISCV_SBI
+ select SIFIVE_PLIC
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select TRACE_IRQFLAGS_SUPPORT
select UACCESS_MEMCPY if !MMU
select ZONE_DMA32 if 64BIT
+ select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
@@ -274,11 +284,6 @@ config ARCH_RV64I
bool "RV64I"
select 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
- select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
- select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
- select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
- select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select SWIOTLB if MMU
endchoice
@@ -502,7 +507,7 @@ config KEXEC_FILE
select KEXEC_CORE
select KEXEC_ELF
select HAVE_IMA_KEXEC if IMA
- depends on 64BIT
+ depends on 64BIT && MMU
help
This is new version of kexec system call. This system call is
file based and takes file descriptors as system call argument
@@ -691,6 +696,8 @@ menu "CPU Power Management"
source "drivers/cpuidle/Kconfig"
+source "drivers/cpufreq/Kconfig"
+
endmenu # "CPU Power Management"
source "arch/riscv/kvm/Kconfig"
diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas
index f3623df23b5f..69621ae6d647 100644
--- a/arch/riscv/Kconfig.erratas
+++ b/arch/riscv/Kconfig.erratas
@@ -66,4 +66,17 @@ config ERRATA_THEAD_CMO
If you don't know what to do here, say "Y".
+config ERRATA_THEAD_PMU
+ bool "Apply T-Head PMU errata"
+ depends on ERRATA_THEAD && RISCV_PMU_SBI
+ default y
+ help
+ The T-Head C9xx cores implement a PMU overflow extension very
+ similar to the core SSCOFPMF extension.
+
+ This will apply the overflow errata to handle the non-standard
+ behaviour via the regular SBI PMU driver and interface.
+
+ If you don't know what to do here, say "Y".
+
endmenu # "CPU errata selection"
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 75fb0390d6bd..4b6deb2715f1 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -3,7 +3,6 @@ menu "SoC selection"
config SOC_MICROCHIP_POLARFIRE
bool "Microchip PolarFire SoCs"
select MCHP_CLK_MPFS
- select SIFIVE_PLIC
help
This enables support for Microchip PolarFire SoC platforms.
@@ -18,7 +17,6 @@ config SOC_SIFIVE
select SERIAL_SIFIVE_CONSOLE if TTY
select CLK_SIFIVE
select CLK_SIFIVE_PRCI
- select SIFIVE_PLIC
select ERRATA_SIFIVE if !XIP_KERNEL
help
This enables support for SiFive SoC platform hardware.
@@ -27,7 +25,6 @@ config SOC_STARFIVE
bool "StarFive SoCs"
select PINCTRL
select RESET_CONTROLLER
- select SIFIVE_PLIC
help
This enables support for StarFive SoC platform hardware.
@@ -39,7 +36,6 @@ config SOC_VIRT
select POWER_RESET_SYSCON_POWEROFF
select GOLDFISH
select RTC_DRV_GOLDFISH if RTC_CLASS
- select SIFIVE_PLIC
select PM_GENERIC_DOMAINS if PM
select PM_GENERIC_DOMAINS_OF if PM && OF
select RISCV_SBI_CPUIDLE if CPU_IDLE && RISCV_SBI
@@ -52,7 +48,6 @@ config SOC_CANAAN
select CLINT_TIMER if RISCV_M_MODE
select SERIAL_SIFIVE if TTY
select SERIAL_SIFIVE_CONSOLE if TTY
- select SIFIVE_PLIC
select ARCH_HAS_RESET_CONTROLLER
select PINCTRL
select COMMON_CLK
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index d1a49adcb1d7..c72de7232abb 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -56,6 +56,9 @@ $(obj)/Image.lzma: $(obj)/Image FORCE
$(obj)/Image.lzo: $(obj)/Image FORCE
$(call if_changed,lzo)
+$(obj)/Image.zst: $(obj)/Image FORCE
+ $(call if_changed,zstd)
+
$(obj)/loader.bin: $(obj)/loader FORCE
$(call if_changed,objcopy)
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index f7f32448f160..128dcf4c0814 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -39,6 +39,7 @@ CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
+CONFIG_SPARSEMEM_MANUAL=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -123,6 +124,7 @@ CONFIG_MICROSEMI_PHY=y
CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_VIRTIO_CONSOLE=y
@@ -162,6 +164,7 @@ CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_CTRL=y
CONFIG_RPMSG_VIRTIO=y
CONFIG_ARCH_R9A07G043=y
+CONFIG_LIBNVDIMM=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index 21546937db39..fac5742d1c1e 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -47,6 +47,22 @@ static bool errata_probe_cmo(unsigned int stage,
return true;
}
+static bool errata_probe_pmu(unsigned int stage,
+ unsigned long arch_id, unsigned long impid)
+{
+ if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PMU))
+ return false;
+
+ /* target-c9xx cores report arch_id and impid as 0 */
+ if (arch_id != 0 || impid != 0)
+ return false;
+
+ if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
+ return false;
+
+ return true;
+}
+
static u32 thead_errata_probe(unsigned int stage,
unsigned long archid, unsigned long impid)
{
@@ -58,6 +74,9 @@ static u32 thead_errata_probe(unsigned int stage,
if (errata_probe_cmo(stage, archid, impid))
cpu_req_errata |= BIT(ERRATA_THEAD_CMO);
+ if (errata_probe_pmu(stage, archid, impid))
+ cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
+
return cpu_req_errata;
}
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index ec2f3f1b836f..7226e2462584 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -33,7 +33,7 @@
.endif
.endm
-.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable
+.macro ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable
886 :
.option push
.option norvc
@@ -44,30 +44,14 @@
ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c
.endm
-#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
- __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)
-
-.macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
- new_c_2, vendor_id_2, errata_id_2, enable_2
-886 :
- .option push
- .option norvc
- .option norelax
- \old_c
- .option pop
-887 :
- ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1
+.macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
+ new_c_2, vendor_id_2, errata_id_2, enable_2
+ ALTERNATIVE_CFG \old_c, \new_c_1, \vendor_id_1, \errata_id_1, \enable_1
ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
.endm
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- CONFIG_k_1, \
- new_c_2, vendor_id_2, errata_id_2, \
- CONFIG_k_2) \
- __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, \
- IS_ENABLED(CONFIG_k_1), \
- new_c_2, vendor_id_2, errata_id_2, \
- IS_ENABLED(CONFIG_k_2)
+#define __ALTERNATIVE_CFG(...) ALTERNATIVE_CFG __VA_ARGS__
+#define __ALTERNATIVE_CFG_2(...) ALTERNATIVE_CFG_2 __VA_ARGS__
#else /* !__ASSEMBLY__ */
@@ -109,63 +93,44 @@
"887 :\n" \
ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
-#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
- __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
-
-#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- enable_1, \
- new_c_2, vendor_id_2, errata_id_2, \
- enable_2) \
- "886 :\n" \
- ".option push\n" \
- ".option norvc\n" \
- ".option norelax\n" \
- old_c "\n" \
- ".option pop\n" \
- "887 :\n" \
- ALT_NEW_CONTENT(vendor_id_1, errata_id_1, enable_1, new_c_1) \
+#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
+ new_c_2, vendor_id_2, errata_id_2, enable_2) \
+ __ALTERNATIVE_CFG(old_c, new_c_1, vendor_id_1, errata_id_1, enable_1) \
ALT_NEW_CONTENT(vendor_id_2, errata_id_2, enable_2, new_c_2)
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- CONFIG_k_1, \
- new_c_2, vendor_id_2, errata_id_2, \
- CONFIG_k_2) \
- __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- IS_ENABLED(CONFIG_k_1), \
- new_c_2, vendor_id_2, errata_id_2, \
- IS_ENABLED(CONFIG_k_2))
-
#endif /* __ASSEMBLY__ */
+#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
+ __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
+
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
+ new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
+ __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1), \
+ new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2))
+
#else /* CONFIG_RISCV_ALTERNATIVE */
#ifdef __ASSEMBLY__
-.macro __ALTERNATIVE_CFG old_c
+.macro ALTERNATIVE_CFG old_c
\old_c
.endm
-#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
- __ALTERNATIVE_CFG old_c
+#define _ALTERNATIVE_CFG(old_c, ...) \
+ ALTERNATIVE_CFG old_c
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- CONFIG_k_1, \
- new_c_2, vendor_id_2, errata_id_2, \
- CONFIG_k_2) \
- __ALTERNATIVE_CFG old_c
+#define _ALTERNATIVE_CFG_2(old_c, ...) \
+ ALTERNATIVE_CFG old_c
#else /* !__ASSEMBLY__ */
-#define __ALTERNATIVE_CFG(old_c) \
+#define __ALTERNATIVE_CFG(old_c) \
old_c "\n"
-#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
+#define _ALTERNATIVE_CFG(old_c, ...) \
__ALTERNATIVE_CFG(old_c)
-#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \
- CONFIG_k_1, \
- new_c_2, vendor_id_2, errata_id_2, \
- CONFIG_k_2) \
- __ALTERNATIVE_CFG(old_c)
+#define _ALTERNATIVE_CFG_2(old_c, ...) \
+ __ALTERNATIVE_CFG(old_c)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_RISCV_ALTERNATIVE */
@@ -193,13 +158,9 @@
* on the following sample code and then replace ALTERNATIVE() with
* ALTERNATIVE_2() to append its customized content.
*/
-#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, \
- errata_id_1, CONFIG_k_1, \
- new_content_2, vendor_id_2, \
- errata_id_2, CONFIG_k_2) \
- _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, \
- errata_id_1, CONFIG_k_1, \
- new_content_2, vendor_id_2, \
- errata_id_2, CONFIG_k_2)
+#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
+ new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
+ _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
+ new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2)
#endif
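
For readers following the macro cleanup above, a hedged example of how the C-side ALTERNATIVE() interface (which _ALTERNATIVE_CFG backs) is consumed; the instruction pair, vendor ID, errata ID, and config symbol are placeholders, not from this series, and the real pattern appears in errata_list.h below:

/* Illustrative only: "nop" is patched to "fence.i" at boot when the
 * (hypothetical) vendor/errata pair is detected and the (hypothetical)
 * config option is enabled. Argument order matches _ALTERNATIVE_CFG. */
static void example_patched_sequence(void)
{
	asm volatile(ALTERNATIVE("nop", "fence.i",
				 EXAMPLE_VENDOR_ID, EXAMPLE_ERRATA_ID,
				 CONFIG_EXAMPLE_ERRATA)
		     : : : "memory");
}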
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index f6fbe7042f1c..03e3b95ae6da 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -17,6 +17,13 @@ static inline void local_flush_icache_all(void)
static inline void flush_dcache_page(struct page *page)
{
+ /*
+ * HugeTLB pages are always fully mapped and only head page will be
+ * set PG_dcache_clean (see comments in flush_icache_pte()).
+ */
+ if (PageHuge(page))
+ page = compound_head(page);
+
if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
}
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 19a771085781..4180312d2a70 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -6,6 +6,7 @@
#define ASM_ERRATA_LIST_H
#include <asm/alternative.h>
+#include <asm/csr.h>
#include <asm/vendorid_list.h>
#ifdef CONFIG_ERRATA_SIFIVE
@@ -17,7 +18,8 @@
#ifdef CONFIG_ERRATA_THEAD
#define ERRATA_THEAD_PBMT 0
#define ERRATA_THEAD_CMO 1
-#define ERRATA_THEAD_NUMBER 2
+#define ERRATA_THEAD_PMU 2
+#define ERRATA_THEAD_NUMBER 3
#endif
#define CPUFEATURE_SVPBMT 0
@@ -142,6 +144,18 @@ asm volatile(ALTERNATIVE_2( \
"r"((unsigned long)(_start) + (_size)) \
: "a0")
+#define THEAD_C9XX_RV_IRQ_PMU 17
+#define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5
+
+#define ALT_SBI_PMU_OVERFLOW(__ovl) \
+asm volatile(ALTERNATIVE( \
+ "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
+ "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
+ THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
+ CONFIG_ERRATA_THEAD_PMU) \
+ : "=r" (__ovl) : \
+ : "memory")
+
#endif /* __ASSEMBLY__ */
#endif
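
A hedged sketch of how the new ALT_SBI_PMU_OVERFLOW() helper is meant to be consumed by the SBI PMU driver: on standard Sscofpmf hardware the macro reads CSR_SSCOUNTOVF, while on T-Head C9xx parts the alternative patches in the vendor CSR. The wrapper function name below is illustrative and does not appear in this series.

/* Illustrative wrapper only; the macro and CSR names come from the
 * hunk above, the function name does not. */
static unsigned long pmu_read_overflow_bits(void)
{
	unsigned long overflow;

	ALT_SBI_PMU_OVERFLOW(overflow);
	return overflow;
}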
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index a5c2ca1d1cd8..ec19d6afc896 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -5,4 +5,10 @@
#include <asm-generic/hugetlb.h>
#include <asm/page.h>
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+
#endif /* _ASM_RISCV_HUGETLB_H */
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index b22525290073..86328e3acb02 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -59,8 +59,9 @@ enum riscv_isa_ext_id {
RISCV_ISA_EXT_ZIHINTPAUSE,
RISCV_ISA_EXT_SSTC,
RISCV_ISA_EXT_SVINVAL,
- RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX,
+ RISCV_ISA_EXT_ID_MAX
};
+static_assert(RISCV_ISA_EXT_ID_MAX <= RISCV_ISA_EXT_MAX);
/*
* This enum represents the logical ID for each RISC-V ISA extension static
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 92080a227937..42497d487a17 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -135,4 +135,9 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
#include <asm-generic/io.h>
+#ifdef CONFIG_MMU
+#define arch_memremap_wb(addr, size) \
+ ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
+#endif
+
#endif /* _ASM_RISCV_IO_H */
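
With arch_memremap_wb() defined above, a memremap(..., MEMREMAP_WB) request on MMU kernels resolves to a cacheable kernel mapping via ioremap_prot(..., _PAGE_KERNEL). A hedged sketch of a caller; the function name and the idea of mapping a caller-supplied window are illustrative:

/* Illustrative only: map a caller-supplied physical window write-back
 * cacheable through the generic memremap() path. */
#include <linux/io.h>

static void *example_map_wb(phys_addr_t base, size_t size)
{
	return memremap(base, size, MEMREMAP_WB);
}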
diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h
index eee260e8ab30..2b56769cb530 100644
--- a/arch/riscv/include/asm/kexec.h
+++ b/arch/riscv/include/asm/kexec.h
@@ -39,6 +39,7 @@ crash_setup_regs(struct pt_regs *newregs,
#define ARCH_HAS_KIMAGE_ARCH
struct kimage_arch {
+ void *fdt; /* For CONFIG_KEXEC_FILE */
unsigned long fdt_addr;
};
@@ -62,6 +63,10 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+
+struct kimage;
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
#endif
#endif
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
index 217ef89f22b9..e7882ccb0fd4 100644
--- a/arch/riscv/include/asm/kprobes.h
+++ b/arch/riscv/include/asm/kprobes.h
@@ -40,8 +40,6 @@ void arch_remove_kprobe(struct kprobe *p);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
bool kprobe_breakpoint_handler(struct pt_regs *regs);
bool kprobe_single_step_handler(struct pt_regs *regs);
-void __kretprobe_trampoline(void);
-void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
#endif /* CONFIG_KPROBES */
#endif /* _ASM_RISCV_KPROBES_H */
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 0099dc116168..5ff1f19fd45c 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -19,6 +19,8 @@ typedef struct {
#ifdef CONFIG_SMP
/* A local icache flush is needed before user execution can resume. */
cpumask_t icache_stale_mask;
+ /* A local tlb flush is needed before user execution can resume. */
+ cpumask_t tlb_stale_mask;
#endif
} mm_context_t;
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index ac70b0fd9a9a..9f432c1b5289 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -123,20 +123,20 @@ extern phys_addr_t phys_ram_base;
((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
-#define kernel_mapping_pa_to_va(y) ({ \
- unsigned long _y = y; \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
- (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) : \
- (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
+#define kernel_mapping_pa_to_va(y) ({ \
+ unsigned long _y = (unsigned long)(y); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
+ (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \
+ (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
})
#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
#define kernel_mapping_va_to_pa(y) ({ \
- unsigned long _y = y; \
- (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
- ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \
- ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
+ unsigned long _y = (unsigned long)(y); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
+ (_y - kernel_map.va_kernel_xip_pa_offset) : \
+ (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
})
#define __va_to_pa_nodebug(x) ({ \
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index dc42375c2357..42a042c0e13e 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -25,7 +25,11 @@ extern bool pgtable_l5_enabled;
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/* p4d is folded into pgd in case of 4-level page table */
-#define P4D_SHIFT 39
+#define P4D_SHIFT_L3 30
+#define P4D_SHIFT_L4 39
+#define P4D_SHIFT_L5 39
+#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \
+ (pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3))
#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE - 1))
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7ee3ac315c7c..4eba9a98d0e3 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -415,9 +415,12 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
* Relying on flush_tlb_fix_spurious_fault would suffice, but
* the extra traps reduce performance. So, eagerly SFENCE.VMA.
*/
- local_flush_tlb_page(address);
+ flush_tlb_page(vma, address);
}
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb update_mmu_cache
+
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 2a0ef738695e..4ca7fbacff42 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -327,4 +327,9 @@ int sbi_err_map_linux_errno(int err);
static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
static inline void sbi_init(void) {}
#endif /* CONFIG_RISCV_SBI */
+
+unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
+unsigned long riscv_cached_marchid(unsigned int cpu_id);
+unsigned long riscv_cached_mimpid(unsigned int cpu_id);
+
#endif /* _ASM_RISCV_SBI_H */
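
The three accessors declared above return machine IDs cached per CPU at boot (see the kernel/cpu.c hunks below), so callers can avoid repeated SBI calls. A hedged example of the kind of check a caller might build on them; the helper name is invented, while the "archid and impid read as 0 on T-Head C9xx" detail comes from the errata probe earlier in this diff:

/* Illustrative helper only; THEAD_VENDOR_ID comes from
 * <asm/vendorid_list.h>, the function is not part of this series. */
static bool cpu_looks_like_thead_c9xx(unsigned int cpu)
{
	return riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID &&
	       riscv_cached_marchid(cpu) == 0 &&
	       riscv_cached_mimpid(cpu) == 0;
}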
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 801019381dea..907b9efd39a8 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -22,6 +22,24 @@ static inline void local_flush_tlb_page(unsigned long addr)
{
ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
}
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+ __asm__ __volatile__ ("sfence.vma x0, %0"
+ :
+ : "r" (asid)
+ : "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+ unsigned long asid)
+{
+ __asm__ __volatile__ ("sfence.vma %0, %1"
+ :
+ : "r" (addr), "r" (asid)
+ : "memory");
+}
+
#else /* CONFIG_MMU */
#define local_flush_tlb_all() do { } while (0)
#define local_flush_tlb_page(addr) do { } while (0)
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index af981426fe0f..a7644f46d0e5 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -10,7 +10,7 @@
/*
* All systems with an MMU have a VDSO, but systems without an MMU don't
- * support shared libraries and therefor don't have one.
+ * support shared libraries and therefore don't have one.
*/
#ifdef CONFIG_MMU
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
index ff9abc00d139..48da5371f1e9 100644
--- a/arch/riscv/include/asm/vmalloc.h
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -1,4 +1,22 @@
#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#define IOREMAP_MAX_ORDER (PUD_SHIFT)
+
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+ return true;
+}
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+ return true;
+}
+
+#endif
+
#endif /* _ASM_RISCV_VMALLOC_H */
diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h
index 44eb993950e5..516bd0bb0da5 100644
--- a/arch/riscv/include/uapi/asm/ucontext.h
+++ b/arch/riscv/include/uapi/asm/ucontext.h
@@ -15,19 +15,23 @@ struct ucontext {
struct ucontext *uc_link;
stack_t uc_stack;
sigset_t uc_sigmask;
- /* There's some padding here to allow sigset_t to be expanded in the
+ /*
+ * There's some padding here to allow sigset_t to be expanded in the
* future. Though this is unlikely, other architectures put uc_sigmask
* at the end of this structure and explicitly state it can be
- * expanded, so we didn't want to box ourselves in here. */
+ * expanded, so we didn't want to box ourselves in here.
+ */
__u8 __unused[1024 / 8 - sizeof(sigset_t)];
- /* We can't put uc_sigmask at the end of this structure because we need
+ /*
+ * We can't put uc_sigmask at the end of this structure because we need
* to be able to expand sigcontext in the future. For example, the
* vector ISA extension will almost certainly add ISA state. We want
* to ensure all user-visible ISA state can be saved and restored via a
* ucontext, so we're putting this at the end in order to allow for
* infinite extensibility. Since we know this will be extended and we
* assume sigset_t won't be extended an extreme amount, we're
- * prioritizing this. */
+ * prioritizing this.
+ */
struct sigcontext uc_mcontext;
};
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index db6e4b1294ba..4cf303a779ab 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KEXEC_CORE) += kexec_relocate.o crash_save_regs.o machine_kexec.o
obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_CRASH_CORE) += crash_core.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 852ecccd8920..1b9a5a66e55a 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -70,8 +70,6 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
return -1;
}
-#ifdef CONFIG_PROC_FS
-
struct riscv_cpuinfo {
unsigned long mvendorid;
unsigned long marchid;
@@ -79,6 +77,30 @@ struct riscv_cpuinfo {
};
static DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
+unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
+{
+ struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+ return ci->mvendorid;
+}
+EXPORT_SYMBOL(riscv_cached_mvendorid);
+
+unsigned long riscv_cached_marchid(unsigned int cpu_id)
+{
+ struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+ return ci->marchid;
+}
+EXPORT_SYMBOL(riscv_cached_marchid);
+
+unsigned long riscv_cached_mimpid(unsigned int cpu_id)
+{
+ struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
+
+ return ci->mimpid;
+}
+EXPORT_SYMBOL(riscv_cached_mimpid);
+
static int riscv_cpuinfo_starting(unsigned int cpu)
{
struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
@@ -113,7 +135,9 @@ static int __init riscv_cpuinfo_init(void)
return 0;
}
-device_initcall(riscv_cpuinfo_init);
+arch_initcall(riscv_cpuinfo_init);
+
+#ifdef CONFIG_PROC_FS
#define __RISCV_ISA_EXT_DATA(UPROP, EXTID) \
{ \
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 694267d1fe81..93e45560af30 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -9,6 +9,7 @@
#include <linux/bitmap.h>
#include <linux/ctype.h>
#include <linux/libfdt.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <asm/alternative.h>
@@ -68,21 +69,38 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
}
EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
+static bool riscv_isa_extension_check(int id)
+{
+ switch (id) {
+ case RISCV_ISA_EXT_ZICBOM:
+ if (!riscv_cbom_block_size) {
+ pr_err("Zicbom detected in ISA string, but no cbom-block-size found\n");
+ return false;
+ } else if (!is_power_of_2(riscv_cbom_block_size)) {
+ pr_err("cbom-block-size present, but is not a power-of-2\n");
+ return false;
+ }
+ return true;
+ }
+
+ return true;
+}
+
void __init riscv_fill_hwcap(void)
{
struct device_node *node;
const char *isa;
char print_str[NUM_ALPHA_EXTS + 1];
int i, j, rc;
- static unsigned long isa2hwcap[256] = {0};
+ unsigned long isa2hwcap[26] = {0};
unsigned long hartid;
- isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I;
- isa2hwcap['m'] = isa2hwcap['M'] = COMPAT_HWCAP_ISA_M;
- isa2hwcap['a'] = isa2hwcap['A'] = COMPAT_HWCAP_ISA_A;
- isa2hwcap['f'] = isa2hwcap['F'] = COMPAT_HWCAP_ISA_F;
- isa2hwcap['d'] = isa2hwcap['D'] = COMPAT_HWCAP_ISA_D;
- isa2hwcap['c'] = isa2hwcap['C'] = COMPAT_HWCAP_ISA_C;
+ isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
+ isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
+ isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
+ isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
+ isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
+ isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
elf_hwcap = 0;
@@ -189,15 +207,20 @@ void __init riscv_fill_hwcap(void)
#define SET_ISA_EXT_MAP(name, bit) \
do { \
if ((ext_end - ext == sizeof(name) - 1) && \
- !memcmp(ext, name, sizeof(name) - 1)) \
+ !memcmp(ext, name, sizeof(name) - 1) && \
+ riscv_isa_extension_check(bit)) \
set_bit(bit, this_isa); \
} while (false) \
if (unlikely(ext_err))
continue;
if (!ext_long) {
- this_hwcap |= isa2hwcap[(unsigned char)(*ext)];
- set_bit(*ext - 'a', this_isa);
+ int nr = *ext - 'a';
+
+ if (riscv_isa_extension_check(nr)) {
+ this_hwcap |= isa2hwcap[nr];
+ set_bit(nr, this_isa);
+ }
} else {
SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT);
diff --git a/arch/riscv/kernel/crash_core.c b/arch/riscv/kernel/crash_core.c
new file mode 100644
index 000000000000..b351a3c01355
--- /dev/null
+++ b/arch/riscv/kernel/crash_core.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/crash_core.h>
+#include <linux/pagemap.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+ VMCOREINFO_NUMBER(VA_BITS);
+ VMCOREINFO_NUMBER(phys_ram_base);
+
+ vmcoreinfo_append_str("NUMBER(PAGE_OFFSET)=0x%lx\n", PAGE_OFFSET);
+ vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START);
+ vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END);
+ vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START);
+ vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END);
+#ifdef CONFIG_64BIT
+ vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR);
+ vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END);
+#endif
+ vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR);
+}
diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c
index 0cb94992c15b..5372b708fae2 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -21,6 +21,18 @@
#include <linux/memblock.h>
#include <asm/setup.h>
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ kvfree(image->arch.fdt);
+ image->arch.fdt = NULL;
+
+ vfree(image->elf_headers);
+ image->elf_headers = NULL;
+ image->elf_headers_sz = 0;
+
+ return kexec_image_post_load_cleanup_default(image);
+}
+
static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr,
struct kexec_elf_info *elf_info, unsigned long old_pbase,
unsigned long new_pbase)
@@ -298,6 +310,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
pr_err("Error add DTB kbuf ret=%d\n", ret);
goto out_free_fdt;
}
+ /* Cache the fdt buffer address for memory cleanup */
+ image->arch.fdt = fdt;
pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem);
goto out;
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 186abd146eaf..99d38fdf8b18 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -248,7 +248,7 @@ ret_from_syscall_rejected:
andi t0, t0, _TIF_SYSCALL_WORK
bnez t0, handle_syscall_trace_exit
-ret_from_exception:
+SYM_CODE_START_NOALIGN(ret_from_exception)
REG_L s0, PT_STATUS(sp)
csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -262,13 +262,13 @@ ret_from_exception:
andi s0, s0, SR_SPP
#endif
bnez s0, resume_kernel
+SYM_CODE_END(ret_from_exception)
-resume_userspace:
/* Interrupts must be disabled here so flags are checked atomically */
REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
andi s1, s0, _TIF_WORK_MASK
- bnez s1, work_pending
-
+ bnez s1, resume_userspace_slow
+resume_userspace:
#ifdef CONFIG_CONTEXT_TRACKING_USER
call user_enter_callable
#endif
@@ -368,19 +368,12 @@ resume_kernel:
j restore_all
#endif
-work_pending:
+resume_userspace_slow:
/* Enter slow path for supplementary processing */
- la ra, ret_from_exception
- andi s1, s0, _TIF_NEED_RESCHED
- bnez s1, work_resched
-work_notifysig:
- /* Handle pending signals and notify-resume requests */
- csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
move a0, sp /* pt_regs */
move a1, s0 /* current_thread_info->flags */
- tail do_notify_resume
-work_resched:
- tail schedule
+ call do_work_pending
+ j resume_userspace
/* Slow paths for ptrace. */
handle_syscall_trace_enter:
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 6d462681c9c0..30102aadc4d7 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -15,8 +15,8 @@
.macro SAVE_ABI_STATE
addi sp, sp, -16
- sd s0, 0(sp)
- sd ra, 8(sp)
+ REG_S s0, 0*SZREG(sp)
+ REG_S ra, 1*SZREG(sp)
addi s0, sp, 16
.endm
@@ -25,24 +25,26 @@
* register if a0 was not saved.
*/
.macro SAVE_RET_ABI_STATE
- addi sp, sp, -32
- sd s0, 16(sp)
- sd ra, 24(sp)
- sd a0, 8(sp)
- addi s0, sp, 32
+ addi sp, sp, -4*SZREG
+ REG_S s0, 2*SZREG(sp)
+ REG_S ra, 3*SZREG(sp)
+ REG_S a0, 1*SZREG(sp)
+ REG_S a1, 0*SZREG(sp)
+ addi s0, sp, 4*SZREG
.endm
.macro RESTORE_ABI_STATE
- ld ra, 8(sp)
- ld s0, 0(sp)
+ REG_L ra, 1*SZREG(sp)
+ REG_L s0, 0*SZREG(sp)
addi sp, sp, 16
.endm
.macro RESTORE_RET_ABI_STATE
- ld ra, 24(sp)
- ld s0, 16(sp)
- ld a0, 8(sp)
- addi sp, sp, 32
+ REG_L ra, 3*SZREG(sp)
+ REG_L s0, 2*SZREG(sp)
+ REG_L a0, 1*SZREG(sp)
+ REG_L a1, 0*SZREG(sp)
+ addi sp, sp, 4*SZREG
.endm
ENTRY(ftrace_stub)
@@ -71,9 +73,9 @@ ENTRY(return_to_handler)
mv a0, t6
#endif
call ftrace_return_to_handler
- mv a1, a0
+ mv a2, a0
RESTORE_RET_ABI_STATE
- jalr a1
+ jalr a2
ENDPROC(return_to_handler)
#endif
@@ -82,16 +84,16 @@ ENTRY(MCOUNT_NAME)
la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
- ld t1, 0(t0)
+ REG_L t1, 0(t0)
bne t1, t4, do_ftrace_graph_caller
la t3, ftrace_graph_entry
- ld t2, 0(t3)
+ REG_L t2, 0(t3)
la t6, ftrace_graph_entry_stub
bne t2, t6, do_ftrace_graph_caller
#endif
la t3, ftrace_trace_function
- ld t5, 0(t3)
+ REG_L t5, 0(t3)
bne t5, t4, do_trace
ret
@@ -101,10 +103,10 @@ ENTRY(MCOUNT_NAME)
* prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
*/
do_ftrace_graph_caller:
- addi a0, s0, -8
+ addi a0, s0, -SZREG
mv a1, ra
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
- ld a2, -16(s0)
+ REG_L a2, -2*SZREG(s0)
#endif
SAVE_ABI_STATE
call prepare_ftrace_return
@@ -117,7 +119,7 @@ do_ftrace_graph_caller:
* (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
*/
do_trace:
- ld a1, -8(s0)
+ REG_L a1, -SZREG(s0)
mv a0, ra
SAVE_ABI_STATE
diff --git a/arch/riscv/kernel/probes/Makefile b/arch/riscv/kernel/probes/Makefile
index 7f0840dcc31b..c40139e9ca47 100644
--- a/arch/riscv/kernel/probes/Makefile
+++ b/arch/riscv/kernel/probes/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o
-obj-$(CONFIG_KPROBES) += kprobes_trampoline.o
+obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o
obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o
CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index e6e950b7cf32..f21592d20306 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -345,19 +345,6 @@ int __init arch_populate_kprobe_blacklist(void)
return ret;
}
-void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
-{
- return (void *)kretprobe_trampoline_handler(regs, NULL);
-}
-
-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
- struct pt_regs *regs)
-{
- ri->ret_addr = (kprobe_opcode_t *)regs->ra;
- ri->fp = NULL;
- regs->ra = (unsigned long) &__kretprobe_trampoline;
-}
-
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
diff --git a/arch/riscv/kernel/probes/rethook.c b/arch/riscv/kernel/probes/rethook.c
new file mode 100644
index 000000000000..5c27c1f50989
--- /dev/null
+++ b/arch/riscv/kernel/probes/rethook.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generic return hook for riscv.
+ */
+
+#include <linux/kprobes.h>
+#include <linux/rethook.h>
+#include "rethook.h"
+
+/* This is called from arch_rethook_trampoline() */
+unsigned long __used arch_rethook_trampoline_callback(struct pt_regs *regs)
+{
+ return rethook_trampoline_handler(regs, regs->s0);
+}
+
+NOKPROBE_SYMBOL(arch_rethook_trampoline_callback);
+
+void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount)
+{
+ rhn->ret_addr = regs->ra;
+ rhn->frame = regs->s0;
+
+ /* replace return addr with trampoline */
+ regs->ra = (unsigned long)arch_rethook_trampoline;
+}
+
+NOKPROBE_SYMBOL(arch_rethook_prepare);
diff --git a/arch/riscv/kernel/probes/rethook.h b/arch/riscv/kernel/probes/rethook.h
new file mode 100644
index 000000000000..4758f7e3ce88
--- /dev/null
+++ b/arch/riscv/kernel/probes/rethook.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __RISCV_RETHOOK_H
+#define __RISCV_RETHOOK_H
+
+unsigned long arch_rethook_trampoline_callback(struct pt_regs *regs);
+void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount);
+
+#endif
diff --git a/arch/riscv/kernel/probes/kprobes_trampoline.S b/arch/riscv/kernel/probes/rethook_trampoline.S
index 7bdb09ded39b..21bac92a170a 100644
--- a/arch/riscv/kernel/probes/kprobes_trampoline.S
+++ b/arch/riscv/kernel/probes/rethook_trampoline.S
@@ -75,13 +75,13 @@
REG_L x31, PT_T6(sp)
.endm
-ENTRY(__kretprobe_trampoline)
+ENTRY(arch_rethook_trampoline)
addi sp, sp, -(PT_SIZE_ON_STACK)
save_all_base_regs
move a0, sp /* pt_regs */
- call trampoline_probe_handler
+ call arch_rethook_trampoline_callback
/* use the result as the return-address */
move ra, a0
@@ -90,4 +90,4 @@ ENTRY(__kretprobe_trampoline)
addi sp, sp, PT_SIZE_ON_STACK
ret
-ENDPROC(__kretprobe_trampoline)
+ENDPROC(arch_rethook_trampoline)
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 5c591123c440..bfb2afa4135f 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -313,19 +313,27 @@ static void do_signal(struct pt_regs *regs)
}
/*
- * notification of userspace execution resumption
- * - triggered by the _TIF_WORK_MASK flags
+ * Handle any pending work on the resume-to-userspace path, as indicated by
+ * _TIF_WORK_MASK. Entered from assembly with IRQs off.
*/
-asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
- unsigned long thread_info_flags)
+asmlinkage __visible void do_work_pending(struct pt_regs *regs,
+ unsigned long thread_info_flags)
{
- if (thread_info_flags & _TIF_UPROBE)
- uprobe_notify_resume(regs);
-
- /* Handle pending signal delivery */
- if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
- do_signal(regs);
-
- if (thread_info_flags & _TIF_NOTIFY_RESUME)
- resume_user_mode_work(regs);
+ do {
+ if (thread_info_flags & _TIF_NEED_RESCHED) {
+ schedule();
+ } else {
+ local_irq_enable();
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+ /* Handle pending signal delivery */
+ if (thread_info_flags & (_TIF_SIGPENDING |
+ _TIF_NOTIFY_SIGNAL))
+ do_signal(regs);
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(regs);
+ }
+ local_irq_disable();
+ thread_info_flags = read_thread_flags();
+ } while (thread_info_flags & _TIF_WORK_MASK);
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 08d11a53f39e..75c8dd64fc48 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -16,6 +16,8 @@
#ifdef CONFIG_FRAME_POINTER
+extern asmlinkage void ret_from_exception(void);
+
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
bool (*fn)(void *, unsigned long), void *arg)
{
@@ -58,7 +60,14 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
} else {
fp = frame->fp;
pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
- (unsigned long *)(fp - 8));
+ &frame->ra);
+ if (pc == (unsigned long)ret_from_exception) {
+ if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+ break;
+
+ pc = ((struct pt_regs *)sp)->epc;
+ fp = ((struct pt_regs *)sp)->s0;
+ }
}
}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 7abd8e4c4df6..549bde5c970a 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -208,18 +208,18 @@ int is_valid_bugaddr(unsigned long pc)
#endif /* CONFIG_GENERIC_BUG */
#ifdef CONFIG_VMAP_STACK
+/*
+ * Extra stack space that allows us to provide panic messages when the kernel
+ * has overflowed its stack.
+ */
static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
overflow_stack)__aligned(16);
/*
- * shadow stack, handled_ kernel_ stack_ overflow(in kernel/entry.S) is used
- * to get per-cpu overflow stack(get_overflow_stack).
+ * A temporary stack for use by handle_kernel_stack_overflow. This is used so
+ * we can call into C code to get the per-hart overflow stack. Usage of this
+ * stack must be protected by spin_shadow_stack.
*/
-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)];
-asmlinkage unsigned long get_overflow_stack(void)
-{
- return (unsigned long)this_cpu_ptr(overflow_stack) +
- OVERFLOW_STACK_SIZE;
-}
+long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
/*
* A pseudo spinlock to protect the shadow stack from being used by multiple
@@ -230,6 +230,12 @@ asmlinkage unsigned long get_overflow_stack(void)
*/
unsigned long spin_shadow_stack;
+asmlinkage unsigned long get_overflow_stack(void)
+{
+ return (unsigned long)this_cpu_ptr(overflow_stack) +
+ OVERFLOW_STACK_SIZE;
+}
+
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
unsigned long tsk_stk = (unsigned long)current->stack;
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index d76aabf4b94d..2ac177c05352 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,6 +13,8 @@ obj-y += extable.o
obj-$(CONFIG_MMU) += fault.o pageattr.o
obj-y += cacheflush.o
obj-y += context.o
+obj-y += pgtable.o
+obj-y += pmem.o
ifeq ($(CONFIG_MMU),y)
obj-$(CONFIG_SMP) += tlbflush.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 57b40a350420..3cc07ed45aeb 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -83,6 +83,13 @@ void flush_icache_pte(pte_t pte)
{
struct page *page = pte_page(pte);
+ /*
+ * HugeTLB pages are always fully mapped, so only setting head page's
+ * PG_dcache_clean flag is enough.
+ */
+ if (PageHuge(page))
+ page = compound_head(page);
+
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
flush_icache_all();
}
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 7acbfbd14557..80ce9caba8d2 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -196,6 +196,16 @@ switch_mm_fast:
if (need_flush_tlb)
local_flush_tlb_all();
+#ifdef CONFIG_SMP
+ else {
+ cpumask_t *mask = &mm->context.tlb_stale_mask;
+
+ if (cpumask_test_cpu(cpu, mask)) {
+ cpumask_clear_cpu(cpu, mask);
+ local_flush_tlb_all_asid(cntx & asid_mask);
+ }
+ }
+#endif
}
static void set_mm_noasid(struct mm_struct *mm)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 50a1b6edd491..478d6763a01a 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -672,10 +672,11 @@ void __init create_pgd_mapping(pgd_t *pgdp,
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
/* Upgrade to PMD_SIZE mappings whenever possible */
- if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
- return PAGE_SIZE;
+ base &= PMD_SIZE - 1;
+ if (!base && size >= PMD_SIZE)
+ return PMD_SIZE;
- return PMD_SIZE;
+ return PAGE_SIZE;
}
#ifdef CONFIG_XIP_KERNEL
@@ -926,15 +927,15 @@ static void __init pt_ops_set_early(void)
*/
static void __init pt_ops_set_fixmap(void)
{
- pt_ops.alloc_pte = kernel_mapping_pa_to_va((uintptr_t)alloc_pte_fixmap);
- pt_ops.get_pte_virt = kernel_mapping_pa_to_va((uintptr_t)get_pte_virt_fixmap);
+ pt_ops.alloc_pte = kernel_mapping_pa_to_va(alloc_pte_fixmap);
+ pt_ops.get_pte_virt = kernel_mapping_pa_to_va(get_pte_virt_fixmap);
#ifndef __PAGETABLE_PMD_FOLDED
- pt_ops.alloc_pmd = kernel_mapping_pa_to_va((uintptr_t)alloc_pmd_fixmap);
- pt_ops.get_pmd_virt = kernel_mapping_pa_to_va((uintptr_t)get_pmd_virt_fixmap);
- pt_ops.alloc_pud = kernel_mapping_pa_to_va((uintptr_t)alloc_pud_fixmap);
- pt_ops.get_pud_virt = kernel_mapping_pa_to_va((uintptr_t)get_pud_virt_fixmap);
- pt_ops.alloc_p4d = kernel_mapping_pa_to_va((uintptr_t)alloc_p4d_fixmap);
- pt_ops.get_p4d_virt = kernel_mapping_pa_to_va((uintptr_t)get_p4d_virt_fixmap);
+ pt_ops.alloc_pmd = kernel_mapping_pa_to_va(alloc_pmd_fixmap);
+ pt_ops.get_pmd_virt = kernel_mapping_pa_to_va(get_pmd_virt_fixmap);
+ pt_ops.alloc_pud = kernel_mapping_pa_to_va(alloc_pud_fixmap);
+ pt_ops.get_pud_virt = kernel_mapping_pa_to_va(get_pud_virt_fixmap);
+ pt_ops.alloc_p4d = kernel_mapping_pa_to_va(alloc_p4d_fixmap);
+ pt_ops.get_p4d_virt = kernel_mapping_pa_to_va(get_p4d_virt_fixmap);
#endif
}
@@ -1110,9 +1111,9 @@ static void __init setup_vm_final(void)
if (end >= __pa(PAGE_OFFSET) + memory_limit)
end = __pa(PAGE_OFFSET) + memory_limit;
- map_size = best_map_size(start, end - start);
for (pa = start; pa < end; pa += map_size) {
va = (uintptr_t)__va(pa);
+ map_size = best_map_size(pa, end - pa);
create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
pgprot_from_va(va));
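
The init.c change above now chooses a mapping size per chunk instead of once for the whole linear range. A hedged worked example with invented addresses (PMD_SIZE is 2 MiB here):

/*
 * Worked example (addresses invented) of the per-iteration
 * best_map_size() call in setup_vm_final() above:
 *
 *   pa = 0x80000000, end - pa = 7 MiB -> 2 MiB aligned and at least
 *       PMD_SIZE remaining, so three 2 MiB mappings are created;
 *   pa = 0x80600000, end - pa = 1 MiB -> less than PMD_SIZE remains,
 *       so the tail falls back to 4 KiB PAGE_SIZE mappings.
 *
 * Before this change the size was computed once from the start and
 * length of the whole region, so any unaligned start or length forced
 * 4 KiB mappings everywhere.
 */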
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
new file mode 100644
index 000000000000..6645ead1a7c1
--- /dev/null
+++ b/arch/riscv/mm/pgtable.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/pgalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/pgtable.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
+{
+ return 0;
+}
+
+void p4d_clear_huge(p4d_t *p4d)
+{
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+{
+ pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);
+
+ set_pud(pud, new_pud);
+ return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+ if (!pud_leaf(READ_ONCE(*pud)))
+ return 0;
+ pud_clear(pud);
+ return 1;
+}
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ pmd_t *pmd = pud_pgtable(*pud);
+ int i;
+
+ pud_clear(pud);
+
+ flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_none(pmd[i])) {
+ pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);
+
+ pte_free_kernel(NULL, pte);
+ }
+ }
+
+ pmd_free(NULL, pmd);
+
+ return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+{
+ pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);
+
+ set_pmd(pmd, new_pmd);
+ return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+ if (!pmd_leaf(READ_ONCE(*pmd)))
+ return 0;
+ pmd_clear(pmd);
+ return 1;
+}
+
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
+
+ pmd_clear(pmd);
+
+ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+ pte_free_kernel(NULL, pte);
+ return 1;
+}
+
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
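
These pmd/pud helpers back the HAVE_ARCH_HUGE_VMAP selection in the Kconfig hunk at the top of this diff: sufficiently large, well-aligned vmalloc/ioremap ranges can now be mapped with PMD or PUD leaf entries instead of 4 KiB PTEs. A hedged illustration; the physical address and size are invented, and whether a leaf is actually used depends on alignment and protections:

/* Illustrative only: a 1 GiB-aligned, 1 GiB-sized window is a
 * candidate for a single PUD leaf via pud_set_huge() above. */
#include <linux/io.h>
#include <linux/sizes.h>

static void __iomem *example_map_large_window(void)
{
	return ioremap(0x100000000ULL, SZ_1G);
}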
diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c
index 19cf25a74ee2..9b18bda74154 100644
--- a/arch/riscv/mm/physaddr.c
+++ b/arch/riscv/mm/physaddr.c
@@ -22,7 +22,7 @@ EXPORT_SYMBOL(__virt_to_phys);
phys_addr_t __phys_addr_symbol(unsigned long x)
{
unsigned long kernel_start = kernel_map.virt_addr;
- unsigned long kernel_end = (unsigned long)_end;
+ unsigned long kernel_end = kernel_start + kernel_map.size;
/*
* Boundary checking against the kernel image mapping.
diff --git a/arch/riscv/mm/pmem.c b/arch/riscv/mm/pmem.c
new file mode 100644
index 000000000000..089df92ae876
--- /dev/null
+++ b/arch/riscv/mm/pmem.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/libnvdimm.h>
+
+#include <asm/cacheflush.h>
+
+void arch_wb_cache_pmem(void *addr, size_t size)
+{
+ ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size);
+}
+EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
+
+void arch_invalidate_pmem(void *addr, size_t size)
+{
+ ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size);
+}
+EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 37ed760d007c..ce7dfc81bb3f 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -5,23 +5,7 @@
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
-
-static inline void local_flush_tlb_all_asid(unsigned long asid)
-{
- __asm__ __volatile__ ("sfence.vma x0, %0"
- :
- : "r" (asid)
- : "memory");
-}
-
-static inline void local_flush_tlb_page_asid(unsigned long addr,
- unsigned long asid)
-{
- __asm__ __volatile__ ("sfence.vma %0, %1"
- :
- : "r" (addr), "r" (asid)
- : "memory");
-}
+#include <asm/tlbflush.h>
void flush_tlb_all(void)
{
@@ -31,6 +15,7 @@ void flush_tlb_all(void)
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
unsigned long size, unsigned long stride)
{
+ struct cpumask *pmask = &mm->context.tlb_stale_mask;
struct cpumask *cmask = mm_cpumask(mm);
unsigned int cpuid;
bool broadcast;
@@ -44,6 +29,15 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
if (static_branch_unlikely(&use_asid_allocator)) {
unsigned long asid = atomic_long_read(&mm->context.id);
+ /*
+ * TLB will be immediately flushed on harts concurrently
+ * executing this MM context. TLB flush on other harts
+ * is deferred until this MM context migrates there.
+ */
+ cpumask_setall(pmask);
+ cpumask_clear_cpu(cpuid, pmask);
+ cpumask_andnot(pmask, pmask, cmask);
+
if (broadcast) {
sbi_remote_sfence_vma_asid(cmask, start, size, asid);
} else if (size <= stride) {