Diffstat (limited to 'arch/riscv/kernel/patch.c'):
 arch/riscv/kernel/patch.c | 46 +++++++++++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 19 deletions(-)
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 8a4fc65ee022..5805791cd5b5 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -5,22 +5,21 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
-struct riscv_insn_patch {
+struct patch_insn {
void *addr;
u32 insn;
atomic_t cpu_count;
};
#ifdef CONFIG_MMU
-static DEFINE_RAW_SPINLOCK(patch_lock);
-
-static void __kprobes *patch_map(void *addr, int fixmap)
+static void *patch_map(void *addr, int fixmap)
{
uintptr_t uintaddr = (uintptr_t) addr;
struct page *page;
@@ -37,20 +36,26 @@ static void __kprobes *patch_map(void *addr, int fixmap)
return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
(uintaddr & ~PAGE_MASK));
}
+NOKPROBE_SYMBOL(patch_map);
-static void __kprobes patch_unmap(int fixmap)
+static void patch_unmap(int fixmap)
{
clear_fixmap(fixmap);
}
+NOKPROBE_SYMBOL(patch_unmap);
-static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
+static int patch_insn_write(void *addr, const void *insn, size_t len)
{
void *waddr = addr;
bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
- unsigned long flags = 0;
int ret;
- raw_spin_lock_irqsave(&patch_lock, flags);
+ /*
+ * Before reaching here, the caller is expected to hold
+ * text_mutex already, so we do not need to take another
+ * lock here, which keeps patching safe across cores.
+ */
+ lockdep_assert_held(&text_mutex);
if (across_pages)
patch_map(addr + len, FIX_TEXT_POKE1);
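
The assertion above encodes the new locking contract: callers must take text_mutex around any patching, instead of relying on the removed patch_lock. A minimal sketch of a conforming caller (hypothetical; addr and insn are placeholders, not part of this patch):

	mutex_lock(&text_mutex);	/* text_mutex comes from <linux/memory.h> */
	ret = patch_text_nosync(addr, &insn, GET_INSN_LENGTH(insn));
	mutex_unlock(&text_mutex);
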
@@ -64,38 +69,39 @@ static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
if (across_pages)
patch_unmap(FIX_TEXT_POKE1);
- raw_spin_unlock_irqrestore(&patch_lock, flags);
-
return ret;
}
+NOKPROBE_SYMBOL(patch_insn_write);
#else
-static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
+static int patch_insn_write(void *addr, const void *insn, size_t len)
{
return probe_kernel_write(addr, insn, len);
}
+NOKPROBE_SYMBOL(patch_insn_write);
#endif /* CONFIG_MMU */
-int __kprobes riscv_patch_text_nosync(void *addr, const void *insns, size_t len)
+int patch_text_nosync(void *addr, const void *insns, size_t len)
{
u32 *tp = addr;
int ret;
- ret = riscv_insn_write(tp, insns, len);
+ ret = patch_insn_write(tp, insns, len);
if (!ret)
flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);
return ret;
}
+NOKPROBE_SYMBOL(patch_text_nosync);
-static int __kprobes riscv_patch_text_cb(void *data)
+static int patch_text_cb(void *data)
{
- struct riscv_insn_patch *patch = data;
+ struct patch_insn *patch = data;
int ret = 0;
if (atomic_inc_return(&patch->cpu_count) == 1) {
ret =
- riscv_patch_text_nosync(patch->addr, &patch->insn,
+ patch_text_nosync(patch->addr, &patch->insn,
GET_INSN_LENGTH(patch->insn));
atomic_inc(&patch->cpu_count);
} else {
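
patch_text_cb() uses the usual stop_machine() rendezvous: the first CPU through atomic_inc_return() performs the write and then bumps cpu_count a second time; every other CPU spins until the count exceeds the number of online CPUs. A stand-alone sketch of that pattern (illustrative only, not the patch's code; do_patch() is a placeholder, not a kernel symbol):

	static int rendezvous_cb(void *data)
	{
		struct patch_insn *p = data;
		int ret = 0;

		if (atomic_inc_return(&p->cpu_count) == 1) {
			ret = do_patch(p);		/* only the first CPU writes */
			atomic_inc(&p->cpu_count);	/* release the waiters */
		} else {
			/* wait for the writer's second increment */
			while (atomic_read(&p->cpu_count) <= num_online_cpus())
				cpu_relax();
			smp_mb();	/* see the new instruction before returning */
		}
		return ret;
	}
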
@@ -106,15 +112,17 @@ static int __kprobes riscv_patch_text_cb(void *data)
return ret;
}
+NOKPROBE_SYMBOL(patch_text_cb);
-int __kprobes riscv_patch_text(void *addr, u32 insn)
+int patch_text(void *addr, u32 insn)
{
- struct riscv_insn_patch patch = {
+ struct patch_insn patch = {
.addr = addr,
.insn = insn,
.cpu_count = ATOMIC_INIT(0),
};
- return stop_machine_cpuslocked(riscv_patch_text_cb,
+ return stop_machine_cpuslocked(patch_text_cb,
&patch, cpu_online_mask);
}
+NOKPROBE_SYMBOL(patch_text);
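
For reference, a minimal sketch of invoking the renamed entry point, e.g. to plant a breakpoint (hypothetical; addr is a placeholder, and 0x00100073 is the uncompressed RISC-V ebreak encoding):

	u32 ebreak = 0x00100073;	/* ebreak instruction */
	int err;

	cpus_read_lock();		/* stop_machine_cpuslocked() expects this */
	err = patch_text(addr, ebreak);
	cpus_read_unlock();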