Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/brk-imm.h          2
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h      8
-rw-r--r--  arch/arm64/include/asm/futex.h            2
-rw-r--r--  arch/arm64/include/asm/kasan.h            8
-rw-r--r--  arch/arm64/include/asm/memory.h          56
-rw-r--r--  arch/arm64/include/asm/pgalloc.h          4
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h    1
-rw-r--r--  arch/arm64/include/asm/smp.h              8
-rw-r--r--  arch/arm64/include/asm/uaccess.h         15
-rw-r--r--  arch/arm64/include/asm/unistd.h           7
-rw-r--r--  arch/arm64/include/asm/unistd32.h         2
11 files changed, 66 insertions(+), 47 deletions(-)
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
index ed693c5bcec0..2945fe6cd863 100644
--- a/arch/arm64/include/asm/brk-imm.h
+++ b/arch/arm64/include/asm/brk-imm.h
@@ -16,10 +16,12 @@
* 0x400: for dynamic BRK instruction
* 0x401: for compile time BRK instruction
* 0x800: kernel-mode BUG() and WARN() traps
+ * 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff)
*/
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
#define BUG_BRK_IMM 0x800
+#define KASAN_BRK_IMM 0x900
#endif
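
The new 0x900-0x9ff window reserves the low byte of the BRK immediate for compiler-encoded access metadata. A hedged sketch of how a handler matching the whole window might decode it (the flag/field layout and names below are assumptions for illustration, not necessarily the in-tree ones):

#include <linux/types.h>

#define KASAN_ESR_RECOVER	0x20	/* assumed flag: report is recoverable */
#define KASAN_ESR_WRITE		0x10	/* assumed flag: access was a write */
#define KASAN_ESR_SIZE_MASK	0x0f	/* assumed field: log2 of access size */

static int kasan_brk_decode(unsigned int esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = 1UL << (esr & KASAN_ESR_SIZE_MASK);

	/* report the bad access here; skip the BRK only if recoverable */
	(void)write;
	(void)size;
	return recover ? 0 : 1;
}
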
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index c41f3fb1446c..95dbf3ef735a 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -24,15 +24,9 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
-extern const struct dma_map_ops dummy_dma_ops;
-
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- /*
- * We expect no ISA devices, and all other DMA masters are expected to
- * have someone call arch_setup_dma_ops at device creation time.
- */
- return &dummy_dma_ops;
+ return NULL;
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
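
Returning NULL leans on the generic dispatch: with no per-device ops and no arch ops, the DMA core falls back to the direct-mapping path, making a dedicated dummy_dma_ops redundant. A hedged sketch of that dispatch (resolve_dma_ops is an illustrative name; the real logic lives in the generic dma-mapping header):

#include <linux/dma-mapping.h>

static const struct dma_map_ops *resolve_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;	/* per-device ops, e.g. from an IOMMU */
	/* NULL from the arch hook means "use dma-direct" */
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}
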
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 07fe2479d310..cccb83ad7fa8 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -96,7 +96,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
u32 val, tmp;
u32 __user *uaddr;
- if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
+ if (!access_ok(_uaddr, sizeof(u32)))
return -EFAULT;
uaddr = __uaccess_mask_ptr(_uaddr);
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 8758bb008436..b52aacd2c526 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -4,12 +4,16 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_KASAN
-
#include <linux/linkage.h>
#include <asm/memory.h>
#include <asm/pgtable-types.h>
+#define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag)
+#define arch_kasan_reset_tag(addr) __tag_reset(addr)
+#define arch_kasan_get_tag(addr) __tag_get(addr)
+
+#ifdef CONFIG_KASAN
+
/*
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
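
Hoisting the arch_kasan_* definitions above the #ifdef keeps the tag accessors available regardless of CONFIG_KASAN. A hedged sketch of the consumer side — generic code providing no-op fallbacks and wrapping the hook (the helper name is illustrative):

/* Fallback for architectures that do not define the hook... */
#ifndef arch_kasan_set_tag
#define arch_kasan_set_tag(addr, tag)	((void)(tag), (addr))
#endif

/* ...and a typed wrapper around it. */
static inline void *set_pointer_tag(const void *addr, u8 tag)
{
	return (void *)arch_kasan_set_tag(addr, tag);
}
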
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index a0ee78c208c3..e1ec947e7c0c 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -53,8 +53,11 @@
#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
(UL(1) << (VA_BITS - 1)) + 1)
#define KIMAGE_VADDR (MODULES_END)
+#define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE)
+#define BPF_JIT_REGION_SIZE (SZ_128M)
+#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
-#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE)
+#define MODULES_VADDR (BPF_JIT_REGION_END)
#define MODULES_VSIZE (SZ_128M)
#define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE)
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
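
The net effect is that the 128M BPF JIT region slots in directly above the KASAN shadow, pushing the module region up by the same amount. A worked example of the resulting bounds, as standalone userspace arithmetic under assumed configuration values (VA_BITS = 48, generic KASAN with KASAN_SHADOW_SCALE_SHIFT = 3):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned va_bits = 48, scale_shift = 3;
	uint64_t va_start = 0xffffffffffffffffULL - (1ULL << va_bits) + 1;
	uint64_t shadow = 1ULL << (va_bits - scale_shift);	/* 32 TiB */
	uint64_t bpf_start = va_start + shadow;
	uint64_t bpf_end = bpf_start + (128ULL << 20);		/* SZ_128M */
	uint64_t mod_start = bpf_end;				/* MODULES_VADDR */
	uint64_t mod_end = mod_start + (128ULL << 20);		/* MODULES_END */

	printf("BPF_JIT_REGION_START = 0x%016llx\n", (unsigned long long)bpf_start);
	printf("MODULES_VADDR        = 0x%016llx\n", (unsigned long long)mod_start);
	printf("MODULES_END          = 0x%016llx\n", (unsigned long long)mod_end);
	return 0;
}

For that configuration this prints 0xffff200000000000, 0xffff200008000000 and 0xffff200010000000: the BPF JIT takes the slot MODULES_VADDR used to occupy.
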
@@ -71,13 +74,11 @@
#endif
/*
- * KASAN requires 1/8th of the kernel virtual address space for the shadow
- * region. KASAN can bloat the stack significantly, so double the (minimum)
- * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
- * on.
+ * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
+ * address space for the shadow region respectively. They can bloat the stack
+ * significantly, so double the (minimum) stack size when they are in use.
*/
#ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#ifdef CONFIG_KASAN_EXTRA
#define KASAN_THREAD_SHIFT 2
@@ -170,14 +171,6 @@
#define IOREMAP_MAX_ORDER (PMD_SHIFT)
#endif
-#ifdef CONFIG_BLK_DEV_INITRD
-#define __early_init_dt_declare_initrd(__start, __end) \
- do { \
- initrd_start = (__start); \
- initrd_end = (__end); \
- } while (0)
-#endif
-
#ifndef __ASSEMBLY__
#include <linux/bitops.h>
@@ -218,6 +211,26 @@ extern u64 vabits_user;
#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
/*
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
+ */
+#define untagged_addr(addr) \
+ ((__typeof__(addr))sign_extend64((u64)(addr), 55))
+
+#ifdef CONFIG_KASAN_SW_TAGS
+#define __tag_shifted(tag) ((u64)(tag) << 56)
+#define __tag_set(addr, tag) (__typeof__(addr))( \
+ ((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
+#define __tag_reset(addr) untagged_addr(addr)
+#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
+#else
+#define __tag_set(addr, tag) (addr)
+#define __tag_reset(addr) (addr)
+#define __tag_get(addr) 0
+#endif
+
+/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
@@ -300,7 +313,13 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
-#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+#define page_to_virt(page) ({ \
+ unsigned long __addr = \
+ ((__page_to_voff(page)) | PAGE_OFFSET); \
+ __addr = __tag_set(__addr, page_kasan_tag(page)); \
+ ((void *)__addr); \
+})
+
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
@@ -308,9 +327,10 @@ static inline void *phys_to_virt(phys_addr_t x)
#endif
#endif
-#define _virt_addr_is_linear(kaddr) (((u64)(kaddr)) >= PAGE_OFFSET)
-#define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \
- _virt_addr_valid(kaddr))
+#define _virt_addr_is_linear(kaddr) \
+ (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(kaddr) \
+ (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
#include <asm-generic/memory_model.h>
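
A standalone illustration (hedged; helper names are mine) of the tagging arithmetic above: the tag occupies bits 63:56, and untagging sign-extends from bit 55 — matching sign_extend64(addr, 55) — so kernel addresses get 0xff restored while user addresses get 0x00:

#include <stdio.h>
#include <stdint.h>

static uint64_t tag_set(uint64_t addr, uint8_t tag)
{
	return (addr & ~(0xffULL << 56)) | ((uint64_t)tag << 56);
}

static uint8_t tag_get(uint64_t addr)
{
	return (uint8_t)(addr >> 56);
}

static uint64_t untagged(uint64_t addr)
{
	/* sign-extend from bit 55 into bits 63:56 */
	return (uint64_t)(((int64_t)(addr << 8)) >> 8);
}

int main(void)
{
	uint64_t kaddr = 0xffff000012345678ULL;
	uint64_t tagged = tag_set(kaddr, 0x2a);

	printf("tagged   = 0x%016llx tag=0x%02x\n",
	       (unsigned long long)tagged, tag_get(tagged));
	printf("untagged = 0x%016llx\n", (unsigned long long)untagged(tagged));
	return 0;
}
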
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 2e05bcd944c8..52fa47c73bf0 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -91,13 +91,13 @@ extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one_kernel(struct mm_struct *mm)
{
return (pte_t *)__get_free_page(PGALLOC_GFP);
}
static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+pte_alloc_one(struct mm_struct *mm)
{
struct page *pte;
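
The dropped address argument was unused by these implementations, so this is a mechanical follow-up to the tree-wide signature change. A hedged caller-side sketch (alloc_kernel_pte is an illustrative wrapper, not an in-tree function):

static pte_t *alloc_kernel_pte(struct mm_struct *mm)
{
	/* was: pte_alloc_one_kernel(mm, addr) */
	return pte_alloc_one_kernel(mm);
}
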
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 22bb3ae514f5..e9b0a7d75184 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -299,6 +299,7 @@
#define TCR_A1 (UL(1) << 22)
#define TCR_ASID16 (UL(1) << 36)
#define TCR_TBI0 (UL(1) << 37)
+#define TCR_TBI1 (UL(1) << 38)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
#define TCR_NFD1 (UL(1) << 54)
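
TCR_TBI1 is the top-byte-ignore control for TTBR1 (kernel-half) addresses; with it set, the MMU disregards bits 63:56 during translation, which is what makes software-tagged kernel pointers usable. A hedged C rendering of the setup (the kernel actually programs TCR_EL1 in early assembly; this helper is illustrative):

static inline void enable_kernel_tbi(void)
{
	u64 tcr = read_sysreg(tcr_el1);

	write_sysreg(tcr | TCR_TBI1, tcr_el1);	/* ignore bits 63:56 of kernel VAs */
	isb();					/* synchronize the register write */
}
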
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 1895561839a9..18553f399e08 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -16,9 +16,11 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
+#include <linux/const.h>
+
/* Values for secondary_data.status */
#define CPU_STUCK_REASON_SHIFT (8)
-#define CPU_BOOT_STATUS_MASK ((1U << CPU_STUCK_REASON_SHIFT) - 1)
+#define CPU_BOOT_STATUS_MASK ((UL(1) << CPU_STUCK_REASON_SHIFT) - 1)
#define CPU_MMU_OFF (-1)
#define CPU_BOOT_SUCCESS (0)
@@ -29,8 +31,8 @@
/* Fatal system error detected by secondary CPU, crash the system */
#define CPU_PANIC_KERNEL (3)
-#define CPU_STUCK_REASON_52_BIT_VA (1U << CPU_STUCK_REASON_SHIFT)
-#define CPU_STUCK_REASON_NO_GRAN (2U << CPU_STUCK_REASON_SHIFT)
+#define CPU_STUCK_REASON_52_BIT_VA (UL(1) << CPU_STUCK_REASON_SHIFT)
+#define CPU_STUCK_REASON_NO_GRAN (UL(2) << CPU_STUCK_REASON_SHIFT)
#ifndef __ASSEMBLY__
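
Widening these to UL keeps the status word and its masks the same width on both the C and assembly sides. A hedged sketch of the decode they support (the message text, and CPU_STUCK_IN_KERNEL as the state value, are recalled from memory and may differ from the in-tree reporting):

static void report_secondary_status(unsigned long status)
{
	switch (status & CPU_BOOT_STATUS_MASK) {
	case CPU_STUCK_IN_KERNEL:
		if (status & CPU_STUCK_REASON_52_BIT_VA)
			pr_crit("CPU does not support 52-bit VAs\n");
		if (status & CPU_STUCK_REASON_NO_GRAN)
			pr_crit("CPU does not support the chosen page size\n");
		break;
	default:
		/* CPU_BOOT_SUCCESS, CPU_KILL_ME, CPU_PANIC_KERNEL, ... */
		break;
	}
}
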
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index fad33f5fde47..547d7a0c9d05 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -95,14 +95,7 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
return ret;
}
-/*
- * When dealing with data aborts, watchpoints, or instruction traps we may end
- * up with a tagged userland pointer. Clear the tag to get a sane pointer to
- * pass on to access_ok(), for instance.
- */
-#define untagged_addr(addr) sign_extend64(addr, 55)
-
-#define access_ok(type, addr, size) __range_ok(addr, size)
+#define access_ok(addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
#define _ASM_EXTABLE(from, to) \
@@ -308,7 +301,7 @@ do { \
({ \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
- if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
+ if (access_ok(__p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
__get_user_err((x), __p, (err)); \
} else { \
@@ -377,7 +370,7 @@ do { \
({ \
__typeof__(*(ptr)) __user *__p = (ptr); \
might_fault(); \
- if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
+ if (access_ok(__p, sizeof(*__p))) { \
__p = uaccess_mask_ptr(__p); \
__put_user_err((x), __p, (err)); \
} else { \
@@ -425,7 +418,7 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
+ if (access_ok(to, n))
n = __arch_clear_user(__uaccess_mask_ptr(to), n);
return n;
}
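
With untagged_addr() relocated to memory.h and the unused VERIFY_READ/VERIFY_WRITE argument removed tree-wide, every access_ok() caller shrinks by one argument. A hedged before/after sketch (read_user_word is an illustrative helper):

static int read_user_word(u32 __user *uptr, u32 *out)
{
	/* was: if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr))) */
	if (!access_ok(uptr, sizeof(*uptr)))
		return -EFAULT;
	return get_user(*out, uptr);
}
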
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index b13ca091f833..a7b1fc58ffdf 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -40,10 +40,11 @@
* The following SVCs are ARM private.
*/
#define __ARM_NR_COMPAT_BASE 0x0f0000
-#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
-#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
+#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2)
+#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
+#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 399
+#define __NR_compat_syscalls 400
#endif
#define __ARCH_WANT_SYS_CLONE
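
__ARM_NR_COMPAT_END gives the ARM-private SVC space an explicit upper bound, so dispatch can do a single range check instead of enumerating known numbers. A hedged sketch (the function name is illustrative):

static bool is_arm_private_compat_syscall(unsigned long scno)
{
	return scno >= __ARM_NR_COMPAT_BASE && scno < __ARM_NR_COMPAT_END;
}
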
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 2cd6dcf8d246..04ee190b90fe 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -819,6 +819,8 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free)
__SYSCALL(__NR_statx, sys_statx)
#define __NR_rseq 398
__SYSCALL(__NR_rseq, sys_rseq)
+#define __NR_io_pgetevents 399
+__SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents)
/*
* Please add new compat syscalls above this comment and update