summaryrefslogtreecommitdiff
path: root/arch/arm64/mm
diff options
context:
space:
mode:
authorArd Biesheuvel <ardb@kernel.org>2022-06-24 17:06:32 +0200
committerWill Deacon <will@kernel.org>2022-06-24 17:18:09 +0100
commit0d9b1ffefabee93727bae68201593fac80a79002 (patch)
tree8c0b7a0645e617ca07c18f2c5ccde862fdf3c794 /arch/arm64/mm
parent475031b6ed43d208925c81bea612f48c3259c3c8 (diff)
arm64: mm: make vabits_actual a build time constant if possible
Currently, we only support 52-bit virtual addressing on 64k pages configurations, and in all other cases, vabits_actual is guaranteed to equal VA_BITS (== VA_BITS_MIN). So get rid of the variable entirely in that case. While at it, move the assignment out of the asm entry code - it has no need to be there. Signed-off-by: Ard Biesheuvel <ardb@kernel.org> Link: https://lore.kernel.org/r/20220624150651.1358849-3-ardb@kernel.org Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--arch/arm64/mm/init.c15
-rw-r--r--arch/arm64/mm/mmu.c4
2 files changed, 17 insertions, 2 deletions
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 339ee84e5a61..1faa6760895e 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -265,7 +265,20 @@ early_param("mem", early_mem);
void __init arm64_memblock_init(void)
{
- s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
+ s64 linear_region_size;
+
+#if VA_BITS > 48
+	if (cpuid_feature_extract_unsigned_field(
+				read_sysreg_s(SYS_ID_AA64MMFR2_EL1),
+				ID_AA64MMFR2_LVA_SHIFT))
+		vabits_actual = VA_BITS;
+
+	/* make the variable visible to secondaries with the MMU off */
+	dcache_clean_inval_poc((u64)&vabits_actual,
+			       (u64)&vabits_actual + sizeof(vabits_actual));
+#endif
+
+	linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
/*
* Corner case: 52-bit VA capable systems running KVM in nVHE mode may
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index fde2b326419a..88b4177254a0 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -46,8 +46,10 @@
u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
-u64 __section(".mmuoff.data.write") vabits_actual;
+#if VA_BITS > 48
+u64 vabits_actual __ro_after_init = VA_BITS_MIN;
EXPORT_SYMBOL(vabits_actual);
+#endif
u64 kimage_vaddr __ro_after_init = (u64)&_text;
EXPORT_SYMBOL(kimage_vaddr);