| author | Mark Brown <broonie@kernel.org> | 2021-05-18 17:24:52 +0100 |
| --- | --- | --- |
| committer | Mark Brown <broonie@kernel.org> | 2021-05-18 17:24:52 +0100 |
| commit | c37fe6aff89cb0d842993fe2f69e48bf3ebe0ab0 (patch) | |
| tree | 2a322c48218f7006bab789b7bf16ec58b129a096 /arch/x86/mm/init.c | |
| parent | d7aed20d446d8c87f5e13adf73281056b0064a45 (diff) | |
| parent | d07f6ca923ea0927a1024dfccafc5b53b61cfecc (diff) | |
Merge tag 'v5.13-rc2' into spi-5.13
Linux 5.13-rc2
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r-- | arch/x86/mm/init.c | 8 |
1 files changed, 4 insertions, 4 deletions
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index dd694fb93916..75ef19aa8903 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -29,7 +29,7 @@
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
- * is only compied when SMP=y.
+ * is only compiled when SMP=y.
  */
 #define CREATE_TRACE_POINTS
 #include <trace/events/tlb.h>
@@ -756,7 +756,7 @@ void __init init_mem_mapping(void)
 
 #ifdef CONFIG_X86_64
 	if (max_pfn > max_low_pfn) {
-		/* can we preseve max_low_pfn ?*/
+		/* can we preserve max_low_pfn ?*/
 		max_low_pfn = max_pfn;
 	}
 #else
@@ -939,7 +939,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
 	/*
 	 * end could be not aligned, and We can not align that,
-	 * decompresser could be confused by aligned initrd_end
+	 * decompressor could be confused by aligned initrd_end
 	 * We already reserve the end partial page before in
 	 *   - i386_start_kernel()
 	 *   - x86_64_start_kernel()
@@ -1017,7 +1017,7 @@ void __init zone_sizes_init(void)
 	free_area_init(max_zone_pfns);
 }
 
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+__visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
 	.loaded_mm = &init_mm,
 	.next_asid = 1,
 	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */