-rw-r--r--   arch/x86/coco/sev/core.c      | 131
-rw-r--r--   arch/x86/include/asm/sev.h    |   4
-rw-r--r--   arch/x86/mm/mem_encrypt_amd.c |   2
3 files changed, 137 insertions, 0 deletions
diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index c7b4270d0e18..97f445f3366a 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -954,6 +954,137 @@ void snp_accept_memory(phys_addr_t start, phys_addr_t end)
 	set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
 }
 
+static void set_pte_enc(pte_t *kpte, int level, void *va)
+{
+	struct pte_enc_desc d = {
+		.kpte = kpte,
+		.pte_level = level,
+		.va = va,
+		.encrypt = true
+	};
+
+	prepare_pte_enc(&d);
+	set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
+}
+
+static void unshare_all_memory(void)
+{
+	unsigned long addr, end, size, ghcb;
+	struct sev_es_runtime_data *data;
+	unsigned int npages, level;
+	bool skipped_addr;
+	pte_t *pte;
+	int cpu;
+
+	/* Unshare the direct mapping. */
+	addr = PAGE_OFFSET;
+	end = PAGE_OFFSET + get_max_mapped();
+
+	while (addr < end) {
+		pte = lookup_address(addr, &level);
+		size = page_level_size(level);
+		npages = size / PAGE_SIZE;
+		skipped_addr = false;
+
+		if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) {
+			addr += size;
+			continue;
+		}
+
+		/*
+		 * Ensure that all the per-CPU GHCBs are made private at the
+		 * end of the unsharing loop so that the switch to the slower
+		 * MSR protocol happens last.
+		 */
+		for_each_possible_cpu(cpu) {
+			data = per_cpu(runtime_data, cpu);
+			ghcb = (unsigned long)&data->ghcb_page;
+
+			if (addr <= ghcb && ghcb <= addr + size) {
+				skipped_addr = true;
+				break;
+			}
+		}
+
+		if (!skipped_addr) {
+			set_pte_enc(pte, level, (void *)addr);
+			snp_set_memory_private(addr, npages);
+		}
+		addr += size;
+	}
+
+	/* Unshare all bss decrypted memory. */
+	addr = (unsigned long)__start_bss_decrypted;
+	end = (unsigned long)__start_bss_decrypted_unused;
+	npages = (end - addr) >> PAGE_SHIFT;
+
+	for (; addr < end; addr += PAGE_SIZE) {
+		pte = lookup_address(addr, &level);
+		if (!pte || !pte_decrypted(*pte) || pte_none(*pte))
+			continue;
+
+		set_pte_enc(pte, level, (void *)addr);
+	}
+	addr = (unsigned long)__start_bss_decrypted;
+	snp_set_memory_private(addr, npages);
+
+	__flush_tlb_all();
+}
+
+/* Stop new private<->shared conversions */
+void snp_kexec_begin(void)
+{
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return;
+
+	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+		return;
+
+	/*
+	 * Crash kernel ends up here with interrupts disabled: can't wait for
+	 * conversions to finish.
+	 *
+	 * If race happened, just report and proceed.
+	 */
+	if (!set_memory_enc_stop_conversion())
+		pr_warn("Failed to stop shared<->private conversions\n");
+}
+
+void snp_kexec_finish(void)
+{
+	struct sev_es_runtime_data *data;
+	unsigned int level, cpu;
+	unsigned long size;
+	struct ghcb *ghcb;
+	pte_t *pte;
+
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return;
+
+	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+		return;
+
+	unshare_all_memory();
+
+	/*
+	 * Switch to using the MSR protocol to change per-CPU GHCBs to
+	 * private. All the per-CPU GHCBs have been switched back to private,
+	 * so can't do any more GHCB calls to the hypervisor beyond this point
+	 * until the kexec'ed kernel starts running.
+	 */
+	boot_ghcb = NULL;
+	sev_cfg.ghcbs_initialized = false;
+
+	for_each_possible_cpu(cpu) {
+		data = per_cpu(runtime_data, cpu);
+		ghcb = &data->ghcb_page;
+		pte = lookup_address((unsigned long)ghcb, &level);
+		size = page_level_size(level);
+		set_pte_enc(pte, level, (void *)ghcb);
+		snp_set_memory_private((unsigned long)ghcb, (size / PAGE_SIZE));
+	}
+}
+
 static int snp_set_vmsa(void *va, void *caa, int apic_id, bool make_vmsa)
 {
 	int ret;
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 5f598937f090..91f08af31078 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -455,6 +455,8 @@ void sev_show_status(void);
 void snp_update_svsm_ca(void);
 int prepare_pte_enc(struct pte_enc_desc *d);
 void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot);
+void snp_kexec_finish(void);
+void snp_kexec_begin(void);
 
 #else	/* !CONFIG_AMD_MEM_ENCRYPT */
 
@@ -494,6 +496,8 @@ static inline void sev_show_status(void) { }
 static inline void snp_update_svsm_ca(void) { }
 static inline int prepare_pte_enc(struct pte_enc_desc *d) { return 0; }
 static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new_prot) { }
+static inline void snp_kexec_finish(void) { }
+static inline void snp_kexec_begin(void) { }
 
 #endif	/* CONFIG_AMD_MEM_ENCRYPT */
 
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index f4be81db72ee..774f9677458f 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -490,6 +490,8 @@ void __init sme_early_init(void)
 	x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
 	x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required;
 	x86_platform.guest.enc_cache_flush_required = amd_enc_cache_flush_required;
+	x86_platform.guest.enc_kexec_begin = snp_kexec_begin;
+	x86_platform.guest.enc_kexec_finish = snp_kexec_finish;
 
 	/*
 	 * AMD-SEV-ES intercepts the RDMSR to read the X2APIC ID in the
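For orientation, the minimal sketch below (not part of the patch) illustrates the intended ordering of the two hooks that the mem_encrypt_amd.c hunk wires into x86_platform.guest: enc_kexec_begin() stops any further private<->shared page-state conversions, then enc_kexec_finish() converts all remaining shared memory back to private. The actual call sites sit in the generic x86 shutdown/kexec path outside this diff, so the wrapper function and its name here are purely hypothetical.

/*
 * Hypothetical caller, for illustration only. The real invocations of
 * these hooks happen in the x86 shutdown/kexec code, not in this patch.
 */
static void example_guest_kexec_shutdown(void)
{
	/* 1. Refuse any new private<->shared page-state conversions. */
	x86_platform.guest.enc_kexec_begin();

	/*
	 * 2. Convert remaining shared memory (direct map, bss_decrypted,
	 *    per-CPU GHCBs) back to private. The GHCBs are converted last,
	 *    after which only the slower MSR protocol remains available
	 *    until the kexec'ed kernel starts running.
	 */
	x86_platform.guest.enc_kexec_finish();
}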