author    Heiko Carstens <hca@linux.ibm.com>    2024-02-03 11:45:13 +0100
committer Heiko Carstens <hca@linux.ibm.com>    2024-02-16 14:30:16 +0100
commit    ed3a0a011a9c33d81a0d024882ee433c42bfccae (patch)
tree      0987d6dba643b39e1152bb77b97c1ece26cef3d7 /arch/s390/kvm
parent    4eed43de9ba0ae3af6716544408d185a152424cd (diff)
s390/kvm: convert to regular kernel fpu user
KVM modifies the kernel fpu's regs pointer to its own area to implement its
custom version of preemptible kernel fpu context. With general support for
preemptible kernel fpu context there is no need for the extra complexity in
KVM code anymore.

Therefore convert KVM to a regular kernel fpu user. In particular this means
that all TIF_FPU checks can be removed, since the fpu register context will
never be changed by other kernel fpu users, and also the fpu register context
will be restored if a thread is preempted.

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
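As a rough sketch, the regular kernel fpu user pattern this commit adopts
looks like the function below. All identifiers are taken from the diff that
follows; the surrounding example function is illustrative only, not part of
the patch:

    #include <asm/fpu.h>

    static void example_kernel_fpu_user(void)
    {
        DECLARE_KERNEL_FPU_ONSTACK(fpu);

        /* save the previous fpu state (fp control reg + vector regs) */
        kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
        /*
         * fpu/vector registers may be used freely here; with preemptible
         * kernel fpu context they survive preemption and are restored
         * when the thread is scheduled back in.
         */
        kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);
    }

In kvm_arch_vcpu_ioctl_run() below, kernel_fpu_begin() and kernel_fpu_end()
bracket the whole vcpu run loop, which is what lets sync_regs() and
store_regs() load and save the guest fpu registers directly.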
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--    arch/s390/kvm/interrupt.c     8
-rw-r--r--    arch/s390/kvm/kvm-s390.c     34
-rw-r--r--    arch/s390/kvm/vsie.c          3
3 files changed, 22 insertions, 23 deletions
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 9315203c2786..c81708acd1f4 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -584,7 +584,11 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
mci.val = mchk->mcic;
/* take care of lazy register loading */
- save_user_fpu_regs();
+ fpu_stfpc(&vcpu->run->s.regs.fpc);
+ if (cpu_has_vx())
+ save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+ else
+ save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
save_access_regs(vcpu->run->s.regs.acrs);
if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
@@ -648,7 +652,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
}
rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
vcpu->run->s.regs.gprs, 128);
- rc |= put_guest_lc(vcpu, current->thread.ufpu.fpc,
+ rc |= put_guest_lc(vcpu, vcpu->run->s.regs.fpc,
(u32 __user *) __LC_FP_CREG_SAVE_AREA);
rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
(u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
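The hunk above replaces save_user_fpu_regs() with an open-coded save of the
guest fpu state. A minimal sketch of the pattern, assuming (as on s390 with
the vector facility) that floating point registers 0-15 are embedded in
vector registers 0-15, so only one of the two save variants is ever needed:

    /* illustrative sketch of the save pattern used throughout this patch */
    fpu_stfpc(&vcpu->run->s.regs.fpc);      /* store fp control register */
    if (cpu_has_vx())                       /* vector facility available? */
        save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
    else
        save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);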
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3ce4029cabc2..8467945344b5 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4829,8 +4829,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
vcpu->run->s.regs.gprs,
sizeof(sie_page->pv_grregs));
}
- if (test_thread_flag(TIF_FPU))
- load_user_fpu_regs();
exit_reason = sie64a(vcpu->arch.sie_block,
vcpu->run->s.regs.gprs);
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
@@ -4951,16 +4949,11 @@ static void sync_regs(struct kvm_vcpu *vcpu)
}
save_access_regs(vcpu->arch.host_acrs);
restore_access_regs(vcpu->run->s.regs.acrs);
- /* save host (userspace) fprs/vrs */
- save_user_fpu_regs();
- vcpu->arch.host_fpregs.fpc = current->thread.ufpu.fpc;
- vcpu->arch.host_fpregs.regs = current->thread.ufpu.regs;
+ fpu_lfpc_safe(&vcpu->run->s.regs.fpc);
if (cpu_has_vx())
- current->thread.ufpu.regs = vcpu->run->s.regs.vrs;
+ load_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
else
- current->thread.ufpu.regs = vcpu->run->s.regs.fprs;
- current->thread.ufpu.fpc = vcpu->run->s.regs.fpc;
-
+ load_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
/* Sync fmt2 only data */
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
sync_regs_fmt2(vcpu);
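sync_regs() above is the load counterpart of that save pattern. A sketch of
it follows; the reading that fpu_lfpc_safe() tolerates an invalid guest fpc
value instead of taking an unhandled exception is my assumption, not stated
in this patch:

    /* illustrative sketch: load guest fpu state from the kvm_run area */
    fpu_lfpc_safe(&vcpu->run->s.regs.fpc);  /* load fp control register;
                                             * assumed safe against a bad fpc */
    if (cpu_has_vx())
        load_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
    else
        load_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);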
@@ -5021,12 +5014,11 @@ static void store_regs(struct kvm_vcpu *vcpu)
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
save_access_regs(vcpu->run->s.regs.acrs);
restore_access_regs(vcpu->arch.host_acrs);
- /* Save guest register state */
- save_user_fpu_regs();
- vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
- /* Restore will be done lazily at return */
- current->thread.ufpu.fpc = vcpu->arch.host_fpregs.fpc;
- current->thread.ufpu.regs = vcpu->arch.host_fpregs.regs;
+ fpu_stfpc(&vcpu->run->s.regs.fpc);
+ if (cpu_has_vx())
+ save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+ else
+ save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
store_regs_fmt2(vcpu);
}
@@ -5034,6 +5026,7 @@ static void store_regs(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
+ DECLARE_KERNEL_FPU_ONSTACK(fpu);
int rc;
/*
@@ -5075,6 +5068,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out;
}
+ kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
sync_regs(vcpu);
enable_cpu_timer_accounting(vcpu);
@@ -5098,6 +5092,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
disable_cpu_timer_accounting(vcpu);
store_regs(vcpu);
+ kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);
kvm_sigset_deactivate(vcpu);
@@ -5172,8 +5167,11 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
* switch in the run ioctl. Let's update our copies before we save
* it into the save area
*/
- save_user_fpu_regs();
- vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
+ fpu_stfpc(&vcpu->run->s.regs.fpc);
+ if (cpu_has_vx())
+ save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+ else
+ save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
save_access_regs(vcpu->run->s.regs.acrs);
return kvm_s390_store_status_unloaded(vcpu, addr);
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index e0f79c9a4852..3ec11612805d 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -18,7 +18,6 @@
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
-#include <asm/fpu.h>
#include <asm/facility.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -1149,8 +1148,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
*/
vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
barrier();
- if (test_thread_flag(TIF_FPU))
- load_user_fpu_regs();
if (!kvm_s390_vcpu_sie_inhibited(vcpu))
rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
barrier();