author    Marian Rotariu <mrotariu@bitdefender.com>          2018-04-30 12:23:01 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2018-06-21 04:01:41 +0900
commit    9332f10ed8ff01b20bfb25af59ebde534c9edf4d (patch)
tree      402d853e63c85339ec197ef53dd744828f0272a4 /arch
parent    922d8611bec3851a3a9deeef05d1b42a29683849 (diff)
x86: Delay skip of emulated hypercall instruction
[ Upstream commit 6356ee0c9602004e0a3b4b2dad68ee2ee9385b17 ]

The IP increment should be done after the hypercall emulation, after
calling the various handlers. In this way, these handlers can accurately
identify the IP of the VMCALL if they need it.

This patch keeps the same functionality for the Hyper-V handler, which
does not use the return code of the standard
kvm_skip_emulated_instruction() call.

Signed-off-by: Marian Rotariu <mrotariu@bitdefender.com>
[Hyper-V hypercalls also need kvm_skip_emulated_instruction() - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
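To illustrate the effect of the reordering (illustration only, not part of
the patch): after this change, any handler reached from
kvm_emulate_hypercall() runs while the guest RIP still points at the
VMCALL/VMMCALL, and the skip happens only on the way out. A minimal sketch
of such a hypothetical consumer follows; the handler name and debug message
are made up, only kvm_rip_read() and the caller behaviour described in the
comments come from KVM itself:

/* Illustration only -- not part of this patch.  The handler below is
 * hypothetical; kvm_rip_read() is an existing KVM helper. */
static unsigned long example_handle_hypercall(struct kvm_vcpu *vcpu,
                                              unsigned long a0)
{
        /* The skip now happens last, so RIP still points at the VMCALL. */
        unsigned long vmcall_ip = kvm_rip_read(vcpu);

        pr_debug("hypercall arg %lx issued at guest IP 0x%lx\n", a0, vmcall_ip);

        /* The returned value is what kvm_emulate_hypercall() writes to RAX
         * before it finally calls kvm_skip_emulated_instruction(). */
        return 0;
}

For the Hyper-V path the flow is slightly different: kvm_hv_hypercall()
returning 0 means the hypercall is completed in userspace, so the skip is
performed in kvm_hv_hypercall_complete_userspace() instead, which is what
the hyperv.c hunk below changes.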
Diffstat (limited to 'arch')
 arch/x86/kvm/hyperv.c |  2 +-
 arch/x86/kvm/x86.c    | 19 +++++++++++--------
 2 files changed, 12 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index dc97f2544b6f..5d13abecb384 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1223,7 +1223,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
 	struct kvm_run *run = vcpu->run;
 
 	kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2c7a391c897c..a43f9487f136 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6553,12 +6553,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
 	unsigned long nr, a0, a1, a2, a3, ret;
-	int op_64_bit, r;
+	int op_64_bit;
 
-	r = kvm_skip_emulated_instruction(vcpu);
-
-	if (kvm_hv_hypercall_enabled(vcpu->kvm))
-		return kvm_hv_hypercall(vcpu);
+	if (kvm_hv_hypercall_enabled(vcpu->kvm)) {
+		if (!kvm_hv_hypercall(vcpu))
+			return 0;
+		goto out;
+	}
 
 	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
@@ -6579,7 +6580,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 
 	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
 		ret = -KVM_EPERM;
-		goto out;
+		goto out_error;
 	}
 
 	switch (nr) {
@@ -6599,12 +6600,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		ret = -KVM_ENOSYS;
 		break;
 	}
-out:
+out_error:
 	if (!op_64_bit)
 		ret = (u32)ret;
 	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+
+out:
 	++vcpu->stat.hypercalls;
-	return r;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);