-rw-r--r--  Documentation/virtual/kvm/api.txt              45
-rw-r--r--  arch/powerpc/include/asm/asm-compat.h            4
-rw-r--r--  arch/powerpc/include/asm/hvcall.h                6
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h           10
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h        23
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h             14
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h               2
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h            5
-rw-r--r--  arch/powerpc/include/asm/reg.h                  16
-rw-r--r--  arch/powerpc/include/asm/time.h                  9
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c                1
-rw-r--r--  arch/powerpc/kvm/Kconfig                         1
-rw-r--r--  arch/powerpc/kvm/book3s.c                       17
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c            128
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c               22
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                   164
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c            13
-rw-r--r--  arch/powerpc/kvm/book3s_hv_ras.c                 6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c            146
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S         72
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S             4
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c                    37
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c               83
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S             6
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c                 12
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c                 4
-rw-r--r--  arch/powerpc/kvm/e500mc.c                        6
-rw-r--r--  arch/powerpc/kvm/powerpc.c                      47
-rw-r--r--  include/uapi/linux/kvm.h                         1
29 files changed, 685 insertions, 219 deletions
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 0fe36497642c..69553183ef0f 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2863,8 +2863,8 @@ The fields in each entry are defined as follows:
this function/index combination
-6. Capabilities that can be enabled
------------------------------------
+6. Capabilities that can be enabled on vCPUs
+--------------------------------------------
There are certain capabilities that change the behavior of the virtual CPU when
enabled. To enable them, please see section 4.37. Below you can find a list of
@@ -3002,3 +3002,44 @@ Parameters: args[0] is the XICS device fd
args[1] is the XICS CPU number (server ID) for this vcpu
This capability connects the vcpu to an in-kernel XICS device.
+
+
+7. Capabilities that can be enabled on VMs
+------------------------------------------
+
+There are certain capabilities that change the behavior of the virtual
+machine when enabled. To enable them, please see section 4.37. Below
+you can find a list of capabilities and what their effect on the VM
+is when enabling them.
+
+The following information is provided along with the description:
+
+ Architectures: which instruction set architectures provide this ioctl.
+ x86 includes both i386 and x86_64.
+
+ Parameters: what parameters are accepted by the capability.
+
+ Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
+ are not detailed, but errors with specific meanings are.
+
+
+7.1 KVM_CAP_PPC_ENABLE_HCALL
+
+Architectures: ppc
+Parameters: args[0] is the sPAPR hcall number
+ args[1] is 0 to disable, 1 to enable in-kernel handling
+
+This capability controls whether individual sPAPR hypercalls (hcalls)
+get handled by the kernel or not. Enabling or disabling in-kernel
+handling of an hcall is effective across the VM. On creation, an
+initial set of hcalls are enabled for in-kernel handling, which
+consists of those hcalls for which in-kernel handlers were implemented
+before this capability was implemented. If disabled, the kernel will
+not attempt to handle the hcall, but will always exit to userspace
+to handle it. Note that it may not make sense to enable some and
+disable others of a group of related hcalls, but KVM does not prevent
+userspace from doing that.
+
+If the hcall number specified is not one that has an in-kernel
+implementation, the KVM_ENABLE_CAP ioctl will fail with an EINVAL
+error.
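
For reference, a minimal userspace sketch of driving this capability
(illustrative only, not part of this patch; assumes a VM fd obtained from
KVM_CREATE_VM and an hcall number taken from the kernel's hcall defines):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Enable or disable in-kernel handling of one sPAPR hcall on a VM fd.
     * Returns 0 on success, -1 with errno set (EINVAL if the hcall has no
     * in-kernel implementation). */
    static int set_hcall_handling(int vm_fd, unsigned long hcall, int enable)
    {
            struct kvm_enable_cap cap = {
                    .cap  = KVM_CAP_PPC_ENABLE_HCALL,
                    .args = { hcall, enable ? 1 : 0 },
            };

            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }

Passing 0 in args[1] pushes that hcall out to userspace even if it is in
the default-enabled set.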
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 4b237aa35660..21be8ae8f809 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -34,10 +34,14 @@
#define PPC_MIN_STKFRM 112
#ifdef __BIG_ENDIAN__
+#define LWZX_BE stringify_in_c(lwzx)
#define LDX_BE stringify_in_c(ldx)
+#define STWX_BE stringify_in_c(stwx)
#define STDX_BE stringify_in_c(stdx)
#else
+#define LWZX_BE stringify_in_c(lwbrx)
#define LDX_BE stringify_in_c(ldbrx)
+#define STWX_BE stringify_in_c(stwbrx)
#define STDX_BE stringify_in_c(stdbrx)
#endif
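
A rough C analogue of what the new 32-bit accessor does (a sketch only; the
real macro expands to the lwzx instruction on big-endian builds and to the
byte-reversed lwbrx on little-endian builds):

    /* Load a 32-bit big-endian field, returning it in host byte order. */
    static inline unsigned int load_be32(const unsigned int *p)
    {
    #ifdef __BIG_ENDIAN__
            return *p;                      /* LWZX_BE -> lwzx: no swap needed */
    #else
            return __builtin_bswap32(*p);   /* LWZX_BE -> lwbrx: reversed load */
    #endif
    }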
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 5dbbb29f5c3e..85bc8c0d257b 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -279,6 +279,12 @@
#define H_GET_24X7_DATA 0xF07C
#define H_GET_PERF_COUNTER_INFO 0xF080
+/* Values for 2nd argument to H_SET_MODE */
+#define H_SET_MODE_RESOURCE_SET_CIABR 1
+#define H_SET_MODE_RESOURCE_SET_DAWR 2
+#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
+#define H_SET_MODE_RESOURCE_LE 4
+
#ifndef __ASSEMBLY__
/**
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f52f65694527..8ac5392dc477 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -83,8 +83,6 @@ struct kvmppc_vcpu_book3s {
u64 sdr1;
u64 hior;
u64 msr_mask;
- u64 purr_offset;
- u64 spurr_offset;
#ifdef CONFIG_PPC_BOOK3S_32
u32 vsid_pool[VSID_POOL_SIZE];
u32 vsid_next;
@@ -148,6 +146,7 @@ extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
+extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
@@ -163,9 +162,9 @@ extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
unsigned long *rmap, long pte_index, int realmode);
-extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index);
-void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
unsigned long *nb_ret);
@@ -189,6 +188,9 @@ extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
+extern int kvmppc_hcall_impl_pr(unsigned long cmd);
+extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index fddb72b48ce9..e504f8845c42 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -59,20 +59,29 @@ extern unsigned long kvm_rma_pages;
/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED HPTE_GR_MODIFIED
-static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
+static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
unsigned long tmp, old;
+ __be64 be_lockbit, be_bits;
+
+ /*
+ * We load/store in native endian, but the HTAB is in big endian. If
+ * we byte swap all data we apply on the PTE we're implicitly correct
+ * again.
+ */
+ be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
+ be_bits = cpu_to_be64(bits);
asm volatile(" ldarx %0,0,%2\n"
" and. %1,%0,%3\n"
" bne 2f\n"
- " ori %0,%0,%4\n"
+ " or %0,%0,%4\n"
" stdcx. %0,0,%2\n"
" beq+ 2f\n"
" mr %1,%3\n"
"2: isync"
: "=&r" (tmp), "=&r" (old)
- : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+ : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
: "cc", "memory");
return old == 0;
}
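
The same trick carries through the rest of the patch for non-atomic HPTE
updates: byte reversal commutes with AND/OR, so converting the constant once
is equivalent to converting the loaded doubleword. A sketch of the identity
being relied on (not new code in the patch, just the pattern it uses):

    /* Clear the lock bit on a big-endian HPTE word without converting
     * the loaded value. */
    static inline void unlock_hpte_sketch(__be64 *hptep)
    {
            hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
            /* behaves the same as the more expensive form:
             * hptep[0] = cpu_to_be64(be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK);
             */
    }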
@@ -110,16 +119,12 @@ static inline int __hpte_actual_psize(unsigned int lp, int psize)
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
unsigned long pte_index)
{
- int b_psize, a_psize;
+ int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
unsigned int penc;
unsigned long rb = 0, va_low, sllp;
unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
- if (!(v & HPTE_V_LARGE)) {
- /* both base and actual psize is 4k */
- b_psize = MMU_PAGE_4K;
- a_psize = MMU_PAGE_4K;
- } else {
+ if (v & HPTE_V_LARGE) {
for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
/* valid entries have a shift value */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bb66d8b8efdf..855ba4d9539d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,6 +34,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
+#include <asm/hvcall.h>
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
@@ -254,7 +255,6 @@ struct kvm_arch {
atomic_t hpte_mod_interest;
spinlock_t slot_phys_lock;
cpumask_t need_tlb_flush;
- struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
int hpt_cma_alloc;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -263,6 +263,7 @@ struct kvm_arch {
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
struct list_head rtas_tokens;
+ DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
#endif
#ifdef CONFIG_KVM_MPIC
struct openpic *mpic;
@@ -271,6 +272,10 @@ struct kvm_arch {
struct kvmppc_xics *xics;
#endif
struct kvmppc_ops *kvm_ops;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* This array can grow quite large, keep it at the end */
+ struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
+#endif
};
/*
@@ -503,8 +508,10 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_BOOKE
u32 decar;
#endif
- u32 tbl;
- u32 tbu;
+ /* Time base value when we entered the guest */
+ u64 entry_tb;
+ u64 entry_vtb;
+ u64 entry_ic;
u32 tcr;
ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
u32 ivor[64];
@@ -580,6 +587,7 @@ struct kvm_vcpu_arch {
u32 mmucfg;
u32 eptcfg;
u32 epr;
+ u32 pwrmgtcr0;
u32 crit_save;
/* guest debug registers*/
struct debug_reg dbg_reg;
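
On the sizing of the enabled_hcalls bitmap added above: sPAPR hcall numbers
are multiples of 4, so one bit per opcode value would waste three quarters of
the map; indexing by hcall/4 keeps it dense. A hedged sketch of the lookup
this enables (the real check is added to kvmppc_pseries_do_hcall() later in
this patch):

    /* Sketch only: true if in-kernel handling of 'hcall' is enabled. */
    static bool hcall_enabled(struct kvm *kvm, unsigned long hcall)
    {
            if (hcall > MAX_HCALL_OPCODE || (hcall & 3))
                    return false;
            return test_bit(hcall / 4, kvm->arch.enabled_hcalls);
    }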
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9c89cdd067a6..e2fd5a133b9c 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -228,7 +228,7 @@ struct kvmppc_ops {
void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
unsigned long arg);
-
+ int (*hcall_implemented)(unsigned long hcall);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index d0918e09557f..8d24f788fd09 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,7 +40,9 @@
/* MAS registers bit definitions */
-#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
+#define MAS0_TLBSEL_MASK 0x30000000
+#define MAS0_TLBSEL_SHIFT 28
+#define MAS0_TLBSEL(x) (((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK)
#define MAS0_ESEL_MASK 0x0FFF0000
#define MAS0_ESEL_SHIFT 16
#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
@@ -86,6 +88,7 @@
#define MAS3_SPSIZE 0x0000003e
#define MAS3_SPSIZE_SHIFT 1
+#define MAS4_TLBSEL_MASK MAS0_TLBSEL_MASK
#define MAS4_TLBSELD(x) MAS0_TLBSEL(x)
#define MAS4_INDD 0x00008000 /* Default IND */
#define MAS4_TSIZED(x) MAS1_TSIZE(x)
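
The new mask/shift pair is intended for decoding a MAS0 (or MAS4) value that
was not built by the macro, e.g. one written by the guest; a hypothetical use:

    /* Extract the TLB array selector from a guest-supplied MAS0 value. */
    static inline int mas0_tlbsel(u32 mas0)
    {
            return (mas0 & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    }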
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index bffd89d27301..1f34ef7ec4a8 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -25,6 +25,7 @@
#ifdef CONFIG_8xx
#include <asm/reg_8xx.h>
#endif /* CONFIG_8xx */
+#include <asm/bug.h>
#define MSR_SF_LG 63 /* Enable 64 bit mode */
#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
@@ -1203,6 +1204,21 @@
: "r" ((unsigned long)(v)) \
: "memory")
+static inline unsigned long mfvtb (void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ return mfspr(SPRN_VTB);
+#endif
+ /*
+	 * The above mfspr will be a no-op on anything before Power8,
+	 * which can result in random values being returned. We need to
+	 * catch that.
+ */
+ BUG();
+ return 0;
+}
+
#ifdef __powerpc64__
#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define mftb() ({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 1d428e6007ca..03cbada59d3a 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -102,6 +102,15 @@ static inline u64 get_rtc(void)
return (u64)hi * 1000000000 + lo;
}
+static inline u64 get_vtb(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ return mfvtb();
+#endif
+ return 0;
+}
+
#ifdef CONFIG_PPC64
static inline u64 get_tb(void)
{
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f5995a912213..17ffcb4f27f9 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -493,6 +493,7 @@ int main(void)
DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
+ DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index d6a53b95de94..8aeeda1ff42a 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -75,7 +75,6 @@ config KVM_BOOK3S_64
config KVM_BOOK3S_64_HV
tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
depends on KVM_BOOK3S_64
- depends on !CPU_LITTLE_ENDIAN
select KVM_BOOK3S_HV_POSSIBLE
select MMU_NOTIFIER
select CMA
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index c254c27f240e..bd75902b38ba 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -646,6 +646,12 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
case KVM_REG_PPC_BESCR:
val = get_reg_val(reg->id, vcpu->arch.bescr);
break;
+ case KVM_REG_PPC_VTB:
+ val = get_reg_val(reg->id, vcpu->arch.vtb);
+ break;
+ case KVM_REG_PPC_IC:
+ val = get_reg_val(reg->id, vcpu->arch.ic);
+ break;
default:
r = -EINVAL;
break;
@@ -750,6 +756,12 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
case KVM_REG_PPC_BESCR:
vcpu->arch.bescr = set_reg_val(reg->id, val);
break;
+ case KVM_REG_PPC_VTB:
+ vcpu->arch.vtb = set_reg_val(reg->id, val);
+ break;
+ case KVM_REG_PPC_IC:
+ vcpu->arch.ic = set_reg_val(reg->id, val);
+ break;
default:
r = -EINVAL;
break;
@@ -913,6 +925,11 @@ int kvmppc_core_check_processor_compat(void)
return 0;
}
+int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
+{
+ return kvm->arch.kvm_ops->hcall_implemented(hcall);
+}
+
static int kvmppc_book3s_init(void)
{
int r;
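
With VTB and IC now exposed through the generic one_reg path above, userspace
on a powerpc host can read them with KVM_GET_ONE_REG; a minimal sketch
(illustrative, not part of the patch):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read the guest's Virtual Time Base via the one_reg interface. */
    static int read_guest_vtb(int vcpu_fd, __u64 *vtb)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_PPC_VTB,
                    .addr = (__u64)(unsigned long)vtb,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }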
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80561074078d..2d154d9319b3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -450,7 +450,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned long slb_v;
unsigned long pp, key;
unsigned long v, gr;
- unsigned long *hptep;
+ __be64 *hptep;
int index;
int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
@@ -473,13 +473,13 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
preempt_enable();
return -ENOENT;
}
- hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
- v = hptep[0] & ~HPTE_V_HVLOCK;
+ hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+ v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
gr = kvm->arch.revmap[index].guest_rpte;
/* Unlock the HPTE */
asm volatile("lwsync" : : : "memory");
- hptep[0] = v;
+ hptep[0] = cpu_to_be64(v);
preempt_enable();
gpte->eaddr = eaddr;
@@ -583,7 +583,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long *hptep, hpte[3], r;
+ unsigned long hpte[3], r;
+ __be64 *hptep;
unsigned long mmu_seq, psize, pte_size;
unsigned long gpa_base, gfn_base;
unsigned long gpa, gfn, hva, pfn;
@@ -606,16 +607,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (ea != vcpu->arch.pgfault_addr)
return RESUME_GUEST;
index = vcpu->arch.pgfault_index;
- hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+ hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
rev = &kvm->arch.revmap[index];
preempt_disable();
while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
cpu_relax();
- hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
- hpte[1] = hptep[1];
+ hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+ hpte[1] = be64_to_cpu(hptep[1]);
hpte[2] = r = rev->guest_rpte;
asm volatile("lwsync" : : : "memory");
- hptep[0] = hpte[0];
+ hptep[0] = cpu_to_be64(hpte[0]);
preempt_enable();
if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
@@ -731,8 +732,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
preempt_disable();
while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
cpu_relax();
- if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
- rev->guest_rpte != hpte[2])
+ if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] ||
+ be64_to_cpu(hptep[1]) != hpte[1] ||
+ rev->guest_rpte != hpte[2])
/* HPTE has been changed under us; let the guest retry */
goto out_unlock;
hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
@@ -752,20 +754,20 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
- if (hptep[0] & HPTE_V_VALID) {
+ if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
/* HPTE was previously valid, so we need to invalidate it */
unlock_rmap(rmap);
- hptep[0] |= HPTE_V_ABSENT;
+ hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
kvmppc_invalidate_hpte(kvm, hptep, index);
/* don't lose previous R and C bits */
- r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
+ r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
} else {
kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
}
- hptep[1] = r;
+ hptep[1] = cpu_to_be64(r);
eieio();
- hptep[0] = hpte[0];
+ hptep[0] = cpu_to_be64(hpte[0]);
asm volatile("ptesync" : : : "memory");
preempt_enable();
if (page && hpte_is_writable(r))
@@ -784,7 +786,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
return ret;
out_unlock:
- hptep[0] &= ~HPTE_V_HVLOCK;
+ hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
preempt_enable();
goto out_put;
}
@@ -860,7 +862,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
{
struct revmap_entry *rev = kvm->arch.revmap;
unsigned long h, i, j;
- unsigned long *hptep;
+ __be64 *hptep;
unsigned long ptel, psize, rcbits;
for (;;) {
@@ -876,11 +878,11 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
* rmap chain lock.
*/
i = *rmapp & KVMPPC_RMAP_INDEX;
- hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+ hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
/* unlock rmap before spinning on the HPTE lock */
unlock_rmap(rmapp);
- while (hptep[0] & HPTE_V_HVLOCK)
+ while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
cpu_relax();
continue;
}
@@ -899,14 +901,14 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
/* Now check and modify the HPTE */
ptel = rev[i].guest_rpte;
- psize = hpte_page_size(hptep[0], ptel);
- if ((hptep[0] & HPTE_V_VALID) &&
+ psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
+ if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
hpte_rpn(ptel, psize) == gfn) {
if (kvm->arch.using_mmu_notifiers)
- hptep[0] |= HPTE_V_ABSENT;
+ hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
kvmppc_invalidate_hpte(kvm, hptep, i);
/* Harvest R and C */
- rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
+ rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
if (rcbits & ~rev[i].guest_rpte) {
rev[i].guest_rpte = ptel | rcbits;
@@ -914,7 +916,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
}
}
unlock_rmap(rmapp);
- hptep[0] &= ~HPTE_V_HVLOCK;
+ hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
}
return 0;
}
@@ -961,7 +963,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
{
struct revmap_entry *rev = kvm->arch.revmap;
unsigned long head, i, j;
- unsigned long *hptep;
+ __be64 *hptep;
int ret = 0;
retry:
@@ -977,23 +979,24 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
i = head = *rmapp & KVMPPC_RMAP_INDEX;
do {
- hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+ hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
j = rev[i].forw;
/* If this HPTE isn't referenced, ignore it */
- if (!(hptep[1] & HPTE_R_R))
+ if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
continue;
if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
/* unlock rmap before spinning on the HPTE lock */
unlock_rmap(rmapp);
- while (hptep[0] & HPTE_V_HVLOCK)
+ while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
cpu_relax();
goto retry;
}
/* Now check and modify the HPTE */
- if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
+ if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
+ (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
kvmppc_clear_ref_hpte(kvm, hptep, i);
if (!(rev[i].guest_rpte & HPTE_R_R)) {
rev[i].guest_rpte |= HPTE_R_R;
@@ -1001,7 +1004,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
}
ret = 1;
}
- hptep[0] &= ~HPTE_V_HVLOCK;
+ hptep[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
} while ((i = j) != head);
unlock_rmap(rmapp);
@@ -1035,7 +1038,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
do {
hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
j = rev[i].forw;
- if (hp[1] & HPTE_R_R)
+ if (be64_to_cpu(hp[1]) & HPTE_R_R)
goto out;
} while ((i = j) != head);
}
@@ -1075,7 +1078,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
unsigned long head, i, j;
unsigned long n;
unsigned long v, r;
- unsigned long *hptep;
+ __be64 *hptep;
int npages_dirty = 0;
retry:
@@ -1091,7 +1094,8 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
i = head = *rmapp & KVMPPC_RMAP_INDEX;
do {
- hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
+ unsigned long hptep1;
+ hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
j = rev[i].forw;
/*
@@ -1108,29 +1112,30 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
* Otherwise we need to do the tlbie even if C==0 in
* order to pick up any delayed writeback of C.
*/
- if (!(hptep[1] & HPTE_R_C) &&
- (!hpte_is_writable(hptep[1]) || vcpus_running(kvm)))
+ hptep1 = be64_to_cpu(hptep[1]);
+ if (!(hptep1 & HPTE_R_C) &&
+ (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
continue;
if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
/* unlock rmap before spinning on the HPTE lock */
unlock_rmap(rmapp);
- while (hptep[0] & HPTE_V_HVLOCK)
+ while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
cpu_relax();
goto retry;
}
/* Now check and modify the HPTE */
- if (!(hptep[0] & HPTE_V_VALID))
+ if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID)))
continue;
/* need to make it temporarily absent so C is stable */
- hptep[0] |= HPTE_V_ABSENT;
+ hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
kvmppc_invalidate_hpte(kvm, hptep, i);
- v = hptep[0];
- r = hptep[1];
+ v = be64_to_cpu(hptep[0]);
+ r = be64_to_cpu(hptep[1]);
if (r & HPTE_R_C) {
- hptep[1] = r & ~HPTE_R_C;
+ hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
if (!(rev[i].guest_rpte & HPTE_R_C)) {
rev[i].guest_rpte |= HPTE_R_C;
note_hpte_modification(kvm, &rev[i]);
@@ -1143,7 +1148,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
}
v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
v |= HPTE_V_VALID;
- hptep[0] = v;
+ hptep[0] = cpu_to_be64(v);
} while ((i = j) != head);
unlock_rmap(rmapp);
@@ -1307,7 +1312,7 @@ struct kvm_htab_ctx {
* Returns 1 if this HPT entry has been modified or has pending
* R/C bit changes.
*/
-static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
+static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
{
unsigned long rcbits_unset;
@@ -1316,13 +1321,14 @@ static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
/* Also need to consider changes in reference and changed bits */
rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
- if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset))
+ if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
+ (be64_to_cpu(hptp[1]) & rcbits_unset))
return 1;
return 0;
}
-static long record_hpte(unsigned long flags, unsigned long *hptp,
+static long record_hpte(unsigned long flags, __be64 *hptp,
unsigned long *hpte, struct revmap_entry *revp,
int want_valid, int first_pass)
{
@@ -1337,10 +1343,10 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
return 0;
valid = 0;
- if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) {
+ if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
valid = 1;
if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
- !(hptp[0] & HPTE_V_BOLTED))
+ !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
valid = 0;
}
if (valid != want_valid)
@@ -1352,7 +1358,7 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
preempt_disable();
while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
cpu_relax();
- v = hptp[0];
+ v = be64_to_cpu(hptp[0]);
/* re-evaluate valid and dirty from synchronized HPTE value */
valid = !!(v & HPTE_V_VALID);
@@ -1360,9 +1366,9 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
/* Harvest R and C into guest view if necessary */
rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
- if (valid && (rcbits_unset & hptp[1])) {
- revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) |
- HPTE_GR_MODIFIED;
+ if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) {
+ revp->guest_rpte |= (be64_to_cpu(hptp[1]) &
+ (HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
dirty = 1;
}
@@ -1381,13 +1387,13 @@ static long record_hpte(unsigned long flags, unsigned long *hptp,
revp->guest_rpte = r;
}
asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
- hptp[0] &= ~HPTE_V_HVLOCK;
+ hptp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
preempt_enable();
if (!(valid == want_valid && (first_pass || dirty)))
ok = 0;
}
- hpte[0] = v;
- hpte[1] = r;
+ hpte[0] = cpu_to_be64(v);
+ hpte[1] = cpu_to_be64(r);
return ok;
}
@@ -1397,7 +1403,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
struct kvm_htab_ctx *ctx = file->private_data;
struct kvm *kvm = ctx->kvm;
struct kvm_get_htab_header hdr;
- unsigned long *hptp;
+ __be64 *hptp;
struct revmap_entry *revp;
unsigned long i, nb, nw;
unsigned long __user *lbuf;
@@ -1413,7 +1419,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
flags = ctx->flags;
i = ctx->index;
- hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+ hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
revp = kvm->arch.revmap + i;
lbuf = (unsigned long __user *)buf;
@@ -1497,7 +1503,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
unsigned long i, j;
unsigned long v, r;
unsigned long __user *lbuf;
- unsigned long *hptp;
+ __be64 *hptp;
unsigned long tmp[2];
ssize_t nb;
long int err, ret;
@@ -1539,7 +1545,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
break;
- hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+ hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
lbuf = (unsigned long __user *)buf;
for (j = 0; j < hdr.n_valid; ++j) {
err = -EFAULT;
@@ -1551,7 +1557,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
lbuf += 2;
nb += HPTE_SIZE;
- if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
+ if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
err = -EIO;
ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
@@ -1577,7 +1583,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
}
for (j = 0; j < hdr.n_invalid; ++j) {
- if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
+ if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
++i;
hptp += 2;
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 3f295269af37..84fddcd6c1f8 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -439,12 +439,6 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
(mfmsr() & MSR_HV))
vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
break;
- case SPRN_PURR:
- to_book3s(vcpu)->purr_offset = spr_val - get_tb();
- break;
- case SPRN_SPURR:
- to_book3s(vcpu)->spurr_offset = spr_val - get_tb();
- break;
case SPRN_GQR0:
case SPRN_GQR1:
case SPRN_GQR2:
@@ -572,10 +566,22 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
*spr_val = 0;
break;
case SPRN_PURR:
- *spr_val = get_tb() + to_book3s(vcpu)->purr_offset;
+ /*
+ * On exit we would have updated purr
+ */
+ *spr_val = vcpu->arch.purr;
break;
case SPRN_SPURR:
- *spr_val = get_tb() + to_book3s(vcpu)->purr_offset;
+ /*
+ * On exit we would have updated spurr
+ */
+ *spr_val = vcpu->arch.spurr;
+ break;
+ case SPRN_VTB:
+ *spr_val = vcpu->arch.vtb;
+ break;
+ case SPRN_IC:
+ *spr_val = vcpu->arch.ic;
break;
case SPRN_GQR0:
case SPRN_GQR1:
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7a12edbb61e7..f1281c4c381c 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -67,6 +67,8 @@
/* Used as a "null" value for timebase values */
#define TB_NIL (~(u64)0)
+static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
+
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
@@ -270,7 +272,7 @@ struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
- vpa->yield_count = 1;
+ vpa->yield_count = cpu_to_be32(1);
}
static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
@@ -293,8 +295,8 @@ static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
struct reg_vpa {
u32 dummy;
union {
- u16 hword;
- u32 word;
+ __be16 hword;
+ __be32 word;
} length;
};
@@ -333,9 +335,9 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
if (va == NULL)
return H_PARAMETER;
if (subfunc == H_VPA_REG_VPA)
- len = ((struct reg_vpa *)va)->length.hword;
+ len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
else
- len = ((struct reg_vpa *)va)->length.word;
+ len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
kvmppc_unpin_guest_page(kvm, va, vpa, false);
/* Check length */
@@ -540,21 +542,63 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
return;
memset(dt, 0, sizeof(struct dtl_entry));
dt->dispatch_reason = 7;
- dt->processor_id = vc->pcpu + vcpu->arch.ptid;
- dt->timebase = now + vc->tb_offset;
- dt->enqueue_to_dispatch_time = stolen;
- dt->srr0 = kvmppc_get_pc(vcpu);
- dt->srr1 = vcpu->arch.shregs.msr;
+ dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
+ dt->timebase = cpu_to_be64(now + vc->tb_offset);
+ dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
+ dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
+ dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
++dt;
if (dt == vcpu->arch.dtl.pinned_end)
dt = vcpu->arch.dtl.pinned_addr;
vcpu->arch.dtl_ptr = dt;
/* order writing *dt vs. writing vpa->dtl_idx */
smp_wmb();
- vpa->dtl_idx = ++vcpu->arch.dtl_index;
+ vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
vcpu->arch.dtl.dirty = true;
}
+static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
+ return true;
+ if ((!vcpu->arch.vcore->arch_compat) &&
+ cpu_has_feature(CPU_FTR_ARCH_207S))
+ return true;
+ return false;
+}
+
+static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
+ unsigned long resource, unsigned long value1,
+ unsigned long value2)
+{
+ switch (resource) {
+ case H_SET_MODE_RESOURCE_SET_CIABR:
+ if (!kvmppc_power8_compatible(vcpu))
+ return H_P2;
+ if (value2)
+ return H_P4;
+ if (mflags)
+ return H_UNSUPPORTED_FLAG_START;
+ /* Guests can't breakpoint the hypervisor */
+ if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
+ return H_P3;
+ vcpu->arch.ciabr = value1;
+ return H_SUCCESS;
+ case H_SET_MODE_RESOURCE_SET_DAWR:
+ if (!kvmppc_power8_compatible(vcpu))
+ return H_P2;
+ if (mflags)
+ return H_UNSUPPORTED_FLAG_START;
+ if (value2 & DABRX_HYP)
+ return H_P4;
+ vcpu->arch.dawr = value1;
+ vcpu->arch.dawrx = value2;
+ return H_SUCCESS;
+ default:
+ return H_TOO_HARD;
+ }
+}
+
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -562,6 +606,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
struct kvm_vcpu *tvcpu;
int idx, rc;
+ if (req <= MAX_HCALL_OPCODE &&
+ !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
+ return RESUME_HOST;
+
switch (req) {
case H_ENTER:
idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -620,7 +668,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
/* Send the error out to userspace via KVM_RUN */
return rc;
-
+ case H_SET_MODE:
+ ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
case H_XIRR:
case H_CPPR:
case H_EOI:
@@ -639,6 +694,29 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
+static int kvmppc_hcall_impl_hv(unsigned long cmd)
+{
+ switch (cmd) {
+ case H_CEDE:
+ case H_PROD:
+ case H_CONFER:
+ case H_REGISTER_VPA:
+ case H_SET_MODE:
+#ifdef CONFIG_KVM_XICS
+ case H_XIRR:
+ case H_CPPR:
+ case H_EOI:
+ case H_IPI:
+ case H_IPOLL:
+ case H_XIRR_X:
+#endif
+ return 1;
+ }
+
+ /* See if it's in the real-mode table */
+ return kvmppc_hcall_impl_hv_realmode(cmd);
+}
+
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
@@ -894,12 +972,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_CIABR:
*val = get_reg_val(id, vcpu->arch.ciabr);
break;
- case KVM_REG_PPC_IC:
- *val = get_reg_val(id, vcpu->arch.ic);
- break;
- case KVM_REG_PPC_VTB:
- *val = get_reg_val(id, vcpu->arch.vtb);
- break;
case KVM_REG_PPC_CSIGR:
*val = get_reg_val(id, vcpu->arch.csigr);
break;
@@ -1094,12 +1166,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
break;
- case KVM_REG_PPC_IC:
- vcpu->arch.ic = set_reg_val(id, *val);
- break;
- case KVM_REG_PPC_VTB:
- vcpu->arch.vtb = set_reg_val(id, *val);
- break;
case KVM_REG_PPC_CSIGR:
vcpu->arch.csigr = set_reg_val(id, *val);
break;
@@ -2281,6 +2347,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
*/
cpumask_setall(&kvm->arch.need_tlb_flush);
+ /* Start out with the default set of hcalls enabled */
+ memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
+ sizeof(kvm->arch.enabled_hcalls));
+
kvm->arch.rma = NULL;
kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -2419,6 +2489,49 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
return r;
}
+/*
+ * List of hcall numbers to enable by default.
+ * For compatibility with old userspace, we enable by default
+ * all hcalls that were implemented before the hcall-enabling
+ * facility was added. Note this list should not include H_RTAS.
+ */
+static unsigned int default_hcall_list[] = {
+ H_REMOVE,
+ H_ENTER,
+ H_READ,
+ H_PROTECT,
+ H_BULK_REMOVE,
+ H_GET_TCE,
+ H_PUT_TCE,
+ H_SET_DABR,
+ H_SET_XDABR,
+ H_CEDE,
+ H_PROD,
+ H_CONFER,
+ H_REGISTER_VPA,
+#ifdef CONFIG_KVM_XICS
+ H_EOI,
+ H_CPPR,
+ H_IPI,
+ H_IPOLL,
+ H_XIRR,
+ H_XIRR_X,
+#endif
+ 0
+};
+
+static void init_default_hcalls(void)
+{
+ int i;
+ unsigned int hcall;
+
+ for (i = 0; default_hcall_list[i]; ++i) {
+ hcall = default_hcall_list[i];
+ WARN_ON(!kvmppc_hcall_impl_hv(hcall));
+ __set_bit(hcall / 4, default_enabled_hcalls);
+ }
+}
+
static struct kvmppc_ops kvm_ops_hv = {
.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
@@ -2451,6 +2564,7 @@ static struct kvmppc_ops kvm_ops_hv = {
.emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
.arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
+ .hcall_implemented = kvmppc_hcall_impl_hv,
};
static int kvmppc_book3s_init_hv(void)
@@ -2466,6 +2580,8 @@ static int kvmppc_book3s_init_hv(void)
kvm_ops_hv.owner = THIS_MODULE;
kvmppc_hv_ops = &kvm_ops_hv;
+ init_default_hcalls();
+
r = kvmppc_mmu_hv_init();
return r;
}
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 7cde8a665205..3b41447482e5 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -212,3 +212,16 @@ bool kvm_hv_mode_active(void)
{
return atomic_read(&hv_vm_count) != 0;
}
+
+extern int hcall_real_table[], hcall_real_table_end[];
+
+int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
+{
+ cmd /= 4;
+ if (cmd < hcall_real_table_end - hcall_real_table &&
+ hcall_real_table[cmd])
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 3a5c568b1e89..d562c8e2bc30 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -45,14 +45,14 @@ static void reload_slb(struct kvm_vcpu *vcpu)
return;
/* Sanity check */
- n = min_t(u32, slb->persistent, SLB_MIN_SIZE);
+ n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
return;
/* Load up the SLB from that */
for (i = 0; i < n; ++i) {
- unsigned long rb = slb->save_area[i].esid;
- unsigned long rs = slb->save_area[i].vsid;
+ unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
+ unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);
rb = (rb & ~0xFFFul) | i; /* insert entry number */
asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6e6224318c36..e5c6063c83f2 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -154,10 +154,10 @@ static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
}
-static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
+static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
- hpte[0] = hpte_v;
+ hpte[0] = cpu_to_be64(hpte_v);
}
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
@@ -166,7 +166,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
{
unsigned long i, pa, gpa, gfn, psize;
unsigned long slot_fn, hva;
- unsigned long *hpte;
+ __be64 *hpte;
struct revmap_entry *rev;
unsigned long g_ptel;
struct kvm_memory_slot *memslot;
@@ -275,9 +275,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
return H_PARAMETER;
if (likely((flags & H_EXACT) == 0)) {
pte_index &= ~7UL;
- hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
for (i = 0; i < 8; ++i) {
- if ((*hpte & HPTE_V_VALID) == 0 &&
+ if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
HPTE_V_ABSENT))
break;
@@ -292,11 +292,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
*/
hpte -= 16;
for (i = 0; i < 8; ++i) {
+ u64 pte;
while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
+ pte = be64_to_cpu(*hpte);
+ if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
break;
- *hpte &= ~HPTE_V_HVLOCK;
+ *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
hpte += 2;
}
if (i == 8)
@@ -304,14 +306,17 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
}
pte_index += i;
} else {
- hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
HPTE_V_ABSENT)) {
/* Lock the slot and check again */
+ u64 pte;
+
while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
- *hpte &= ~HPTE_V_HVLOCK;
+ pte = be64_to_cpu(*hpte);
+ if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
+ *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
return H_PTEG_FULL;
}
}
@@ -347,11 +352,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
}
}
- hpte[1] = ptel;
+ hpte[1] = cpu_to_be64(ptel);
/* Write the first HPTE dword, unlocking the HPTE and making it valid */
eieio();
- hpte[0] = pteh;
+ hpte[0] = cpu_to_be64(pteh);
asm volatile("ptesync" : : : "memory");
*pte_idx_ret = pte_index;
@@ -468,30 +473,35 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
unsigned long pte_index, unsigned long avpn,
unsigned long *hpret)
{
- unsigned long *hpte;
+ __be64 *hpte;
unsigned long v, r, rb;
struct revmap_entry *rev;
+ u64 pte;
if (pte_index >= kvm->arch.hpt_npte)
return H_PARAMETER;
- hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
- ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
- ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
- hpte[0] &= ~HPTE_V_HVLOCK;
+ pte = be64_to_cpu(hpte[0]);
+ if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
+ ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
+ ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
+ hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
return H_NOT_FOUND;
}
rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
- v = hpte[0] & ~HPTE_V_HVLOCK;
+ v = pte & ~HPTE_V_HVLOCK;
if (v & HPTE_V_VALID) {
- hpte[0] &= ~HPTE_V_VALID;
- rb = compute_tlbie_rb(v, hpte[1], pte_index);
+ u64 pte1;
+
+ pte1 = be64_to_cpu(hpte[1]);
+ hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
+ rb = compute_tlbie_rb(v, pte1, pte_index);
do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
/* Read PTE low word after tlbie to get final R/C values */
- remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
+ remove_revmap_chain(kvm, pte_index, rev, v, pte1);
}
r = rev->guest_rpte & ~HPTE_GR_RESERVED;
note_hpte_modification(kvm, rev);
@@ -514,12 +524,14 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
unsigned long *args = &vcpu->arch.gpr[4];
- unsigned long *hp, *hptes[4], tlbrb[4];
+ __be64 *hp, *hptes[4];
+ unsigned long tlbrb[4];
long int i, j, k, n, found, indexes[4];
unsigned long flags, req, pte_index, rcbits;
int global;
long int ret = H_SUCCESS;
struct revmap_entry *rev, *revs[4];
+ u64 hp0;
global = global_invalidates(kvm, 0);
for (i = 0; i < 4 && ret == H_SUCCESS; ) {
@@ -542,8 +554,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
ret = H_PARAMETER;
break;
}
- hp = (unsigned long *)
- (kvm->arch.hpt_virt + (pte_index << 4));
+ hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
/* to avoid deadlock, don't spin except for first */
if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
if (n)
@@ -552,23 +563,24 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
cpu_relax();
}
found = 0;
- if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
+ hp0 = be64_to_cpu(hp[0]);
+ if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
switch (flags & 3) {
case 0: /* absolute */
found = 1;
break;
case 1: /* andcond */
- if (!(hp[0] & args[j + 1]))
+ if (!(hp0 & args[j + 1]))
found = 1;
break;
case 2: /* AVPN */
- if ((hp[0] & ~0x7fUL) == args[j + 1])
+ if ((hp0 & ~0x7fUL) == args[j + 1])
found = 1;
break;
}
}
if (!found) {
- hp[0] &= ~HPTE_V_HVLOCK;
+ hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
args[j] = ((0x90 | flags) << 56) + pte_index;
continue;
}
@@ -577,7 +589,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
note_hpte_modification(kvm, rev);
- if (!(hp[0] & HPTE_V_VALID)) {
+ if (!(hp0 & HPTE_V_VALID)) {
/* insert R and C bits from PTE */
rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
args[j] |= rcbits << (56 - 5);
@@ -585,8 +597,10 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
continue;
}
- hp[0] &= ~HPTE_V_VALID; /* leave it locked */
- tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
+ /* leave it locked */
+ hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
+ tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
+ be64_to_cpu(hp[1]), pte_index);
indexes[n] = j;
hptes[n] = hp;
revs[n] = rev;
@@ -605,7 +619,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
pte_index = args[j] & ((1ul << 56) - 1);
hp = hptes[k];
rev = revs[k];
- remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
+ remove_revmap_chain(kvm, pte_index, rev,
+ be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
args[j] |= rcbits << (56 - 5);
hp[0] = 0;
@@ -620,23 +635,25 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long va)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long *hpte;
+ __be64 *hpte;
struct revmap_entry *rev;
unsigned long v, r, rb, mask, bits;
+ u64 pte;
if (pte_index >= kvm->arch.hpt_npte)
return H_PARAMETER;
- hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+ hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
cpu_relax();
- if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
- ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
- hpte[0] &= ~HPTE_V_HVLOCK;
+ pte = be64_to_cpu(hpte[0]);
+ if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
+ ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
+ hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
return H_NOT_FOUND;
}
- v = hpte[0];
+ v = pte;
bits = (flags << 55) & HPTE_R_PP0;
bits |= (flags << 48) & HPTE_R_KEY_HI;
bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
@@ -650,12 +667,12 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
rev->guest_rpte = r;
note_hpte_modification(kvm, rev);
}
- r = (hpte[1] & ~mask) | bits;
+ r = (be64_to_cpu(hpte[1]) & ~mask) | bits;
/* Update HPTE */
if (v & HPTE_V_VALID) {
rb = compute_tlbie_rb(v, r, pte_index);
- hpte[0] = v & ~HPTE_V_VALID;
+ hpte[0] = cpu_to_be64(v & ~HPTE_V_VALID);
do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
/*
* If the host has this page as readonly but the guest
@@ -681,9 +698,9 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
}
}
}
- hpte[1] = r;
+ hpte[1] = cpu_to_be64(r);
eieio();
- hpte[0] = v & ~HPTE_V_HVLOCK;
+ hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);
asm volatile("ptesync" : : : "memory");
return H_SUCCESS;
}
@@ -692,7 +709,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long *hpte, v, r;
+ __be64 *hpte;
+ unsigned long v, r;
int i, n = 1;
struct revmap_entry *rev = NULL;
@@ -704,9 +722,9 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
}
rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
for (i = 0; i < n; ++i, ++pte_index) {
- hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
- v = hpte[0] & ~HPTE_V_HVLOCK;
- r = hpte[1];
+ hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+ v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
+ r = be64_to_cpu(hpte[1]);
if (v & HPTE_V_ABSENT) {
v &= ~HPTE_V_ABSENT;
v |= HPTE_V_VALID;
@@ -721,25 +739,27 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
return H_SUCCESS;
}
-void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index)
{
unsigned long rb;
- hptep[0] &= ~HPTE_V_VALID;
- rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
+ hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
+ rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
+ pte_index);
do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
-void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index)
{
unsigned long rb;
unsigned char rbyte;
- rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
- rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
+ rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
+ pte_index);
+ rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
/* modify only the second-last byte, which contains the ref bit */
*((char *)hptep + 14) = rbyte;
do_tlbies(kvm, &rb, 1, 1, false);
@@ -765,7 +785,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
unsigned long somask;
unsigned long vsid, hash;
unsigned long avpn;
- unsigned long *hpte;
+ __be64 *hpte;
unsigned long mask, val;
unsigned long v, r;
@@ -797,11 +817,11 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
val |= avpn;
for (;;) {
- hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));
+ hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));
for (i = 0; i < 16; i += 2) {
/* Read the PTE racily */
- v = hpte[i] & ~HPTE_V_HVLOCK;
+ v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
/* Check valid/absent, hash, segment size and AVPN */
if (!(v & valid) || (v & mask) != val)
@@ -810,8 +830,8 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
/* Lock the PTE and read it under the lock */
while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
cpu_relax();
- v = hpte[i] & ~HPTE_V_HVLOCK;
- r = hpte[i+1];
+ v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
+ r = be64_to_cpu(hpte[i+1]);
/*
* Check the HPTE again, including large page size
@@ -825,7 +845,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
return (hash << 3) + (i >> 1);
/* Unlock and move on */
- hpte[i] = v;
+ hpte[i] = cpu_to_be64(v);
}
if (val & HPTE_V_SECONDARY)
@@ -854,7 +874,7 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
struct kvm *kvm = vcpu->kvm;
long int index;
unsigned long v, r, gr;
- unsigned long *hpte;
+ __be64 *hpte;
unsigned long valid;
struct revmap_entry *rev;
unsigned long pp, key;
@@ -870,9 +890,9 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
return status; /* there really was no HPTE */
return 0; /* for prot fault, HPTE disappeared */
}
- hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
- v = hpte[0] & ~HPTE_V_HVLOCK;
- r = hpte[1];
+ hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+ v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
+ r = be64_to_cpu(hpte[1]);
rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
gr = rev->guest_rpte;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 868347ef09fd..855521ef04e8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,10 +32,6 @@
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
-#ifdef __LITTLE_ENDIAN__
-#error Need to fix lppaca and SLB shadow accesses in little endian mode
-#endif
-
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE 1
#define NAPPING_NOVCPU 2
@@ -48,7 +44,7 @@
*
* LR = return address to continue at after eventually re-enabling MMU
*/
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
mflr r0
std r0, PPC_LR_STKOFF(r1)
stdu r1, -112(r1)
@@ -595,9 +591,10 @@ kvmppc_got_guest:
ld r3, VCPU_VPA(r4)
cmpdi r3, 0
beq 25f
- lwz r5, LPPACA_YIELDCOUNT(r3)
+ li r6, LPPACA_YIELDCOUNT
+ LWZX_BE r5, r3, r6
addi r5, r5, 1
- stw r5, LPPACA_YIELDCOUNT(r3)
+ STWX_BE r5, r3, r6
li r6, 1
stb r6, VCPU_VPA_DIRTY(r4)
25:
@@ -671,9 +668,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
mr r31, r4
addi r3, r31, VCPU_FPRS_TM
- bl .load_fp_state
+ bl load_fp_state
addi r3, r31, VCPU_VRS_TM
- bl .load_vr_state
+ bl load_vr_state
mr r4, r31
lwz r7, VCPU_VRSAVE_TM(r4)
mtspr SPRN_VRSAVE, r7
@@ -1417,9 +1414,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/* Save FP/VSX. */
addi r3, r9, VCPU_FPRS_TM
- bl .store_fp_state
+ bl store_fp_state
addi r3, r9, VCPU_VRS_TM
- bl .store_vr_state
+ bl store_vr_state
mfspr r6, SPRN_VRSAVE
stw r6, VCPU_VRSAVE_TM(r9)
1:
@@ -1442,9 +1439,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
ld r8, VCPU_VPA(r9) /* do they have a VPA? */
cmpdi r8, 0
beq 25f
- lwz r3, LPPACA_YIELDCOUNT(r8)
+ li r4, LPPACA_YIELDCOUNT
+ LWZX_BE r3, r8, r4
addi r3, r3, 1
- stw r3, LPPACA_YIELDCOUNT(r8)
+ STWX_BE r3, r8, r4
li r3, 1
stb r3, VCPU_VPA_DIRTY(r9)
25:
@@ -1757,8 +1755,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
33: ld r8,PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED
- ld r5,SLBSHADOW_SAVEAREA(r8)
- ld r6,SLBSHADOW_SAVEAREA+8(r8)
+ li r3, SLBSHADOW_SAVEAREA
+ LDX_BE r5, r8, r3
+ addi r3, r3, 8
+ LDX_BE r6, r8, r3
andis. r7,r5,SLB_ESID_V@h
beq 1f
slbmte r6,r5
@@ -1909,12 +1909,23 @@ hcall_try_real_mode:
clrrdi r3,r3,2
cmpldi r3,hcall_real_table_end - hcall_real_table
bge guest_exit_cont
+ /* See if this hcall is enabled for in-kernel handling */
+ ld r4, VCPU_KVM(r9)
+ srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
+ sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
+ add r4, r4, r0
+ ld r0, KVM_ENABLED_HCALLS(r4)
+ rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
+ srd r0, r0, r4
+ andi. r0, r0, 1
+ beq guest_exit_cont
+ /* Get pointer to handler, if any, and call it */
LOAD_REG_ADDR(r4, hcall_real_table)
lwax r3,r3,r4
cmpwi r3,0
beq guest_exit_cont
- add r3,r3,r4
- mtctr r3
+ add r12,r3,r4
+ mtctr r12
mr r3,r9 /* get vcpu pointer */
ld r4,VCPU_GPR(R4)(r9)
bctrl
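
The bit test above is the real-mode, hand-coded equivalent of the C-side
check added to kvmppc_pseries_do_hcall(); in C it would read roughly as
follows (sketch only; req is the hcall number held in r3, and one 64-bit
word of enabled_hcalls[] covers 64 opcodes):

    static bool hcall_enabled_rm(struct kvm *kvm, unsigned long req)
    {
            unsigned long word = kvm->arch.enabled_hcalls[(req / 4) >> 6];

            return (word >> ((req / 4) & 0x3f)) & 1;
    }

If the bit is clear, the assembly branches to guest_exit_cont so the hcall is
delivered to the host/userspace instead of the real-mode handler.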
@@ -2031,6 +2042,7 @@ hcall_real_table:
.long 0 /* 0x12c */
.long 0 /* 0x130 */
.long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
+ .globl hcall_real_table_end
hcall_real_table_end:
ignore_hdec:
@@ -2338,7 +2350,18 @@ kvmppc_read_intr:
cmpdi r6, 0
beq- 1f
lwzcix r0, r6, r7
- rlwinm. r3, r0, 0, 0xffffff
+ /*
+	 * Save XIRR for later. Since we get it in reverse endian on LE
+ * systems, save it byte reversed and fetch it back in host endian.
+ */
+ li r3, HSTATE_SAVED_XIRR
+ STWX_BE r0, r3, r13
+#ifdef __LITTLE_ENDIAN__
+ lwz r3, HSTATE_SAVED_XIRR(r13)
+#else
+ mr r3, r0
+#endif
+ rlwinm. r3, r3, 0, 0xffffff
sync
beq 1f /* if nothing pending in the ICP */
@@ -2370,10 +2393,9 @@ kvmppc_read_intr:
li r3, -1
1: blr
-42: /* It's not an IPI and it's for the host, stash it in the PACA
- * before exit, it will be picked up by the host ICP driver
+42: /* It's not an IPI and it's for the host. We saved a copy of XIRR in
+ * the PACA earlier, it will be picked up by the host ICP driver
*/
- stw r0, HSTATE_SAVED_XIRR(r13)
li r3, 1
b 1b
@@ -2408,11 +2430,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
mtmsrd r8
isync
addi r3,r3,VCPU_FPRS
- bl .store_fp_state
+ bl store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
addi r3,r31,VCPU_VRS
- bl .store_vr_state
+ bl store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
mfspr r6,SPRN_VRSAVE
@@ -2444,11 +2466,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
mtmsrd r8
isync
addi r3,r4,VCPU_FPRS
- bl .load_fp_state
+ bl load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
addi r3,r31,VCPU_VRS
- bl .load_vr_state
+ bl load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
lwz r7,VCPU_VRSAVE(r31)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index e2c29e381dc7..d044b8b7c69d 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,7 +25,11 @@
#include <asm/exception-64s.h>
#if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name) name
+#else
#define FUNC(name) GLUE(.,name)
+#endif
#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
#elif defined(CONFIG_PPC_BOOK3S_32)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 8eef1e519077..15fd6c25179c 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -71,6 +71,12 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
svcpu->in_use = 0;
svcpu_put(svcpu);
#endif
+
+ /* Disable AIL if supported */
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_ARCH_207S))
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);
+
vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
@@ -91,6 +97,12 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
+
+ /* Enable AIL if supported */
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_ARCH_207S))
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);
+
vcpu->cpu = -1;
}
@@ -120,6 +132,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
#ifdef CONFIG_PPC_BOOK3S_64
svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
+ /*
+ * Now also save the current time base value. We use this
+ * to find the guest purr and spurr value.
+ */
+ vcpu->arch.entry_tb = get_tb();
+ vcpu->arch.entry_vtb = get_vtb();
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ vcpu->arch.entry_ic = mfspr(SPRN_IC);
svcpu->in_use = true;
}
@@ -166,6 +186,14 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
#ifdef CONFIG_PPC_BOOK3S_64
vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
+ /*
+ * Update purr and spurr using time base on exit.
+ */
+ vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
+ vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
+ vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb;
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
svcpu->in_use = false;
out:
@@ -960,6 +988,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_DECREMENTER:
case BOOK3S_INTERRUPT_HV_DECREMENTER:
case BOOK3S_INTERRUPT_DOORBELL:
+ case BOOK3S_INTERRUPT_H_DOORBELL:
vcpu->stat.dec_exits++;
r = RESUME_GUEST;
break;
@@ -1568,6 +1597,11 @@ static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
mutex_init(&kvm->arch.hpt_mutex);
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* Start out with the default set of hcalls enabled */
+ kvmppc_pr_init_default_hcalls(kvm);
+#endif
+
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
spin_lock(&kvm_global_user_count_lock);
if (++kvm_global_user_count == 1)
@@ -1636,6 +1670,9 @@ static struct kvmppc_ops kvm_ops_pr = {
.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
.fast_vcpu_kick = kvm_vcpu_kick,
.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
+#ifdef CONFIG_PPC_BOOK3S_64
+ .hcall_implemented = kvmppc_hcall_impl_pr,
+#endif
};
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 52a63bfe3f07..6d0143fbeb63 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -40,8 +40,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
long flags = kvmppc_get_gpr(vcpu, 4);
long pte_index = kvmppc_get_gpr(vcpu, 5);
- unsigned long pteg[2 * 8];
- unsigned long pteg_addr, i, *hpte;
+ __be64 pteg[2 * 8];
+ __be64 *hpte;
+ unsigned long pteg_addr, i;
long int ret;
i = pte_index & 7;
@@ -93,8 +94,8 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
- pte[0] = be64_to_cpu(pte[0]);
- pte[1] = be64_to_cpu(pte[1]);
+ pte[0] = be64_to_cpu((__force __be64)pte[0]);
+ pte[1] = be64_to_cpu((__force __be64)pte[1]);
ret = H_NOT_FOUND;
if ((pte[0] & HPTE_V_VALID) == 0 ||
@@ -171,8 +172,8 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
- pte[0] = be64_to_cpu(pte[0]);
- pte[1] = be64_to_cpu(pte[1]);
+ pte[0] = be64_to_cpu((__force __be64)pte[0]);
+ pte[1] = be64_to_cpu((__force __be64)pte[1]);
/* tsl = AVPN */
flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;
@@ -211,8 +212,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
- pte[0] = be64_to_cpu(pte[0]);
- pte[1] = be64_to_cpu(pte[1]);
+ pte[0] = be64_to_cpu((__force __be64)pte[0]);
+ pte[1] = be64_to_cpu((__force __be64)pte[1]);
ret = H_NOT_FOUND;
if ((pte[0] & HPTE_V_VALID) == 0 ||
@@ -231,8 +232,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
rb = compute_tlbie_rb(v, r, pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
- pte[0] = cpu_to_be64(pte[0]);
- pte[1] = cpu_to_be64(pte[1]);
+ pte[0] = (__force u64)cpu_to_be64(pte[0]);
+ pte[1] = (__force u64)cpu_to_be64(pte[1]);
copy_to_user((void __user *)pteg, pte, sizeof(pte));
ret = H_SUCCESS;
@@ -266,6 +267,10 @@ static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
+ if (cmd <= MAX_HCALL_OPCODE &&
+ !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
+ return EMULATE_FAIL;
+
switch (cmd) {
case H_ENTER:
return kvmppc_h_pr_enter(vcpu);
@@ -303,3 +308,61 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
return EMULATE_FAIL;
}
+
+int kvmppc_hcall_impl_pr(unsigned long cmd)
+{
+ switch (cmd) {
+ case H_ENTER:
+ case H_REMOVE:
+ case H_PROTECT:
+ case H_BULK_REMOVE:
+ case H_PUT_TCE:
+ case H_CEDE:
+#ifdef CONFIG_KVM_XICS
+ case H_XIRR:
+ case H_CPPR:
+ case H_EOI:
+ case H_IPI:
+ case H_IPOLL:
+ case H_XIRR_X:
+#endif
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * List of hcall numbers to enable by default.
+ * For compatibility with old userspace, we enable by default
+ * all hcalls that were implemented before the hcall-enabling
+ * facility was added. Note this list should not include H_RTAS.
+ */
+static unsigned int default_hcall_list[] = {
+ H_ENTER,
+ H_REMOVE,
+ H_PROTECT,
+ H_BULK_REMOVE,
+ H_PUT_TCE,
+ H_CEDE,
+#ifdef CONFIG_KVM_XICS
+ H_XIRR,
+ H_CPPR,
+ H_EOI,
+ H_IPI,
+ H_IPOLL,
+ H_XIRR_X,
+#endif
+ 0
+};
+
+void kvmppc_pr_init_default_hcalls(struct kvm *kvm)
+{
+ int i;
+ unsigned int hcall;
+
+ for (i = 0; default_hcall_list[i]; ++i) {
+ hcall = default_hcall_list[i];
+ WARN_ON(!kvmppc_hcall_impl_pr(hcall));
+ __set_bit(hcall / 4, kvm->arch.enabled_hcalls);
+ }
+}
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 9eec675220e6..16c4d88ba27d 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,11 @@
#if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name) name
+#else
#define FUNC(name) GLUE(.,name)
+#endif
#elif defined(CONFIG_PPC_BOOK3S_32)
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
* On entry, r4 contains the guest shadow MSR
* MSR.EE has to be 0 when calling this function
*/
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
mfmsr r5
LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
toreal(r7)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 002d51764143..c99c40e9182a 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -250,6 +250,14 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
spr_val);
break;
+ case SPRN_PWRMGTCR0:
+ /*
+ * Guest relies on host power management configurations
+ * Treat the request as a general store
+ */
+ vcpu->arch.pwrmgtcr0 = spr_val;
+ break;
+
/* extra exceptions */
case SPRN_IVOR32:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
@@ -368,6 +376,10 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_v
*spr_val = vcpu->arch.eptcfg;
break;
+ case SPRN_PWRMGTCR0:
+ *spr_val = vcpu->arch.pwrmgtcr0;
+ break;
+
/* extra exceptions */
case SPRN_IVOR32:
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index dd2cc03f406f..79677d76d1a4 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -107,11 +107,15 @@ static u32 get_host_mas0(unsigned long eaddr)
{
unsigned long flags;
u32 mas0;
+ u32 mas4;
local_irq_save(flags);
mtspr(SPRN_MAS6, 0);
+ mas4 = mfspr(SPRN_MAS4);
+ mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
mas0 = mfspr(SPRN_MAS0);
+ mtspr(SPRN_MAS4, mas4);
local_irq_restore(flags);
return mas0;
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 17e456279224..690499d7669d 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -110,7 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
-static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
+static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
{
@@ -141,9 +141,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
- __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
+ __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) {
kvmppc_e500_tlbil_all(vcpu_e500);
- __get_cpu_var(last_vcpu_on_cpu) = vcpu;
+ __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
}
kvmppc_load_guest_fp(vcpu);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 61c738ab1283..7efc2b711404 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -387,6 +387,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_PPC_UNSET_IRQ:
case KVM_CAP_PPC_IRQ_LEVEL:
case KVM_CAP_ENABLE_CAP:
+ case KVM_CAP_ENABLE_CAP_VM:
case KVM_CAP_ONE_REG:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
@@ -417,6 +418,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_PPC_ALLOC_HTAB:
case KVM_CAP_PPC_RTAS:
case KVM_CAP_PPC_FIXUP_HCALL:
+ case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
case KVM_CAP_IRQ_XICS:
#endif
@@ -1099,6 +1101,42 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
return 0;
}
+
+static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ struct kvm_enable_cap *cap)
+{
+ int r;
+
+ if (cap->flags)
+ return -EINVAL;
+
+ switch (cap->cap) {
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ case KVM_CAP_PPC_ENABLE_HCALL: {
+ unsigned long hcall = cap->args[0];
+
+ r = -EINVAL;
+ if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
+ cap->args[1] > 1)
+ break;
+ if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
+ break;
+ if (cap->args[1])
+ set_bit(hcall / 4, kvm->arch.enabled_hcalls);
+ else
+ clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
+ r = 0;
+ break;
+ }
+#endif
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -1118,6 +1156,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
break;
}
+ case KVM_ENABLE_CAP:
+ {
+ struct kvm_enable_cap cap;
+ r = -EFAULT;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ goto out;
+ r = kvm_vm_ioctl_enable_cap(kvm, &cap);
+ break;
+ }
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CREATE_SPAPR_TCE: {
struct kvm_create_spapr_tce create_tce;
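
With KVM_CAP_ENABLE_CAP_VM now advertised, userspace can toggle in-kernel handling of individual hcalls on the VM fd. A hypothetical userspace sketch (not part of the patch) matching the checks in kvm_vm_ioctl_enable_cap() above:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int enable_hcall(int vm_fd, unsigned long hcall, int handle_in_kernel)
	{
		struct kvm_enable_cap cap;

		memset(&cap, 0, sizeof(cap));
		cap.cap = KVM_CAP_PPC_ENABLE_HCALL;
		cap.args[0] = hcall;			/* must be <= MAX_HCALL_OPCODE and a multiple of 4 */
		cap.args[1] = handle_in_kernel;		/* 0 or 1; anything else is rejected */

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}
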
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index e11d8f170a62..0418b746cb68 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -758,6 +758,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_VM_ATTRIBUTES 101
#define KVM_CAP_ARM_PSCI_0_2 102
#define KVM_CAP_PPC_FIXUP_HCALL 103
+#define KVM_CAP_PPC_ENABLE_HCALL 104
#ifdef KVM_CAP_IRQ_ROUTING