Diffstat (limited to 'arch/ia64/kvm/kvm-ia64.c')
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c  111
1 file changed, 62 insertions(+), 49 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 3138d762b2df..af1464f7a6ad 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -180,6 +180,7 @@ int kvm_dev_ioctl_check_extension(long ext)
switch (ext) {
case KVM_CAP_IRQCHIP:
+ case KVM_CAP_USER_MEMORY:
case KVM_CAP_MP_STATE:
r = 1;
@@ -438,6 +439,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
expires = div64_u64(itc_diff, cyc_per_usec);
kt = ktime_set(0, 1000 * expires);
+ down_read(&vcpu->kvm->slots_lock);
vcpu->arch.ht_active = 1;
hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
@@ -450,6 +452,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
+ up_read(&vcpu->kvm->slots_lock);
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
return -EINTR;
@@ -473,13 +476,6 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
return 1;
}
-static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
- struct kvm_run *kvm_run)
-{
- printk("VMM: %s", vcpu->arch.log_buf);
- return 1;
-}
-
static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run) = {
[EXIT_REASON_VM_PANIC] = handle_vm_error,
@@ -491,7 +487,6 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
[EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
[EXIT_REASON_IPI] = handle_ipi,
[EXIT_REASON_PTC_G] = handle_global_purge,
- [EXIT_REASON_DEBUG] = handle_vcpu_debug,
};
@@ -703,24 +698,27 @@ out:
return r;
}
+/*
+ * Allocate 16M memory for every vm to hold its specific data.
+ * Its memory map is defined in kvm_host.h.
+ */
static struct kvm *kvm_alloc_kvm(void)
{
struct kvm *kvm;
uint64_t vm_base;
- BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
-
vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
if (!vm_base)
return ERR_PTR(-ENOMEM);
+ printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
+ /* Zero all pages before use! */
memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
- kvm = (struct kvm *)(vm_base +
- offsetof(struct kvm_vm_data, kvm_vm_struct));
+
+ kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
kvm->arch.vm_base = vm_base;
- printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
return kvm;
}
@@ -762,12 +760,21 @@ static void kvm_build_io_pmt(struct kvm *kvm)
static void kvm_init_vm(struct kvm *kvm)
{
+ long vm_base;
+
BUG_ON(!kvm);
kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
kvm->arch.vmm_init_rr = VMM_INIT_RR;
+ vm_base = kvm->arch.vm_base;
+ if (vm_base) {
+ kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
+ kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
+ kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
+ }
+
/*
*Fill P2M entries for MMIO/IO ranges
*/
@@ -831,8 +838,9 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
int i;
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+ int r;
vcpu_load(vcpu);
@@ -849,7 +857,18 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vpd->vpr = regs->vpd.vpr;
- memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));
+ r = -EFAULT;
+ r = copy_from_user(&vcpu->arch.guest, regs->saved_guest,
+ sizeof(union context));
+ if (r)
+ goto out;
+ r = copy_from_user(vcpu + 1, regs->saved_stack +
+ sizeof(struct kvm_vcpu),
+ IA64_STK_OFFSET - sizeof(struct kvm_vcpu));
+ if (r)
+ goto out;
+ vcpu->arch.exit_data =
+ ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;
RESTORE_REGS(mp_state);
RESTORE_REGS(vmm_rr);
@@ -883,8 +902,9 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
set_bit(KVM_REQ_RESUME, &vcpu->requests);
vcpu_put(vcpu);
-
- return 0;
+ r = 0;
+out:
+ return r;
}
long kvm_arch_vm_ioctl(struct file *filp,
@@ -1146,11 +1166,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
/*Set entry address for first run.*/
regs->cr_iip = PALE_RESET_ENTRY;
- /*Initialize itc offset for vcpus*/
+ /*Initilize itc offset for vcpus*/
itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
- v = (struct kvm_vcpu *)((char *)vcpu +
- sizeof(struct kvm_vcpu_data) * i);
+ for (i = 0; i < MAX_VCPU_NUM; i++) {
+ v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
v->arch.itc_offset = itc_offset;
v->arch.last_itc = 0;
}
@@ -1164,7 +1183,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.apic->vcpu = vcpu;
p_ctx->gr[1] = 0;
- p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
+ p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
p_ctx->gr[13] = (unsigned long)vmm_vcpu;
p_ctx->psr = 0x1008522000UL;
p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
@@ -1199,12 +1218,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
vcpu->arch.hlt_timer.function = hlt_timer_fn;
vcpu->arch.last_run_cpu = -1;
- vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
+ vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
vcpu->arch.vsa_base = kvm_vsa_base;
vcpu->arch.__gp = kvm_vmm_gp;
vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
- vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
- vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
+ vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
+ vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
init_ptce_info(vcpu);
r = 0;
@@ -1254,22 +1273,12 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
int r;
int cpu;
- BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
-
- r = -EINVAL;
- if (id >= KVM_MAX_VCPUS) {
- printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
- KVM_MAX_VCPUS);
- goto fail;
- }
-
r = -ENOMEM;
if (!vm_base) {
printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
goto fail;
}
- vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
- vcpu_data[id].vcpu_struct));
+ vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
vcpu->kvm = kvm;
cpu = get_cpu();
@@ -1302,8 +1311,8 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return -EINVAL;
}
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug *dbg)
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg)
{
return -EINVAL;
}
@@ -1365,9 +1374,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
- struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
int i;
-
+ int r;
+ struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
vcpu_load(vcpu);
for (i = 0; i < 16; i++) {
@@ -1382,8 +1391,14 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->vpd.vpsr = vpd->vpsr;
regs->vpd.vpr = vpd->vpr;
- memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));
-
+ r = -EFAULT;
+ r = copy_to_user(regs->saved_guest, &vcpu->arch.guest,
+ sizeof(union context));
+ if (r)
+ goto out;
+ r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET);
+ if (r)
+ goto out;
SAVE_REGS(mp_state);
SAVE_REGS(vmm_rr);
memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
@@ -1411,9 +1426,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
SAVE_REGS(metaphysical_saved_rr4);
SAVE_REGS(fp_psr);
SAVE_REGS(saved_gp);
-
vcpu_put(vcpu);
- return 0;
+ r = 0;
+out:
+ return r;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
@@ -1441,9 +1457,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
unsigned long base_gfn = memslot->base_gfn;
- if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
- return -ENOMEM;
-
for (i = 0; i < npages; i++) {
pfn = gfn_to_pfn(kvm, base_gfn + i);
if (!kvm_is_mmio_pfn(pfn)) {
@@ -1618,8 +1631,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot;
int r, i;
long n, base;
- unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
- offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
+ unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
+ + KVM_MEM_DIRTY_LOG_OFS);
r = -EINVAL;
if (log->slot >= KVM_MEMORY_SLOTS)