author	Stephen Rothwell <sfr@canb.auug.org.au>	2018-01-15 09:29:51 +1100
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2018-01-15 09:29:51 +1100
commit	494295ef9936ba8dd9123687f22a723c90442b3b (patch)
tree	53aa09fe3f0f688ac53f6225be877551ebf4f1e6 /virt
parent	f26ffa26a91d8335e35842f969a9e1e0fe456119 (diff)
parent	418bb8b3ff50515c6d83fdf6d609331c676de178 (diff)
Merge remote-tracking branch 'vfs/for-next'
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/eventfd.c	4
-rw-r--r--	virt/kvm/kvm_main.c	43
2 files changed, 14 insertions, 33 deletions
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index f2ac53ab8243..a334399fafec 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -188,7 +188,7 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
struct kvm_kernel_irqfd *irqfd =
container_of(wait, struct kvm_kernel_irqfd, wait);
- unsigned long flags = (unsigned long)key;
+ __poll_t flags = key_to_poll(key);
struct kvm_kernel_irq_routing_entry irq;
struct kvm *kvm = irqfd->kvm;
unsigned seq;
@@ -287,7 +287,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
struct fd f;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
int ret;
- unsigned int events;
+ __poll_t events;
int idx;
if (!kvm_arch_intc_initialized(kvm))
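
Note: the two eventfd.c hunks above come from the __poll_t annotation work merged via vfs/for-next; the wait-queue key is decoded with key_to_poll() and poll masks are carried in the sparse-annotated __poll_t type. The following is a minimal sketch of the resulting wait-queue callback pattern, not code from this merge; example_wakeup() is an illustrative name.

/*
 * Illustrative sketch only (not taken from this merge): a wait-queue
 * callback decodes its poll key with key_to_poll() and tests the
 * __poll_t-annotated EPOLL* bits instead of casting key to unsigned long.
 */
#include <linux/poll.h>
#include <linux/wait.h>

static int example_wakeup(wait_queue_entry_t *wait, unsigned mode,
			  int sync, void *key)
{
	__poll_t flags = key_to_poll(key);	/* was: (unsigned long)key */

	if (flags & EPOLLIN) {
		/* the eventfd was signalled: deliver the event */
	}
	if (flags & EPOLLHUP) {
		/* the eventfd was closed: schedule teardown */
	}
	return 0;
}
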
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 210bf820385a..d6b9370806f8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1322,17 +1322,6 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
return gfn_to_hva_memslot_prot(slot, gfn, writable);
}
-static int get_user_page_nowait(unsigned long start, int write,
- struct page **page)
-{
- int flags = FOLL_NOWAIT | FOLL_HWPOISON;
-
- if (write)
- flags |= FOLL_WRITE;
-
- return get_user_pages(start, 1, flags, page, NULL);
-}
-
static inline int check_user_page_hwpoison(unsigned long addr)
{
int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
@@ -1381,7 +1370,8 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
bool *writable, kvm_pfn_t *pfn)
{
- struct page *page[1];
+ unsigned int flags = FOLL_HWPOISON;
+ struct page *page;
int npages = 0;
might_sleep();
@@ -1389,35 +1379,26 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
if (writable)
*writable = write_fault;
- if (async) {
- down_read(&current->mm->mmap_sem);
- npages = get_user_page_nowait(addr, write_fault, page);
- up_read(&current->mm->mmap_sem);
- } else {
- unsigned int flags = FOLL_HWPOISON;
-
- if (write_fault)
- flags |= FOLL_WRITE;
+ if (write_fault)
+ flags |= FOLL_WRITE;
+ if (async)
+ flags |= FOLL_NOWAIT;
- npages = get_user_pages_unlocked(addr, 1, page, flags);
- }
+ npages = get_user_pages_unlocked(addr, 1, &page, flags);
if (npages != 1)
return npages;
/* map read fault as writable if possible */
if (unlikely(!write_fault) && writable) {
- struct page *wpage[1];
+ struct page *wpage;
- npages = __get_user_pages_fast(addr, 1, 1, wpage);
- if (npages == 1) {
+ if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
*writable = true;
- put_page(page[0]);
- page[0] = wpage[0];
+ put_page(page);
+ page = wpage;
}
-
- npages = 1;
}
- *pfn = page_to_pfn(page[0]);
+ *pfn = page_to_pfn(page);
return npages;
}
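
Note: the kvm_main.c change drops the open-coded get_user_page_nowait() helper and its down_read()/up_read() of mmap_sem, folding FOLL_NOWAIT into the single gup_flags mask passed to get_user_pages_unlocked(), which handles the mmap_sem locking itself. Below is a minimal sketch of that call pattern, not code from this merge; example_pin_one_page() is an illustrative name.

/*
 * Illustrative sketch only (not taken from this merge): pin a single
 * user page with one gup_flags mask and let get_user_pages_unlocked()
 * take and drop mmap_sem internally.
 */
#include <linux/mm.h>

static int example_pin_one_page(unsigned long addr, bool write_fault,
				bool async, struct page **page)
{
	unsigned int flags = FOLL_HWPOISON;

	if (write_fault)
		flags |= FOLL_WRITE;	/* need a writable mapping */
	if (async)
		flags |= FOLL_NOWAIT;	/* do not sleep waiting on the fault */

	/* returns the number of pages pinned: 1 on success */
	return get_user_pages_unlocked(addr, 1, page, flags);
}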