Diffstat (limited to 'mm/nommu.c')
-rw-r--r--  mm/nommu.c  39
1 file changed, 21 insertions(+), 18 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 8b8faaf2a9e9..2d131b97a851 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -17,6 +17,7 @@

#include <linux/export.h>
#include <linux/mm.h>
+#include <linux/sched/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
@@ -35,7 +36,7 @@
#include <linux/audit.h>
#include <linux/printk.h>

-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
@@ -176,9 +177,10 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
}
EXPORT_SYMBOL(get_user_pages_locked);

-long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- struct page **pages, unsigned int gup_flags)
+static long __get_user_pages_unlocked(struct task_struct *tsk,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long nr_pages, struct page **pages,
+ unsigned int gup_flags)
{
long ret;
down_read(&mm->mmap_sem);
@@ -187,7 +189,6 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
up_read(&mm->mmap_sem);
return ret;
}
-EXPORT_SYMBOL(__get_user_pages_unlocked);

long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags)
@@ -517,7 +518,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
}

/*
- * initialise the VMA and region record slabs
+ * initialise the percpu counter for VM and region record slabs
*/
void __init mmap_init(void)
{
@@ -757,7 +758,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
mm->map_count--;
for (i = 0; i < VMACACHE_SIZE; i++) {
/* if the vma is cached, invalidate the entire cache */
- if (curr->vmacache[i] == vma) {
+ if (curr->vmacache.vmas[i] == vma) {
vmacache_invalidate(mm);
break;
}
@@ -1084,7 +1085,7 @@ static int do_mmap_shared_file(struct vm_area_struct *vma)
{
int ret;

- ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
+ ret = call_mmap(vma->vm_file, vma);
if (ret == 0) {
vma->vm_region->vm_top = vma->vm_region->vm_end;
return 0;
@@ -1115,7 +1116,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
* - VM_MAYSHARE will be set if it may attempt to share
*/
if (capabilities & NOMMU_MAP_DIRECT) {
- ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
+ ret = call_mmap(vma->vm_file, vma);
if (ret == 0) {
/* shouldn't return success if we're not sharing */
BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
@@ -1191,7 +1192,7 @@ error_free:
enomem:
pr_err("Allocation of length %lu from process %d (%s) failed\n",
len, current->pid, current->comm);
- show_free_areas(0);
+ show_free_areas(0, NULL);
return -ENOMEM;
}

@@ -1205,7 +1206,8 @@ unsigned long do_mmap(struct file *file,
unsigned long flags,
vm_flags_t vm_flags,
unsigned long pgoff,
- unsigned long *populate)
+ unsigned long *populate,
+ struct list_head *uf)
{
struct vm_area_struct *vma;
struct vm_region *region;
@@ -1412,13 +1414,13 @@ error_getting_vma:
kmem_cache_free(vm_region_jar, region);
pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
len, current->pid);
- show_free_areas(0);
+ show_free_areas(0, NULL);
return -ENOMEM;

error_getting_region:
pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
len, current->pid);
- show_free_areas(0);
+ show_free_areas(0, NULL);
return -ENOMEM;
}

@@ -1577,7 +1579,7 @@ static int shrink_vma(struct mm_struct *mm,
* - under NOMMU conditions the chunk to be unmapped must be backed by a single
* VMA, though it need not cover the whole VMA
*/
-int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
{
struct vm_area_struct *vma;
unsigned long end;
@@ -1643,7 +1645,7 @@ int vm_munmap(unsigned long addr, size_t len)
int ret;

down_write(&mm->mmap_sem);
- ret = do_munmap(mm, addr, len);
+ ret = do_munmap(mm, addr, len, NULL);
up_write(&mm->mmap_sem);
return ret;
}
@@ -1794,21 +1796,21 @@ void unmap_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL(unmap_mapping_range);

-int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int filemap_fault(struct vm_fault *vmf)
{
BUG();
return 0;
}
EXPORT_SYMBOL(filemap_fault);

-void filemap_map_pages(struct fault_env *fe,
+void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff)
{
BUG();
}
EXPORT_SYMBOL(filemap_map_pages);

-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
struct vm_area_struct *vma;
@@ -1878,6 +1880,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
mmput(mm);
return len;
}
+EXPORT_SYMBOL_GPL(access_process_vm);

/**
* nommu_shrink_inode_mappings - Shrink the shared mappings on an inode