Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c         | 23
-rw-r--r--  mm/compaction.c          | 24
-rw-r--r--  mm/filemap.c             |  8
-rw-r--r--  mm/filemap_xip.c         |  7
-rw-r--r--  mm/huge_memory.c         |  4
-rw-r--r--  mm/kmemleak.c            |  3
-rw-r--r--  mm/memblock.c            |  7
-rw-r--r--  mm/memcontrol.c          |  8
-rw-r--r--  mm/migrate.c             |  2
-rw-r--r--  mm/nommu.c               |  9
-rw-r--r--  mm/page_alloc.c          |  1
-rw-r--r--  mm/process_vm_access.c   | 23
-rw-r--r--  mm/swap.c                |  2
13 files changed, 85 insertions(+), 36 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7ba8feae11b8..dd8e2aafb07e 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -318,7 +318,7 @@ static void wakeup_timer_fn(unsigned long data)
if (bdi->wb.task) {
trace_writeback_wake_thread(bdi);
wake_up_process(bdi->wb.task);
- } else {
+ } else if (bdi->dev) {
/*
* When bdi tasks are inactive for a long time, they are killed.
* In this case we have to wake-up the forker thread which
@@ -584,6 +584,8 @@ EXPORT_SYMBOL(bdi_register_dev);
*/
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
+ struct task_struct *task;
+
if (!bdi_cap_writeback_dirty(bdi))
return;
@@ -602,8 +604,13 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
* Finally, kill the kernel thread. We don't need to be RCU
* safe anymore, since the bdi is gone from visibility.
*/
- if (bdi->wb.task)
- kthread_stop(bdi->wb.task);
+ spin_lock_bh(&bdi->wb_lock);
+ task = bdi->wb.task;
+ bdi->wb.task = NULL;
+ spin_unlock_bh(&bdi->wb_lock);
+
+ if (task)
+ kthread_stop(task);
}
/*
@@ -623,7 +630,9 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
void bdi_unregister(struct backing_dev_info *bdi)
{
- if (bdi->dev) {
+ struct device *dev = bdi->dev;
+
+ if (dev) {
bdi_set_min_ratio(bdi, 0);
trace_writeback_bdi_unregister(bdi);
bdi_prune_sb(bdi);
@@ -632,8 +641,12 @@ void bdi_unregister(struct backing_dev_info *bdi)
if (!bdi_cap_flush_forker(bdi))
bdi_wb_shutdown(bdi);
bdi_debug_unregister(bdi);
- device_unregister(bdi->dev);
+
+ spin_lock_bh(&bdi->wb_lock);
bdi->dev = NULL;
+ spin_unlock_bh(&bdi->wb_lock);
+
+ device_unregister(dev);
}
}
EXPORT_SYMBOL(bdi_unregister);
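
The backing-dev.c hunks close a teardown race: wakeup_timer_fn() now only pokes the forker thread if bdi->dev is still set, bdi_wb_shutdown() claims wb.task under wb_lock before calling kthread_stop(), and bdi_unregister() publishes bdi->dev = NULL under wb_lock before the device is actually unregistered. A minimal user-space sketch of that "unpublish under the lock, tear down outside it" pattern, with a pthread mutex standing in for wb_lock and a made-up struct worker standing in for the task/device:

    /* Hypothetical sketch only: a pthread mutex plays wb_lock and struct worker
     * plays bdi->wb.task; none of this is the kernel code itself. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct worker { const char *name; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct worker *worker;            /* protected by lock, like bdi->wb.task */

    static void *timer_fn(void *arg)         /* plays the role of wakeup_timer_fn() */
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        if (worker)                          /* only touch it while it is published */
            printf("waking %s\n", worker->name);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    static void shutdown_worker(void)        /* plays the role of bdi_wb_shutdown() */
    {
        struct worker *w;

        pthread_mutex_lock(&lock);
        w = worker;                          /* claim it ... */
        worker = NULL;                       /* ... and unpublish under the lock */
        pthread_mutex_unlock(&lock);

        free(w);                             /* heavy teardown happens unlocked */
    }

    int main(void)
    {
        pthread_t t;

        worker = malloc(sizeof(*worker));
        if (!worker)
            return 1;
        worker->name = "flush-0:16";
        pthread_create(&t, NULL, timer_fn, NULL);
        pthread_join(t, NULL);
        shutdown_worker();
        timer_fn(NULL);                      /* a late "timer" sees NULL and backs off */
        return 0;
    }

The ordering matters for the same reason as in the patch: once the pointer has been cleared under the lock, a concurrently running timer can no longer dereference a resource that is about to be destroyed.
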
diff --git a/mm/compaction.c b/mm/compaction.c
index 71a58f67f481..d9ebebe1a2aa 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -313,12 +313,34 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
} else if (!locked)
spin_lock_irq(&zone->lru_lock);
+ /*
+ * migrate_pfn does not necessarily start aligned to a
+ * pageblock. Ensure that pfn_valid is called when moving
+ * into a new MAX_ORDER_NR_PAGES range in case of large
+ * memory holes within the zone
+ */
+ if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+ if (!pfn_valid(low_pfn)) {
+ low_pfn += MAX_ORDER_NR_PAGES - 1;
+ continue;
+ }
+ }
+
if (!pfn_valid_within(low_pfn))
continue;
nr_scanned++;
- /* Get the page and skip if free */
+ /*
+ * Get the page and ensure the page is within the same zone.
+ * See the comment in isolate_freepages about overlapping
+ * nodes. It is deliberate that the new zone lock is not taken
+ * as memory compaction should not move pages between nodes.
+ */
page = pfn_to_page(low_pfn);
+ if (page_zone(page) != zone)
+ continue;
+
+ /* Skip if free */
if (PageBuddy(page))
continue;
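
The new boundary check in isolate_migratepages() leans on MAX_ORDER_NR_PAGES being a power of two: low_pfn & (MAX_ORDER_NR_PAGES - 1) is zero exactly at block boundaries, and low_pfn += MAX_ORDER_NR_PAGES - 1 followed by the loop's own increment lands on the start of the next block, skipping the hole in one step. A small stand-alone illustration of that mask arithmetic (the 1024-page block size and the hole location are invented):

    /* Invented numbers throughout; only the masking pattern mirrors the patch. */
    #include <stdbool.h>
    #include <stdio.h>

    #define BLOCK_PAGES 1024UL                      /* power of two, like MAX_ORDER_NR_PAGES */

    static bool pfn_valid(unsigned long pfn)        /* mock: pfns 2048-3071 are a hole */
    {
        return pfn < 2048 || pfn >= 3072;
    }

    int main(void)
    {
        unsigned long scanned = 0;

        for (unsigned long pfn = 0; pfn < 4096; pfn++) {
            if ((pfn & (BLOCK_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                pfn += BLOCK_PAGES - 1;             /* the for-loop ++ reaches the next block */
                continue;
            }
            scanned++;                              /* stands in for the per-pfn work */
        }
        printf("scanned %lu of 4096 pfns (one 1024-page hole skipped)\n", scanned);
        return 0;
    }
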
diff --git a/mm/filemap.c b/mm/filemap.c
index 97f49ed35bd2..b66275757c28 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1400,15 +1400,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long seg = 0;
size_t count;
loff_t *ppos = &iocb->ki_pos;
- struct blk_plug plug;
count = 0;
retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (retval)
return retval;
- blk_start_plug(&plug);
-
/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
if (filp->f_flags & O_DIRECT) {
loff_t size;
@@ -1424,8 +1421,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
retval = filemap_write_and_wait_range(mapping, pos,
pos + iov_length(iov, nr_segs) - 1);
if (!retval) {
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
retval = mapping->a_ops->direct_IO(READ, iocb,
iov, pos, nr_segs);
+ blk_finish_plug(&plug);
}
if (retval > 0) {
*ppos = pos + retval;
@@ -1481,7 +1482,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
break;
}
out:
- blk_finish_plug(&plug);
return retval;
}
EXPORT_SYMBOL(generic_file_aio_read);
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index f91b2f687343..a4eb31132229 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -263,7 +263,12 @@ found:
xip_pfn);
if (err == -ENOMEM)
return VM_FAULT_OOM;
- BUG_ON(err);
+ /*
+ * err == -EBUSY is fine, we've raced against another thread
+ * that faulted-in the same page
+ */
+ if (err != -EBUSY)
+ BUG_ON(err);
return VM_FAULT_NOPAGE;
} else {
int err, ret = VM_FAULT_OOM;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b3ffc21ce801..91d3efb25d15 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2083,7 +2083,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
{
struct mm_struct *mm = mm_slot->mm;
- VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+ VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
if (khugepaged_test_exit(mm)) {
/* free mm_slot */
@@ -2113,7 +2113,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
int progress = 0;
VM_BUG_ON(!pages);
- VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+ VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
if (khugepaged_scan.mm_slot)
mm_slot = khugepaged_scan.mm_slot;
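
Both huge_memory.c assertions (and the matching one in mm/swap.c at the end of this patch) gain the NR_CPUS != 1 guard because on uniprocessor builds spinlocks compile away and spin_is_locked() can evaluate to a constant 0, so the unguarded VM_BUG_ON would fire even though the caller really does hold the lock. A mocked-up user-space rendering of that guard; the macros below only imitate the kernel ones and the "lock" is a plain int:

    /* Mock-ups only: these are not the kernel's spin_is_locked()/VM_BUG_ON(). */
    #include <assert.h>
    #include <stdio.h>

    #define NR_CPUS 1                                /* pretend this is a UP build */
    #if NR_CPUS == 1
    # define spin_is_locked(l)  ((void)(l), 0)       /* UP: lock state is not tracked */
    #else
    # define spin_is_locked(l)  (*(l) != 0)
    #endif
    #define VM_BUG_ON(cond)     assert(!(cond))

    int main(void)
    {
        int lru_lock = 1;                            /* the caller does hold the lock */

        /* The old form would always trip here on a UP build:
         *     VM_BUG_ON(!spin_is_locked(&lru_lock));
         * The patched form only asserts where the check means something: */
        VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&lru_lock));
        puts("assertion skipped on NR_CPUS == 1, as intended");
        return 0;
    }
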
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index c833addd94d7..45eb6217bf38 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1036,7 +1036,7 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
pr_debug("%s(0x%p)\n", __func__, ptr);
- if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+ if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
add_scan_area((unsigned long)ptr, size, gfp);
else if (atomic_read(&kmemleak_early_log))
log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
@@ -1757,6 +1757,7 @@ void __init kmemleak_init(void)
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
if (!kmemleak_skip_disable) {
+ atomic_set(&kmemleak_early_log, 0);
kmemleak_disable();
return;
}
diff --git a/mm/memblock.c b/mm/memblock.c
index 2f55f19b7c86..77b5f227e1d8 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -106,14 +106,17 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
end = memblock.current_limit;
- /* adjust @start to avoid underflow and allocating the first page */
- start = max3(start, size, (phys_addr_t)PAGE_SIZE);
+ /* avoid allocating the first page */
+ start = max_t(phys_addr_t, start, PAGE_SIZE);
end = max(start, end);
for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
this_start = clamp(this_start, start, end);
this_end = clamp(this_end, start, end);
+ if (this_end < size)
+ continue;
+
cand = round_down(this_end - size, align);
if (cand >= this_start)
return cand;
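
The memblock hunk trades the old start = max3(start, size, PAGE_SIZE) trick for a plain first-page guard plus an explicit if (this_end < size) continue, because this_end - size is computed in unsigned phys_addr_t arithmetic: when a free range is smaller than the request, the subtraction wraps around to an enormous "candidate" address that can still pass the cand >= this_start test. A tiny demonstration of that wraparound, with arbitrary range and request sizes:

    /* Arbitrary sizes; only the unsigned-subtraction hazard mirrors the patch. */
    #include <inttypes.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;                      /* stand-in for the kernel type */

    int main(void)
    {
        phys_addr_t this_start = 0x1000, this_end = 0x3000;   /* an 8 KiB free range */
        phys_addr_t size = 0x10000;                           /* a 64 KiB request    */

        phys_addr_t cand = this_end - size;            /* wraps: 0x3000 - 0x10000 */
        printf("unchecked candidate: %#" PRIx64 " (bogus, and >= this_start)\n",
               (uint64_t)cand);

        if (this_end < size)                           /* the guard added by the patch */
            puts("range smaller than the request: skip it instead");
        return 0;
    }
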
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 556859fec4ef..228d6461c12a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -776,7 +776,8 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
/* threshold event is triggered in finer grain than soft limit */
if (unlikely(mem_cgroup_event_ratelimit(memcg,
MEM_CGROUP_TARGET_THRESH))) {
- bool do_softlimit, do_numainfo;
+ bool do_softlimit;
+ bool do_numainfo __maybe_unused;
do_softlimit = mem_cgroup_event_ratelimit(memcg,
MEM_CGROUP_TARGET_SOFTLIMIT);
@@ -4413,6 +4414,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
*/
BUG_ON(!thresholds);
+ if (!thresholds->primary)
+ goto unlock;
+
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before removing */
@@ -4461,7 +4465,7 @@ swap_buffers:
/* To be sure that nobody uses thresholds */
synchronize_rcu();
-
+unlock:
mutex_unlock(&memcg->thresholds_lock);
}
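
In memcontrol.c, do_numainfo moves to its own declaration and gets __maybe_unused because the NUMA-info path that reads it is compiled out on single-node configurations, which would otherwise leave an unused local and a compiler warning; the second hunk bails out through the new unlock label when thresholds->primary is NULL. A minimal illustration of the annotation, where CONFIG_NUMA, event_ratelimit() and the messages are stand-ins rather than the memcg code:

    /* Stand-ins only; __maybe_unused here is spelled out as the GCC attribute. */
    #include <stdbool.h>
    #include <stdio.h>

    #define __maybe_unused __attribute__((unused))
    /* #define CONFIG_NUMA 1 */      /* define this to compile the reader back in */

    static bool event_ratelimit(void) { return true; }

    int main(void)
    {
        bool do_softlimit;
        bool do_numainfo __maybe_unused;      /* only read when CONFIG_NUMA is set */

        do_softlimit = event_ratelimit();
    #ifdef CONFIG_NUMA
        do_numainfo = event_ratelimit();
        if (do_numainfo)
            puts("numainfo event");
    #endif
        if (do_softlimit)
            puts("softlimit event");
        return 0;
    }
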
diff --git a/mm/migrate.c b/mm/migrate.c
index 9871a56d82c3..df141f60289e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -445,7 +445,6 @@ void migrate_page_copy(struct page *newpage, struct page *page)
ClearPageSwapCache(page);
ClearPagePrivate(page);
set_page_private(page, 0);
- page->mapping = NULL;
/*
* If any waiters have accumulated on the new page then
@@ -667,6 +666,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
} else {
if (remap_swapcache)
remove_migration_ptes(page, newpage);
+ page->mapping = NULL;
}
unlock_page(newpage);
diff --git a/mm/nommu.c b/mm/nommu.c
index b982290fd962..f59e170fceb4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
+ mutex_lock(&mapping->i_mmap_mutex);
flush_dcache_mmap_lock(mapping);
vma_prio_tree_insert(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
+ mutex_unlock(&mapping->i_mmap_mutex);
}
/* add the VMA to the tree */
@@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
+ mutex_lock(&mapping->i_mmap_mutex);
flush_dcache_mmap_lock(mapping);
vma_prio_tree_remove(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
+ mutex_unlock(&mapping->i_mmap_mutex);
}
/* remove from the MM's tree and list */
@@ -775,8 +779,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
if (vma->vm_next)
vma->vm_next->vm_prev = vma->vm_prev;
-
- vma->vm_mm = NULL;
}
/*
@@ -2052,6 +2054,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
down_write(&nommu_region_sem);
+ mutex_lock(&inode->i_mapping->i_mmap_mutex);
/* search for VMAs that fall within the dead zone */
vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2059,6 +2062,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
/* found one - only interested if it's shared out of the page
* cache */
if (vma->vm_flags & VM_SHARED) {
+ mutex_unlock(&inode->i_mapping->i_mmap_mutex);
up_write(&nommu_region_sem);
return -ETXTBSY; /* not quite true, but near enough */
}
@@ -2086,6 +2090,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
}
}
+ mutex_unlock(&inode->i_mapping->i_mmap_mutex);
up_write(&nommu_region_sem);
return 0;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d2186ecb36f7..a13ded1938f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5236,6 +5236,7 @@ void *__init alloc_large_system_hash(const char *tablename,
max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
do_div(max, bucketsize);
}
+ max = min(max, 0x80000000ULL);
if (numentries > max)
numentries = max;
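
The single line added in page_alloc.c caps the computed table size at 0x80000000ULL (2^31 buckets), so that machines with a very large amount of RAM cannot push numentries beyond what the rest of the hash sizing code expects. A back-of-the-envelope version of the calculation, using 64 TiB of RAM, 4 KiB pages and 8-byte buckets purely as illustrative numbers:

    /* Illustrative numbers only; the structure mirrors alloc_large_system_hash(). */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long nr_all_pages = (64ULL << 40) / 4096;   /* 64 TiB / 4 KiB   */
        unsigned long long bucketsize   = 8;                      /* bytes per bucket */

        unsigned long long max = (nr_all_pages << 12) >> 4;       /* cap at 1/16 of RAM, in bytes */
        max /= bucketsize;                                        /* ... then in buckets */
        printf("uncapped max: %llu buckets\n", max);              /* ~5.5e11 */

        max = max < 0x80000000ULL ? max : 0x80000000ULL;          /* the added clamp */
        printf("capped max:   %llu buckets\n", max);              /* 2^31 */
        return 0;
    }
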
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index e920aa3ce104..c20ff48994c2 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -298,23 +298,18 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
goto free_proc_pages;
}
- task_lock(task);
- if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
- task_unlock(task);
- rc = -EPERM;
- goto put_task_struct;
- }
- mm = task->mm;
-
- if (!mm || (task->flags & PF_KTHREAD)) {
- task_unlock(task);
- rc = -EINVAL;
+ mm = mm_access(task, PTRACE_MODE_ATTACH);
+ if (!mm || IS_ERR(mm)) {
+ rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+ /*
+ * Explicitly map EACCES to EPERM as EPERM is a more
+ * appropriate error code for process_vm_readv/writev
+ */
+ if (rc == -EACCES)
+ rc = -EPERM;
goto put_task_struct;
}
- atomic_inc(&mm->mm_users);
- task_unlock(task);
-
for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
rc = process_vm_rw_single_vec(
(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
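
The process_vm_access.c change replaces the open-coded task_lock()/__ptrace_may_access()/mm_users sequence with mm_access(), which returns either the task's mm (with a reference held), NULL when the task has no mm, or an ERR_PTR-encoded errno when access is denied; the caller then remaps -EACCES to -EPERM. For readers unfamiliar with the ERR_PTR convention, here is a small user-space re-creation of the pattern; the macros and lookup() are mock-ups, not the kernel versions:

    /* Mock-ups of ERR_PTR/PTR_ERR/IS_ERR and of mm_access()'s return contract. */
    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO        4095
    #define ERR_PTR(err)     ((void *)(long)(err))
    #define PTR_ERR(ptr)     ((long)(ptr))
    #define IS_ERR(ptr)      ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static int the_resource = 42;

    static void *lookup(int allowed, int exists)     /* stands in for mm_access() */
    {
        if (!allowed)
            return ERR_PTR(-EACCES);                 /* error encoded in the pointer */
        if (!exists)
            return NULL;                             /* absent, but not an error pointer */
        return &the_resource;
    }

    static long use(int allowed, int exists)
    {
        void *p = lookup(allowed, exists);
        long rc;

        if (!p || IS_ERR(p)) {
            rc = IS_ERR(p) ? PTR_ERR(p) : -ESRCH;
            if (rc == -EACCES)                       /* same remap as the patch */
                rc = -EPERM;
            return rc;
        }
        return *(int *)p;
    }

    int main(void)
    {
        printf("denied:  %ld\n", use(0, 1));         /* -EPERM */
        printf("missing: %ld\n", use(1, 0));         /* -ESRCH */
        printf("ok:      %ld\n", use(1, 1));         /* 42 */
        return 0;
    }
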
diff --git a/mm/swap.c b/mm/swap.c
index b0f529b38979..fff1ff7fb9ad 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -659,7 +659,7 @@ void lru_add_page_tail(struct zone* zone,
VM_BUG_ON(!PageHead(page));
VM_BUG_ON(PageCompound(page_tail));
VM_BUG_ON(PageLRU(page_tail));
- VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
+ VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
SetPageLRU(page_tail);