author     Xavier Boudet <x-boudet@ti.com>  2012-11-05 14:20:49 +0100
committer  Xavier Boudet <x-boudet@ti.com>  2012-11-05 14:20:49 +0100
commit     29a6cc6e3a241e34749edc3cc69cf88757acd491 (patch)
tree       6a0efb8175f50c183e0541cbb1b1f8c2d3979493 /mm
parent     3374c3584e1cb313f62b6a13418e477625c4a25e (diff)
parent     5390967b2f98e6aa46f20eae09580e7db73826b9 (diff)
Merge remote-tracking branch 'remotes/upstream-stable/linux-3.4.y' into tilt-3.4
Conflicts: arch/arm/kernel/smp.c
Diffstat (limited to 'mm')
-rw-r--r--  mm/memblock.c  24
-rw-r--r--  mm/rmap.c      20
2 files changed, 39 insertions(+), 5 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 280d3d7835d6..11e5bd174f3c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -908,6 +908,30 @@ int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t si
return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}
+void __init_memblock memblock_trim_memory(phys_addr_t align)
+{
+ int i;
+ phys_addr_t start, end, orig_start, orig_end;
+ struct memblock_type *mem = &memblock.memory;
+
+ for (i = 0; i < mem->cnt; i++) {
+ orig_start = mem->regions[i].base;
+ orig_end = mem->regions[i].base + mem->regions[i].size;
+ start = round_up(orig_start, align);
+ end = round_down(orig_end, align);
+
+ if (start == orig_start && end == orig_end)
+ continue;
+
+ if (start < end) {
+ mem->regions[i].base = start;
+ mem->regions[i].size = end - start;
+ } else {
+ memblock_remove_region(mem, i);
+ i--;
+ }
+ }
+}
void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
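For context (not part of this diff): the new helper is intended to be called from early architecture setup code, after all memory regions have been registered, so that no region keeps a partial page at either edge. A minimal sketch of such a call site follows; the function name is hypothetical, and only memblock_trim_memory() itself comes from this patch.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>

/* Hypothetical call site, for illustration only: once every region has
 * been added with memblock_add(), trim the regions to whole pages so
 * the page allocator never sees a fraction of a page at a region edge.
 * Regions smaller than one aligned page are removed outright by the
 * start >= end branch in memblock_trim_memory(). */
static void __init example_trim_boot_memory(void)
{
	memblock_trim_memory(PAGE_SIZE);
}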
diff --git a/mm/rmap.c b/mm/rmap.c
index 5b5ad584ffb7..bfca52c96999 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -56,6 +56,7 @@
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
+#include <linux/backing-dev.h>
#include <asm/tlbflush.h>
@@ -977,11 +978,8 @@ int page_mkclean(struct page *page)
if (page_mapped(page)) {
struct address_space *mapping = page_mapping(page);
- if (mapping) {
+ if (mapping)
ret = page_mkclean_file(mapping, page);
- if (page_test_and_clear_dirty(page_to_pfn(page), 1))
- ret = 1;
- }
}
return ret;
@@ -1167,6 +1165,7 @@ void page_add_file_rmap(struct page *page)
*/
void page_remove_rmap(struct page *page)
{
+ struct address_space *mapping = page_mapping(page);
bool anon = PageAnon(page);
bool locked;
unsigned long flags;
@@ -1189,8 +1188,19 @@ void page_remove_rmap(struct page *page)
* this if the page is anon, so about to be freed; but perhaps
* not if it's in swapcache - there might be another pte slot
* containing the swap entry, but page not yet written to swap.
+ *
+ * And we can skip it on file pages, so long as the filesystem
+ * participates in dirty tracking; but need to catch shm and tmpfs
+ * and ramfs pages which have been modified since creation by read
+ * fault.
+ *
+ * Note that mapping must be decided above, before decrementing
+ * mapcount (which luckily provides a barrier): once page is unmapped,
+ * it could be truncated and page->mapping reset to NULL at any moment.
+ * Note also that we are relying on page_mapping(page) to set mapping
+ * to &swapper_space when PageSwapCache(page).
*/
- if ((!anon || PageSwapCache(page)) &&
+ if (mapping && !mapping_cap_account_dirty(mapping) &&
page_test_and_clear_dirty(page_to_pfn(page), 1))
set_page_dirty(page);
/*
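For reference (not part of the patch), the new condition can be read as a small predicate. mapping_cap_account_dirty() is the helper from <linux/backing-dev.h>, whose include is added above; the wrapper function name below is hypothetical.

#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Illustrative restatement of the new test: the storage-key dirty
 * check is only needed when nobody else accounts dirtying for this
 * page, namely anon and swapcache pages (page_mapping() returns
 * &swapper_space) and shm/tmpfs/ramfs pages, whose backing device does
 * not participate in dirty accounting. Ordinary disk filesystems do
 * participate, so the check is skipped for them. */
static bool example_needs_storage_key_check(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	return mapping && !mapping_cap_account_dirty(mapping);
}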