author		Xavier Boudet <x-boudet@ti.com>	2012-10-04 10:58:18 +0200
committer	Xavier Boudet <x-boudet@ti.com>	2012-10-04 10:58:18 +0200
commit		f91ad1217aebdf4cc7b536180ed3528952b47c8d (patch)
tree		3882b4d8a4d68aeac87f4f676e08ff3e1d5b985f /mm
parent		827efacf9b499b55bebef8112a18333ad0a6ed91 (diff)
parent		1c7eb28096b50831697a9cf6f8bf1af0e5b234bc (diff)
Merge branch 'linux-3.4.y' into tilt-3.4_04Oct_rebase
Conflicts:
	drivers/rtc/rtc-twl.c
Diffstat (limited to 'mm')
-rw-r--r--	mm/bootmem.c		8
-rw-r--r--	mm/memory_hotplug.c	16
-rw-r--r--	mm/page_alloc.c		2
-rw-r--r--	mm/vmscan.c		6
4 files changed, 20 insertions(+), 12 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 0131170c9d54..53cf62b186b6 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -766,13 +766,17 @@ void * __init alloc_bootmem_section(unsigned long size,
unsigned long section_nr)
{
bootmem_data_t *bdata;
- unsigned long pfn, goal;
+ unsigned long pfn, goal, limit;
pfn = section_nr_to_pfn(section_nr);
goal = pfn << PAGE_SHIFT;
+ limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
- return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0);
+ if (goal + size > limit)
+ limit = 0;
+
+ return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit);
}
#endif
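The bootmem change above pins a per-section allocation inside its own sparse-memory section: goal is the physical start of the section, limit is the start of the next one, and limit falls back to 0 ("no upper bound") when the request cannot fit before the boundary anyway. Below is a minimal userspace sketch of that arithmetic; PAGE_SHIFT, PFN_SECTION_SHIFT and the pick_goal_and_limit() helper are made-up stand-ins for illustration, not the kernel's real configuration.

#include <stdio.h>

/* Made-up stand-ins for the kernel's page and section geometry. */
#define PAGE_SHIFT        12UL                  /* 4 KiB pages (assumption) */
#define PFN_SECTION_SHIFT 15UL                  /* 128 MiB sections (assumption) */

static unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

/*
 * Mirrors the patched logic: aim at the start of the section, cap the search
 * at the start of the next section, and drop the cap (limit = 0 means "no
 * upper bound") when the request cannot fit inside one section anyway.
 */
static void pick_goal_and_limit(unsigned long section_nr, unsigned long size,
				unsigned long *goal, unsigned long *limit)
{
	*goal  = section_nr_to_pfn(section_nr) << PAGE_SHIFT;
	*limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	if (*goal + size > *limit)
		*limit = 0;
}

int main(void)
{
	unsigned long goal, limit;

	pick_goal_and_limit(3, 4096, &goal, &limit);
	printf("small request: goal=%#lx limit=%#lx\n", goal, limit);

	pick_goal_and_limit(3, 1UL << 28, &goal, &limit);	/* 256 MiB > one section */
	printf("large request: goal=%#lx limit=%#lx\n", goal, limit);
	return 0;
}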
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index fc898cb4fe8f..77ad30613a3d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -127,9 +127,6 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
struct mem_section *ms;
struct page *page, *memmap;
- if (!pfn_valid(start_pfn))
- return;
-
section_nr = pfn_to_section_nr(start_pfn);
ms = __nr_to_section(section_nr);
@@ -188,9 +185,16 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
end_pfn = pfn + pgdat->node_spanned_pages;
/* register_section info */
- for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
- register_page_bootmem_info_section(pfn);
-
+ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ /*
+ * Some platforms can assign the same pfn to multiple nodes - on
+ * node0 as well as nodeN. To avoid registering a pfn against
+ * multiple nodes we check that this pfn does not already
+ * reside in some other node.
+ */
+ if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
+ register_page_bootmem_info_section(pfn);
+ }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
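The memory_hotplug change moves the pfn_valid() test out to the caller and additionally checks pfn_to_nid(), so a section whose pfns are spanned by more than one node is only registered against the node it really belongs to. The sketch below shows that filtering in isolation; the tiny memory map, the pfn_valid()/pfn_to_nid() stubs and the register_node_sections() helper are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define PAGES_PER_SECTION 32768UL	/* assumption, matches the sketch above */

/* Hypothetical map: sections 0-3 belong to node 0, sections 4-7 to node 1,
 * everything beyond that is invalid. */
static bool pfn_valid(unsigned long pfn)
{
	return pfn < 8 * PAGES_PER_SECTION;
}

static int pfn_to_nid(unsigned long pfn)
{
	return pfn < 4 * PAGES_PER_SECTION ? 0 : 1;
}

static void register_section(unsigned long pfn, int node)
{
	printf("node %d: registering section starting at pfn %#lx\n", node, pfn);
}

/*
 * Mirrors the patched loop: walk the node's spanned pfns one section at a
 * time, but skip sections that are invalid or that actually live on another
 * node, so overlapping node spans never register the same section twice.
 */
static void register_node_sections(int node, unsigned long start_pfn,
				   unsigned long spanned_pages)
{
	unsigned long pfn, end_pfn = start_pfn + spanned_pages;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (pfn_valid(pfn) && pfn_to_nid(pfn) == node)
			register_section(pfn, node);
	}
}

int main(void)
{
	/* Node 0's span overlaps node 1's first section in this made-up map;
	 * the pfn_to_nid() filter keeps node 0 from claiming it. */
	register_node_sections(0, 0, 5 * PAGES_PER_SECTION);
	register_node_sections(1, 4 * PAGES_PER_SECTION, 4 * PAGES_PER_SECTION);
	return 0;
}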
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e74413663382..f74132bbcb6d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -581,7 +581,7 @@ static inline void __free_one_page(struct page *page,
combined_idx = buddy_idx & page_idx;
higher_page = page + (combined_idx - page_idx);
buddy_idx = __find_buddy_index(combined_idx, order + 1);
- higher_buddy = page + (buddy_idx - combined_idx);
+ higher_buddy = higher_page + (buddy_idx - combined_idx);
if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
list_add_tail(&page->lru,
&zone->free_area[order].free_list[migratetype]);
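The page_alloc fix computes higher_buddy relative to higher_page, the start of the just-merged block, instead of relative to page. The sketch below redoes the index arithmetic with plain numbers rather than struct page pointers; the starting index and order are arbitrary, and find_buddy_index() mirrors the XOR relation behind __find_buddy_index().

#include <stdio.h>

/* Buddy relation: the buddy of the block starting at page_idx, at a given
 * order, differs from it only in bit 'order'. */
static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	unsigned int order = 2;		/* freeing an order-2 block */
	unsigned long page_idx = 4;	/* block starts at index 4 (arbitrary) */

	/* Merge step: the order-2 buddy of index 4 is index 0, so the
	 * combined order-3 block starts at index 0. */
	unsigned long buddy_idx    = find_buddy_index(page_idx, order);	/* 0 */
	unsigned long combined_idx = buddy_idx & page_idx;		/* 0 */

	/* The next-order buddy of the *combined* block is index 8.  Offsetting
	 * from page_idx, as the old code did, lands at index 12, four pages
	 * past the block that page_is_buddy() was meant to examine. */
	unsigned long higher_buddy_idx = find_buddy_index(combined_idx, order + 1);

	printf("combined=%lu higher_buddy=%lu (buggy offset would give %lu)\n",
	       combined_idx, higher_buddy_idx,
	       page_idx + (higher_buddy_idx - combined_idx));
	return 0;
}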
diff --git a/mm/vmscan.c b/mm/vmscan.c
index be5bc0af2e76..e989ee22f100 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1983,10 +1983,10 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
* proportional to the fraction of recently scanned pages on
* each list that were recently referenced and in active use.
*/
- ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+ ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
ap /= reclaim_stat->recent_rotated[0] + 1;
- fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+ fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
fp /= reclaim_stat->recent_rotated[1] + 1;
spin_unlock_irq(&mz->zone->lru_lock);
@@ -1999,7 +1999,7 @@ out:
unsigned long scan;
scan = zone_nr_lru_pages(mz, lru);
- if (priority || noswap) {
+ if (priority || noswap || !vmscan_swappiness(mz, sc)) {
scan >>= priority;
if (!scan && force_scan)
scan = SWAP_CLUSTER_MAX;
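The two vmscan hunks change what swappiness==0 means. In this kernel anon_prio is the effective swappiness and file_prio is 200 minus it, so dropping the "+ 1" bias makes ap exactly zero when swappiness is 0, and the added !vmscan_swappiness() test makes the resulting zero anon fraction get applied even at priority 0. A worked example of the proportion arithmetic, with made-up recent_scanned/recent_rotated counts standing in for the per-zone statistics:

#include <stdio.h>

int main(void)
{
	unsigned long swappiness   = 0;			/* vm.swappiness = 0 */
	unsigned long anon_prio    = swappiness;	/* as in get_scan_count() */
	unsigned long file_prio    = 200 - swappiness;

	/* Illustrative reclaim statistics, not real numbers. */
	unsigned long anon_scanned = 1000, anon_rotated = 100;
	unsigned long file_scanned = 1000, file_rotated = 100;

	/* Old formula: the "+ 1" kept ap above zero even with swappiness 0,
	 * so a small share of anon pages could still be scanned and swapped. */
	unsigned long ap_old = (anon_prio + 1) * (anon_scanned + 1) / (anon_rotated + 1);
	unsigned long fp_old = (file_prio + 1) * (file_scanned + 1) / (file_rotated + 1);

	/* New formula: swappiness 0 makes ap exactly 0, so the anon share of
	 * the scan target is 0 and nothing is reclaimed through swap. */
	unsigned long ap_new = anon_prio * (anon_scanned + 1) / (anon_rotated + 1);
	unsigned long fp_new = file_prio * (file_scanned + 1) / (file_rotated + 1);

	printf("old: ap=%lu fp=%lu -> anon share %.3f%%\n",
	       ap_old, fp_old, 100.0 * ap_old / (ap_old + fp_old));
	printf("new: ap=%lu fp=%lu -> anon share %.3f%%\n",
	       ap_new, fp_new, 100.0 * ap_new / (ap_new + fp_new));
	return 0;
}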