author	Avi Kivity <avi@qumranet.com>	2008-03-31 14:20:10 +0300
committer	Avi Kivity <avi@qumranet.com>	2008-03-31 14:20:10 +0300
commit	53e06a6d157bb29819e0789628ff6954a935a559 (patch)
tree	189b3454475a9d7bfa843ad683a73d93ece8ec5c /mm
parent	2ac530fcd40ad730d4cc7961768e412eaf9b7caa (diff)
parent	a9edadbf790d72adf6ebed476cb5caf7743e7e4a (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	17
-rw-r--r--	mm/slab.c	4
-rw-r--r--	mm/slub.c	5
-rw-r--r--	mm/sparse-vmemmap.c	8
4 files changed, 25 insertions, 9 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 74c1b6b0b37b..51c9e2c01640 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -401,12 +401,20 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
struct page *page;
unsigned long nr_pages;
+ /*
+ * We want to release as many surplus pages as possible, spread
+ * evenly across all nodes. Iterate across all nodes until we
+ * can no longer free unreserved surplus pages. This occurs when
+ * the nodes with surplus pages have no free pages.
+ */
+ unsigned long remaining_iterations = num_online_nodes();
+
/* Uncommit the reservation */
resv_huge_pages -= unused_resv_pages;
nr_pages = min(unused_resv_pages, surplus_huge_pages);
- while (nr_pages) {
+ while (remaining_iterations-- && nr_pages) {
nid = next_node(nid, node_online_map);
if (nid == MAX_NUMNODES)
nid = first_node(node_online_map);
@@ -424,6 +432,7 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
surplus_huge_pages--;
surplus_huge_pages_node[nid]--;
nr_pages--;
+ remaining_iterations = num_online_nodes();
}
}
}
@@ -671,9 +680,11 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
{
return sprintf(buf,
"Node %d HugePages_Total: %5u\n"
- "Node %d HugePages_Free: %5u\n",
+ "Node %d HugePages_Free: %5u\n"
+ "Node %d HugePages_Surp: %5u\n",
nid, nr_huge_pages_node[nid],
- nid, free_huge_pages_node[nid]);
+ nid, free_huge_pages_node[nid],
+ nid, surplus_huge_pages_node[nid]);
}
/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
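Note: the return_unused_surplus_pages() change above bounds the release loop. It round-robins over the online nodes and gives up after one full pass in which no node could free a page, resetting the budget whenever progress is made. A minimal user-space sketch of that bounded round-robin pattern (NR_NODES, surplus_on_node and free_one_from() are invented for illustration, not kernel APIs):

#include <stdbool.h>

#define NR_NODES 4

static unsigned long surplus_on_node[NR_NODES] = { 3, 0, 2, 1 };

/* Try to release one surplus page from this node; true on success. */
static bool free_one_from(int nid)
{
	if (!surplus_on_node[nid])
		return false;
	surplus_on_node[nid]--;
	return true;
}

static void release_surplus(unsigned long nr_pages)
{
	unsigned long remaining = NR_NODES;	/* passes left without progress */
	int nid = 0;

	while (remaining-- && nr_pages) {
		nid = (nid + 1) % NR_NODES;	/* round-robin over the nodes */
		if (free_one_from(nid)) {
			nr_pages--;
			remaining = NR_NODES;	/* progress: reset the budget */
		}
	}
}

int main(void)
{
	release_surplus(5);
	return 0;
}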
diff --git a/mm/slab.c b/mm/slab.c
index bb4070e1079f..04b308c3bc54 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1481,7 +1481,7 @@ void __init kmem_cache_init(void)
list_add(&cache_cache.next, &cache_chain);
cache_cache.colour_off = cache_line_size();
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
- cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
+ cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
/*
* struct kmem_cache size depends on nr_node_ids, which
@@ -1602,7 +1602,7 @@ void __init kmem_cache_init(void)
int nid;
for_each_online_node(nid) {
- init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], nid);
+ init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
init_list(malloc_sizes[INDEX_AC].cs_cachep,
&initkmem_list3[SIZE_AC + nid], nid);
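Note: the slab.c hunks fix early bootstrap indexing so each online node gets its own initkmem_list3 slot for cache_cache (CACHE_CACHE + node) instead of every node pointing at slot CACHE_CACHE. A rough sketch of that flat, per-node slot layout (struct names and sizes here are made up, not the kernel's constants):

#include <stdio.h>

#define NR_NODES    2
#define CACHE_CACHE 0			/* slots 0..NR_NODES-1: the cache of caches */
#define SIZE_AC     (1 * NR_NODES)	/* next NR_NODES slots: early array-cache cache */

struct list3_stub { int node; };

/* One flat bootstrap array with NR_NODES entries reserved per early cache. */
static struct list3_stub initkmem_list3[3 * NR_NODES];

/* A cache's per-node slot is base + node, which is what the fix indexes with. */
static struct list3_stub *bootstrap_slot(int base, int node)
{
	return &initkmem_list3[base + node];
}

int main(void)
{
	for (int nid = 0; nid < NR_NODES; nid++)
		printf("CACHE_CACHE slot for node %d: index %ld\n",
		       nid, (long)(bootstrap_slot(CACHE_CACHE, nid) - initkmem_list3));
	return 0;
}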
diff --git a/mm/slub.c b/mm/slub.c
index ca71d5b81e4a..84ed734b96b3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1470,6 +1470,9 @@ static void *__slab_alloc(struct kmem_cache *s,
void **object;
struct page *new;
+ /* We handle __GFP_ZERO in the caller */
+ gfpflags &= ~__GFP_ZERO;
+
if (!c->page)
goto new_slab;
@@ -2685,6 +2688,7 @@ void kfree(const void *x)
}
EXPORT_SYMBOL(kfree);
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SLABINFO)
static unsigned long count_partial(struct kmem_cache_node *n)
{
unsigned long flags;
@@ -2697,6 +2701,7 @@ static unsigned long count_partial(struct kmem_cache_node *n)
spin_unlock_irqrestore(&n->list_lock, flags);
return x;
}
+#endif
/*
* kmem_cache_shrink removes empty slabs from the partial lists and sorts
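Note: the first slub.c hunk masks __GFP_ZERO out of the flags used by the slow allocation path, because the caller zeroes the object itself; the second hunk compiles count_partial() only when something actually uses it. A toy sketch of that caller/slow-path split for the zeroing flag (the flag value and function names are invented):

#include <stdlib.h>
#include <string.h>

#define GFP_ZERO 0x1u	/* invented stand-in for __GFP_ZERO */

/* Slow path: strips the zeroing flag so it is not forwarded further down. */
static void *slow_alloc(unsigned int flags, size_t size)
{
	flags &= ~GFP_ZERO;	/* zeroing is the caller's job */
	(void)flags;
	return malloc(size);
}

/* Caller: zeroes the object itself when asked to. */
static void *alloc(unsigned int flags, size_t size)
{
	void *obj = slow_alloc(flags, size);

	if (obj && (flags & GFP_ZERO))
		memset(obj, 0, size);
	return obj;
}

int main(void)
{
	void *p = alloc(GFP_ZERO, 64);

	free(p);
	return 0;
}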
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index cd75b21dd4c3..99c4f36eb8a3 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -76,7 +76,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
pte_t entry;
void *p = vmemmap_alloc_block(PAGE_SIZE, node);
if (!p)
- return 0;
+ return NULL;
entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
set_pte_at(&init_mm, addr, pte, entry);
}
@@ -89,7 +89,7 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
if (pmd_none(*pmd)) {
void *p = vmemmap_alloc_block(PAGE_SIZE, node);
if (!p)
- return 0;
+ return NULL;
pmd_populate_kernel(&init_mm, pmd, p);
}
return pmd;
@@ -101,7 +101,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
if (pud_none(*pud)) {
void *p = vmemmap_alloc_block(PAGE_SIZE, node);
if (!p)
- return 0;
+ return NULL;
pud_populate(&init_mm, pud, p);
}
return pud;
@@ -113,7 +113,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
if (pgd_none(*pgd)) {
void *p = vmemmap_alloc_block(PAGE_SIZE, node);
if (!p)
- return 0;
+ return NULL;
pgd_populate(&init_mm, pgd, p);
}
return pgd;
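Note: the sparse-vmemmap hunks are purely about spelling the failure value correctly. The populate helpers return pointers, so a failed allocation should return NULL rather than the bare integer 0, which is a pointer only by conversion and trips static checkers such as sparse. A small standalone example of the preferred style:

#include <stddef.h>
#include <stdlib.h>

/* Returning 0 from a pointer-valued function compiles, but NULL states intent
 * and avoids "plain integer used as NULL pointer" style warnings. */
static int *alloc_int(void)
{
	int *p = malloc(sizeof(*p));

	if (!p)
		return NULL;	/* preferred over: return 0; */
	*p = 0;
	return p;
}

int main(void)
{
	free(alloc_int());
	return 0;
}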