author	Nick Piggin <npiggin@suse.de>	2009-04-30 15:05:42 +0200
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2009-05-01 15:48:05 +1000
commit	092d0f6044bd38376a6ed8b042f1d2fbc53875d7
tree	36cb4bddfcb7b0c9655e372872f18a9ef4af3e7c
parent	d084f98d7efc00c7d01aaeb365b9570a0ae90889
SLQB: fix dumb early allocation cache
The dumb early allocation cache had a bug where it could allow
allocation to go past the end of a page, which could cause crashes
or random memory corruption. Fix this and simplify the logic.

Tested-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Nick Piggin <npiggin@suse.de>
-rw-r--r--	mm/slqb.c	19
1 file changed, 11 insertions(+), 8 deletions(-)
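To make the overflow concrete before reading the diff: the old logic computed the next bump pointer first and only then checked whether it had crossed into a new page, so the chunk it had just handed out could already extend past the end of the backing page. Below is a minimal userspace sketch of that pre-patch logic; early_alloc_buggy(), the aligned_alloc() backing, and the 4096-byte page size are illustrative stand-ins, not kernel code.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static void *nextmem;

/* The pre-patch logic, reduced to its bump-allocation core. */
static void *early_alloc_buggy(size_t size)
{
	void *ret;

	if (!nextmem) {
		/* stand-in for alloc_pages_exact(size, GFP_KERNEL) */
		nextmem = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!nextmem)
			return NULL;
	}

	ret = nextmem;
	nextmem = (void *)((unsigned long)ret + size);
	/*
	 * The bug: this page-crossing check fires only after ret has
	 * been carved out, so [ret, ret + size) may already straddle
	 * the end of the page.
	 */
	if ((unsigned long)ret >> PAGE_SHIFT !=
	    (unsigned long)nextmem >> PAGE_SHIFT)
		nextmem = NULL;
	return ret;
}

int main(void)
{
	void *a = early_alloc_buggy(4000);	/* leaves 96 bytes in the page */
	void *b = early_alloc_buggy(200);	/* granted anyway */

	if (!a || !b)
		return 1;
	/* a is page-aligned, so its page ends at a + PAGE_SIZE */
	printf("second chunk ends %lu bytes past its page\n",
	       (unsigned long)b + 200 - ((unsigned long)a + PAGE_SIZE));
	return 0;
}

Compiled and run, this reports the second chunk ending 104 bytes past its page; any caller writing through that pointer would scribble on whatever follows, which is the crashes-or-random-corruption failure mode the commit message describes.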
diff --git a/mm/slqb.c b/mm/slqb.c
index a6518433bdda..2252070fac66 100644
--- a/mm/slqb.c
+++ b/mm/slqb.c
@@ -2185,8 +2185,11 @@ static void *kmem_cache_dyn_array_alloc(int ids)
 {
 	size_t size = sizeof(void *) * ids;
 
+	BUG_ON(!size);
+
 	if (unlikely(!slab_is_available())) {
 		static void *nextmem;
+		static size_t nextleft;
 		void *ret;
 
 		/*
@@ -2194,16 +2197,16 @@ static void *kmem_cache_dyn_array_alloc(int ids)
 		 * never get freed by definition so we can do it rather
 		 * simply.
 		 */
-		if (!nextmem) {
-			nextmem = alloc_pages_exact(size, GFP_KERNEL);
-			if (!nextmem)
-				return NULL;
+		if (size > nextleft) {
+			nextmem = alloc_pages_exact(size, GFP_KERNEL);
+			if (!nextmem)
+				return NULL;
+			nextleft = roundup(size, PAGE_SIZE);
 		}
+
 		ret = nextmem;
-		nextmem = (void *)((unsigned long)ret + size);
-		if ((unsigned long)ret >> PAGE_SHIFT !=
-		    (unsigned long)nextmem >> PAGE_SHIFT)
-			nextmem = NULL;
+		nextleft -= size;
+		nextmem += size;
 		memset(ret, 0, size);
 		return ret;
 	} else {
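Since the hunk is shown without its full surrounding context, the patched early path can also be read as a plain bump allocator with explicit byte accounting. A self-contained userspace rendering follows, again with aligned_alloc() and a local roundup() macro standing in for alloc_pages_exact() and the kernel helper.

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

static char *nextmem;	/* char * so the += below is portable C */
static size_t nextleft;	/* bytes still usable at nextmem */

static void *early_alloc_fixed(size_t size)
{
	void *ret;

	if (size > nextleft) {
		/* stand-in for alloc_pages_exact(size, GFP_KERNEL) */
		nextmem = aligned_alloc(PAGE_SIZE, roundup(size, PAGE_SIZE));
		if (!nextmem)
			return NULL;
		nextleft = roundup(size, PAGE_SIZE);
	}

	/* Carve from the front and account for every byte handed out. */
	ret = nextmem;
	nextleft -= size;
	nextmem += size;
	memset(ret, 0, size);
	return ret;
}

A request is now served from the cache only when it provably fits in the remaining bytes, which is the whole fix; the BUG_ON(!size) added at the top of the function presumably guards the degenerate zero-size request, which this simplified accounting would otherwise answer with the current, unconsumed bump pointer.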