From bcd78e49613c41b5bed96fa288e983876f286a59 Mon Sep 17 00:00:00 2001
From: Hugh Dickins
Date: Wed, 23 Jul 2008 21:27:35 -0700
Subject: tmpfs: support aio

We have a request for tmpfs to support the AIO interface: easily done, no
more than replacing the old shmem_file_read by shmem_file_aio_read, cribbed
from generic_file_aio_read.  (In 2.6.25 its write side was already changed
to use generic_file_aio_write.)

Incorporate cleanups from Andrew Morton and Harvey Harrison.

Tests out fine with LTP's ltp-aiodio.sh, given hacks (not included) to
support O_DIRECT.  tmpfs cannot honestly support O_DIRECT: its
cache-avoiding-IO nature is at odds with direct IO-avoiding-cache.

Signed-off-by: Hugh Dickins
Tested-by: Lawrence Greenfield
Cc: Christoph Rohland
Cc: Badari Pulavarty
Cc: Zach Brown
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/shmem.c | 55 ++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 34 insertions(+), 21 deletions(-)

(limited to 'mm/shmem.c')

diff --git a/mm/shmem.c b/mm/shmem.c
index e2a6ae1a44e9..9ffbea9b79e1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1690,26 +1690,38 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 	file_accessed(filp);
 }
 
-static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
-{
-	read_descriptor_t desc;
-
-	if ((ssize_t) count < 0)
-		return -EINVAL;
-	if (!access_ok(VERIFY_WRITE, buf, count))
-		return -EFAULT;
-	if (!count)
-		return 0;
-
-	desc.written = 0;
-	desc.count = count;
-	desc.arg.buf = buf;
-	desc.error = 0;
-
-	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
-	if (desc.written)
-		return desc.written;
-	return desc.error;
+static ssize_t shmem_file_aio_read(struct kiocb *iocb,
+		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
+{
+	struct file *filp = iocb->ki_filp;
+	ssize_t retval;
+	unsigned long seg;
+	size_t count;
+	loff_t *ppos = &iocb->ki_pos;
+
+	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+	if (retval)
+		return retval;
+
+	for (seg = 0; seg < nr_segs; seg++) {
+		read_descriptor_t desc;
+
+		desc.written = 0;
+		desc.arg.buf = iov[seg].iov_base;
+		desc.count = iov[seg].iov_len;
+		if (desc.count == 0)
+			continue;
+		desc.error = 0;
+		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
+		retval += desc.written;
+		if (desc.error) {
+			retval = retval ?: desc.error;
+			break;
+		}
+		if (desc.count > 0)
+			break;
+	}
+	return retval;
 }
 
 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -2369,8 +2381,9 @@ static const struct file_operations shmem_file_operations = {
 	.mmap		= shmem_mmap,
 #ifdef CONFIG_TMPFS
 	.llseek		= generic_file_llseek,
-	.read		= shmem_file_read,
+	.read		= do_sync_read,
 	.write		= do_sync_write,
+	.aio_read	= shmem_file_aio_read,
 	.aio_write	= generic_file_aio_write,
 	.fsync		= simple_sync_file,
 	.splice_read	= generic_file_splice_read,
-- cgit v1.2.3


From 69029cd550284e32de13d6dd2f77b723c8a0e444 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Fri, 25 Jul 2008 01:47:14 -0700
Subject: memcg: remove refcnt from page_cgroup

memcg: performance improvements

Patch Description
 1/5 ... remove refcnt from page_cgroup patch (shmem handling is fixed)
 2/5 ... swapcache handling patch
 3/5 ... add helper function for shmem's memory reclaim patch
 4/5 ... optimize by likely/unlikely patch
 5/5 ... remove redundant check patch (shmem handling is fixed.)

Unix bench result.
== 2.6.26-rc2-mm1 + memory resource controller
Execl Throughput                           2915.4 lps   (29.6 secs, 3 samples)
C Compiler Throughput                      1019.3 lpm   (60.0 secs, 3 samples)
Shell Scripts (1 concurrent)               5796.0 lpm   (60.0 secs, 3 samples)
Shell Scripts (8 concurrent)               1097.7 lpm   (60.0 secs, 3 samples)
Shell Scripts (16 concurrent)               565.3 lpm   (60.0 secs, 3 samples)
File Read 1024 bufsize 2000 maxblocks   1022128.0 KBps  (30.0 secs, 3 samples)
File Write 1024 bufsize 2000 maxblocks   544057.0 KBps  (30.0 secs, 3 samples)
File Copy 1024 bufsize 2000 maxblocks    346481.0 KBps  (30.0 secs, 3 samples)
File Read 256 bufsize 500 maxblocks      319325.0 KBps  (30.0 secs, 3 samples)
File Write 256 bufsize 500 maxblocks     148788.0 KBps  (30.0 secs, 3 samples)
File Copy 256 bufsize 500 maxblocks       99051.0 KBps  (30.0 secs, 3 samples)
File Read 4096 bufsize 8000 maxblocks   2058917.0 KBps  (30.0 secs, 3 samples)
File Write 4096 bufsize 8000 maxblocks  1606109.0 KBps  (30.0 secs, 3 samples)
File Copy 4096 bufsize 8000 maxblocks    854789.0 KBps  (30.0 secs, 3 samples)
Dc: sqrt(2) to 99 decimal places         126145.2 lpm   (30.0 secs, 3 samples)

                     INDEX VALUES
TEST                                    BASELINE     RESULT      INDEX
Execl Throughput                            43.0     2915.4      678.0
File Copy 1024 bufsize 2000 maxblocks     3960.0   346481.0      875.0
File Copy 256 bufsize 500 maxblocks       1655.0    99051.0      598.5
File Copy 4096 bufsize 8000 maxblocks     5800.0   854789.0     1473.8
Shell Scripts (8 concurrent)                 6.0     1097.7     1829.5
                                                             =========
     FINAL SCORE                                                 991.3

== 2.6.26-rc2-mm1 + this set ==
Execl Throughput                           3012.9 lps   (29.9 secs, 3 samples)
C Compiler Throughput                       981.0 lpm   (60.0 secs, 3 samples)
Shell Scripts (1 concurrent)               5872.0 lpm   (60.0 secs, 3 samples)
Shell Scripts (8 concurrent)               1120.3 lpm   (60.0 secs, 3 samples)
Shell Scripts (16 concurrent)               578.0 lpm   (60.0 secs, 3 samples)
File Read 1024 bufsize 2000 maxblocks   1003993.0 KBps  (30.0 secs, 3 samples)
File Write 1024 bufsize 2000 maxblocks   550452.0 KBps  (30.0 secs, 3 samples)
File Copy 1024 bufsize 2000 maxblocks    347159.0 KBps  (30.0 secs, 3 samples)
File Read 256 bufsize 500 maxblocks      314644.0 KBps  (30.0 secs, 3 samples)
File Write 256 bufsize 500 maxblocks     151852.0 KBps  (30.0 secs, 3 samples)
File Copy 256 bufsize 500 maxblocks      101000.0 KBps  (30.0 secs, 3 samples)
File Read 4096 bufsize 8000 maxblocks   2033256.0 KBps  (30.0 secs, 3 samples)
File Write 4096 bufsize 8000 maxblocks  1611814.0 KBps  (30.0 secs, 3 samples)
File Copy 4096 bufsize 8000 maxblocks    847979.0 KBps  (30.0 secs, 3 samples)
Dc: sqrt(2) to 99 decimal places         128148.7 lpm   (30.0 secs, 3 samples)

                     INDEX VALUES
TEST                                    BASELINE     RESULT      INDEX
Execl Throughput                            43.0     3012.9      700.7
File Copy 1024 bufsize 2000 maxblocks     3960.0   347159.0      876.7
File Copy 256 bufsize 500 maxblocks       1655.0   101000.0      610.3
File Copy 4096 bufsize 8000 maxblocks     5800.0   847979.0     1462.0
Shell Scripts (8 concurrent)                 6.0     1120.3     1867.2
                                                             =========
     FINAL SCORE                                                1004.6

This patch:

Remove refcnt from page_cgroup().

After this,
 * A page is charged only when !page_mapped() && no page_cgroup is assigned.
	* Anon page is newly mapped.
	* File page is added to mapping->tree.
 * A page is uncharged only when
	* Anon page is fully unmapped.
	* File page is removed from LRU.

There is no change in behavior from the user's view.

This patch also removes unnecessary calls in rmap.c which were used only for
refcnt management.

[akpm@linux-foundation.org: fix warning]
[hugh@veritas.com: fix shmem_unuse_inode charging]
Signed-off-by: KAMEZAWA Hiroyuki
Cc: Balbir Singh
Cc: "Eric W.
Biederman" Cc: Pavel Emelyanov Cc: Li Zefan Cc: Hugh Dickins Cc: YAMAMOTO Takashi Cc: Paul Menage Cc: David Rientjes Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 10 ++--- mm/filemap.c | 6 +-- mm/memcontrol.c | 109 ++++++++++++++++++++++++++------------------- mm/migrate.c | 3 +- mm/rmap.c | 14 +----- mm/shmem.c | 35 ++++++++++----- 6 files changed, 97 insertions(+), 80 deletions(-) (limited to 'mm/shmem.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 84ead2aa6f18..b4980b8f048e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -35,6 +35,7 @@ extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm, extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); extern void mem_cgroup_uncharge_page(struct page *page); +extern void mem_cgroup_uncharge_cache_page(struct page *page); extern void mem_cgroup_move_lists(struct page *page, bool active); extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, @@ -53,7 +54,6 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); extern int mem_cgroup_prepare_migration(struct page *page, struct page *newpage); extern void mem_cgroup_end_migration(struct page *page); -extern int mem_cgroup_getref(struct page *page); /* * For memory reclaim. @@ -98,6 +98,10 @@ static inline void mem_cgroup_uncharge_page(struct page *page) { } +static inline void mem_cgroup_uncharge_cache_page(struct page *page) +{ +} + static inline void mem_cgroup_move_lists(struct page *page, bool active) { } @@ -123,10 +127,6 @@ static inline void mem_cgroup_end_migration(struct page *page) { } -static inline void mem_cgroup_getref(struct page *page) -{ -} - static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem) { return 0; diff --git a/mm/filemap.c b/mm/filemap.c index 5d4c880d7cd9..2d3ec1ffc66e 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -115,7 +115,7 @@ void __remove_from_page_cache(struct page *page) { struct address_space *mapping = page->mapping; - mem_cgroup_uncharge_page(page); + mem_cgroup_uncharge_cache_page(page); radix_tree_delete(&mapping->page_tree, page->index); page->mapping = NULL; mapping->nrpages--; @@ -474,12 +474,12 @@ int add_to_page_cache(struct page *page, struct address_space *mapping, mapping->nrpages++; __inc_zone_page_state(page, NR_FILE_PAGES); } else - mem_cgroup_uncharge_page(page); + mem_cgroup_uncharge_cache_page(page); write_unlock_irq(&mapping->tree_lock); radix_tree_preload_end(); } else - mem_cgroup_uncharge_page(page); + mem_cgroup_uncharge_cache_page(page); out: return error; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index da5912b84551..a61706193c31 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -166,7 +166,6 @@ struct page_cgroup { struct list_head lru; /* per cgroup LRU list */ struct page *page; struct mem_cgroup *mem_cgroup; - int ref_cnt; /* cached, mapped, migrating */ int flags; }; #define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */ @@ -185,6 +184,7 @@ static enum zone_type page_cgroup_zid(struct page_cgroup *pc) enum charge_type { MEM_CGROUP_CHARGE_TYPE_CACHE = 0, MEM_CGROUP_CHARGE_TYPE_MAPPED, + MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */ }; /* @@ -552,9 +552,7 @@ retry: */ if (pc) { VM_BUG_ON(pc->page != page); - VM_BUG_ON(pc->ref_cnt <= 0); - - pc->ref_cnt++; + VM_BUG_ON(!pc->mem_cgroup); unlock_page_cgroup(page); goto done; } @@ -570,10 +568,7 @@ retry: 
* thread group leader migrates. It's possible that mm is not * set, if so charge the init_mm (happens for pagecache usage). */ - if (!memcg) { - if (!mm) - mm = &init_mm; - + if (likely(!memcg)) { rcu_read_lock(); mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); /* @@ -609,7 +604,6 @@ retry: } } - pc->ref_cnt = 1; pc->mem_cgroup = mem; pc->page = page; /* @@ -653,6 +647,17 @@ err: int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { + /* + * If already mapped, we don't have to account. + * If page cache, page->mapping has address_space. + * But page->mapping may have out-of-use anon_vma pointer, + * detecit it by PageAnon() check. newly-mapped-anon's page->mapping + * is NULL. + */ + if (page_mapped(page) || (page->mapping && !PageAnon(page))) + return 0; + if (unlikely(!mm)) + mm = &init_mm; return mem_cgroup_charge_common(page, mm, gfp_mask, MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL); } @@ -660,32 +665,17 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { - if (!mm) + if (unlikely(!mm)) mm = &init_mm; return mem_cgroup_charge_common(page, mm, gfp_mask, MEM_CGROUP_CHARGE_TYPE_CACHE, NULL); } -int mem_cgroup_getref(struct page *page) -{ - struct page_cgroup *pc; - - if (mem_cgroup_subsys.disabled) - return 0; - - lock_page_cgroup(page); - pc = page_get_page_cgroup(page); - VM_BUG_ON(!pc); - pc->ref_cnt++; - unlock_page_cgroup(page); - return 0; -} - /* - * Uncharging is always a welcome operation, we never complain, simply - * uncharge. + * uncharge if !page_mapped(page) */ -void mem_cgroup_uncharge_page(struct page *page) +static void +__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) { struct page_cgroup *pc; struct mem_cgroup *mem; @@ -704,29 +694,41 @@ void mem_cgroup_uncharge_page(struct page *page) goto unlock; VM_BUG_ON(pc->page != page); - VM_BUG_ON(pc->ref_cnt <= 0); - if (--(pc->ref_cnt) == 0) { - mz = page_cgroup_zoneinfo(pc); - spin_lock_irqsave(&mz->lru_lock, flags); - __mem_cgroup_remove_list(mz, pc); - spin_unlock_irqrestore(&mz->lru_lock, flags); + if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED) + && ((pc->flags & PAGE_CGROUP_FLAG_CACHE) + || page_mapped(page))) + goto unlock; - page_assign_page_cgroup(page, NULL); - unlock_page_cgroup(page); + mz = page_cgroup_zoneinfo(pc); + spin_lock_irqsave(&mz->lru_lock, flags); + __mem_cgroup_remove_list(mz, pc); + spin_unlock_irqrestore(&mz->lru_lock, flags); - mem = pc->mem_cgroup; - res_counter_uncharge(&mem->res, PAGE_SIZE); - css_put(&mem->css); + page_assign_page_cgroup(page, NULL); + unlock_page_cgroup(page); - kmem_cache_free(page_cgroup_cache, pc); - return; - } + mem = pc->mem_cgroup; + res_counter_uncharge(&mem->res, PAGE_SIZE); + css_put(&mem->css); + kmem_cache_free(page_cgroup_cache, pc); + return; unlock: unlock_page_cgroup(page); } +void mem_cgroup_uncharge_page(struct page *page) +{ + __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED); +} + +void mem_cgroup_uncharge_cache_page(struct page *page) +{ + VM_BUG_ON(page_mapped(page)); + __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE); +} + /* * Before starting migration, account against new page. 
*/ @@ -757,15 +759,29 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage) return ret; } -/* remove redundant charge */ +/* remove redundant charge if migration failed*/ void mem_cgroup_end_migration(struct page *newpage) { - mem_cgroup_uncharge_page(newpage); + /* + * At success, page->mapping is not NULL. + * special rollback care is necessary when + * 1. at migration failure. (newpage->mapping is cleared in this case) + * 2. the newpage was moved but not remapped again because the task + * exits and the newpage is obsolete. In this case, the new page + * may be a swapcache. So, we just call mem_cgroup_uncharge_page() + * always for avoiding mess. The page_cgroup will be removed if + * unnecessary. File cache pages is still on radix-tree. Don't + * care it. + */ + if (!newpage->mapping) + __mem_cgroup_uncharge_common(newpage, + MEM_CGROUP_CHARGE_TYPE_FORCE); + else if (PageAnon(newpage)) + mem_cgroup_uncharge_page(newpage); } /* * This routine traverse page_cgroup in given list and drop them all. - * This routine ignores page_cgroup->ref_cnt. * *And* this routine doesn't reclaim page itself, just removes page_cgroup. */ #define FORCE_UNCHARGE_BATCH (128) @@ -795,7 +811,8 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem, * if it's under page migration. */ if (PageLRU(page)) { - mem_cgroup_uncharge_page(page); + __mem_cgroup_uncharge_common(page, + MEM_CGROUP_CHARGE_TYPE_FORCE); put_page(page); if (--count <= 0) { count = FORCE_UNCHARGE_BATCH; diff --git a/mm/migrate.c b/mm/migrate.c index f6d7f8efd1a8..d8c65a65c61d 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -359,8 +359,7 @@ static int migrate_page_move_mapping(struct address_space *mapping, write_unlock_irq(&mapping->tree_lock); if (!PageSwapCache(newpage)) { - mem_cgroup_uncharge_page(page); - mem_cgroup_getref(newpage); + mem_cgroup_uncharge_cache_page(page); } return 0; diff --git a/mm/rmap.c b/mm/rmap.c index bf0a5b7cfb8e..abbd29f7c43f 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -576,14 +576,8 @@ void page_add_anon_rmap(struct page *page, VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); if (atomic_inc_and_test(&page->_mapcount)) __page_set_anon_rmap(page, vma, address); - else { + else __page_check_anon_rmap(page, vma, address); - /* - * We unconditionally charged during prepare, we uncharge here - * This takes care of balancing the reference counts - */ - mem_cgroup_uncharge_page(page); - } } /** @@ -614,12 +608,6 @@ void page_add_file_rmap(struct page *page) { if (atomic_inc_and_test(&page->_mapcount)) __inc_zone_page_state(page, NR_FILE_MAPPED); - else - /* - * We unconditionally charged during prepare, we uncharge here - * This takes care of balancing the reference counts - */ - mem_cgroup_uncharge_page(page); } #ifdef CONFIG_DEBUG_VM diff --git a/mm/shmem.c b/mm/shmem.c index 9ffbea9b79e1..d58305e8a484 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -922,20 +922,26 @@ found: error = 1; if (!inode) goto out; - /* Precharge page while we can wait, compensate afterwards */ + /* Precharge page using GFP_KERNEL while we can wait */ error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); if (error) goto out; error = radix_tree_preload(GFP_KERNEL); - if (error) - goto uncharge; + if (error) { + mem_cgroup_uncharge_cache_page(page); + goto out; + } error = 1; spin_lock(&info->lock); ptr = shmem_swp_entry(info, idx, NULL); - if (ptr && ptr->val == entry.val) + if (ptr && ptr->val == entry.val) { error = add_to_page_cache(page, inode->i_mapping, idx, GFP_NOWAIT); + /* does 
mem_cgroup_uncharge_cache_page on error */
+	} else	/* we must compensate for our precharge above */
+		mem_cgroup_uncharge_cache_page(page);
+
 	if (error == -EEXIST) {
 		struct page *filepage = find_get_page(inode->i_mapping, idx);
 		error = 1;
@@ -961,8 +967,6 @@ found:
 	shmem_swp_unmap(ptr);
 	spin_unlock(&info->lock);
 	radix_tree_preload_end();
-uncharge:
-	mem_cgroup_uncharge_page(page);
 out:
 	unlock_page(page);
 	page_cache_release(page);
@@ -1319,7 +1323,7 @@ repeat:
 				page_cache_release(swappage);
 				goto failed;
 			}
-			mem_cgroup_uncharge_page(swappage);
+			mem_cgroup_uncharge_cache_page(swappage);
 		}
 		page_cache_release(swappage);
 		goto repeat;
@@ -1358,6 +1362,8 @@ repeat:
 	}
 
 	if (!filepage) {
+		int ret;
+
 		spin_unlock(&info->lock);
 		filepage = shmem_alloc_page(gfp, info, idx);
 		if (!filepage) {
@@ -1386,10 +1392,18 @@ repeat:
 				swap = *entry;
 				shmem_swp_unmap(entry);
 			}
-			if (error || swap.val || 0 != add_to_page_cache_lru(
-					filepage, mapping, idx, GFP_NOWAIT)) {
+			ret = error || swap.val;
+			if (ret)
+				mem_cgroup_uncharge_cache_page(filepage);
+			else
+				ret = add_to_page_cache_lru(filepage, mapping,
+						idx, GFP_NOWAIT);
+			/*
+			 * At add_to_page_cache_lru() failure, uncharge will
+			 * be done automatically.
+			 */
+			if (ret) {
 				spin_unlock(&info->lock);
-				mem_cgroup_uncharge_page(filepage);
 				page_cache_release(filepage);
 				shmem_unacct_blocks(info->flags, 1);
 				shmem_free_blocks(inode, 1);
@@ -1398,7 +1412,6 @@ repeat:
 					goto failed;
 				goto repeat;
 			}
-			mem_cgroup_uncharge_page(filepage);
 			info->flags |= SHMEM_PAGEIN;
 		}
-- cgit v1.2.3


From c9b0ed51483cc2fc42bb801b6675c4231b0e4634 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Fri, 25 Jul 2008 01:47:15 -0700
Subject: memcg: helper function for reclaim from shmem.

A new call, mem_cgroup_shrink_usage(), is added for shmem handling,
replacing the non-standard usage of mem_cgroup_charge/uncharge.  Currently,
shmem calls mem_cgroup_charge() just to reclaim some pages from a
mem_cgroup.  In general, shmem is used by some process group and not for a
global resource (like file caches), so it's reasonable to reclaim pages
from the mem_cgroup where shmem is mainly used.

[hugh@veritas.com: shmem_getpage release page sooner]
[hugh@veritas.com: mem_cgroup_shrink_usage css_put]
Signed-off-by: KAMEZAWA Hiroyuki
Cc: Balbir Singh
Cc: "Eric W.
Biederman" Cc: Pavel Emelyanov Cc: Li Zefan Cc: YAMAMOTO Takashi Cc: Paul Menage Cc: David Rientjes Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 7 +++++++ mm/memcontrol.c | 26 ++++++++++++++++++++++++++ mm/shmem.c | 11 ++++------- 3 files changed, 37 insertions(+), 7 deletions(-) (limited to 'mm/shmem.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b4980b8f048e..fdf3967e1397 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -37,6 +37,8 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, extern void mem_cgroup_uncharge_page(struct page *page); extern void mem_cgroup_uncharge_cache_page(struct page *page); extern void mem_cgroup_move_lists(struct page *page, bool active); +extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask); + extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, unsigned long *scanned, int order, @@ -102,6 +104,11 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page) { } +static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) +{ + return 0; +} + static inline void mem_cgroup_move_lists(struct page *page, bool active) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a61706193c31..f46b8615de6c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -780,6 +780,32 @@ void mem_cgroup_end_migration(struct page *newpage) mem_cgroup_uncharge_page(newpage); } +/* + * A call to try to shrink memory usage under specified resource controller. + * This is typically used for page reclaiming for shmem for reducing side + * effect of page allocation from shmem, which is used by some mem_cgroup. + */ +int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) +{ + struct mem_cgroup *mem; + int progress = 0; + int retry = MEM_CGROUP_RECLAIM_RETRIES; + + rcu_read_lock(); + mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); + css_get(&mem->css); + rcu_read_unlock(); + + do { + progress = try_to_free_mem_cgroup_pages(mem, gfp_mask); + } while (!progress && --retry); + + css_put(&mem->css); + if (!retry) + return -ENOMEM; + return 0; +} + /* * This routine traverse page_cgroup in given list and drop them all. * *And* this routine doesn't reclaim page itself, just removes page_cgroup. diff --git a/mm/shmem.c b/mm/shmem.c index d58305e8a484..f92fea94d037 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1315,17 +1315,14 @@ repeat: shmem_swp_unmap(entry); spin_unlock(&info->lock); unlock_page(swappage); + page_cache_release(swappage); if (error == -ENOMEM) { /* allow reclaim from this memory cgroup */ - error = mem_cgroup_cache_charge(swappage, - current->mm, gfp & ~__GFP_HIGHMEM); - if (error) { - page_cache_release(swappage); + error = mem_cgroup_shrink_usage(current->mm, + gfp); + if (error) goto failed; - } - mem_cgroup_uncharge_cache_page(swappage); } - page_cache_release(swappage); goto repeat; } } else if (sgp == SGP_READ && !filepage) { -- cgit v1.2.3 From e286781d5f2e9c846e012a39653a166e9d31777d Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 25 Jul 2008 19:45:30 -0700 Subject: mm: speculative page references If we can be sure that elevating the page_count on a pagecache page will pin it, we can speculatively run this operation, and subsequently check to see if we hit the right page rather than relying on holding a lock or otherwise pinning a reference to the page. 
This can be done if get_page/put_page behaves consistently throughout the whole tree (ie. if we "get" the page after it has been used for something else, we must be able to free it with a put_page). Actually, there is a period where the count behaves differently: when the page is free or if it is a constituent page of a compound page. We need an atomic_inc_not_zero operation to ensure we don't try to grab the page in either case. This patch introduces the core locking protocol to the pagecache (ie. adds page_cache_get_speculative, and tweaks some update-side code to make it work). Thanks to Hugh for pointing out an improvement to the algorithm setting page_count to zero when we have control of all references, in order to hold off speculative getters. [kamezawa.hiroyu@jp.fujitsu.com: fix migration_entry_wait()] [hugh@veritas.com: fix add_to_page_cache] [akpm@linux-foundation.org: repair a comment] Signed-off-by: Nick Piggin Cc: Jeff Garzik Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Hugh Dickins Cc: "Paul E. McKenney" Reviewed-by: Peter Zijlstra Signed-off-by: Daisuke Nishimura Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: KOSAKI Motohiro Signed-off-by: Hugh Dickins Acked-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/cassini.c | 12 ++++++ include/linux/pagemap.h | 111 +++++++++++++++++++++++++++++++++++++++++++++++- mm/filemap.c | 32 ++++++++------ mm/migrate.c | 20 ++++++++- mm/shmem.c | 6 +-- mm/swap_state.c | 17 +++++--- mm/vmscan.c | 74 +++++++++++++++++++++++--------- 7 files changed, 227 insertions(+), 45 deletions(-) (limited to 'mm/shmem.c') diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 83768df27806..f1936d51b458 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -576,6 +576,18 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags) list_for_each_safe(elem, tmp, &list) { cas_page_t *page = list_entry(elem, cas_page_t, list); + /* + * With the lockless pagecache, cassini buffering scheme gets + * slightly less accurate: we might find that a page has an + * elevated reference count here, due to a speculative ref, + * and skip it as in-use. Ideally we would be able to reclaim + * it. However this would be such a rare case, it doesn't + * matter too much as we should pick it up the next time round. + * + * Importantly, if we find that the page has a refcount of 1 + * here (our refcount), then we know it is definitely not inuse + * so we can reuse it. + */ if (page_count(page->buffer) > 1) continue; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ee1ec2c7723c..a81d81890422 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -12,6 +12,7 @@ #include #include #include +#include /* for in_interrupt() */ /* * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page @@ -62,6 +63,98 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) #define page_cache_release(page) put_page(page) void release_pages(struct page **pages, int nr, int cold); +/* + * speculatively take a reference to a page. + * If the page is free (_count == 0), then _count is untouched, and 0 + * is returned. Otherwise, _count is incremented by 1 and 1 is returned. + * + * This function must be called inside the same rcu_read_lock() section as has + * been used to lookup the page in the pagecache radix-tree (or page table): + * this allows allocators to use a synchronize_rcu() to stabilize _count. 
+ * + * Unless an RCU grace period has passed, the count of all pages coming out + * of the allocator must be considered unstable. page_count may return higher + * than expected, and put_page must be able to do the right thing when the + * page has been finished with, no matter what it is subsequently allocated + * for (because put_page is what is used here to drop an invalid speculative + * reference). + * + * This is the interesting part of the lockless pagecache (and lockless + * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page) + * has the following pattern: + * 1. find page in radix tree + * 2. conditionally increment refcount + * 3. check the page is still in pagecache (if no, goto 1) + * + * Remove-side that cares about stability of _count (eg. reclaim) has the + * following (with tree_lock held for write): + * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) + * B. remove page from pagecache + * C. free the page + * + * There are 2 critical interleavings that matter: + * - 2 runs before A: in this case, A sees elevated refcount and bails out + * - A runs before 2: in this case, 2 sees zero refcount and retries; + * subsequently, B will complete and 1 will find no page, causing the + * lookup to return NULL. + * + * It is possible that between 1 and 2, the page is removed then the exact same + * page is inserted into the same position in pagecache. That's OK: the + * old find_get_page using tree_lock could equally have run before or after + * such a re-insertion, depending on order that locks are granted. + * + * Lookups racing against pagecache insertion isn't a big problem: either 1 + * will find the page or it will not. Likewise, the old find_get_page could run + * either before the insertion or afterwards, depending on timing. + */ +static inline int page_cache_get_speculative(struct page *page) +{ + VM_BUG_ON(in_interrupt()); + +#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) +# ifdef CONFIG_PREEMPT + VM_BUG_ON(!in_atomic()); +# endif + /* + * Preempt must be disabled here - we rely on rcu_read_lock doing + * this for us. + * + * Pagecache won't be truncated from interrupt context, so if we have + * found a page in the radix tree here, we have pinned its refcount by + * disabling preempt, and hence no need for the "speculative get" that + * SMP requires. + */ + VM_BUG_ON(page_count(page) == 0); + atomic_inc(&page->_count); + +#else + if (unlikely(!get_page_unless_zero(page))) { + /* + * Either the page has been freed, or will be freed. + * In either case, retry here and the caller should + * do the right thing (see comments above). 
+ */ + return 0; + } +#endif + VM_BUG_ON(PageTail(page)); + + return 1; +} + +static inline int page_freeze_refs(struct page *page, int count) +{ + return likely(atomic_cmpxchg(&page->_count, count, 0) == count); +} + +static inline void page_unfreeze_refs(struct page *page, int count) +{ + VM_BUG_ON(page_count(page) != 0); + VM_BUG_ON(count == 0); + + atomic_set(&page->_count, count); +} + #ifdef CONFIG_NUMA extern struct page *__page_cache_alloc(gfp_t gfp); #else @@ -133,13 +226,29 @@ static inline struct page *read_mapping_page(struct address_space *mapping, return read_cache_page(mapping, index, filler, data); } -int add_to_page_cache(struct page *page, struct address_space *mapping, +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); extern void remove_from_page_cache(struct page *page); extern void __remove_from_page_cache(struct page *page); +/* + * Like add_to_page_cache_locked, but used to add newly allocated pages: + * the page is new, so we can just run SetPageLocked() against it. + */ +static inline int add_to_page_cache(struct page *page, + struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) +{ + int error; + + SetPageLocked(page); + error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); + if (unlikely(error)) + ClearPageLocked(page); + return error; +} + /* * Return byte-offset into filesystem object for page. */ diff --git a/mm/filemap.c b/mm/filemap.c index 2d3ec1ffc66e..4e182a9a14c0 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -442,39 +442,43 @@ int filemap_write_and_wait_range(struct address_space *mapping, } /** - * add_to_page_cache - add newly allocated pagecache pages + * add_to_page_cache_locked - add a locked page to the pagecache * @page: page to add * @mapping: the page's address_space * @offset: page index * @gfp_mask: page allocation mode * - * This function is used to add newly allocated pagecache pages; - * the page is new, so we can just run SetPageLocked() against it. - * The other page state flags were set by rmqueue(). - * + * This function is used to add a page to the pagecache. It must be locked. * This function does not add the page to the LRU. The caller must do that. 
*/ -int add_to_page_cache(struct page *page, struct address_space *mapping, +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { - int error = mem_cgroup_cache_charge(page, current->mm, + int error; + + VM_BUG_ON(!PageLocked(page)); + + error = mem_cgroup_cache_charge(page, current->mm, gfp_mask & ~__GFP_HIGHMEM); if (error) goto out; error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); if (error == 0) { + page_cache_get(page); + page->mapping = mapping; + page->index = offset; + write_lock_irq(&mapping->tree_lock); error = radix_tree_insert(&mapping->page_tree, offset, page); - if (!error) { - page_cache_get(page); - SetPageLocked(page); - page->mapping = mapping; - page->index = offset; + if (likely(!error)) { mapping->nrpages++; __inc_zone_page_state(page, NR_FILE_PAGES); - } else + } else { + page->mapping = NULL; mem_cgroup_uncharge_cache_page(page); + page_cache_release(page); + } write_unlock_irq(&mapping->tree_lock); radix_tree_preload_end(); @@ -483,7 +487,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping, out: return error; } -EXPORT_SYMBOL(add_to_page_cache); +EXPORT_SYMBOL(add_to_page_cache_locked); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) diff --git a/mm/migrate.c b/mm/migrate.c index d8c65a65c61d..3ca6392e82cc 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -285,7 +285,15 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, page = migration_entry_to_page(entry); - get_page(page); + /* + * Once radix-tree replacement of page migration started, page_count + * *must* be zero. And, we don't want to call wait_on_page_locked() + * against a page without get_page(). + * So, we use get_page_unless_zero(), here. Even failed, page fault + * will occur again. + */ + if (!get_page_unless_zero(page)) + goto out; pte_unmap_unlock(ptep, ptl); wait_on_page_locked(page); put_page(page); @@ -305,6 +313,7 @@ out: static int migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page) { + int expected_count; void **pslot; if (!mapping) { @@ -319,12 +328,18 @@ static int migrate_page_move_mapping(struct address_space *mapping, pslot = radix_tree_lookup_slot(&mapping->page_tree, page_index(page)); - if (page_count(page) != 2 + !!PagePrivate(page) || + expected_count = 2 + !!PagePrivate(page); + if (page_count(page) != expected_count || (struct page *)radix_tree_deref_slot(pslot) != page) { write_unlock_irq(&mapping->tree_lock); return -EAGAIN; } + if (!page_freeze_refs(page, expected_count)) { + write_unlock_irq(&mapping->tree_lock); + return -EAGAIN; + } + /* * Now we know that no one else is looking at the page. */ @@ -338,6 +353,7 @@ static int migrate_page_move_mapping(struct address_space *mapping, radix_tree_replace_slot(pslot, newpage); + page_unfreeze_refs(page, expected_count); /* * Drop cache reference from old page. * We know this isn't the last reference. 
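The migrate.c hunks above are the update side of the same protocol: migration
freezes the expected reference count so concurrent speculative getters back
off, swaps the radix-tree slot, then restores the old page's count once the
slot points at the new page.  A condensed sketch of that sequence (simplified
from migrate_page_move_mapping() above; the real code also rechecks the slot
and drops tree_lock before each -EAGAIN return):

	expected_count = 2 + !!PagePrivate(page);  /* pagecache ref + ours */
	if (page_count(page) != expected_count)
		return -EAGAIN;			/* speculative ref in flight */
	if (!page_freeze_refs(page, expected_count))
		return -EAGAIN;			/* lost the cmpxchg race */
	radix_tree_replace_slot(pslot, newpage);   /* lookups now see newpage */
	page_unfreeze_refs(page, expected_count);  /* old page's refs released */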
diff --git a/mm/shmem.c b/mm/shmem.c index f92fea94d037..1089092aecaf 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -936,7 +936,7 @@ found: spin_lock(&info->lock); ptr = shmem_swp_entry(info, idx, NULL); if (ptr && ptr->val == entry.val) { - error = add_to_page_cache(page, inode->i_mapping, + error = add_to_page_cache_locked(page, inode->i_mapping, idx, GFP_NOWAIT); /* does mem_cgroup_uncharge_cache_page on error */ } else /* we must compensate for our precharge above */ @@ -1301,8 +1301,8 @@ repeat: SetPageUptodate(filepage); set_page_dirty(filepage); swap_free(swap); - } else if (!(error = add_to_page_cache( - swappage, mapping, idx, GFP_NOWAIT))) { + } else if (!(error = add_to_page_cache_locked(swappage, mapping, + idx, GFP_NOWAIT))) { info->flags |= SHMEM_PAGEIN; shmem_swp_set(info, entry, 0); shmem_swp_unmap(entry); diff --git a/mm/swap_state.c b/mm/swap_state.c index d8aadaf2a0ba..3e3381d6c7ee 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -64,7 +64,7 @@ void show_swap_cache_info(void) } /* - * add_to_swap_cache resembles add_to_page_cache on swapper_space, + * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space, * but sets SwapCache flag and private instead of mapping and index. */ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) @@ -76,19 +76,26 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) BUG_ON(PagePrivate(page)); error = radix_tree_preload(gfp_mask); if (!error) { + page_cache_get(page); + SetPageSwapCache(page); + set_page_private(page, entry.val); + write_lock_irq(&swapper_space.tree_lock); error = radix_tree_insert(&swapper_space.page_tree, entry.val, page); - if (!error) { - page_cache_get(page); - SetPageSwapCache(page); - set_page_private(page, entry.val); + if (likely(!error)) { total_swapcache_pages++; __inc_zone_page_state(page, NR_FILE_PAGES); INC_CACHE_INFO(add_total); } write_unlock_irq(&swapper_space.tree_lock); radix_tree_preload_end(); + + if (unlikely(error)) { + set_page_private(page, 0UL); + ClearPageSwapCache(page); + page_cache_release(page); + } } return error; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 26672c6cd3ce..0075eac1cd04 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -391,12 +391,10 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, } /* - * Attempt to detach a locked page from its ->mapping. If it is dirty or if - * someone else has a ref on the page, abort and return 0. If it was - * successfully detached, return 1. Assumes the caller has a single ref on - * this page. + * Same as remove_mapping, but if the page is removed from the mapping, it + * gets returned with a refcount of 0. */ -int remove_mapping(struct address_space *mapping, struct page *page) +static int __remove_mapping(struct address_space *mapping, struct page *page) { BUG_ON(!PageLocked(page)); BUG_ON(mapping != page_mapping(page)); @@ -427,24 +425,24 @@ int remove_mapping(struct address_space *mapping, struct page *page) * Note that if SetPageDirty is always performed via set_page_dirty, * and thus under tree_lock, then this ordering is not required. 
 */
-	if (unlikely(page_count(page) != 2))
+	if (!page_freeze_refs(page, 2))
 		goto cannot_free;
-	smp_rmb();
-	if (unlikely(PageDirty(page)))
+	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
+	if (unlikely(PageDirty(page))) {
+		page_unfreeze_refs(page, 2);
 		goto cannot_free;
+	}
 
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
 		__delete_from_swap_cache(page);
 		write_unlock_irq(&mapping->tree_lock);
 		swap_free(swap);
-		__put_page(page);	/* The pagecache ref */
-		return 1;
+	} else {
+		__remove_from_page_cache(page);
+		write_unlock_irq(&mapping->tree_lock);
 	}
-	__remove_from_page_cache(page);
-	write_unlock_irq(&mapping->tree_lock);
-	__put_page(page);
 	return 1;
 
 cannot_free:
@@ -452,6 +450,26 @@ cannot_free:
 	return 0;
 }
 
+/*
+ * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
+ * someone else has a ref on the page, abort and return 0.  If it was
+ * successfully detached, return 1.  Assumes the caller has a single ref on
+ * this page.
+ */
+int remove_mapping(struct address_space *mapping, struct page *page)
+{
+	if (__remove_mapping(mapping, page)) {
+		/*
+		 * Unfreezing the refcount with 1 rather than 2 effectively
+		 * drops the pagecache ref for us without requiring another
+		 * atomic operation.
+		 */
+		page_unfreeze_refs(page, 1);
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -598,18 +616,34 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (PagePrivate(page)) {
 			if (!try_to_release_page(page, sc->gfp_mask))
 				goto activate_locked;
-			if (!mapping && page_count(page) == 1)
-				goto free_it;
+			if (!mapping && page_count(page) == 1) {
+				unlock_page(page);
+				if (put_page_testzero(page))
+					goto free_it;
+				else {
+					/*
+					 * rare race with speculative reference.
+					 * the speculative reference will free
+					 * this page shortly, so we may
+					 * increment nr_reclaimed here (and
+					 * leave it off the LRU).
+					 */
+					nr_reclaimed++;
+					continue;
+				}
+			}
 		}
 
-		if (!mapping || !remove_mapping(mapping, page))
+		if (!mapping || !__remove_mapping(mapping, page))
 			goto keep_locked;
 
-free_it:
 		unlock_page(page);
+free_it:
 		nr_reclaimed++;
-		if (!pagevec_add(&freed_pvec, page))
-			__pagevec_release_nonlru(&freed_pvec);
+		if (!pagevec_add(&freed_pvec, page)) {
+			__pagevec_free(&freed_pvec);
+			pagevec_reinit(&freed_pvec);
+		}
 		continue;
 
 activate_locked:
@@ -623,7 +657,7 @@ keep:
 	}
 	list_splice(&ret_pages, page_list);
 	if (pagevec_count(&freed_pvec))
-		__pagevec_release_nonlru(&freed_pvec);
+		__pagevec_free(&freed_pvec);
 	count_vm_events(PGACTIVATE, pgactivate);
 	return nr_reclaimed;
 }
-- cgit v1.2.3


From 51cc50685a4275c6a02653670af9f108a64e01cf Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan
Date: Fri, 25 Jul 2008 19:45:34 -0700
Subject: SL*B: drop kmem cache argument from constructor

The kmem cache passed to a constructor is only needed by constructors that
are themselves multiplexers.  Nobody uses this "feature", nor does anybody
use the passed kmem cache in a non-trivial way, so pass only a pointer to
the object.

The non-trivial places are:
  arch/powerpc/mm/init_64.c
  arch/powerpc/mm/hugetlbpage.c

This is flag day, yes.
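For maintainers of out-of-tree callers, the conversion is mechanical; a
representative before/after sketch (a hypothetical foo cache, mirroring the
in-tree conversions below):

	/* Before: the cache pointer was passed, but almost nobody used it. */
	static void foo_init_once(struct kmem_cache *cachep, void *obj)
	{
		memset(obj, 0, sizeof(struct foo));
	}

	/* After: only the object pointer is passed. */
	static void foo_init_once(void *obj)
	{
		memset(obj, 0, sizeof(struct foo));
	}

	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, foo_init_once);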
Signed-off-by: Alexey Dobriyan Acked-by: Pekka Enberg Acked-by: Christoph Lameter Cc: Jon Tollefson Cc: Nick Piggin Cc: Matt Mackall [akpm@linux-foundation.org: fix arch/powerpc/mm/hugetlbpage.c] [akpm@linux-foundation.org: fix mm/slab.c] [akpm@linux-foundation.org: fix ubifs] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/plat-s3c24xx/dma.c | 2 +- arch/powerpc/kernel/rtas_flash.c | 2 +- arch/powerpc/mm/hugetlbpage.c | 9 ++------- arch/powerpc/mm/init_64.c | 24 +++++++++--------------- arch/powerpc/platforms/cell/spufs/inode.c | 2 +- arch/sh/mm/pmb.c | 2 +- arch/xtensa/mm/init.c | 2 +- drivers/usb/mon/mon_text.c | 4 ++-- fs/adfs/super.c | 2 +- fs/affs/super.c | 2 +- fs/afs/super.c | 4 ++-- fs/befs/linuxvfs.c | 2 +- fs/bfs/inode.c | 2 +- fs/block_dev.c | 2 +- fs/buffer.c | 2 +- fs/cifs/cifsfs.c | 2 +- fs/coda/inode.c | 2 +- fs/ecryptfs/main.c | 4 ++-- fs/efs/super.c | 2 +- fs/ext2/super.c | 2 +- fs/ext3/super.c | 2 +- fs/ext4/super.c | 2 +- fs/fat/cache.c | 2 +- fs/fat/inode.c | 2 +- fs/fuse/inode.c | 2 +- fs/gfs2/main.c | 4 ++-- fs/hfs/super.c | 2 +- fs/hfsplus/super.c | 2 +- fs/hpfs/super.c | 2 +- fs/hugetlbfs/inode.c | 2 +- fs/inode.c | 2 +- fs/isofs/inode.c | 2 +- fs/jffs2/super.c | 2 +- fs/jfs/jfs_metapage.c | 2 +- fs/jfs/super.c | 2 +- fs/locks.c | 2 +- fs/minix/inode.c | 2 +- fs/ncpfs/inode.c | 2 +- fs/nfs/inode.c | 2 +- fs/ntfs/super.c | 2 +- fs/ocfs2/dlm/dlmfs.c | 3 +-- fs/ocfs2/super.c | 2 +- fs/openpromfs/inode.c | 2 +- fs/proc/inode.c | 2 +- fs/qnx4/inode.c | 2 +- fs/reiserfs/super.c | 2 +- fs/romfs/inode.c | 2 +- fs/smbfs/inode.c | 2 +- fs/sysv/inode.c | 2 +- fs/ubifs/super.c | 2 +- fs/udf/super.c | 2 +- fs/ufs/super.c | 2 +- fs/xfs/linux-2.6/kmem.h | 2 +- fs/xfs/linux-2.6/xfs_super.c | 1 - include/linux/slab.h | 2 +- include/linux/slub_def.h | 2 +- ipc/mqueue.c | 2 +- kernel/fork.c | 2 +- lib/idr.c | 2 +- lib/radix-tree.c | 2 +- mm/rmap.c | 2 +- mm/shmem.c | 2 +- mm/slab.c | 11 +++++------ mm/slob.c | 7 +++---- mm/slub.c | 13 ++++++------- net/socket.c | 2 +- net/sunrpc/rpc_pipe.c | 2 +- 67 files changed, 90 insertions(+), 106 deletions(-) (limited to 'mm/shmem.c') diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index 60f162dc4fad..8c5e656d5d8c 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -1304,7 +1304,7 @@ struct sysdev_class dma_sysclass = { /* kmem cache implementation */ -static void s3c2410_dma_cache_ctor(struct kmem_cache *c, void *p) +static void s3c2410_dma_cache_ctor(void *p) { memset(p, 0, sizeof(struct s3c2410_dma_buf)); } diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 09ded5c424a9..149cb112cd1a 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -286,7 +286,7 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf, } /* constructor for flash_block_cache */ -void rtas_block_ctor(struct kmem_cache *cache, void *ptr) +void rtas_block_ctor(void *ptr) { memset(ptr, 0, RTAS_BLK_SIZE); } diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index fb42c4dd3217..ed0aab0208a6 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -113,7 +113,7 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, unsigned long address, unsigned int psize) { - pte_t *new = kmem_cache_alloc(huge_pgtable_cache(psize), + pte_t *new = kmem_cache_zalloc(huge_pgtable_cache(psize), 
GFP_KERNEL|__GFP_REPEAT); if (! new) @@ -730,11 +730,6 @@ static int __init hugepage_setup_sz(char *str) } __setup("hugepagesz=", hugepage_setup_sz); -static void zero_ctor(struct kmem_cache *cache, void *addr) -{ - memset(addr, 0, kmem_cache_size(cache)); -} - static int __init hugetlbpage_init(void) { unsigned int psize; @@ -756,7 +751,7 @@ static int __init hugetlbpage_init(void) HUGEPTE_TABLE_SIZE(psize), HUGEPTE_TABLE_SIZE(psize), 0, - zero_ctor); + NULL); if (!huge_pgtable_cache(psize)) panic("hugetlbpage_init(): could not create %s"\ "\n", HUGEPTE_CACHE_NAME(psize)); diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index a41bc5aa2043..4f7df85129d8 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -136,9 +136,14 @@ static int __init setup_kcore(void) module_init(setup_kcore); #endif -static void zero_ctor(struct kmem_cache *cache, void *addr) +static void pgd_ctor(void *addr) { - memset(addr, 0, kmem_cache_size(cache)); + memset(addr, 0, PGD_TABLE_SIZE); +} + +static void pmd_ctor(void *addr) +{ + memset(addr, 0, PMD_TABLE_SIZE); } static const unsigned int pgtable_cache_size[2] = { @@ -163,19 +168,8 @@ struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)]; void pgtable_cache_init(void) { - int i; - - for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) { - int size = pgtable_cache_size[i]; - const char *name = pgtable_cache_name[i]; - - pr_debug("Allocating page table cache %s (#%d) " - "for size: %08x...\n", name, i, size); - pgtable_cache[i] = kmem_cache_create(name, - size, size, - SLAB_PANIC, - zero_ctor); - } + pgtable_cache[0] = kmem_cache_create(pgtable_cache_name[0], PGD_TABLE_SIZE, PGD_TABLE_SIZE, SLAB_PANIC, pgd_ctor); + pgtable_cache[1] = kmem_cache_create(pgtable_cache_name[1], PMD_TABLE_SIZE, PMD_TABLE_SIZE, SLAB_PANIC, pmd_ctor); } #ifdef CONFIG_SPARSEMEM_VMEMMAP diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 7123472801d9..690ca7b0dcf6 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -78,7 +78,7 @@ spufs_destroy_inode(struct inode *inode) } static void -spufs_init_once(struct kmem_cache *cachep, void *p) +spufs_init_once(void *p) { struct spufs_inode_info *ei = p; diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index 0b0ec6e04753..46911bcbf17b 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -293,7 +293,7 @@ void pmb_unmap(unsigned long addr) } while (pmbe); } -static void pmb_cache_ctor(struct kmem_cache *cachep, void *pmb) +static void pmb_cache_ctor(void *pmb) { struct pmb_entry *pmbe = pmb; diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 81d0560eaea2..ee261005b363 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -309,7 +309,7 @@ void show_mem(void) struct kmem_cache *pgtable_cache __read_mostly; -static void pgd_ctor(struct kmem_cache *cache, void* addr) +static void pgd_ctor(void* addr) { pte_t* ptep = (pte_t*)addr; int i; diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index 5e3e4e9b6c77..1f715436d6d3 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -87,7 +87,7 @@ struct mon_reader_text { static struct dentry *mon_dir; /* Usually /sys/kernel/debug/usbmon */ -static void mon_text_ctor(struct kmem_cache *, void *); +static void mon_text_ctor(void *); struct mon_text_ptr { int cnt, limit; @@ -720,7 +720,7 @@ void mon_text_del(struct mon_bus *mbus) /* * Slab interface: constructor. 
*/ -static void mon_text_ctor(struct kmem_cache *slab, void *mem) +static void mon_text_ctor(void *mem) { /* * Nothing to initialize. No, really! diff --git a/fs/adfs/super.c b/fs/adfs/super.c index 9e421eeb672b..26f3b43726bb 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -249,7 +249,7 @@ static void adfs_destroy_inode(struct inode *inode) kmem_cache_free(adfs_inode_cachep, ADFS_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct adfs_inode_info *ei = (struct adfs_inode_info *) foo; diff --git a/fs/affs/super.c b/fs/affs/super.c index 4e0309566406..3a89094f93d0 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -90,7 +90,7 @@ static void affs_destroy_inode(struct inode *inode) kmem_cache_free(affs_inode_cachep, AFFS_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct affs_inode_info *ei = (struct affs_inode_info *) foo; diff --git a/fs/afs/super.c b/fs/afs/super.c index 7e3faeef6818..250d8c4d66e4 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -27,7 +27,7 @@ #define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */ -static void afs_i_init_once(struct kmem_cache *cachep, void *foo); +static void afs_i_init_once(void *foo); static int afs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt); @@ -449,7 +449,7 @@ static void afs_put_super(struct super_block *sb) /* * initialise an inode cache slab element prior to any use */ -static void afs_i_init_once(struct kmem_cache *cachep, void *_vnode) +static void afs_i_init_once(void *_vnode) { struct afs_vnode *vnode = _vnode; diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index e8717de3bab3..02c6e62b72f8 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -289,7 +289,7 @@ befs_destroy_inode(struct inode *inode) kmem_cache_free(befs_inode_cachep, BEFS_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct befs_inode_info *bi = (struct befs_inode_info *) foo; diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index 053e690ec9ed..0ed57b5ee012 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -264,7 +264,7 @@ static void bfs_destroy_inode(struct inode *inode) kmem_cache_free(bfs_inode_cachep, BFS_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct bfs_inode_info *bi = foo; diff --git a/fs/block_dev.c b/fs/block_dev.c index 10d8a0aa871a..dcf37cada369 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -271,7 +271,7 @@ static void bdev_destroy_inode(struct inode *inode) kmem_cache_free(bdev_cachep, bdi); } -static void init_once(struct kmem_cache * cachep, void *foo) +static void init_once(void *foo) { struct bdev_inode *ei = (struct bdev_inode *) foo; struct block_device *bdev = &ei->bdev; diff --git a/fs/buffer.c b/fs/buffer.c index 109b261192d9..5fd497cdd6f3 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3272,7 +3272,7 @@ int bh_submit_read(struct buffer_head *bh) EXPORT_SYMBOL(bh_submit_read); static void -init_buffer_head(struct kmem_cache *cachep, void *data) +init_buffer_head(void *data) { struct buffer_head *bh = data; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 22857c639df5..fe5f6809cba6 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -766,7 +766,7 @@ const struct file_operations cifs_dir_ops = { }; static void -cifs_init_once(struct kmem_cache *cachep, void *inode) +cifs_init_once(void *inode) { struct 
cifsInodeInfo *cifsi = inode; diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 2f58dfc70083..830f51abb971 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -58,7 +58,7 @@ static void coda_destroy_inode(struct inode *inode) kmem_cache_free(coda_inode_cachep, ITOC(inode)); } -static void init_once(struct kmem_cache * cachep, void *foo) +static void init_once(void *foo) { struct coda_inode_info *ei = (struct coda_inode_info *) foo; diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 6f403cfba14f..448dfd597b5f 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -578,7 +578,7 @@ static struct file_system_type ecryptfs_fs_type = { * Initializes the ecryptfs_inode_info_cache when it is created */ static void -inode_info_init_once(struct kmem_cache *cachep, void *vptr) +inode_info_init_once(void *vptr) { struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr; @@ -589,7 +589,7 @@ static struct ecryptfs_cache_info { struct kmem_cache **cache; const char *name; size_t size; - void (*ctor)(struct kmem_cache *cache, void *obj); + void (*ctor)(void *obj); } ecryptfs_cache_infos[] = { { .cache = &ecryptfs_auth_tok_list_item_cache, diff --git a/fs/efs/super.c b/fs/efs/super.c index d733531b55e2..567b134fa1f1 100644 --- a/fs/efs/super.c +++ b/fs/efs/super.c @@ -70,7 +70,7 @@ static void efs_destroy_inode(struct inode *inode) kmem_cache_free(efs_inode_cachep, INODE_INFO(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct efs_inode_info *ei = (struct efs_inode_info *) foo; diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 31308a3b0b8b..fd88c7b43e66 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -159,7 +159,7 @@ static void ext2_destroy_inode(struct inode *inode) kmem_cache_free(ext2_inode_cachep, EXT2_I(inode)); } -static void init_once(struct kmem_cache * cachep, void *foo) +static void init_once(void *foo) { struct ext2_inode_info *ei = (struct ext2_inode_info *) foo; diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 615788c6843a..8ddced384674 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -472,7 +472,7 @@ static void ext3_destroy_inode(struct inode *inode) kmem_cache_free(ext3_inode_cachep, EXT3_I(inode)); } -static void init_once(struct kmem_cache * cachep, void *foo) +static void init_once(void *foo) { struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 1cb371dcd609..b5479b1dff14 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -595,7 +595,7 @@ static void ext4_destroy_inode(struct inode *inode) kmem_cache_free(ext4_inode_cachep, EXT4_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct ext4_inode_info *ei = (struct ext4_inode_info *) foo; diff --git a/fs/fat/cache.c b/fs/fat/cache.c index 3a9ecac8d61f..3222f51c41cf 100644 --- a/fs/fat/cache.c +++ b/fs/fat/cache.c @@ -36,7 +36,7 @@ static inline int fat_max_cache(struct inode *inode) static struct kmem_cache *fat_cache_cachep; -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct fat_cache *cache = (struct fat_cache *)foo; diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 23676f9d79ce..6d266d793e2c 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -498,7 +498,7 @@ static void fat_destroy_inode(struct inode *inode) kmem_cache_free(fat_inode_cachep, MSDOS_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void 
*foo) { struct msdos_inode_info *ei = (struct msdos_inode_info *)foo; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 7d2f7d6e22e2..d2249f174e20 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -956,7 +956,7 @@ static inline void unregister_fuseblk(void) } #endif -static void fuse_inode_init_once(struct kmem_cache *cachep, void *foo) +static void fuse_inode_init_once(void *foo) { struct inode * inode = foo; diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index bcc668d0fadd..bb2cc303ac29 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -24,7 +24,7 @@ #include "util.h" #include "glock.h" -static void gfs2_init_inode_once(struct kmem_cache *cachep, void *foo) +static void gfs2_init_inode_once(void *foo) { struct gfs2_inode *ip = foo; @@ -33,7 +33,7 @@ static void gfs2_init_inode_once(struct kmem_cache *cachep, void *foo) ip->i_alloc = NULL; } -static void gfs2_init_glock_once(struct kmem_cache *cachep, void *foo) +static void gfs2_init_glock_once(void *foo) { struct gfs2_glock *gl = foo; diff --git a/fs/hfs/super.c b/fs/hfs/super.c index ac2ec5ef66e4..4abb1047c689 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -432,7 +432,7 @@ static struct file_system_type hfs_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -static void hfs_init_once(struct kmem_cache *cachep, void *p) +static void hfs_init_once(void *p) { struct hfs_inode_info *i = p; diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 3859118531c7..e834e578c93f 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -485,7 +485,7 @@ static struct file_system_type hfsplus_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -static void hfsplus_init_once(struct kmem_cache *cachep, void *p) +static void hfsplus_init_once(void *p) { struct hfsplus_inode_info *i = p; diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index f63a699ec659..b8ae9c90ada0 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -173,7 +173,7 @@ static void hpfs_destroy_inode(struct inode *inode) kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index dbd01d262ca4..3f58923fb39b 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -705,7 +705,7 @@ static const struct address_space_operations hugetlbfs_aops = { }; -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo; diff --git a/fs/inode.c b/fs/inode.c index 35b6414522ea..b6726f644530 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -224,7 +224,7 @@ void inode_init_once(struct inode *inode) EXPORT_SYMBOL(inode_init_once); -static void init_once(struct kmem_cache * cachep, void *foo) +static void init_once(void *foo) { struct inode * inode = (struct inode *) foo; diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 044a254d526b..26948a6033b6 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -73,7 +73,7 @@ static void isofs_destroy_inode(struct inode *inode) kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct iso_inode_info *ei = foo; diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 7da69eae49e4..efd401257ed9 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -44,7 +44,7 @@ static void jffs2_destroy_inode(struct inode *inode) 
kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); } -static void jffs2_i_init_once(struct kmem_cache *cachep, void *foo) +static void jffs2_i_init_once(void *foo) { struct jffs2_inode_info *f = foo; diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 854ff0ec574f..c350057087dd 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -182,7 +182,7 @@ static inline void remove_metapage(struct page *page, struct metapage *mp) #endif -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct metapage *mp = (struct metapage *)foo; diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 359c091d8965..3630718be395 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -760,7 +760,7 @@ static struct file_system_type jfs_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo; diff --git a/fs/locks.c b/fs/locks.c index 01490300f7cb..5eb259e3cd38 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -201,7 +201,7 @@ EXPORT_SYMBOL(locks_init_lock); * Initialises the fields of the file lock which are invariant for * free file_locks. */ -static void init_once(struct kmem_cache *cache, void *foo) +static void init_once(void *foo) { struct file_lock *lock = (struct file_lock *) foo; diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 523d73713418..d1d1eb84679d 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -68,7 +68,7 @@ static void minix_destroy_inode(struct inode *inode) kmem_cache_free(minix_inode_cachep, minix_i(inode)); } -static void init_once(struct kmem_cache * cachep, void *foo) +static void init_once(void *foo) { struct minix_inode_info *ei = (struct minix_inode_info *) foo; diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 2e5ab1204dec..d642f0e5b365 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -64,7 +64,7 @@ static void ncp_destroy_inode(struct inode *inode) kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index df23f987da6b..52daefa2f521 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1242,7 +1242,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi) #endif } -static void init_once(struct kmem_cache * cachep, void *foo) +static void init_once(void *foo) { struct nfs_inode *nfsi = (struct nfs_inode *) foo; diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 3e76f3b216bc..4a46743b5077 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -3080,7 +3080,7 @@ struct kmem_cache *ntfs_inode_cache; struct kmem_cache *ntfs_big_inode_cache; /* Init once constructor for the inode slab cache. 
*/ -static void ntfs_big_inode_init_once(struct kmem_cache *cachep, void *foo) +static void ntfs_big_inode_init_once(void *foo) { ntfs_inode *ni = (ntfs_inode *)foo; diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c index e48aba698b77..533a789c3ef8 100644 --- a/fs/ocfs2/dlm/dlmfs.c +++ b/fs/ocfs2/dlm/dlmfs.c @@ -267,8 +267,7 @@ static ssize_t dlmfs_file_write(struct file *filp, return writelen; } -static void dlmfs_init_once(struct kmem_cache *cachep, - void *foo) +static void dlmfs_init_once(void *foo) { struct dlmfs_inode_private *ip = (struct dlmfs_inode_private *) foo; diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index ccecfe5094fa..2560b33889aa 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1118,7 +1118,7 @@ bail: return status; } -static void ocfs2_inode_init_once(struct kmem_cache *cachep, void *data) +static void ocfs2_inode_init_once(void *data) { struct ocfs2_inode_info *oi = data; diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index d17b4fd204e1..9f5b054f06b9 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c @@ -430,7 +430,7 @@ static struct file_system_type openprom_fs_type = { .kill_sb = kill_anon_super, }; -static void op_inode_init_once(struct kmem_cache * cachep, void *data) +static void op_inode_init_once(void *data) { struct op_inode_info *oi = (struct op_inode_info *) data; diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 02eca2ed9dd7..b37f25dc45a5 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -94,7 +94,7 @@ static void proc_destroy_inode(struct inode *inode) kmem_cache_free(proc_inode_cachep, PROC_I(inode)); } -static void init_once(struct kmem_cache * cachep, void *foo) +static void init_once(void *foo) { struct proc_inode *ei = (struct proc_inode *) foo; diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index b31ab78052b3..2aad1044b84c 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c @@ -553,7 +553,7 @@ static void qnx4_destroy_inode(struct inode *inode) kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo; diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 2ec748ba0bd3..879e54d35c2d 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -521,7 +521,7 @@ static void reiserfs_destroy_inode(struct inode *inode) kmem_cache_free(reiserfs_inode_cachep, REISERFS_I(inode)); } -static void init_once(struct kmem_cache * cachep, void *foo) +static void init_once(void *foo) { struct reiserfs_inode_info *ei = (struct reiserfs_inode_info *)foo; diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c index 3f13d491c7c7..8e51a2aaa977 100644 --- a/fs/romfs/inode.c +++ b/fs/romfs/inode.c @@ -577,7 +577,7 @@ static void romfs_destroy_inode(struct inode *inode) kmem_cache_free(romfs_inode_cachep, ROMFS_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct romfs_inode_info *ei = foo; diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c index 376ef3ee6ed7..3528f40ffb0f 100644 --- a/fs/smbfs/inode.c +++ b/fs/smbfs/inode.c @@ -67,7 +67,7 @@ static void smb_destroy_inode(struct inode *inode) kmem_cache_free(smb_inode_cachep, SMB_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct smb_inode_info *ei = (struct smb_inode_info *) foo; diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index c5d60de0658f..df0d435baa48 100644 --- a/fs/sysv/inode.c +++ 
b/fs/sysv/inode.c @@ -326,7 +326,7 @@ static void sysv_destroy_inode(struct inode *inode) kmem_cache_free(sysv_inode_cachep, SYSV_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *p) +static void init_once(void *p) { struct sysv_inode_info *si = (struct sysv_inode_info *)p; diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 00eb9c68ad03..ca1e2d4e03cc 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1841,7 +1841,7 @@ static struct file_system_type ubifs_fs_type = { /* * Inode slab cache constructor. */ -static void inode_slab_ctor(struct kmem_cache *cachep, void *obj) +static void inode_slab_ctor(void *obj) { struct ubifs_inode *ui = obj; inode_init_once(&ui->vfs_inode); diff --git a/fs/udf/super.c b/fs/udf/super.c index 44cc702f96cc..5698bbf83bbf 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -148,7 +148,7 @@ static void udf_destroy_inode(struct inode *inode) kmem_cache_free(udf_inode_cachep, UDF_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct udf_inode_info *ei = (struct udf_inode_info *)foo; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 227c9d700040..3e30e40aa24d 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -1302,7 +1302,7 @@ static void ufs_destroy_inode(struct inode *inode) kmem_cache_free(ufs_inode_cachep, UFS_I(inode)); } -static void init_once(struct kmem_cache * cachep, void *foo) +static void init_once(void *foo) { struct ufs_inode_info *ei = (struct ufs_inode_info *) foo; diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h index 5e9564902976..a20683cf74dd 100644 --- a/fs/xfs/linux-2.6/kmem.h +++ b/fs/xfs/linux-2.6/kmem.h @@ -79,7 +79,7 @@ kmem_zone_init(int size, char *zone_name) static inline kmem_zone_t * kmem_zone_init_flags(int size, char *zone_name, unsigned long flags, - void (*construct)(kmem_zone_t *, void *)) + void (*construct)(void *)) { return kmem_cache_create(zone_name, size, 0, flags, construct); } diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 742b2c7852c1..943381284e2e 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -843,7 +843,6 @@ xfs_fs_destroy_inode( STATIC void xfs_fs_inode_init_once( - kmem_zone_t *zonep, void *vnode) { inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); diff --git a/include/linux/slab.h b/include/linux/slab.h index 41103910f8a2..9ff8e8499403 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -58,7 +58,7 @@ int slab_is_available(void); struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, - void (*)(struct kmem_cache *, void *)); + void (*)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index d117ea2825a9..5bad61a93f65 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -85,7 +85,7 @@ struct kmem_cache { struct kmem_cache_order_objects min; gfp_t allocflags; /* gfp flags to use on each alloc */ int refcount; /* Refcount for slab cache destroy */ - void (*ctor)(struct kmem_cache *, void *); + void (*ctor)(void *); int inuse; /* Offset to metadata */ int align; /* Alignment */ const char *name; /* Name (only for display!) 
*/ diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 1fdc2eb2f6d8..474984f9e032 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -207,7 +207,7 @@ static int mqueue_get_sb(struct file_system_type *fs_type, return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; diff --git a/kernel/fork.c b/kernel/fork.c index b99d73e971a4..80e83e459b17 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1442,7 +1442,7 @@ long do_fork(unsigned long clone_flags, #define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif -static void sighand_ctor(struct kmem_cache *cachep, void *data) +static void sighand_ctor(void *data) { struct sighand_struct *sighand = data; diff --git a/lib/idr.c b/lib/idr.c index 3476f8203e97..e728c7fccc4d 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -607,7 +607,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id) } EXPORT_SYMBOL(idr_replace); -static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer) +static void idr_cache_ctor(void *idr_layer) { memset(idr_layer, 0, sizeof(struct idr_layer)); } diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 9c4f1ffa2864..be86b32bc874 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1183,7 +1183,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) EXPORT_SYMBOL(radix_tree_tagged); static void -radix_tree_node_ctor(struct kmem_cache *cachep, void *node) +radix_tree_node_ctor(void *node) { memset(node, 0, sizeof(struct radix_tree_node)); } diff --git a/mm/rmap.c b/mm/rmap.c index abbd29f7c43f..39ae5a9bf382 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -138,7 +138,7 @@ void anon_vma_unlink(struct vm_area_struct *vma) anon_vma_free(anon_vma); } -static void anon_vma_ctor(struct kmem_cache *cachep, void *data) +static void anon_vma_ctor(void *data) { struct anon_vma *anon_vma = data; diff --git a/mm/shmem.c b/mm/shmem.c index 1089092aecaf..952d361774bb 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2352,7 +2352,7 @@ static void shmem_destroy_inode(struct inode *inode) kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct shmem_inode_info *p = (struct shmem_inode_info *) foo; diff --git a/mm/slab.c b/mm/slab.c index 052e7d64537e..918f04f7fef1 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -406,7 +406,7 @@ struct kmem_cache { unsigned int dflags; /* dynamic flags */ /* constructor func */ - void (*ctor)(struct kmem_cache *, void *); + void (*ctor)(void *obj); /* 5) cache creation/removal */ const char *name; @@ -2137,8 +2137,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep) */ struct kmem_cache * kmem_cache_create (const char *name, size_t size, size_t align, - unsigned long flags, - void (*ctor)(struct kmem_cache *, void *)) + unsigned long flags, void (*ctor)(void *)) { size_t left_over, slab_size, ralign; struct kmem_cache *cachep = NULL, *pc; @@ -2653,7 +2652,7 @@ static void cache_init_objs(struct kmem_cache *cachep, * They must also be threaded. 
*/ if (cachep->ctor && !(cachep->flags & SLAB_POISON)) - cachep->ctor(cachep, objp + obj_offset(cachep)); + cachep->ctor(objp + obj_offset(cachep)); if (cachep->flags & SLAB_RED_ZONE) { if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) @@ -2669,7 +2668,7 @@ static void cache_init_objs(struct kmem_cache *cachep, cachep->buffer_size / PAGE_SIZE, 0); #else if (cachep->ctor) - cachep->ctor(cachep, objp); + cachep->ctor(objp); #endif slab_bufctl(slabp)[i] = i + 1; } @@ -3093,7 +3092,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, #endif objp += obj_offset(cachep); if (cachep->ctor && cachep->flags & SLAB_POISON) - cachep->ctor(cachep, objp); + cachep->ctor(objp); #if ARCH_SLAB_MINALIGN if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) { printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", diff --git a/mm/slob.c b/mm/slob.c index de268eb7ac70..d8fbd4d1bfa7 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -525,12 +525,11 @@ struct kmem_cache { unsigned int size, align; unsigned long flags; const char *name; - void (*ctor)(struct kmem_cache *, void *); + void (*ctor)(void *); }; struct kmem_cache *kmem_cache_create(const char *name, size_t size, - size_t align, unsigned long flags, - void (*ctor)(struct kmem_cache *, void *)) + size_t align, unsigned long flags, void (*ctor)(void *)) { struct kmem_cache *c; @@ -575,7 +574,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) b = slob_new_page(flags, get_order(c->size), node); if (c->ctor) - c->ctor(c, b); + c->ctor(b); return b; } diff --git a/mm/slub.c b/mm/slub.c index 77c21cf53ff9..b7e2cd5d82db 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1012,7 +1012,7 @@ __setup("slub_debug", setup_slub_debug); static unsigned long kmem_cache_flags(unsigned long objsize, unsigned long flags, const char *name, - void (*ctor)(struct kmem_cache *, void *)) + void (*ctor)(void *)) { /* * Enable debugging if selected on the kernel commandline. 
@@ -1040,7 +1040,7 @@ static inline int check_object(struct kmem_cache *s, struct page *page, static inline void add_full(struct kmem_cache_node *n, struct page *page) {} static inline unsigned long kmem_cache_flags(unsigned long objsize, unsigned long flags, const char *name, - void (*ctor)(struct kmem_cache *, void *)) + void (*ctor)(void *)) { return flags; } @@ -1103,7 +1103,7 @@ static void setup_object(struct kmem_cache *s, struct page *page, { setup_object_debug(s, page, object); if (unlikely(s->ctor)) - s->ctor(s, object); + s->ctor(object); } static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) @@ -2286,7 +2286,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags, const char *name, size_t size, size_t align, unsigned long flags, - void (*ctor)(struct kmem_cache *, void *)) + void (*ctor)(void *)) { memset(s, 0, kmem_size); s->name = name; @@ -3042,7 +3042,7 @@ static int slab_unmergeable(struct kmem_cache *s) static struct kmem_cache *find_mergeable(size_t size, size_t align, unsigned long flags, const char *name, - void (*ctor)(struct kmem_cache *, void *)) + void (*ctor)(void *)) { struct kmem_cache *s; @@ -3082,8 +3082,7 @@ static struct kmem_cache *find_mergeable(size_t size, } struct kmem_cache *kmem_cache_create(const char *name, size_t size, - size_t align, unsigned long flags, - void (*ctor)(struct kmem_cache *, void *)) + size_t align, unsigned long flags, void (*ctor)(void *)) { struct kmem_cache *s; diff --git a/net/socket.c b/net/socket.c index 1310a82cbba7..8ef8ba81b9e2 100644 --- a/net/socket.c +++ b/net/socket.c @@ -265,7 +265,7 @@ static void sock_destroy_inode(struct inode *inode) container_of(inode, struct socket_alloc, vfs_inode)); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct socket_alloc *ei = (struct socket_alloc *)foo; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 5a9b0e7828cd..23a2b8f6dc49 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -897,7 +897,7 @@ static struct file_system_type rpc_pipe_fs_type = { }; static void -init_once(struct kmem_cache * cachep, void *foo) +init_once(void *foo) { struct rpc_inode *rpci = (struct rpc_inode *) foo; -- cgit v1.2.3 From 14fcc23fdc78e9d32372553ccf21758a9bd56fa1 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 28 Jul 2008 15:46:19 -0700 Subject: tmpfs: fix kernel BUG in shmem_delete_inode SuSE's insserv initscript ordering program hits kernel BUG at mm/shmem.c:814 on 2.6.26. It's using posix_fadvise on directories, and the shmem_readpage method added in 2.6.23 is letting POSIX_FADV_WILLNEED allocate useless pages to a tmpfs directory, incrementing i_blocks count but never decrementing it. Fix this by assigning shmem_aops (pointing to readpage and writepage and set_page_dirty) only when it's needed, on a regular file or a long symlink. Many thanks to Kel for outstanding bugreport and steps to reproduce it.
Reported-by: Kel Modderman Tested-by: Kel Modderman Signed-off-by: Hugh Dickins Cc: [2.6.25.x, 2.6.26.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/shmem.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm/shmem.c') diff --git a/mm/shmem.c b/mm/shmem.c index 952d361774bb..c1e5a3b4f758 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1513,7 +1513,6 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) inode->i_uid = current->fsuid; inode->i_gid = current->fsgid; inode->i_blocks = 0; - inode->i_mapping->a_ops = &shmem_aops; inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_generation = get_seconds(); @@ -1528,6 +1527,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) init_special_inode(inode, mode, dev); break; case S_IFREG: + inode->i_mapping->a_ops = &shmem_aops; inode->i_op = &shmem_inode_operations; inode->i_fop = &shmem_file_operations; mpol_shared_policy_init(&info->policy, @@ -1929,6 +1929,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s return error; } unlock_page(page); + inode->i_mapping->a_ops = &shmem_aops; inode->i_op = &shmem_symlink_inode_operations; kaddr = kmap_atomic(page, KM_USER0); memcpy(kaddr, symname, len); -- cgit v1.2.3 From 529ae9aaa08378cfe2a4350bded76f32cc8ff0ce Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sat, 2 Aug 2008 12:01:03 +0200 Subject: mm: rename page trylock Converting page lock to new locking bitops requires a change of page flag operation naming, so we might as well convert it to something nicer (!TestSetPageLocked_Lock => trylock_page, SetPageLocked => set_page_locked). This also facilitates lockdeping of page lock. Signed-off-by: Nick Piggin Acked-by: KOSAKI Motohiro Acked-by: Peter Zijlstra Acked-by: Andrew Morton Acked-by: Benjamin Herrenschmidt Signed-off-by: Linus Torvalds --- drivers/scsi/sg.c | 2 +- fs/afs/write.c | 2 +- fs/cifs/file.c | 2 +- fs/jbd/commit.c | 4 +-- fs/jbd2/commit.c | 2 +- fs/reiserfs/journal.c | 2 +- fs/splice.c | 2 +- fs/xfs/linux-2.6/xfs_aops.c | 4 +-- include/linux/page-flags.h | 2 +- include/linux/pagemap.h | 67 +++++++++++++++++++++++++++------------------ mm/filemap.c | 12 ++++---- mm/memory.c | 2 +- mm/migrate.c | 4 +-- mm/rmap.c | 2 +- mm/shmem.c | 4 +-- mm/swap.c | 2 +- mm/swap_state.c | 8 +++--- mm/swapfile.c | 2 +- mm/truncate.c | 4 +-- mm/vmscan.c | 4 +-- 20 files changed, 74 insertions(+), 59 deletions(-) (limited to 'mm/shmem.c') diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index d3b8ebb83776..3d36270a8b4d 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1747,7 +1747,7 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages, */ flush_dcache_page(pages[i]); /* ?? Is locking needed? 
I don't think so */ - /* if (TestSetPageLocked(pages[i])) + /* if (!trylock_page(pages[i])) goto out_unlock; */ } diff --git a/fs/afs/write.c b/fs/afs/write.c index 9a849ad3c489..065b4e10681a 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -404,7 +404,7 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb, page = pages[loop]; if (page->index > wb->last) break; - if (TestSetPageLocked(page)) + if (!trylock_page(page)) break; if (!PageDirty(page) || page_private(page) != (unsigned long) wb) { diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 0aac824371a5..e692c42f24b5 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1280,7 +1280,7 @@ retry: if (first < 0) lock_page(page); - else if (TestSetPageLocked(page)) + else if (!trylock_page(page)) break; if (unlikely(page->mapping != mapping)) { diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 2eccbfaa1d48..81a9ad7177ca 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c @@ -63,7 +63,7 @@ static void release_buffer_page(struct buffer_head *bh) goto nope; /* OK, it's a truncated page */ - if (TestSetPageLocked(page)) + if (!trylock_page(page)) goto nope; page_cache_get(page); @@ -446,7 +446,7 @@ void journal_commit_transaction(journal_t *journal) spin_lock(&journal->j_list_lock); } if (unlikely(!buffer_uptodate(bh))) { - if (TestSetPageLocked(bh->b_page)) { + if (!trylock_page(bh->b_page)) { spin_unlock(&journal->j_list_lock); lock_page(bh->b_page); spin_lock(&journal->j_list_lock); diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index adf0395f318e..f2ad061e95ec 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -67,7 +67,7 @@ static void release_buffer_page(struct buffer_head *bh) goto nope; /* OK, it's a truncated page */ - if (TestSetPageLocked(page)) + if (!trylock_page(page)) goto nope; page_cache_get(page); diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index c8f60ee183b5..ce2208b27118 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -627,7 +627,7 @@ static int journal_list_still_alive(struct super_block *s, static void release_buffer_page(struct buffer_head *bh) { struct page *page = bh->b_page; - if (!page->mapping && !TestSetPageLocked(page)) { + if (!page->mapping && trylock_page(page)) { page_cache_get(page); put_bh(bh); if (!page->mapping) diff --git a/fs/splice.c b/fs/splice.c index b30311ba8af6..1bbc6f4bb09c 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -371,7 +371,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, * for an in-flight io page */ if (flags & SPLICE_F_NONBLOCK) { - if (TestSetPageLocked(page)) { + if (!trylock_page(page)) { error = -EAGAIN; break; } diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 0b211cba1909..fa73179233ad 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -675,7 +675,7 @@ xfs_probe_cluster( } else pg_offset = PAGE_CACHE_SIZE; - if (page->index == tindex && !TestSetPageLocked(page)) { + if (page->index == tindex && trylock_page(page)) { pg_len = xfs_probe_page(page, pg_offset, mapped); unlock_page(page); } @@ -759,7 +759,7 @@ xfs_convert_page( if (page->index != tindex) goto fail; - if (TestSetPageLocked(page)) + if (!trylock_page(page)) goto fail; if (PageWriteback(page)) goto fail_unlock_page; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 25aaccdb2f26..c74d3e875314 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -163,7 +163,7 @@ static inline int Page##uname(struct page *page) \ struct page; /* forward declaration */ 
-PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked) +TESTPAGEFLAG(Locked, locked) PAGEFLAG(Error, error) PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 69ed3cb1197a..5da31c12101c 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -250,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping, return read_cache_page(mapping, index, filler, data); } -int add_to_page_cache_locked(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask); -int add_to_page_cache_lru(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp_mask); -extern void remove_from_page_cache(struct page *page); -extern void __remove_from_page_cache(struct page *page); - -/* - * Like add_to_page_cache_locked, but used to add newly allocated pages: - * the page is new, so we can just run SetPageLocked() against it. - */ -static inline int add_to_page_cache(struct page *page, - struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) -{ - int error; - - SetPageLocked(page); - error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); - if (unlikely(error)) - ClearPageLocked(page); - return error; -} - /* * Return byte-offset into filesystem object for page. */ @@ -294,13 +271,28 @@ extern int __lock_page_killable(struct page *page); extern void __lock_page_nosync(struct page *page); extern void unlock_page(struct page *page); +static inline void set_page_locked(struct page *page) +{ + set_bit(PG_locked, &page->flags); +} + +static inline void clear_page_locked(struct page *page) +{ + clear_bit(PG_locked, &page->flags); +} + +static inline int trylock_page(struct page *page) +{ + return !test_and_set_bit(PG_locked, &page->flags); +} + /* * lock_page may only be called if we have the page's inode pinned. */ static inline void lock_page(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) __lock_page(page); } @@ -312,7 +304,7 @@ static inline void lock_page(struct page *page) static inline int lock_page_killable(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) return __lock_page_killable(page); return 0; } @@ -324,7 +316,7 @@ static inline int lock_page_killable(struct page *page) static inline void lock_page_nosync(struct page *page) { might_sleep(); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) __lock_page_nosync(page); } @@ -409,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) return ret; } +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +int add_to_page_cache_lru(struct page *page, struct address_space *mapping, + pgoff_t index, gfp_t gfp_mask); +extern void remove_from_page_cache(struct page *page); +extern void __remove_from_page_cache(struct page *page); + +/* + * Like add_to_page_cache_locked, but used to add newly allocated pages: + * the page is new, so we can just run set_page_locked() against it. 
+ */ +static inline int add_to_page_cache(struct page *page, + struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) +{ + int error; + + set_page_locked(page); + error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); + if (unlikely(error)) + clear_page_locked(page); + return error; +} + #endif /* _LINUX_PAGEMAP_H */ diff --git a/mm/filemap.c b/mm/filemap.c index d97d1ad55473..54e968650855 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -558,14 +558,14 @@ EXPORT_SYMBOL(wait_on_page_bit); * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. * * The first mb is necessary to safely close the critical section opened by the - * TestSetPageLocked(), the second mb is necessary to enforce ordering between - * the clear_bit and the read of the waitqueue (to avoid SMP races with a - * parallel wait_on_page_locked()). + * test_and_set_bit() to lock the page; the second mb is necessary to enforce + * ordering between the clear_bit and the read of the waitqueue (to avoid SMP + * races with a parallel wait_on_page_locked()). */ void unlock_page(struct page *page) { smp_mb__before_clear_bit(); - if (!TestClearPageLocked(page)) + if (!test_and_clear_bit(PG_locked, &page->flags)) BUG(); smp_mb__after_clear_bit(); wake_up_page(page, PG_locked); @@ -931,7 +931,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index) struct page *page = find_get_page(mapping, index); if (page) { - if (!TestSetPageLocked(page)) + if (trylock_page(page)) return page; page_cache_release(page); return NULL; @@ -1027,7 +1027,7 @@ find_page: if (inode->i_blkbits == PAGE_CACHE_SHIFT || !mapping->a_ops->is_partially_uptodate) goto page_not_up_to_date; - if (TestSetPageLocked(page)) + if (!trylock_page(page)) goto page_not_up_to_date; if (!mapping->a_ops->is_partially_uptodate(page, desc, offset)) diff --git a/mm/memory.c b/mm/memory.c index a472bcd4b061..1002f473f497 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1789,7 +1789,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, * not dirty accountable. */ if (PageAnon(old_page)) { - if (!TestSetPageLocked(old_page)) { + if (trylock_page(old_page)) { reuse = can_share_swap_page(old_page); unlock_page(old_page); } diff --git a/mm/migrate.c b/mm/migrate.c index 153572fb60b8..2a80136b23bb 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -605,7 +605,7 @@ static int move_to_new_page(struct page *newpage, struct page *page) * establishing additional references. We are the only one * holding a reference to the new page at this point. 
*/ - if (TestSetPageLocked(newpage)) + if (!trylock_page(newpage)) BUG(); /* Prepare mapping for the new page.*/ @@ -667,7 +667,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, BUG_ON(charge); rc = -EAGAIN; - if (TestSetPageLocked(page)) { + if (!trylock_page(page)) { if (!force) goto move_newpage; lock_page(page); diff --git a/mm/rmap.c b/mm/rmap.c index 94a5246a3f98..1ea4e6fcee77 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -422,7 +422,7 @@ int page_referenced(struct page *page, int is_locked, referenced += page_referenced_anon(page, mem_cont); else if (is_locked) referenced += page_referenced_file(page, mem_cont); - else if (TestSetPageLocked(page)) + else if (!trylock_page(page)) referenced++; else { if (page->mapping) diff --git a/mm/shmem.c b/mm/shmem.c index c1e5a3b4f758..04fb4f1ab88e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1265,7 +1265,7 @@ repeat: } /* We have to do this with page locked to prevent races */ - if (TestSetPageLocked(swappage)) { + if (!trylock_page(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); wait_on_page_locked(swappage); @@ -1329,7 +1329,7 @@ repeat: shmem_swp_unmap(entry); filepage = find_get_page(mapping, idx); if (filepage && - (!PageUptodate(filepage) || TestSetPageLocked(filepage))) { + (!PageUptodate(filepage) || !trylock_page(filepage))) { spin_unlock(&info->lock); wait_on_page_locked(filepage); page_cache_release(filepage); diff --git a/mm/swap.c b/mm/swap.c index 7417a2adbe50..9e0cb3118079 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -444,7 +444,7 @@ void pagevec_strip(struct pagevec *pvec) for (i = 0; i < pagevec_count(pvec); i++) { struct page *page = pvec->pages[i]; - if (PagePrivate(page) && !TestSetPageLocked(page)) { + if (PagePrivate(page) && trylock_page(page)) { if (PagePrivate(page)) try_to_release_page(page, 0); unlock_page(page); diff --git a/mm/swap_state.c b/mm/swap_state.c index b8035b055129..167cf2dc8a03 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -201,7 +201,7 @@ void delete_from_swap_cache(struct page *page) */ static inline void free_swap_cache(struct page *page) { - if (PageSwapCache(page) && !TestSetPageLocked(page)) { + if (PageSwapCache(page) && trylock_page(page)) { remove_exclusive_swap_page(page); unlock_page(page); } @@ -302,9 +302,9 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * re-using the just freed swap entry for an existing page. * May fail (-ENOMEM) if radix-tree node allocation failed. */ - SetPageLocked(new_page); + set_page_locked(new_page); err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL); - if (!err) { + if (likely(!err)) { /* * Initiate read into locked page and return. 
*/ @@ -312,7 +312,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, swap_readpage(NULL, new_page); return new_page; } - ClearPageLocked(new_page); + clear_page_locked(new_page); swap_free(entry); } while (err != -ENOMEM); diff --git a/mm/swapfile.c b/mm/swapfile.c index bb7f79641f9e..1e330f2998fa 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -403,7 +403,7 @@ void free_swap_and_cache(swp_entry_t entry) if (p) { if (swap_entry_free(p, swp_offset(entry)) == 1) { page = find_get_page(&swapper_space, entry.val); - if (page && unlikely(TestSetPageLocked(page))) { + if (page && unlikely(!trylock_page(page))) { page_cache_release(page); page = NULL; } diff --git a/mm/truncate.c b/mm/truncate.c index 894e9a70699f..250505091d37 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -187,7 +187,7 @@ void truncate_inode_pages_range(struct address_space *mapping, if (page_index > next) next = page_index; next++; - if (TestSetPageLocked(page)) + if (!trylock_page(page)) continue; if (PageWriteback(page)) { unlock_page(page); @@ -280,7 +280,7 @@ unsigned long __invalidate_mapping_pages(struct address_space *mapping, pgoff_t index; int lock_failed; - lock_failed = TestSetPageLocked(page); + lock_failed = !trylock_page(page); /* * We really shouldn't be looking at the ->index of an diff --git a/mm/vmscan.c b/mm/vmscan.c index 75be453628bf..1ff1a58e7c10 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -496,7 +496,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, page = lru_to_page(page_list); list_del(&page->lru); - if (TestSetPageLocked(page)) + if (!trylock_page(page)) goto keep; VM_BUG_ON(PageActive(page)); @@ -582,7 +582,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, * A synchronous write - probably a ramdisk. Go * ahead and try to reclaim the page. */ - if (TestSetPageLocked(page)) + if (!trylock_page(page)) goto keep; if (PageDirty(page) || PageWriteback(page)) goto keep_locked; -- cgit v1.2.3