From 476ff8a0e3b17fc23994255aa9fd917d599d2ec7 Mon Sep 17 00:00:00 2001 From: Emil Medve Date: Fri, 23 May 2008 05:49:22 +1000 Subject: [POWERPC] Fix return value check logic in debugfs virq_mapping setup debugfs_create_file() returns a non-NULL (non-zero) value in case of success, not a NULL value. This fixes the following non-critical boot-time debugging error messages: [ 1.316386] calling irq_debugfs_init+0x0/0x50 [ 1.316399] initcall irq_debugfs_init+0x0/0x50 returned -12 after 0 msecs [ 1.316411] initcall irq_debugfs_init+0x0/0x50 returned with error code -12 Signed-off-by: Emil Medve Acked-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 2f73f705d564..bcc249d90c4d 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -1073,7 +1073,7 @@ static const struct file_operations virq_debug_fops = { static int __init irq_debugfs_init(void) { if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, - NULL, &virq_debug_fops)) + NULL, &virq_debug_fops) == NULL) return -ENOMEM; return 0; -- cgit v1.2.3 From da3de6df33f5f42ff9dc40093fbc884f524c9a49 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Fri, 13 Jun 2008 07:20:58 +1000 Subject: [POWERPC] Fix -Os kernel builds with newer gcc versions GCC 4.4.x looks to be adding support for generating out-of-line register saves/restores based on: http://gcc.gnu.org/ml/gcc-patches/2008-04/msg01678.html This breaks the kernel if we enable CONFIG_CC_OPTIMIZE_FOR_SIZE. To fix this we use the save/restore code from gcc, simplified down for our needs (integer only). Additionally, we have to link this code into each module. The other solution was to add EXPORT_SYMBOL() entries, which would have meant going through a trampoline; that seemed nonsensical for these out-of-line routines. Finally, we add some checks to prom_init_check.sh to ignore the out-of-line save/restore functions.
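As an aside for readers unfamiliar with the GCC feature being worked around (this sketch is editorial, not part of the patch; the example_* names are hypothetical): values that stay live across a function call must sit in the non-volatile registers r14-r31, so an ordinary C function like the one below needs prologue/epilogue code to save and restore them. With CONFIG_CC_OPTIMIZE_FOR_SIZE (-Os), a GCC that supports out-of-line saves may emit calls such as "bl _savegpr_27" and "b _restgpr_27_x" instead of the usual inline stw/lwz sequences, which is why crtsavres.o now has to be linked into vmlinux and into every module.

/* Hypothetical example; not taken from the kernel tree. */
extern int example_transform(int v);    /* assumed external helper */

int example_sum(const int *p, int n)
{
        int acc = 0, parity = 0, i;

        for (i = 0; i < n; i++) {
                acc += example_transform(p[i]); /* acc, parity, i, p and n   */
                parity ^= p[i];                 /* stay live across the call */
        }
        return acc + parity;
}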
Signed-off-by: Kumar Gala Signed-off-by: Paul Mackerras --- arch/powerpc/Makefile | 2 + arch/powerpc/kernel/prom_init_check.sh | 14 ++ arch/powerpc/lib/Makefile | 2 +- arch/powerpc/lib/crtsavres.S | 229 +++++++++++++++++++++++++++++++++ 4 files changed, 246 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/lib/crtsavres.S (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 86096ccc5914..b7d4c4ce2fe6 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -96,6 +96,8 @@ endif else KBUILD_CFLAGS += $(call cc-option,-mtune=power4) endif +else +LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o endif ifeq ($(CONFIG_TUNE_CELL),y) diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index 31729a9387df..2c7e8e87f770 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -48,6 +48,20 @@ do fi done + # ignore register save/restore funcitons + if [ "${UNDEF:0:9}" = "_restgpr_" ]; then + OK=1 + fi + if [ "${UNDEF:0:11}" = "_rest32gpr_" ]; then + OK=1 + fi + if [ "${UNDEF:0:9}" = "_savegpr_" ]; then + OK=1 + fi + if [ "${UNDEF:0:11}" = "_save32gpr_" ]; then + OK=1 + fi + if [ $OK -eq 0 ]; then ERROR=1 echo "Error: External symbol '$UNDEF' referenced" \ diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index c71d37dc6a88..e522b06cc42f 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -9,7 +9,7 @@ endif ifeq ($(CONFIG_PPC_MERGE),y) obj-y := string.o alloc.o \ checksum_$(CONFIG_WORD_SIZE).o -obj-$(CONFIG_PPC32) += div64.o copy_32.o +obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o obj-$(CONFIG_HAS_IOMEM) += devres.o endif diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S new file mode 100644 index 000000000000..70a9cd8a3008 --- /dev/null +++ b/arch/powerpc/lib/crtsavres.S @@ -0,0 +1,229 @@ +/* + * Special support for eabi and SVR4 + * + * Copyright (C) 1995, 1996, 1998, 2000, 2001 Free Software Foundation, Inc. + * Copyright 2008 Freescale Semiconductor, Inc. + * Written By Michael Meissner + * + * Based on gcc/config/rs6000/crtsavres.asm from gcc + * + * This file is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * In addition to the permissions in the GNU General Public License, the + * Free Software Foundation gives you unlimited permission to link the + * compiled version of this file with other programs, and to distribute + * those programs without any restriction coming from the use of this + * file. (The General Public License restrictions do apply in other + * respects; for example, they cover modification of the file, and + * distribution when not linked into another program.) + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + * As a special exception, if you link this library with files + * compiled with GCC to produce an executable, this does not cause + * the resulting executable to be covered by the GNU General Public License. + * This exception does not however invalidate any other reasons why + * the executable file might be covered by the GNU General Public License. + */ + +#include + + .file "crtsavres.S" + .section ".text" + +#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE + +/* Routines for saving integer registers, called by the compiler. */ +/* Called with r11 pointing to the stack header word of the caller of the */ +/* function, just beyond the end of the integer save area. */ + +_GLOBAL(_savegpr_14) +_GLOBAL(_save32gpr_14) + stw 14,-72(11) /* save gp registers */ +_GLOBAL(_savegpr_15) +_GLOBAL(_save32gpr_15) + stw 15,-68(11) +_GLOBAL(_savegpr_16) +_GLOBAL(_save32gpr_16) + stw 16,-64(11) +_GLOBAL(_savegpr_17) +_GLOBAL(_save32gpr_17) + stw 17,-60(11) +_GLOBAL(_savegpr_18) +_GLOBAL(_save32gpr_18) + stw 18,-56(11) +_GLOBAL(_savegpr_19) +_GLOBAL(_save32gpr_19) + stw 19,-52(11) +_GLOBAL(_savegpr_20) +_GLOBAL(_save32gpr_20) + stw 20,-48(11) +_GLOBAL(_savegpr_21) +_GLOBAL(_save32gpr_21) + stw 21,-44(11) +_GLOBAL(_savegpr_22) +_GLOBAL(_save32gpr_22) + stw 22,-40(11) +_GLOBAL(_savegpr_23) +_GLOBAL(_save32gpr_23) + stw 23,-36(11) +_GLOBAL(_savegpr_24) +_GLOBAL(_save32gpr_24) + stw 24,-32(11) +_GLOBAL(_savegpr_25) +_GLOBAL(_save32gpr_25) + stw 25,-28(11) +_GLOBAL(_savegpr_26) +_GLOBAL(_save32gpr_26) + stw 26,-24(11) +_GLOBAL(_savegpr_27) +_GLOBAL(_save32gpr_27) + stw 27,-20(11) +_GLOBAL(_savegpr_28) +_GLOBAL(_save32gpr_28) + stw 28,-16(11) +_GLOBAL(_savegpr_29) +_GLOBAL(_save32gpr_29) + stw 29,-12(11) +_GLOBAL(_savegpr_30) +_GLOBAL(_save32gpr_30) + stw 30,-8(11) +_GLOBAL(_savegpr_31) +_GLOBAL(_save32gpr_31) + stw 31,-4(11) + blr + +/* Routines for restoring integer registers, called by the compiler. */ +/* Called with r11 pointing to the stack header word of the caller of the */ +/* function, just beyond the end of the integer restore area. */ + +_GLOBAL(_restgpr_14) +_GLOBAL(_rest32gpr_14) + lwz 14,-72(11) /* restore gp registers */ +_GLOBAL(_restgpr_15) +_GLOBAL(_rest32gpr_15) + lwz 15,-68(11) +_GLOBAL(_restgpr_16) +_GLOBAL(_rest32gpr_16) + lwz 16,-64(11) +_GLOBAL(_restgpr_17) +_GLOBAL(_rest32gpr_17) + lwz 17,-60(11) +_GLOBAL(_restgpr_18) +_GLOBAL(_rest32gpr_18) + lwz 18,-56(11) +_GLOBAL(_restgpr_19) +_GLOBAL(_rest32gpr_19) + lwz 19,-52(11) +_GLOBAL(_restgpr_20) +_GLOBAL(_rest32gpr_20) + lwz 20,-48(11) +_GLOBAL(_restgpr_21) +_GLOBAL(_rest32gpr_21) + lwz 21,-44(11) +_GLOBAL(_restgpr_22) +_GLOBAL(_rest32gpr_22) + lwz 22,-40(11) +_GLOBAL(_restgpr_23) +_GLOBAL(_rest32gpr_23) + lwz 23,-36(11) +_GLOBAL(_restgpr_24) +_GLOBAL(_rest32gpr_24) + lwz 24,-32(11) +_GLOBAL(_restgpr_25) +_GLOBAL(_rest32gpr_25) + lwz 25,-28(11) +_GLOBAL(_restgpr_26) +_GLOBAL(_rest32gpr_26) + lwz 26,-24(11) +_GLOBAL(_restgpr_27) +_GLOBAL(_rest32gpr_27) + lwz 27,-20(11) +_GLOBAL(_restgpr_28) +_GLOBAL(_rest32gpr_28) + lwz 28,-16(11) +_GLOBAL(_restgpr_29) +_GLOBAL(_rest32gpr_29) + lwz 29,-12(11) +_GLOBAL(_restgpr_30) +_GLOBAL(_rest32gpr_30) + lwz 30,-8(11) +_GLOBAL(_restgpr_31) +_GLOBAL(_rest32gpr_31) + lwz 31,-4(11) + blr + +/* Routines for restoring integer registers, called by the compiler. */ +/* Called with r11 pointing to the stack header word of the caller of the */ +/* function, just beyond the end of the integer restore area. 
*/ + +_GLOBAL(_restgpr_14_x) +_GLOBAL(_rest32gpr_14_x) + lwz 14,-72(11) /* restore gp registers */ +_GLOBAL(_restgpr_15_x) +_GLOBAL(_rest32gpr_15_x) + lwz 15,-68(11) +_GLOBAL(_restgpr_16_x) +_GLOBAL(_rest32gpr_16_x) + lwz 16,-64(11) +_GLOBAL(_restgpr_17_x) +_GLOBAL(_rest32gpr_17_x) + lwz 17,-60(11) +_GLOBAL(_restgpr_18_x) +_GLOBAL(_rest32gpr_18_x) + lwz 18,-56(11) +_GLOBAL(_restgpr_19_x) +_GLOBAL(_rest32gpr_19_x) + lwz 19,-52(11) +_GLOBAL(_restgpr_20_x) +_GLOBAL(_rest32gpr_20_x) + lwz 20,-48(11) +_GLOBAL(_restgpr_21_x) +_GLOBAL(_rest32gpr_21_x) + lwz 21,-44(11) +_GLOBAL(_restgpr_22_x) +_GLOBAL(_rest32gpr_22_x) + lwz 22,-40(11) +_GLOBAL(_restgpr_23_x) +_GLOBAL(_rest32gpr_23_x) + lwz 23,-36(11) +_GLOBAL(_restgpr_24_x) +_GLOBAL(_rest32gpr_24_x) + lwz 24,-32(11) +_GLOBAL(_restgpr_25_x) +_GLOBAL(_rest32gpr_25_x) + lwz 25,-28(11) +_GLOBAL(_restgpr_26_x) +_GLOBAL(_rest32gpr_26_x) + lwz 26,-24(11) +_GLOBAL(_restgpr_27_x) +_GLOBAL(_rest32gpr_27_x) + lwz 27,-20(11) +_GLOBAL(_restgpr_28_x) +_GLOBAL(_rest32gpr_28_x) + lwz 28,-16(11) +_GLOBAL(_restgpr_29_x) +_GLOBAL(_rest32gpr_29_x) + lwz 29,-12(11) +_GLOBAL(_restgpr_30_x) +_GLOBAL(_rest32gpr_30_x) + lwz 30,-8(11) +_GLOBAL(_restgpr_31_x) +_GLOBAL(_rest32gpr_31_x) + lwz 0,4(11) + lwz 31,-4(11) + mtlr 0 + mr 1,11 + blr +#endif -- cgit v1.2.3 From 4a96db3c780f30432653f99cbff193ba3e474e0b Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 16 Jun 2008 10:50:13 +1000 Subject: [POWERPC] Remove ppc32's export of console_drivers There are no in-tree uses of the export any more, and in linux-next there is a change that exports it globally, which causes warnings: WARNING: vmlinux: 'console_drivers' exported twice. Previous export was in vmlinux and in one case (mpc85xx_defconfig) a build error: kernel/built-in.o: In function `__crc_console_drivers': (*ABS*+0x1eb0e6f5): multiple definition of `__crc_console_drivers' So remove the export now. Also, there is no longer any need to include linux/console.h. Signed-off-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/ppc_ksyms.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index cf6b5a7d8b3f..d3ac631cbd26 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -8,7 +8,6 @@ #include #include #include -#include <linux/console.h> #include #include #include @@ -160,7 +159,6 @@ EXPORT_SYMBOL(screen_info); EXPORT_SYMBOL(timer_interrupt); EXPORT_SYMBOL(irq_desc); EXPORT_SYMBOL(tb_ticks_per_jiffy); -EXPORT_SYMBOL(console_drivers); EXPORT_SYMBOL(cacheable_memcpy); #endif -- cgit v1.2.3 From b17879f71c2eb4a10f5a63918819d9d572b23a9a Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Wed, 18 Jun 2008 08:34:39 +1000 Subject: [POWERPC] 4xx: Clear new TLB cache attribute bits in Data Storage vector A recent commit added support for the new 440x6 and 464 cores that have the added WL1, IL1I, IL1D, IL2I, and IL2D bits for the caching attributes in the TLBs. The new bits were cleared in the finish_tlb_load function; however, a similar piece of code in the DataStorage interrupt vector was missed.
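Editorial note on the rlwinm change in the hunk below (not part of the patch): rlwinm's last two operands (MB, ME) select a wrapping bit range in big-endian PowerPC bit numbering, and with a zero shift the instruction simply ANDs with that mask. A small user-space sketch of what the old and new masks keep and clear, assuming that bit numbering:

#include <stdint.h>
#include <stdio.h>

/* Big-endian PPC numbering: bit 0 is the most significant bit of the word. */
static uint32_t ppc_mask(int mb, int me)
{
        uint32_t from_mb = 0xffffffffu >> mb;           /* bits mb..31 */
        uint32_t to_me   = 0xffffffffu << (31 - me);    /* bits 0..me  */
        return (mb <= me) ? (from_mb & to_me) : (from_mb | to_me);
}

int main(void)
{
        /* old: rlwinm r11,r11,0,20,15 clears only bits 16-19 (U0-U3) */
        printf("old mask: 0x%08x\n", ppc_mask(20, 15)); /* 0xffff0fff */
        /* new: rlwinm r11,r11,0,20,10 also clears bits 11-15
         * (WL1, IL1I, IL1D, IL2I, IL2D) */
        printf("new mask: 0x%08x\n", ppc_mask(20, 10)); /* 0xffe00fff */
        return 0;
}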
Signed-off-by: Josh Boyer Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_44x.S | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index c2b9dc4fce5d..22b5d2c459a3 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -368,7 +368,12 @@ interrupt_base: rlwimi r11,r13,0,26,31 /* Insert static perms */ - rlwinm r11,r11,0,20,15 /* Clear U0-U3 */ + /* + * Clear U0-U3 and WL1 IL1I IL1D IL2I IL2D bits which are added + * on newer 440 cores like the 440x6 used on AMCC 460EX/460GT (see + * include/asm-powerpc/pgtable-ppc32.h for details). + */ + rlwinm r11,r11,0,20,10 /* find the TLB index that caused the fault. It has to be here. */ tlbsx r10, 0, r10 -- cgit v1.2.3 From 89f5b7da2a6bad2e84670422ab8192382a5aeb9f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 20 Jun 2008 11:18:25 -0700 Subject: Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit 557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed the ZERO_PAGE from the VM mappings, any users of get_user_pages() will generally now populate the VM with real empty pages needlessly. We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but since fault handling no longer uses ZERO_PAGE for new anonymous pages, we now need to handle that special case in follow_page() instead. In particular, the removal of ZERO_PAGE effectively removed the core file writing optimization where we would skip writing pages that had not been populated at all, and increased memory pressure a lot by allocating all those useless newly zeroed pages. This reinstates the optimization by making the unmapped PTE case the same as for a non-existent page table, which already did this correctly. While at it, this also fixes the XIP case for follow_page(), where the caller could not differentiate between the case of a page that simply could not be used (because it had no "struct page" associated with it) and a page that just wasn't mapped. We do that by simply returning an error pointer for pages that could not be turned into a "struct page *". The error is arbitrarily picked to be EFAULT, since that was what get_user_pages() already used for the equivalent IO-mapped page case. 
[ Also removed an impossible test for pte_offset_map_lock() failing: that's not how that function works ] Acked-by: Oleg Nesterov Acked-by: Nick Piggin Cc: KAMEZAWA Hiroyuki Cc: Hugh Dickins Cc: Andrew Morton Cc: Ingo Molnar Cc: Roland McGrath Signed-off-by: Linus Torvalds --- arch/powerpc/kernel/vdso.c | 2 +- mm/memory.c | 17 +++++++++++++---- mm/migrate.c | 10 ++++++++++ 3 files changed, 24 insertions(+), 5 deletions(-) (limited to 'arch/powerpc/kernel') diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index c21a626af676..ce245a850db2 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -142,7 +142,7 @@ static void dump_one_vdso_page(struct page *pg, struct page *upg) printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT), page_count(pg), pg->flags); - if (upg/* && pg != upg*/) { + if (upg && !IS_ERR(upg) /* && pg != upg*/) { printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg) << PAGE_SHIFT), page_count(upg), diff --git a/mm/memory.c b/mm/memory.c index 19e0ae9beecb..9aefaae46858 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -999,17 +999,15 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, goto no_page_table; ptep = pte_offset_map_lock(mm, pmd, address, &ptl); - if (!ptep) - goto out; pte = *ptep; if (!pte_present(pte)) - goto unlock; + goto no_page; if ((flags & FOLL_WRITE) && !pte_write(pte)) goto unlock; page = vm_normal_page(vma, address, pte); if (unlikely(!page)) - goto unlock; + goto bad_page; if (flags & FOLL_GET) get_page(page); @@ -1024,6 +1022,15 @@ unlock: out: return page; +bad_page: + pte_unmap_unlock(ptep, ptl); + return ERR_PTR(-EFAULT); + +no_page: + pte_unmap_unlock(ptep, ptl); + if (!pte_none(pte)) + return page; + /* Fall through to ZERO_PAGE handling */ no_page_table: /* * When core dumping an enormous anonymous area that nobody @@ -1159,6 +1166,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, cond_resched(); } + if (IS_ERR(page)) + return i ? i : PTR_ERR(page); if (pages) { pages[i] = page; diff --git a/mm/migrate.c b/mm/migrate.c index 449d77d409f5..112bcaeaa104 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -865,6 +865,11 @@ static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm, goto set_status; page = follow_page(vma, pp->addr, FOLL_GET); + + err = PTR_ERR(page); + if (IS_ERR(page)) + goto set_status; + err = -ENOENT; if (!page) goto set_status; @@ -928,6 +933,11 @@ static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm) goto set_status; page = follow_page(vma, pm->addr, 0); + + err = PTR_ERR(page); + if (IS_ERR(page)) + goto set_status; + err = -ENOENT; /* Use PageReserved to check for zero page */ if (!page || PageReserved(page)) -- cgit v1.2.3
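To close with the new follow_page() contract made concrete (an editorial sketch, not part of Linus's commit; the example_inspect() name is hypothetical), a caller now has three cases to handle, mirroring the checks added to do_move_pages() above: an ERR_PTR() return means the address is mapped but has no usable struct page behind it (the XIP/pfn-mapping case, reported as -EFAULT), NULL means nothing is mapped there (the reinstated ZERO_PAGE optimization), and any other value is a normal page reference.

#include <linux/err.h>
#include <linux/mm.h>

int example_inspect(struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;

        page = follow_page(vma, addr, FOLL_GET);

        if (IS_ERR(page))               /* mapped, but no struct page */
                return PTR_ERR(page);   /* -EFAULT */
        if (!page)                      /* nothing mapped: treat as absent */
                return -ENOENT;

        /* ... inspect or use the page here ... */

        put_page(page);                 /* drop the FOLL_GET reference */
        return 0;
}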