From 441110a547f86a2fd0c40bf04b274853622c53cc Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:30 -0700 Subject: vmlinux.lds.h: Provide EMIT_PT_NOTE to indicate export of .notes In preparation for moving NOTES into RO_DATA, provide a mechanism for architectures that want to emit a PT_NOTE Program Header to do so. Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Heiko Carstens # s390 Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-9-keescook@chromium.org --- include/asm-generic/vmlinux.lds.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include/asm-generic') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index dae64600ccbf..f5dd45ce73f1 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -54,6 +54,14 @@ #define LOAD_OFFSET 0 #endif +/* + * Only some architectures want to have the .notes segment visible in + * a separate PT_NOTE ELF Program Header. + */ +#ifdef EMITS_PT_NOTE +#define NOTES_HEADERS :text :note +#endif + /* Align . to a 8 byte boundary equals to maximum function alignment. */ #define ALIGN_FUNCTION() . = ALIGN(8) -- cgit v1.2.3 From fbe6a8e618a2d70621cff277e24f6eb338d3d149 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:31 -0700 Subject: vmlinux.lds.h: Move Program Header restoration into NOTES macro In preparation for moving NOTES into RO_DATA, make the Program Header assignment restoration be part of the NOTES macro itself. 
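For illustration, a minimal sketch of how an architecture opts in to PT_NOTE emission and of what the NOTES macro then expands to (the "foo" architecture name and the PHDRS flags are hypothetical; the expansion itself follows the hunks in these two patches):

	/* arch/foo/kernel/vmlinux.lds.S -- hypothetical example */
	#define EMITS_PT_NOTE
	#include <asm-generic/vmlinux.lds.h>

	PHDRS {
		text PT_LOAD FLAGS(5);	/* R_E */
		note PT_NOTE FLAGS(0);	/* ___ */
	}

	SECTIONS
	{
		...
		/* NOTES now expands to roughly: */
		.notes : AT(ADDR(.notes) - LOAD_OFFSET) {
			__start_notes = .;
			KEEP(*(.note.*))
			__stop_notes = .;
		} :text :note
		/* followed by an empty stub that makes :text the default
		 * program header again, so later sections do not also land
		 * in PT_NOTE: */
		__restore_ph : { *(.__restore_ph) } :text
		...
	}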
Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Heiko Carstens # s390 Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-10-keescook@chromium.org --- arch/alpha/kernel/vmlinux.lds.S | 5 +---- arch/ia64/kernel/vmlinux.lds.S | 4 +--- arch/mips/kernel/vmlinux.lds.S | 3 +-- arch/powerpc/kernel/vmlinux.lds.S | 4 +--- arch/s390/kernel/vmlinux.lds.S | 4 +--- arch/x86/kernel/vmlinux.lds.S | 3 +-- include/asm-generic/vmlinux.lds.h | 13 +++++++++++-- 7 files changed, 17 insertions(+), 19 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index 363a60ba7c31..cdfdc91ce64c 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -34,10 +34,7 @@ SECTIONS swapper_pg_dir = SWAPPER_PGD; _etext = .; /* End of text section */ - NOTES :text :note - .dummy : { - *(.dummy) - } :text + NOTES RODATA EXCEPTION_TABLE(16) diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 7cf4958b732d..bfc937ec168c 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -70,9 +70,7 @@ SECTIONS { /* * Read-only data */ - NOTES :text :note /* put .notes in text and mark in PT_NOTE */ - code_continues : { - } :text /* switch back to regular program... */ + NOTES EXCEPTION_TABLE(16) diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 1c95612eb800..6a22f531d815 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -81,8 +81,7 @@ SECTIONS __stop___dbe_table = .; } - NOTES NOTES_HEADERS - .dummy : { *(.dummy) } :text + NOTES _sdata = .; /* Start of data section */ RODATA diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 7e26e20c8324..4f19d814d592 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -164,9 +164,7 @@ SECTIONS #endif EXCEPTION_TABLE(0) - NOTES :text :note - /* Restore program header away from PT_NOTE. */ - .dummy : { *(.dummy) } :text + NOTES /* * Init sections discarded at runtime diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 646d939346df..f88eedeb915a 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -52,9 +52,7 @@ SECTIONS _etext = .; /* End of text section */ } :text = 0x0700 - NOTES :text :note - - .dummy : { *(.dummy) } :text + NOTES RO_DATA_SECTION(PAGE_SIZE) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 2e18bf5c1aed..8be25b09c2b7 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -148,8 +148,7 @@ SECTIONS _etext = .; } :text = 0x9090 - NOTES :text :note - .dummy : { *(.dummy) } :text + NOTES EXCEPTION_TABLE(16) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index f5dd45ce73f1..97d4299f14dc 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -56,10 +56,18 @@ /* * Only some architectures want to have the .notes segment visible in - * a separate PT_NOTE ELF Program Header. 
+ * a separate PT_NOTE ELF Program Header. When this happens, it needs + * to be visible in both the kernel text's PT_LOAD and the PT_NOTE + * Program Headers. In this case, though, the PT_LOAD needs to be made + * the default again so that all the following sections don't also end + * up in the PT_NOTE Program Header. */ #ifdef EMITS_PT_NOTE #define NOTES_HEADERS :text :note +#define NOTES_HEADERS_RESTORE __restore_ph : { *(.__restore_ph) } :text +#else +#define NOTES_HEADERS +#define NOTES_HEADERS_RESTORE #endif /* Align . to a 8 byte boundary equals to maximum function alignment. */ @@ -798,7 +806,8 @@ __start_notes = .; \ KEEP(*(.note.*)) \ __stop_notes = .; \ - } + } NOTES_HEADERS \ + NOTES_HEADERS_RESTORE #define INIT_SETUP(initsetup_align) \ . = ALIGN(initsetup_align); \ -- cgit v1.2.3 From eaf937075c9a42eb8ba51eb3050773d7205d3595 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:32 -0700 Subject: vmlinux.lds.h: Move NOTES into RO_DATA The .notes section should be non-executable read-only data. As such, move it to the RO_DATA macro instead of being per-architecture defined. Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Heiko Carstens # s390 Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-11-keescook@chromium.org --- arch/alpha/kernel/vmlinux.lds.S | 2 -- arch/arc/kernel/vmlinux.lds.S | 2 -- arch/arm/kernel/vmlinux-xip.lds.S | 2 -- arch/arm/kernel/vmlinux.lds.S | 2 -- arch/arm64/kernel/vmlinux.lds.S | 1 - arch/c6x/kernel/vmlinux.lds.S | 1 - arch/csky/kernel/vmlinux.lds.S | 1 - arch/h8300/kernel/vmlinux.lds.S | 1 - arch/hexagon/kernel/vmlinux.lds.S | 1 - arch/ia64/kernel/vmlinux.lds.S | 2 -- arch/microblaze/kernel/vmlinux.lds.S | 1 - arch/mips/kernel/vmlinux.lds.S | 2 -- arch/nds32/kernel/vmlinux.lds.S | 1 - arch/nios2/kernel/vmlinux.lds.S | 1 - arch/openrisc/kernel/vmlinux.lds.S | 1 - arch/parisc/kernel/vmlinux.lds.S | 1 - arch/powerpc/kernel/vmlinux.lds.S | 2 -- arch/riscv/kernel/vmlinux.lds.S | 1 - arch/s390/kernel/vmlinux.lds.S | 2 -- arch/sh/kernel/vmlinux.lds.S | 1 - arch/sparc/kernel/vmlinux.lds.S | 1 - arch/um/include/asm/common.lds.S | 1 - arch/unicore32/kernel/vmlinux.lds.S | 1 - arch/x86/kernel/vmlinux.lds.S | 2 -- arch/xtensa/kernel/vmlinux.lds.S | 1 - include/asm-generic/vmlinux.lds.h | 9 +++++---- 26 files changed, 5 insertions(+), 38 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index cdfdc91ce64c..bf28043485f6 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -34,8 +34,6 @@ SECTIONS swapper_pg_dir = SWAPPER_PGD; _etext = .; /* End of text section */ - NOTES - RODATA EXCEPTION_TABLE(16) diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S index 6c693a9d29b6..1d6eef4b6976 100644 --- a/arch/arc/kernel/vmlinux.lds.S +++ b/arch/arc/kernel/vmlinux.lds.S @@ -118,8 +118,6 @@ SECTIONS /DISCARD/ : { *(.eh_frame) } #endif - NOTES - . = ALIGN(PAGE_SIZE); _end = . 
; diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index 8c74037ade22..d2a9651c24ad 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -70,8 +70,6 @@ SECTIONS ARM_UNWIND_SECTIONS #endif - NOTES - _etext = .; /* End of text and rodata section */ ARM_VECTORS diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 23150c0f0f4d..068db6860867 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -81,8 +81,6 @@ SECTIONS ARM_UNWIND_SECTIONS #endif - NOTES - #ifdef CONFIG_STRICT_KERNEL_RWX . = ALIGN(1<<SECTION_SHIFT); [...] -- cgit v1.2.3 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:33 -0700 Subject: vmlinux.lds.h: Replace RODATA with RO_DATA There's no reason to keep the RODATA macro: replace the callers with the expected RO_DATA macro. Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-12-keescook@chromium.org --- arch/alpha/kernel/vmlinux.lds.S | 2 +- arch/ia64/kernel/vmlinux.lds.S | 2 +- arch/microblaze/kernel/vmlinux.lds.S | 2 +- arch/mips/kernel/vmlinux.lds.S | 2 +- arch/um/include/asm/common.lds.S | 2 +- arch/xtensa/kernel/vmlinux.lds.S | 2 +- include/asm-generic/vmlinux.lds.h | 4 +--- 7 files changed, 7 insertions(+), 9 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index bf28043485f6..af411817dd7d 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -34,7 +34,7 @@ SECTIONS swapper_pg_dir = SWAPPER_PGD; _etext = .; /* End of text section */ - RODATA + RO_DATA(4096) EXCEPTION_TABLE(16) /* Will be freed after init */ diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index fae077595756..11d5115bc44d 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -104,7 +104,7 @@ SECTIONS { code_continues2 : { } :text - RODATA + RO_DATA(4096) .opd : AT(ADDR(.opd) - LOAD_OFFSET) { __start_opd = .; diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index d008e50bb212..2299694748ea 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -51,7 +51,7 @@ SECTIONS { } . = ALIGN(16); - RODATA + RO_DATA(4096) EXCEPTION_TABLE(16) /* diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 91e566defc16..a5f00ec73ea6 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -82,7 +82,7 @@ SECTIONS } _sdata = .; /* Start of data section */ - RODATA + RO_DATA(4096) /* writeable */ .data : { /* Data */ diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S index 91aca356095f..7145ce699982 100644 --- a/arch/um/include/asm/common.lds.S +++ b/arch/um/include/asm/common.lds.S @@ -9,7 +9,7 @@ _sdata = .; PROVIDE (sdata = .); - RODATA + RO_DATA(4096) .unprotected : { *(.unprotected) } .
= ALIGN(4096); diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index a0a843745695..b97e5798b9cf 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -124,7 +124,7 @@ SECTIONS . = ALIGN(16); - RODATA + RO_DATA(4096) /* Relocation table */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index dc3390ec6b60..a0a989fbe411 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -518,9 +518,7 @@ . = ALIGN((align)); \ __end_rodata = .; -/* RODATA & RO_DATA provided for backward compatibility. - * All archs are supposed to use RO_DATA() */ -#define RODATA RO_DATA_SECTION(4096) +/* All archs are supposed to use RO_DATA() */ #define RO_DATA(align) RO_DATA_SECTION(align) /* -- cgit v1.2.3 From 93240b327929ff03c1878ea8badc5c6bd86f053f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:34 -0700 Subject: vmlinux.lds.h: Replace RO_DATA_SECTION with RO_DATA Finish renaming RO_DATA_SECTION to RO_DATA. (Calling this a "section" is a lie, since it's multiple sections and section flags cannot be applied to the macro.) Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Heiko Carstens # s390 Acked-by: Geert Uytterhoeven # m68k Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-13-keescook@chromium.org --- arch/arc/kernel/vmlinux.lds.S | 2 +- arch/c6x/kernel/vmlinux.lds.S | 2 +- arch/csky/kernel/vmlinux.lds.S | 2 +- arch/h8300/kernel/vmlinux.lds.S | 2 +- arch/hexagon/kernel/vmlinux.lds.S | 2 +- arch/m68k/kernel/vmlinux-nommu.lds | 2 +- arch/nds32/kernel/vmlinux.lds.S | 2 +- arch/nios2/kernel/vmlinux.lds.S | 2 +- arch/openrisc/kernel/vmlinux.lds.S | 4 ++-- arch/parisc/kernel/vmlinux.lds.S | 4 ++-- arch/riscv/kernel/vmlinux.lds.S | 2 +- arch/s390/kernel/vmlinux.lds.S | 2 +- arch/unicore32/kernel/vmlinux.lds.S | 2 +- include/asm-generic/vmlinux.lds.h | 7 ++----- 14 files changed, 17 insertions(+), 20 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S index 1d6eef4b6976..7d1d27066deb 100644 --- a/arch/arc/kernel/vmlinux.lds.S +++ b/arch/arc/kernel/vmlinux.lds.S @@ -95,7 +95,7 @@ SECTIONS _etext = .; _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) /* * 1. 
this is .data essentially diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S index d6e3802536b3..a3547f9d415b 100644 --- a/arch/c6x/kernel/vmlinux.lds.S +++ b/arch/c6x/kernel/vmlinux.lds.S @@ -82,7 +82,7 @@ SECTIONS EXCEPTION_TABLE(16) - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) .const : { *(.const .const.* .gnu.linkonce.r.*) diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S index 75dd31412242..8598bd7a7bcd 100644 --- a/arch/csky/kernel/vmlinux.lds.S +++ b/arch/csky/kernel/vmlinux.lds.S @@ -49,7 +49,7 @@ SECTIONS _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index 88776e785245..d3247d33b115 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -38,7 +38,7 @@ SECTIONS _etext = . ; } EXCEPTION_TABLE(16) - RO_DATA_SECTION(4) + RO_DATA(4) ROMEND = .; #if defined(CONFIG_ROMKERNEL) . = RAMTOP; diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S index 6a6e8fc422ee..0145251fa317 100644 --- a/arch/hexagon/kernel/vmlinux.lds.S +++ b/arch/hexagon/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS _sdata = .; RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE) - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) _edata = .; EXCEPTION_TABLE(16) diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds index cf6edda38971..de80f8b8ae78 100644 --- a/arch/m68k/kernel/vmlinux-nommu.lds +++ b/arch/m68k/kernel/vmlinux-nommu.lds @@ -60,7 +60,7 @@ SECTIONS { #endif _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S index c4f1c5a604c3..10ff570ba95b 100644 --- a/arch/nds32/kernel/vmlinux.lds.S +++ b/arch/nds32/kernel/vmlinux.lds.S @@ -53,7 +53,7 @@ SECTIONS _etext = .; /* End of text and rodata section */ _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S index 20e4078b3477..318804a2c7a1 100644 --- a/arch/nios2/kernel/vmlinux.lds.S +++ b/arch/nios2/kernel/vmlinux.lds.S @@ -49,7 +49,7 @@ SECTIONS __init_end = .; _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index 142c51c994f5..f73e0d3ea09f 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -67,8 +67,8 @@ SECTIONS _sdata = .; - /* Page alignment required for RO_DATA_SECTION */ - RO_DATA_SECTION(PAGE_SIZE) + /* Page alignment required for RO_DATA */ + RO_DATA(PAGE_SIZE) _e_kernel_ro = .; /* Whatever comes after _e_kernel_ro had better be page-aligend, too */ diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 168d12b2ebb8..e1c563c7dca1 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -109,7 +109,7 @@ SECTIONS _sdata = .; /* Architecturally we need to keep __gp below 0x1000000 and thus - * in front of RO_DATA_SECTION() which stores lots of tracepoint + * in front of RO_DATA() which stores lots of tracepoint * and ftrace symbols. */ #ifdef CONFIG_64BIT . 
= ALIGN(16); @@ -127,7 +127,7 @@ SECTIONS } #endif - RO_DATA_SECTION(8) + RO_DATA(8) /* RO because of BUILDTIME_EXTABLE_SORT */ EXCEPTION_TABLE(8) diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index df5229c4034d..66dc17d24dd9 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -52,7 +52,7 @@ SECTIONS /* Start of data section */ _sdata = .; - RO_DATA_SECTION(L1_CACHE_BYTES) + RO_DATA(L1_CACHE_BYTES) .srodata : { *(.srodata*) } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index beb4df053e20..b33c4823f8b5 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -52,7 +52,7 @@ SECTIONS _etext = .; /* End of text section */ } :text = 0x0700 - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) . = ALIGN(PAGE_SIZE); _sdata = .; /* Start of data section */ diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S index 78c4c56057b0..367c80313bec 100644 --- a/arch/unicore32/kernel/vmlinux.lds.S +++ b/arch/unicore32/kernel/vmlinux.lds.S @@ -43,7 +43,7 @@ SECTIONS _etext = .; _sdata = .; - RO_DATA_SECTION(PAGE_SIZE) + RO_DATA(PAGE_SIZE) RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index a0a989fbe411..061e57c609f6 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -23,7 +23,7 @@ * _etext = .; * * _sdata = .; - * RO_DATA_SECTION(PAGE_SIZE) + * RO_DATA(PAGE_SIZE) * RW_DATA_SECTION(...) * _edata = .; * @@ -363,7 +363,7 @@ /* * Read only Data */ -#define RO_DATA_SECTION(align) \ +#define RO_DATA(align) \ . = ALIGN((align)); \ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ __start_rodata = .; \ @@ -518,9 +518,6 @@ . = ALIGN((align)); \ __end_rodata = .; -/* All archs are supposed to use RO_DATA() */ -#define RO_DATA(align) RO_DATA_SECTION(align) - /* * .text section. Map to function alignment to avoid address changes * during second ld run in second ld pass when generating System.map -- cgit v1.2.3 From c9174047b48d700a785b633319dd7d27288b86be Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:35 -0700 Subject: vmlinux.lds.h: Replace RW_DATA_SECTION with RW_DATA Rename RW_DATA_SECTION to RW_DATA. (Calling this a "section" is a lie, since it's multiple sections and section flags cannot be applied to the macro.) 
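For context, after the RO_DATA and RW_DATA renames an architecture linker script uses these macros roughly as follows (a condensed sketch based on the usage example at the top of include/asm-generic/vmlinux.lds.h; the alignment arguments shown are the ones most architectures in this series pass):

	SECTIONS
	{
		. = START;
		__init_begin = .;
		HEAD_TEXT_SECTION
		INIT_TEXT_SECTION(PAGE_SIZE)
		INIT_DATA_SECTION(...)
		__init_end = .;

		_stext = .;
		/* text output sections here */
		_etext = .;

		_sdata = .;
		RO_DATA(PAGE_SIZE)
		RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
		_edata = .;

		EXCEPTION_TABLE(16)
		BSS_SECTION(0, 0, 0)
		_end = .;
	}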
Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Heiko Carstens # s390 Acked-by: Geert Uytterhoeven # m68k Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: Will Deacon Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-14-keescook@chromium.org --- arch/alpha/kernel/vmlinux.lds.S | 2 +- arch/arc/kernel/vmlinux.lds.S | 2 +- arch/arm/kernel/vmlinux-xip.lds.S | 2 +- arch/arm/kernel/vmlinux.lds.S | 2 +- arch/arm64/kernel/vmlinux.lds.S | 2 +- arch/csky/kernel/vmlinux.lds.S | 2 +- arch/h8300/kernel/vmlinux.lds.S | 2 +- arch/hexagon/kernel/vmlinux.lds.S | 2 +- arch/m68k/kernel/vmlinux-nommu.lds | 2 +- arch/m68k/kernel/vmlinux-std.lds | 2 +- arch/m68k/kernel/vmlinux-sun3.lds | 2 +- arch/microblaze/kernel/vmlinux.lds.S | 2 +- arch/nds32/kernel/vmlinux.lds.S | 2 +- arch/nios2/kernel/vmlinux.lds.S | 2 +- arch/openrisc/kernel/vmlinux.lds.S | 2 +- arch/parisc/kernel/vmlinux.lds.S | 2 +- arch/riscv/kernel/vmlinux.lds.S | 2 +- arch/s390/kernel/vmlinux.lds.S | 2 +- arch/sh/kernel/vmlinux.lds.S | 2 +- arch/sparc/kernel/vmlinux.lds.S | 2 +- arch/unicore32/kernel/vmlinux.lds.S | 2 +- arch/xtensa/kernel/vmlinux.lds.S | 2 +- include/asm-generic/vmlinux.lds.h | 4 ++-- 23 files changed, 24 insertions(+), 24 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index af411817dd7d..edc45f45523b 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS _sdata = .; /* Start of rw data section */ _data = .; - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) .got : { *(.got) diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S index 7d1d27066deb..54139a6f469b 100644 --- a/arch/arc/kernel/vmlinux.lds.S +++ b/arch/arc/kernel/vmlinux.lds.S @@ -101,7 +101,7 @@ SECTIONS * 1. this is .data essentially * 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned */ - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index d2a9651c24ad..21b8b271c80d 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -112,7 +112,7 @@ SECTIONS . 
= ALIGN(THREAD_SIZE); _sdata = .; - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) { *(.data..ro_after_init) } diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 068db6860867..319ccb10846a 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -141,7 +141,7 @@ SECTIONS __init_end = .; _sdata = .; - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; BSS_SECTION(0, 0, 0) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index e7dafc29b1fa..a4b3e6c0680c 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -214,7 +214,7 @@ SECTIONS _data = .; _sdata = .; - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN) /* * Data written with the MMU off but read with the MMU on requires diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S index 8598bd7a7bcd..2ff37beaf2bf 100644 --- a/arch/csky/kernel/vmlinux.lds.S +++ b/arch/csky/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; EXCEPTION_TABLE(L1_CACHE_BYTES) diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index d3247d33b115..2ac7bdcd2fe0 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -47,7 +47,7 @@ SECTIONS #endif _sdata = . ; __data_start = . ; - RW_DATA_SECTION(0, PAGE_SIZE, THREAD_SIZE) + RW_DATA(0, PAGE_SIZE, THREAD_SIZE) #if defined(CONFIG_ROMKERNEL) #undef ADDR #endif diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S index 0145251fa317..0ca2471ddb9f 100644 --- a/arch/hexagon/kernel/vmlinux.lds.S +++ b/arch/hexagon/kernel/vmlinux.lds.S @@ -49,7 +49,7 @@ SECTIONS INIT_DATA_SECTION(PAGE_SIZE) _sdata = .; - RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE) + RW_DATA(32,PAGE_SIZE,_THREAD_SIZE) RO_DATA(PAGE_SIZE) _edata = .; diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds index de80f8b8ae78..7b975420c3d9 100644 --- a/arch/m68k/kernel/vmlinux-nommu.lds +++ b/arch/m68k/kernel/vmlinux-nommu.lds @@ -61,7 +61,7 @@ SECTIONS { _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) + RW_DATA(16, PAGE_SIZE, THREAD_SIZE) _edata = .; EXCEPTION_TABLE(16) diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index 625a5785804f..6e7eb49ed985 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds @@ -33,7 +33,7 @@ SECTIONS RODATA - RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) + RW_DATA(16, PAGE_SIZE, THREAD_SIZE) BSS_SECTION(0, 0, 0) diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds index 9868270b0984..1a0ad6b6dd8c 100644 --- a/arch/m68k/kernel/vmlinux-sun3.lds +++ b/arch/m68k/kernel/vmlinux-sun3.lds @@ -30,7 +30,7 @@ SECTIONS EXCEPTION_TABLE(16) :data _sdata = .; /* Start of rw data section */ - RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) :data + RW_DATA(16, PAGE_SIZE, THREAD_SIZE) :data /* End of data goes *here* so that freeing init code works properly. 
*/ _edata = .; NOTES diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S index 2299694748ea..b8efb08204a1 100644 --- a/arch/microblaze/kernel/vmlinux.lds.S +++ b/arch/microblaze/kernel/vmlinux.lds.S @@ -69,7 +69,7 @@ SECTIONS { } _sdata = . ; - RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) + RW_DATA(32, PAGE_SIZE, THREAD_SIZE) _edata = . ; /* Under the microblaze ABI, .sdata and .sbss must be contiguous */ diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S index 10ff570ba95b..f679d3397436 100644 --- a/arch/nds32/kernel/vmlinux.lds.S +++ b/arch/nds32/kernel/vmlinux.lds.S @@ -54,7 +54,7 @@ SECTIONS _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; EXCEPTION_TABLE(16) diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S index 318804a2c7a1..c55a7cfa1075 100644 --- a/arch/nios2/kernel/vmlinux.lds.S +++ b/arch/nios2/kernel/vmlinux.lds.S @@ -50,7 +50,7 @@ SECTIONS _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; BSS_SECTION(0, 0, 0) diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S index f73e0d3ea09f..60449fd7f16f 100644 --- a/arch/openrisc/kernel/vmlinux.lds.S +++ b/arch/openrisc/kernel/vmlinux.lds.S @@ -74,7 +74,7 @@ SECTIONS /* Whatever comes after _e_kernel_ro had better be page-aligend, too */ /* 32 here is cacheline size... recheck this */ - RW_DATA_SECTION(32, PAGE_SIZE, PAGE_SIZE) + RW_DATA(32, PAGE_SIZE, PAGE_SIZE) _edata = .; diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index e1c563c7dca1..12b3d7d5e9e4 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -148,7 +148,7 @@ SECTIONS data_start = .; /* Data */ - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE) /* PA-RISC locks requires 16-byte alignment */ . = ALIGN(16); diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 66dc17d24dd9..12f42f96d46e 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -57,7 +57,7 @@ SECTIONS *(.srodata*) } - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) .sdata : { __global_pointer$ = . + 0x800; *(.sdata*) diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index b33c4823f8b5..37695499717d 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -67,7 +67,7 @@ SECTIONS . 
= ALIGN(PAGE_SIZE); __end_ro_after_init = .; - RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE) + RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE) BOOT_DATA_PRESERVED _edata = .; /* End of data section */ diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index fef39054cc70..c60b19958c35 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -51,7 +51,7 @@ SECTIONS _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; DWARF_EH_FRAME diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 8929fbc35a80..7ec79918b566 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -67,7 +67,7 @@ SECTIONS .data1 : { *(.data1) } - RW_DATA_SECTION(SMP_CACHE_BYTES, 0, THREAD_SIZE) + RW_DATA(SMP_CACHE_BYTES, 0, THREAD_SIZE) /* End of data section */ _edata = .; diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S index 367c80313bec..6fb320b337ef 100644 --- a/arch/unicore32/kernel/vmlinux.lds.S +++ b/arch/unicore32/kernel/vmlinux.lds.S @@ -44,7 +44,7 @@ SECTIONS _sdata = .; RO_DATA(PAGE_SIZE) - RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; EXCEPTION_TABLE(L1_CACHE_BYTES) diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index b97e5798b9cf..bdbd7c4056c1 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -134,7 +134,7 @@ SECTIONS /* Data section */ _sdata = .; - RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) + RW_DATA(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) _edata = .; /* Initialization code and data: */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 061e57c609f6..356078e50a5c 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -24,7 +24,7 @@ * * _sdata = .; * RO_DATA(PAGE_SIZE) - * RW_DATA_SECTION(...) + * RW_DATA(...) * _edata = .; * * EXCEPTION_TABLE(...) @@ -975,7 +975,7 @@ * matches the requirement of PAGE_ALIGNED_DATA. * * use 0 as page_align if page_aligned data is not used */ -#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \ +#define RW_DATA(cacheline, pagealigned, inittask) \ . = ALIGN(PAGE_SIZE); \ .data : AT(ADDR(.data) - LOAD_OFFSET) { \ INIT_TASK_DATA(inittask) \ -- cgit v1.2.3 From b8c2f776164c8f74ac31c5e370ca3f029be0aa19 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 29 Oct 2019 14:13:36 -0700 Subject: vmlinux.lds.h: Allow EXCEPTION_TABLE to live in RO_DATA Many architectures have an EXCEPTION_TABLE that needs to be only readable. As such, it should live in RO_DATA. Create a macro to identify this case for the architectures that can move EXCEPTION_TABLE into RO_DATA. 
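For illustration, an architecture that can take advantage of this defines the alignment before including the generic linker script; a sketch (the 16-byte alignment mirrors the EXCEPTION_TABLE(16) calls seen in earlier diffs):

	/* arch vmlinux.lds.S -- sketch */
	#define RO_EXCEPTION_TABLE_ALIGN	16
	#include <asm-generic/vmlinux.lds.h>

	SECTIONS
	{
		...
		RO_DATA(PAGE_SIZE)	/* now also emits the exception table */
		...			/* the standalone EXCEPTION_TABLE(16) call is dropped */
	}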
Signed-off-by: Kees Cook Signed-off-by: Borislav Petkov Acked-by: Will Deacon Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Dave Hansen Cc: linux-alpha@vger.kernel.org Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-c6x-dev@linux-c6x.org Cc: linux-ia64@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Cc: linux-s390@vger.kernel.org Cc: Michael Ellerman Cc: Michal Simek Cc: Rick Edgecombe Cc: Segher Boessenkool Cc: x86-ml Cc: Yoshinori Sato Link: https://lkml.kernel.org/r/20191029211351.13243-15-keescook@chromium.org --- include/asm-generic/vmlinux.lds.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'include/asm-generic') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 356078e50a5c..9867d8e41eed 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -69,6 +69,17 @@ #define NOTES_HEADERS_RESTORE #endif +/* + * Some architectures have non-executable read-only exception tables. + * They can be added to the RO_DATA segment by specifying their desired + * alignment. + */ +#ifdef RO_EXCEPTION_TABLE_ALIGN +#define RO_EXCEPTION_TABLE EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN) +#else +#define RO_EXCEPTION_TABLE +#endif + /* Align . to a 8 byte boundary equals to maximum function alignment. */ #define ALIGN_FUNCTION() . = ALIGN(8) @@ -513,6 +524,7 @@ __stop___modver = .; \ } \ \ + RO_EXCEPTION_TABLE \ NOTES \ \ . = ALIGN((align)); \ -- cgit v1.2.3 From 864edb758c50c30787bb51f2e26c4be2b4937025 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 24 Oct 2019 13:28:01 +0530 Subject: powerpc/mm/book3s64/radix: Flush the full mm even when need_flush_all is set With the previous patch, we should now not be using need_flush_all for powerpc. But then make sure we force a PID tlbie flush with RIC=2 if we ever find need_flush_all set. Also don't reset it after a mmu gather flush. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20191024075801.22434-3-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/book3s64/radix_tlb.c | 3 +-- include/asm-generic/tlb.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c index f9a4d5793f03..a95175c0972b 100644 --- a/arch/powerpc/mm/book3s64/radix_tlb.c +++ b/arch/powerpc/mm/book3s64/radix_tlb.c @@ -995,7 +995,7 @@ void radix__tlb_flush(struct mmu_gather *tlb) * that flushes the process table entry cache upon process teardown. * See the comment for radix in arch_exit_mmap(). 
*/ - if (tlb->fullmm) { + if (tlb->fullmm || tlb->need_flush_all) { __flush_all_mm(mm, true); } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) { if (!tlb->freed_tables) @@ -1008,7 +1008,6 @@ void radix__tlb_flush(struct mmu_gather *tlb) else radix__flush_tlb_pwc_range_psize(mm, start, end, psize); } - tlb->need_flush_all = 0; } static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm, diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 04c0644006fd..e64991142a8b 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -428,7 +428,7 @@ static inline void tlb_change_page_size(struct mmu_gather *tlb, { #ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE if (tlb->page_size && tlb->page_size != page_size) { - if (!tlb->fullmm) + if (!tlb->fullmm && !tlb->need_flush_all) tlb_flush_mmu(tlb); } -- cgit v1.2.3 From a1326b17ac03a9012cb3d01e434aacb4d67a416c Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 16 Oct 2019 18:17:11 +0100 Subject: module/ftrace: handle patchable-function-entry When using patchable-function-entry, the compiler will record the callsites into a section named "__patchable_function_entries" rather than "__mcount_loc". Let's abstract this difference behind a new FTRACE_CALLSITE_SECTION, so that architectures don't have to handle this explicitly (e.g. with custom module linker scripts). As parisc currently handles this explicitly, it is fixed up accordingly, with its custom linker script removed. Since FTRACE_CALLSITE_SECTION is only defined when DYNAMIC_FTRACE is selected, the parisc module loading code is updated to only use the definition in that case. When DYNAMIC_FTRACE is not selected, modules shouldn't have this section, so this removes some redundant work in that case. To make sure that this is keep up-to-date for modules and the main kernel, a comment is added to vmlinux.lds.h, with the existing ifdeffery simplified for legibility. I built parisc generic-{32,64}bit_defconfig with DYNAMIC_FTRACE enabled, and verified that the section made it into the .ko files for modules. Signed-off-by: Mark Rutland Acked-by: Helge Deller Acked-by: Steven Rostedt (VMware) Reviewed-by: Ard Biesheuvel Reviewed-by: Torsten Duwe Tested-by: Amit Daniel Kachhap Tested-by: Sven Schnelle Tested-by: Torsten Duwe Cc: Ingo Molnar Cc: James E.J. 
Bottomley Cc: Jessica Yu Cc: linux-parisc@vger.kernel.org --- arch/parisc/Makefile | 1 - arch/parisc/kernel/module.c | 10 +++++++--- arch/parisc/kernel/module.lds | 7 ------- include/asm-generic/vmlinux.lds.h | 14 +++++++------- include/linux/ftrace.h | 5 +++++ kernel/module.c | 2 +- 6 files changed, 20 insertions(+), 19 deletions(-) delete mode 100644 arch/parisc/kernel/module.lds (limited to 'include/asm-generic') diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 36b834f1c933..dca8f2de8cf5 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -60,7 +60,6 @@ KBUILD_CFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY=1 \ -DFTRACE_PATCHABLE_FUNCTION_SIZE=$(NOP_COUNT) CC_FLAGS_FTRACE := -fpatchable-function-entry=$(NOP_COUNT),$(shell echo $$(($(NOP_COUNT)-1))) -KBUILD_LDS_MODULE += $(srctree)/arch/parisc/kernel/module.lds endif OBJCOPY_FLAGS =-O binary -R .note -R .comment -S diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index ac5f34993b53..1c50093e2ebe 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -43,6 +43,7 @@ #include #include #include +#include <linux/ftrace.h> #include #include #include @@ -862,7 +863,7 @@ int module_finalize(const Elf_Ehdr *hdr, const char *strtab = NULL; const Elf_Shdr *s; char *secstrings; - int err, symindex = -1; + int symindex = -1; Elf_Sym *newptr, *oldptr; Elf_Shdr *symhdr = NULL; #ifdef DEBUG @@ -946,11 +947,13 @@ int module_finalize(const Elf_Ehdr *hdr, /* patch .altinstructions */ apply_alternatives(aseg, aseg + s->sh_size, me->name); +#ifdef CONFIG_DYNAMIC_FTRACE /* For 32 bit kernels we're compiling modules with * -ffunction-sections so we must relocate the addresses in the - *__mcount_loc section. + * ftrace callsite section. */ - if (symindex != -1 && !strcmp(secname, "__mcount_loc")) { + if (symindex != -1 && !strcmp(secname, FTRACE_CALLSITE_SECTION)) { + int err; if (s->sh_type == SHT_REL) err = apply_relocate((Elf_Shdr *)sechdrs, strtab, symindex, @@ -962,6 +965,7 @@ int module_finalize(const Elf_Ehdr *hdr, if (err) return err; } +#endif } return 0; } diff --git a/arch/parisc/kernel/module.lds b/arch/parisc/kernel/module.lds deleted file mode 100644 index 1a9a92aca5c8..000000000000 --- a/arch/parisc/kernel/module.lds +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -SECTIONS { - __mcount_loc : { - *(__patchable_function_entries) - } -} diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index dae64600ccbf..a9c4e4721434 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -110,17 +110,17 @@ #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD -#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY -#define MCOUNT_REC() . = ALIGN(8); \ - __start_mcount_loc = .; \ - KEEP(*(__patchable_function_entries)) \ - __stop_mcount_loc = .; -#else +/* + * The ftrace call sites are logged to a section whose name depends on the + * compiler option used. A given kernel image will only use one, AKA + * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header + * dependencies for FTRACE_CALLSITE_SECTION's definition. + */ #define MCOUNT_REC() .
= ALIGN(8); \ __start_mcount_loc = .; \ KEEP(*(__mcount_loc)) \ + KEEP(*(__patchable_function_entries)) \ __stop_mcount_loc = .; -#endif #else #define MCOUNT_REC() #endif diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9867d90d635e..9141f2263286 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -738,6 +738,11 @@ static inline unsigned long get_lock_parent_ip(void) #ifdef CONFIG_FTRACE_MCOUNT_RECORD extern void ftrace_init(void); +#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY +#define FTRACE_CALLSITE_SECTION "__patchable_function_entries" +#else +#define FTRACE_CALLSITE_SECTION "__mcount_loc" +#endif #else static inline void ftrace_init(void) { } #endif diff --git a/kernel/module.c b/kernel/module.c index ff2d7359a418..acf7962936c4 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3222,7 +3222,7 @@ static int find_module_sections(struct module *mod, struct load_info *info) #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD /* sechdrs[0].sh_size is always zero */ - mod->ftrace_callsites = section_objs(info, "__mcount_loc", + mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, sizeof(*mod->ftrace_callsites), &mod->num_ftrace_callsites); #endif -- cgit v1.2.3 From 81d2c6f81996e01fbcd2b5aeefbb519e21c806e9 Mon Sep 17 00:00:00 2001 From: Daniel Axtens Date: Tue, 20 Aug 2019 12:49:40 +1000 Subject: kasan: support instrumented bitops combined with generic bitops Currently bitops-instrumented.h assumes that the architecture provides atomic, non-atomic and locking bitops (e.g. both set_bit and __set_bit). This is true on x86 and s390, but is not always true: there is a generic bitops/non-atomic.h header that provides generic non-atomic operations, and also a generic bitops/lock.h for locking operations. powerpc uses the generic non-atomic version, so it does not have it's own e.g. __set_bit that could be renamed arch___set_bit. Split up bitops-instrumented.h to mirror the atomic/non-atomic/lock split. This allows arches to only include the headers where they have arch-specific versions to rename. Update x86 and s390. (The generic operations are automatically instrumented because they're written in C, not asm.) Suggested-by: Christophe Leroy Reviewed-by: Christophe Leroy Signed-off-by: Daniel Axtens Acked-by: Marco Elver Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20190820024941.12640-1-dja@axtens.net --- Documentation/core-api/kernel-api.rst | 17 +- arch/s390/include/asm/bitops.h | 4 +- arch/x86/include/asm/bitops.h | 4 +- include/asm-generic/bitops-instrumented.h | 263 --------------------- include/asm-generic/bitops/instrumented-atomic.h | 100 ++++++++ include/asm-generic/bitops/instrumented-lock.h | 81 +++++++ .../asm-generic/bitops/instrumented-non-atomic.h | 114 +++++++++ 7 files changed, 317 insertions(+), 266 deletions(-) delete mode 100644 include/asm-generic/bitops-instrumented.h create mode 100644 include/asm-generic/bitops/instrumented-atomic.h create mode 100644 include/asm-generic/bitops/instrumented-lock.h create mode 100644 include/asm-generic/bitops/instrumented-non-atomic.h (limited to 'include/asm-generic') diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst index f77de49b1d51..2caaeb55e8dd 100644 --- a/Documentation/core-api/kernel-api.rst +++ b/Documentation/core-api/kernel-api.rst @@ -57,7 +57,22 @@ The Linux kernel provides more basic utility functions. Bit Operations -------------- -.. 
kernel-doc:: include/asm-generic/bitops-instrumented.h +Atomic Operations +~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: include/asm-generic/bitops/instrumented-atomic.h + :internal: + +Non-atomic Operations +~~~~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: include/asm-generic/bitops/instrumented-non-atomic.h + :internal: + +Locking Operations +~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: include/asm-generic/bitops/instrumented-lock.h :internal: Bitmap Operations diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index eb7eed43e780..431e208a5ea4 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -241,7 +241,9 @@ static inline void arch___clear_bit_unlock(unsigned long nr, arch___clear_bit(nr, ptr); } -#include <asm-generic/bitops-instrumented.h> +#include <asm-generic/bitops/instrumented-atomic.h> +#include <asm-generic/bitops/instrumented-non-atomic.h> +#include <asm-generic/bitops/instrumented-lock.h> /* * Functions which use MSB0 bit numbering. diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 7d1f6a49bfae..062cdecb2f24 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -388,7 +388,9 @@ static __always_inline int fls64(__u64 x) #include -#include <asm-generic/bitops-instrumented.h> +#include <asm-generic/bitops/instrumented-atomic.h> +#include <asm-generic/bitops/instrumented-non-atomic.h> +#include <asm-generic/bitops/instrumented-lock.h> #include diff --git a/include/asm-generic/bitops-instrumented.h b/include/asm-generic/bitops-instrumented.h deleted file mode 100644 index ddd1c6d9d8db..000000000000 --- a/include/asm-generic/bitops-instrumented.h +++ /dev/null @@ -1,263 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* - * This file provides wrappers with sanitizer instrumentation for bit - * operations. - * - * To use this functionality, an arch's bitops.h file needs to define each of - * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), - * arch___set_bit(), etc.). - */ -#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H -#define _ASM_GENERIC_BITOPS_INSTRUMENTED_H - -#include <linux/kasan-checks.h> - -/** - * set_bit - Atomically set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * This is a relaxed atomic operation (no implied memory barriers). - * - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void set_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - arch_set_bit(nr, addr); -} - -/** - * __set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic. If it is called on the same - * region of memory concurrently, the effect may be that only one operation - * succeeds. - */ -static inline void __set_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - arch___set_bit(nr, addr); -} - -/** - * clear_bit - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * This is a relaxed atomic operation (no implied memory barriers). - */ -static inline void clear_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - arch_clear_bit(nr, addr); - } - -/** - * __clear_bit - Clears a bit in memory - * @nr: the bit to clear - * @addr: the address to start counting from - * - * Unlike clear_bit(), this function is non-atomic. If it is called on the same - * region of memory concurrently, the effect may be that only one operation - * succeeds.
- */ -static inline void __clear_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - arch___clear_bit(nr, addr); -} - -/** - * clear_bit_unlock - Clear a bit in memory, for unlock - * @nr: the bit to set - * @addr: the address to start counting from - * - * This operation is atomic and provides release barrier semantics. - */ -static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - arch_clear_bit_unlock(nr, addr); -} - -/** - * __clear_bit_unlock - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * This is a non-atomic operation but implies a release barrier before the - * memory operation. It can be used for an unlock if no other CPUs can - * concurrently modify other bits in the word. - */ -static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - arch___clear_bit_unlock(nr, addr); -} - -/** - * change_bit - Toggle a bit in memory - * @nr: Bit to change - * @addr: Address to start counting from - * - * This is a relaxed atomic operation (no implied memory barriers). - * - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void change_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - arch_change_bit(nr, addr); -} - -/** - * __change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic. If it is called on the same - * region of memory concurrently, the effect may be that only one operation - * succeeds. - */ -static inline void __change_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - arch___change_bit(nr, addr); -} - -/** - * test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This is an atomic fully-ordered operation (implied full memory barrier). - */ -static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - return arch_test_and_set_bit(nr, addr); -} - -/** - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic. If two instances of this operation race, one - * can appear to succeed but actually fail. - */ -static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - return arch___test_and_set_bit(nr, addr); -} - -/** - * test_and_set_bit_lock - Set a bit and return its old value, for lock - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is atomic and provides acquire barrier semantics if - * the returned value is 0. - * It can be used to implement bit locks. - */ -static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - return arch_test_and_set_bit_lock(nr, addr); -} - -/** - * test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This is an atomic fully-ordered operation (implied full memory barrier). 
- */ -static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - return arch_test_and_clear_bit(nr, addr); -} - -/** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic. If two instances of this operation race, one - * can appear to succeed but actually fail. - */ -static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - return arch___test_and_clear_bit(nr, addr); -} - -/** - * test_and_change_bit - Change a bit and return its old value - * @nr: Bit to change - * @addr: Address to count from - * - * This is an atomic fully-ordered operation (implied full memory barrier). - */ -static inline bool test_and_change_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - return arch_test_and_change_bit(nr, addr); -} - -/** - * __test_and_change_bit - Change a bit and return its old value - * @nr: Bit to change - * @addr: Address to count from - * - * This operation is non-atomic. If two instances of this operation race, one - * can appear to succeed but actually fail. - */ -static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - return arch___test_and_change_bit(nr, addr); -} - -/** - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static inline bool test_bit(long nr, const volatile unsigned long *addr) -{ - kasan_check_read(addr + BIT_WORD(nr), sizeof(long)); - return arch_test_bit(nr, addr); -} - -#if defined(arch_clear_bit_unlock_is_negative_byte) -/** - * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom - * byte is negative, for unlock. - * @nr: the bit to clear - * @addr: the address to start counting from - * - * This operation is atomic and provides release barrier semantics. - * - * This is a bit of a one-trick-pony for the filemap code, which clears - * PG_locked and tests PG_waiters, - */ -static inline bool -clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) -{ - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); - return arch_clear_bit_unlock_is_negative_byte(nr, addr); -} -/* Let everybody know we have it. */ -#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte -#endif - -#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_H */ diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h new file mode 100644 index 000000000000..18ce3c9e8eec --- /dev/null +++ b/include/asm-generic/bitops/instrumented-atomic.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * This file provides wrappers with sanitizer instrumentation for atomic bit + * operations. + * + * To use this functionality, an arch's bitops.h file needs to define each of + * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), + * arch___set_bit(), etc.). + */ +#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H +#define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H + +#include + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This is a relaxed atomic operation (no implied memory barriers). 
+ * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_set_bit(nr, addr); +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * This is a relaxed atomic operation (no implied memory barriers). + */ +static inline void clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_clear_bit(nr, addr); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * This is a relaxed atomic operation (no implied memory barriers). + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_change_bit(nr, addr); +} + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This is an atomic fully-ordered operation (implied full memory barrier). + */ +static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_set_bit(nr, addr); +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This is an atomic fully-ordered operation (implied full memory barrier). + */ +static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_clear_bit(nr, addr); +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This is an atomic fully-ordered operation (implied full memory barrier). + */ +static inline bool test_and_change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_change_bit(nr, addr); +} + +#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */ diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h new file mode 100644 index 000000000000..ec53fdeea9ec --- /dev/null +++ b/include/asm-generic/bitops/instrumented-lock.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * This file provides wrappers with sanitizer instrumentation for bit + * locking operations. + * + * To use this functionality, an arch's bitops.h file needs to define each of + * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), + * arch___set_bit(), etc.). + */ +#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H +#define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H + +#include + +/** + * clear_bit_unlock - Clear a bit in memory, for unlock + * @nr: the bit to set + * @addr: the address to start counting from + * + * This operation is atomic and provides release barrier semantics. 
+ */ +static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch_clear_bit_unlock(nr, addr); +} + +/** + * __clear_bit_unlock - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * This is a non-atomic operation but implies a release barrier before the + * memory operation. It can be used for an unlock if no other CPUs can + * concurrently modify other bits in the word. + */ +static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___clear_bit_unlock(nr, addr); +} + +/** + * test_and_set_bit_lock - Set a bit and return its old value, for lock + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and provides acquire barrier semantics if + * the returned value is 0. + * It can be used to implement bit locks. + */ +static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_and_set_bit_lock(nr, addr); +} + +#if defined(arch_clear_bit_unlock_is_negative_byte) +/** + * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom + * byte is negative, for unlock. + * @nr: the bit to clear + * @addr: the address to start counting from + * + * This operation is atomic and provides release barrier semantics. + * + * This is a bit of a one-trick-pony for the filemap code, which clears + * PG_locked and tests PG_waiters, + */ +static inline bool +clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch_clear_bit_unlock_is_negative_byte(nr, addr); +} +/* Let everybody know we have it. */ +#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte +#endif + +#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */ diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h new file mode 100644 index 000000000000..95ff28d128a1 --- /dev/null +++ b/include/asm-generic/bitops/instrumented-non-atomic.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * This file provides wrappers with sanitizer instrumentation for non-atomic + * bit operations. + * + * To use this functionality, an arch's bitops.h file needs to define each of + * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), + * arch___set_bit(), etc.). + */ +#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H +#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H + +#include + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic. If it is called on the same + * region of memory concurrently, the effect may be that only one operation + * succeeds. + */ +static inline void __set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___set_bit(nr, addr); +} + +/** + * __clear_bit - Clears a bit in memory + * @nr: the bit to clear + * @addr: the address to start counting from + * + * Unlike clear_bit(), this function is non-atomic. If it is called on the same + * region of memory concurrently, the effect may be that only one operation + * succeeds. 
*/ +static inline void __clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___clear_bit(nr, addr); +} + +/** + * __change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic. If it is called on the same + * region of memory concurrently, the effect may be that only one operation + * succeeds. + */ +static inline void __change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + arch___change_bit(nr, addr); +} + +/** + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic. If two instances of this operation race, one + * can appear to succeed but actually fail. + */ +static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch___test_and_set_bit(nr, addr); +} + +/** + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic. If two instances of this operation race, one + * can appear to succeed but actually fail. + */ +static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch___test_and_clear_bit(nr, addr); +} + +/** + * __test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is non-atomic. If two instances of this operation race, one + * can appear to succeed but actually fail. + */ +static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) +{ + kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + return arch___test_and_change_bit(nr, addr); +} + +/** + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static inline bool test_bit(long nr, const volatile unsigned long *addr) +{ + kasan_check_read(addr + BIT_WORD(nr), sizeof(long)); + return arch_test_bit(nr, addr); +} + +#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */ -- cgit v1.2.3 From a31ec048ef01a76ff893e0fa482e569d04d0c4b4 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 18 Oct 2019 13:31:47 +0900 Subject: asm-generic/export.h: make __ksymtab_* local symbols For EXPORT_SYMBOL from C files, <linux/export.h> defines __ksymtab_* as local symbols. For EXPORT_SYMBOL from assembly, in contrast, <asm-generic/export.h> produces globally-visible __ksymtab_* symbols due to this .globl directive. I do not know why this must be global. It still works without this.
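To make the effect concrete, here is approximately what EXPORT_SYMBOL(my_func) from an assembly file now emits for its ksymtab entry; the expansion is paraphrased from the ___EXPORT_SYMBOL macro in the diff below, my_func is an invented example, and the PREL32-relocations flavour of the entry is shown:

	.section ___ksymtab+my_func, "a"
	.balign KSYM_ALIGN
__ksymtab_my_func:			/* now a local label: no .globl */
	.long my_func - .		/* offset to the exported symbol */
	.long __kstrtab_my_func - .	/* offset to the name string */
	.previous

Nothing appears to need the global binding: modpost can read local symbols from the object's symbol table just as well, and at run time the export tables are walked via their section start/stop bounds rather than looked up by the __ksymtab_* names, which is presumably why dropping the .globl is safe.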
Signed-off-by: Masahiro Yamada --- include/asm-generic/export.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index fa577978fbbd..80ef2dc0c8be 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -31,7 +31,6 @@ */ .macro ___EXPORT_SYMBOL name,val,sec #ifdef CONFIG_MODULES - .globl __ksymtab_\name .section ___ksymtab\sec+\name,"a" .balign KSYM_ALIGN __ksymtab_\name: -- cgit v1.2.3 From 03034dbdaed8b47282d647c1100dbb0f522798f3 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 18 Oct 2019 13:31:48 +0900 Subject: asm-generic/export.h: remove unneeded __kcrctab_* symbols EXPORT_SYMBOL from assembly code produces an unused symbol __kcrctab_*. kcrctab is used as a section name (prefixed with three underscores), but never used as a symbol. Signed-off-by: Masahiro Yamada --- include/asm-generic/export.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index 80ef2dc0c8be..a3983e2ce0fd 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -43,7 +43,6 @@ __kstrtab_\name: #ifdef CONFIG_MODVERSIONS .section ___kcrctab\sec+\name,"a" .balign KCRC_ALIGN -__kcrctab_\name: #if defined(CONFIG_MODULE_REL_CRCS) .long __crc_\name - . #else -- cgit v1.2.3 From e97133959ad2942ae43c81a08858bef01bc0ec18 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Aug 2019 07:53:05 +0200 Subject: asm-generic: ioremap_uc should behave the same with and without MMU Whatever reason there is for the existence of ioremap_uc, and the fact that it returns NULL by default on architectures with an MMU applies equally to nommu architectures, so don't provide different defaults. In practice the difference is meaningless as the only portable driver that uses ioremap_uc is atyfb which probably doesn't show up on nommu devices. Signed-off-by: Christoph Hellwig Reviewed-by: Arnd Bergmann Reviewed-by: Palmer Dabbelt --- include/asm-generic/io.h | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index d02806513670..bf8204de3a48 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -935,18 +935,7 @@ static inline void *phys_to_virt(unsigned long address) * defined your own ioremap_*() variant you must then declare your own * ioremap_*() variant as defined to itself to avoid the default NULL return. */ - -#ifdef CONFIG_MMU - -#ifndef ioremap_uc -#define ioremap_uc ioremap_uc -static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) -{ - return NULL; -} -#endif - -#else /* !CONFIG_MMU */ +#ifndef CONFIG_MMU /* * Change "struct page" to physical address. @@ -980,14 +969,6 @@ static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) } #endif -#ifndef ioremap_uc -#define ioremap_uc ioremap_uc -static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) -{ - return ioremap_nocache(offset, size); -} -#endif - #ifndef ioremap_wc #define ioremap_wc ioremap_wc static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) @@ -1004,6 +985,21 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size) } #endif +/* + * ioremap_uc is special in that we do require an explicit architecture + * implementation. 
In general you do not want to use this function in a + * driver and use plain ioremap, which is uncached by default. Similarly + * architectures should not implement it unless they have a very good + * reason. + */ +#ifndef ioremap_uc +#define ioremap_uc ioremap_uc +static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size) +{ + return NULL; +} +#endif + #ifdef CONFIG_HAS_IOPORT_MAP #ifndef CONFIG_GENERIC_IOMAP #ifndef ioport_map -- cgit v1.2.3 From 97c9801a15e5b0c9a20e495b2ccabf010894e74b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 11 Aug 2019 14:53:20 +0200 Subject: asm-generic: don't provide ioremap for CONFIG_MMU All MMU-enabled ports have a non-trivial ioremap and should thus provide the prototype for their implementation instead of providing a generic one unless a different symbol is not defined. Note that this only affects sparc32 nds32 as all others do provide their own version. Also update the kerneldoc comments in asm-generic/io.h to explain the situation around the default ioremap* implementations correctly. Signed-off-by: Christoph Hellwig Reviewed-by: Arnd Bergmann --- arch/nds32/include/asm/io.h | 2 ++ arch/sparc/include/asm/io_32.h | 1 + include/asm-generic/io.h | 29 ++++++++--------------------- 3 files changed, 11 insertions(+), 21 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h index 16f262322b8f..fb0e8a24c7af 100644 --- a/arch/nds32/include/asm/io.h +++ b/arch/nds32/include/asm/io.h @@ -6,6 +6,7 @@ #include +void __iomem *ioremap(phys_addr_t phys_addr, size_t size); extern void iounmap(volatile void __iomem *addr); #define __raw_writeb __raw_writeb static inline void __raw_writeb(u8 val, volatile void __iomem *addr) @@ -80,4 +81,5 @@ static inline u32 __raw_readl(const volatile void __iomem *addr) #define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) #define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) #include + #endif /* __ASM_NDS32_IO_H */ diff --git a/arch/sparc/include/asm/io_32.h b/arch/sparc/include/asm/io_32.h index df2dc1784673..9a52d9506f80 100644 --- a/arch/sparc/include/asm/io_32.h +++ b/arch/sparc/include/asm/io_32.h @@ -127,6 +127,7 @@ static inline void sbus_memcpy_toio(volatile void __iomem *dst, * Bus number may be embedded in the higher bits of the physical address. * This is why we have no bus number argument to ioremap(). */ +void __iomem *ioremap(phys_addr_t offset, size_t size); void iounmap(volatile void __iomem *addr); /* Create a virtual mapping cookie for an IO port range */ void __iomem *ioport_map(unsigned long port, unsigned int nr); diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index bf8204de3a48..ed74d2e9af75 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -922,28 +922,16 @@ static inline void *phys_to_virt(unsigned long address) /** * DOC: ioremap() and ioremap_*() variants * - * If you have an IOMMU your architecture is expected to have both ioremap() - * and iounmap() implemented otherwise the asm-generic helpers will provide a - * direct mapping. + * Architectures with an MMU are expected to provide ioremap() and iounmap() + * themselves. For NOMMU architectures we provide a default nop-op + * implementation that expect that the physical address used for MMIO are + * already marked as uncached, and can be used as kernel virtual addresses. 
* - * There are ioremap_*() call variants, if you have no IOMMU we naturally will - * default to direct mapping for all of them, you can override these defaults. - * If you have an IOMMU you are highly encouraged to provide your own - * ioremap variant implementation as there currently is no safe architecture - * agnostic default. To avoid possible improper behaviour default asm-generic - * ioremap_*() variants all return NULL when an IOMMU is available. If you've - * defined your own ioremap_*() variant you must then declare your own - * ioremap_*() variant as defined to itself to avoid the default NULL return. + * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes + * for specific drivers if the architecture choses to implement them. If they + * are not implemented we fall back to plain ioremap. */ #ifndef CONFIG_MMU - -/* - * Change "struct page" to physical address. - * - * This implementation is for the no-MMU case only... if you have an MMU - * you'll need to provide your own definitions. - */ - #ifndef ioremap #define ioremap ioremap static inline void __iomem *ioremap(phys_addr_t offset, size_t size) @@ -954,14 +942,13 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size) #ifndef iounmap #define iounmap iounmap - static inline void iounmap(void __iomem *addr) { } #endif #endif /* CONFIG_MMU */ + #ifndef ioremap_nocache -void __iomem *ioremap(phys_addr_t phys_addr, size_t size); #define ioremap_nocache ioremap_nocache static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) { -- cgit v1.2.3 From d092a87073269677b7ff09e71a8d91912b7f969a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Oct 2019 08:09:38 +0200 Subject: arch: rely on asm-generic/io.h for default ioremap_* definitions Various architectures that use asm-generic/io.h still defined their own default versions of ioremap_nocache, ioremap_wt and ioremap_wc that point back to plain ioremap directly or indirectly. Remove these definitions and rely on asm-generic/io.h instead. For this to work the backup ioremap_* definitions need to be changed to purely cpp macros instead of inlines to cover for architectures like openrisc that only define ioremap after including <asm-generic/io.h>.
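The include-order constraint is easy to reproduce in miniature. The sketch below is plain userspace C with invented stand-ins for the two headers; it compiles because the cpp macro is only expanded at the call site, after ioremap() is in scope, whereas a static inline fallback defined at the same spot would reference ioremap() before any declaration of it exists:

	/* mini_ioremap.c - illustrative stand-in, not kernel code */
	#include <stddef.h>
	#include <stdio.h>

	/* "generic header": fallback defined before ioremap is declared */
	#ifndef ioremap_wc
	#define ioremap_wc ioremap	/* expanded later, at the call site */
	#endif

	/* "arch header", included afterwards (openrisc-style ordering) */
	static void *ioremap(unsigned long offset, size_t size)
	{
		printf("ioremap(%#lx, %zu)\n", offset, size);
		return NULL;
	}

	int main(void)
	{
		ioremap_wc(0x1000UL, (size_t)4096);	/* resolves fine */
		return 0;
	}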
Signed-off-by: Christoph Hellwig Reviewed-by: Arnd Bergmann Reviewed-by: Palmer Dabbelt --- arch/arc/include/asm/io.h | 4 ---- arch/arm/include/asm/io.h | 1 - arch/arm64/include/asm/io.h | 2 -- arch/csky/include/asm/io.h | 1 - arch/ia64/include/asm/io.h | 1 - arch/microblaze/include/asm/io.h | 3 --- arch/nios2/include/asm/io.h | 4 ---- arch/openrisc/include/asm/io.h | 1 - arch/riscv/include/asm/io.h | 10 ---------- arch/s390/include/asm/io.h | 4 ---- arch/x86/include/asm/io.h | 1 - arch/xtensa/include/asm/io.h | 4 ---- include/asm-generic/io.h | 18 +++--------------- 13 files changed, 3 insertions(+), 51 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h index 72f7929736f8..8f777d6441a5 100644 --- a/arch/arc/include/asm/io.h +++ b/arch/arc/include/asm/io.h @@ -34,10 +34,6 @@ static inline void ioport_unmap(void __iomem *addr) extern void iounmap(const void __iomem *addr); -#define ioremap_nocache(phy, sz) ioremap(phy, sz) -#define ioremap_wc(phy, sz) ioremap(phy, sz) -#define ioremap_wt(phy, sz) ioremap(phy, sz) - /* * io{read,write}{16,32}be() macros */ diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index 924f9dd502ed..aefdabdbeb84 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -392,7 +392,6 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from, */ void __iomem *ioremap(resource_size_t res_cookie, size_t size); #define ioremap ioremap -#define ioremap_nocache ioremap /* * Do not use ioremap_cache for mapping memory. Use memremap instead. diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 323cb306bd28..4e531f57147d 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -167,9 +167,7 @@ extern void iounmap(volatile void __iomem *addr); extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) -#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) -#define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) /* * PCI configuration space mapping function. 
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h index 80d071e2567f..a4b9fb616faa 100644 --- a/arch/csky/include/asm/io.h +++ b/arch/csky/include/asm/io.h @@ -42,7 +42,6 @@ extern void iounmap(void *addr); #define ioremap(addr, size) __ioremap((addr), (size), pgprot_noncached(PAGE_KERNEL)) #define ioremap_wc(addr, size) __ioremap((addr), (size), pgprot_writecombine(PAGE_KERNEL)) -#define ioremap_nocache(addr, size) ioremap((addr), (size)) #define ioremap_cache ioremap_cache #include diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index fec9df9609ed..3d666a11a2de 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -263,7 +263,6 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo return ioremap(phys_addr, size); } #define ioremap ioremap -#define ioremap_nocache ioremap #define ioremap_cache ioremap_cache #define ioremap_uc ioremap_uc #define iounmap iounmap diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index 86c95b2a1ce1..d33c61737b8b 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h @@ -39,9 +39,6 @@ extern resource_size_t isa_mem_base; extern void iounmap(volatile void __iomem *addr); extern void __iomem *ioremap(phys_addr_t address, unsigned long size); -#define ioremap_nocache(addr, size) ioremap((addr), (size)) -#define ioremap_wc(addr, size) ioremap((addr), (size)) -#define ioremap_wt(addr, size) ioremap((addr), (size)) #endif /* CONFIG_MMU */ diff --git a/arch/nios2/include/asm/io.h b/arch/nios2/include/asm/io.h index 74ab34aa6731..d108937c321e 100644 --- a/arch/nios2/include/asm/io.h +++ b/arch/nios2/include/asm/io.h @@ -33,10 +33,6 @@ static inline void iounmap(void __iomem *addr) __iounmap(addr); } -#define ioremap_nocache ioremap -#define ioremap_wc ioremap -#define ioremap_wt ioremap - /* Pages to physical address... */ #define page_to_phys(page) virt_to_phys(page_to_virt(page)) diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h index 5b81a96ab85e..e18f038b2a6d 100644 --- a/arch/openrisc/include/asm/io.h +++ b/arch/openrisc/include/asm/io.h @@ -25,7 +25,6 @@ #define PIO_OFFSET 0 #define PIO_MASK 0 -#define ioremap_nocache ioremap #include #include diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index 3ba4d93721d3..8a5733c09e45 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -16,16 +16,6 @@ #include extern void __iomem *ioremap(phys_addr_t offset, unsigned long size); - -/* - * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't - * change the properties of memory regions. This should be fixed by the - * upcoming platform spec. - */ -#define ioremap_nocache(addr, size) ioremap((addr), (size)) -#define ioremap_wc(addr, size) ioremap((addr), (size)) -#define ioremap_wt(addr, size) ioremap((addr), (size)) - extern void iounmap(volatile void __iomem *addr); /* Generic IO read/write. These perform native-endian accesses. 
*/ diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index ca421614722f..5a16f500515a 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -26,10 +26,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); #define IO_SPACE_LIMIT 0 -#define ioremap_nocache(addr, size) ioremap(addr, size) -#define ioremap_wc ioremap_nocache -#define ioremap_wt ioremap_nocache - void __iomem *ioremap(unsigned long offset, unsigned long size); void iounmap(volatile void __iomem *addr); diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 6b5cc41319a7..9997521fc5cd 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -205,7 +205,6 @@ extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long */ void __iomem *ioremap(resource_size_t offset, unsigned long size); #define ioremap ioremap -#define ioremap_nocache ioremap extern void iounmap(volatile void __iomem *addr); #define iounmap iounmap diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index 441fb56926a7..54188e69b988 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h @@ -52,10 +52,6 @@ static inline void __iomem *ioremap_cache(unsigned long offset, } #define ioremap_cache ioremap_cache -#define ioremap_nocache ioremap -#define ioremap_wc ioremap -#define ioremap_wt ioremap - static inline void iounmap(volatile void __iomem *addr) { unsigned long va = (unsigned long) addr; diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index ed74d2e9af75..bd676f21b5bd 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -949,27 +949,15 @@ static inline void iounmap(void __iomem *addr) #endif /* CONFIG_MMU */ #ifndef ioremap_nocache -#define ioremap_nocache ioremap_nocache -static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size) -{ - return ioremap(offset, size); -} +#define ioremap_nocache ioremap #endif #ifndef ioremap_wc -#define ioremap_wc ioremap_wc -static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size) -{ - return ioremap_nocache(offset, size); -} +#define ioremap_wc ioremap #endif #ifndef ioremap_wt -#define ioremap_wt ioremap_wt -static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size) -{ - return ioremap_nocache(offset, size); -} +#define ioremap_wt ioremap #endif /* -- cgit v1.2.3 From 80b0ca98f91ddbc09828aff5a00af1c73837713e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Aug 2019 11:24:04 +0200 Subject: lib: provide a simple generic ioremap implementation A lot of architectures reuse the same simple ioremap implementation, so start lifting the most simple variant to lib/ioremap.c. It provides ioremap_prot and iounmap, plus a default ioremap that uses prot_noncached, although that can be overridden by asm/io.h. Signed-off-by: Christoph Hellwig Reviewed-by: Arnd Bergmann Reviewed-by: Palmer Dabbelt --- include/asm-generic/io.h | 20 ++++++++++++++++---- lib/Kconfig | 3 +++ lib/ioremap.c | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 4 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index bd676f21b5bd..325fc98cc9ff 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -923,9 +923,10 @@ static inline void *phys_to_virt(unsigned long address) * DOC: ioremap() and ioremap_*() variants * * Architectures with an MMU are expected to provide ioremap() and iounmap() - * themselves. 
For NOMMU architectures we provide a default nop-op - * implementation that expect that the physical address used for MMIO are - * already marked as uncached, and can be used as kernel virtual addresses. + * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide + * a default nop-op implementation that expect that the physical address used + * for MMIO are already marked as uncached, and can be used as kernel virtual + * addresses. * * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes * for specific drivers if the architecture choses to implement them. If they @@ -946,7 +947,18 @@ static inline void iounmap(void __iomem *addr) { } #endif -#endif /* CONFIG_MMU */ +#elif defined(CONFIG_GENERIC_IOREMAP) +#include + +void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); +void iounmap(volatile void __iomem *addr); + +static inline void __iomem *ioremap(phys_addr_t addr, size_t size) +{ + /* _PAGE_IOREMAP needs to be supplied by the architecture */ + return ioremap_prot(addr, size, _PAGE_IOREMAP); +} +#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */ #ifndef ioremap_nocache #define ioremap_nocache ioremap diff --git a/lib/Kconfig b/lib/Kconfig index 3321d04dfa5a..cb571767d080 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -637,6 +637,9 @@ config STRING_SELFTEST endmenu +config GENERIC_IOREMAP + bool + config GENERIC_LIB_ASHLDI3 bool diff --git a/lib/ioremap.c b/lib/ioremap.c index 0a2ffadc6d71..3f0e18543de8 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c @@ -231,3 +231,42 @@ int ioremap_page_range(unsigned long addr, return err; } + +#ifdef CONFIG_GENERIC_IOREMAP +void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot) +{ + unsigned long offset, vaddr; + phys_addr_t last_addr; + struct vm_struct *area; + + /* Disallow wrap-around or zero size */ + last_addr = addr + size - 1; + if (!size || last_addr < addr) + return NULL; + + /* Page-align mappings */ + offset = addr & (~PAGE_MASK); + addr -= offset; + size = PAGE_ALIGN(size + offset); + + area = get_vm_area_caller(size, VM_IOREMAP, + __builtin_return_address(0)); + if (!area) + return NULL; + vaddr = (unsigned long)area->addr; + + if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) { + free_vm_area(area); + return NULL; + } + + return (void __iomem *)(vaddr + offset); +} +EXPORT_SYMBOL(ioremap_prot); + +void iounmap(volatile void __iomem *addr) +{ + vunmap((void *)((unsigned long)addr & PAGE_MASK)); +} +EXPORT_SYMBOL(iounmap); +#endif /* CONFIG_GENERIC_IOREMAP */ -- cgit v1.2.3 From bf49d9dd6fef688733e2ddbd55f7bcb57df194e4 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 18 Oct 2019 13:50:53 +0900 Subject: export,module: add SPDX GPL-2.0 license identifier to headers with no license Commit b24413180f56 ("License cleanup: add SPDX GPL-2.0 license identifier to files with no license") took care of a lot of files without any license information. These headers were not processed by the tool perhaps because they contain "GPL" in the code. I do not see any license boilerplate in them, so they fall back to GPL version 2 only, which is the project default. 
Signed-off-by: Masahiro Yamada Link: https://lore.kernel.org/r/20191018045053.8424-1-yamada.masahiro@socionext.com Signed-off-by: Greg Kroah-Hartman --- include/asm-generic/export.h | 1 + include/linux/export.h | 1 + include/linux/license.h | 1 + include/linux/module.h | 7 +++++-- 4 files changed, 8 insertions(+), 2 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index fa577978fbbd..d0166115a825 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __ASM_GENERIC_EXPORT_H #define __ASM_GENERIC_EXPORT_H diff --git a/include/linux/export.h b/include/linux/export.h index 941d075f03d6..aee5c86ae350 100644 --- a/include/linux/export.h +++ b/include/linux/export.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _LINUX_EXPORT_H #define _LINUX_EXPORT_H diff --git a/include/linux/license.h b/include/linux/license.h index decdbf43cb5c..7cce390f120b 100644 --- a/include/linux/license.h +++ b/include/linux/license.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef __LICENSE_H #define __LICENSE_H diff --git a/include/linux/module.h b/include/linux/module.h index 6d20895e7739..bd165ba68617 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -1,11 +1,14 @@ -#ifndef _LINUX_MODULE_H -#define _LINUX_MODULE_H +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Dynamic loading of modules into the kernel. * * Rewritten by Richard Henderson Dec 1996 * Rewritten again by Rusty Russell, 2002 */ + +#ifndef _LINUX_MODULE_H +#define _LINUX_MODULE_H + #include #include #include -- cgit v1.2.3 From b83b43ffc6e4b514ca034a0fbdee01322e2f7022 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 15 Oct 2019 09:00:55 -0400 Subject: fgraph: Fix function type mismatches of ftrace_graph_return using ftrace_stub The C compiler is allowing more checks to make sure that function pointers are assigned to the correct prototype function. Unfortunately, the function graph tracer uses a special name with its assigned ftrace_graph_return function pointer that maps to a stub function used by the function tracer (ftrace_stub). The ftrace_graph_return variable is compared to the ftrace_stub in some archs to know if the function graph tracer is enabled or not. This means we can not just simply create a new function stub that compares it without modifying all the archs. Instead, have the linker script create a function_graph_stub that maps to ftrace_stub, and this way we can define the prototype for it to match the prototype of ftrace_graph_return, and make the compiler checks all happy! 
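The mechanism is worth a short illustration: a linker-script assignment creates a second name for an existing address, and C code may then declare that name with whatever prototype it needs. Condensing the relevant pieces of the patch below:

	/* include/asm-generic/vmlinux.lds.h (linker script): */
	ftrace_graph_stub = ftrace_stub;

	/* kernel/trace/fgraph.c (C side): declare the alias with the
	 * prototype the function pointer expects - no cast required */
	extern void ftrace_graph_stub(struct ftrace_graph_ret *);

	trace_func_graph_ret_t ftrace_graph_return = ftrace_graph_stub;

Calls through the pointer still land in ftrace_stub, which simply returns without touching its arguments, so the prototype mismatch is harmless at run time while the compile-time types now agree.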
Link: http://lkml.kernel.org/r/20191015090055.789a0aed@gandalf.local.home Cc: linux-sh@vger.kernel.org Cc: Yoshinori Sato Cc: Rich Felker Reported-by: Sami Tolvanen Signed-off-by: Steven Rostedt (VMware) --- arch/sh/boot/compressed/misc.c | 5 +++++ include/asm-generic/vmlinux.lds.h | 17 ++++++++++++++--- kernel/trace/fgraph.c | 11 ++++++++--- 3 files changed, 27 insertions(+), 6 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c index c15cac9251b9..e69ec12cbbe6 100644 --- a/arch/sh/boot/compressed/misc.c +++ b/arch/sh/boot/compressed/misc.c @@ -111,6 +111,11 @@ void __stack_chk_fail(void) error("stack-protector: Kernel stack is corrupted\n"); } +/* Needed because vmlinux.lds.h references this */ +void ftrace_stub(void) +{ +} + #ifdef CONFIG_SUPERH64 #define stackalign 8 #else diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index dae64600ccbf..0f358be551cd 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -111,18 +111,29 @@ #ifdef CONFIG_FTRACE_MCOUNT_RECORD #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY +/* + * Need to also make ftrace_graph_stub point to ftrace_stub + * so that the same stub location may have different protocols + * and not mess up with C verifiers. + */ #define MCOUNT_REC() . = ALIGN(8); \ __start_mcount_loc = .; \ KEEP(*(__patchable_function_entries)) \ - __stop_mcount_loc = .; + __stop_mcount_loc = .; \ + ftrace_graph_stub = ftrace_stub; #else #define MCOUNT_REC() . = ALIGN(8); \ __start_mcount_loc = .; \ KEEP(*(__mcount_loc)) \ - __stop_mcount_loc = .; + __stop_mcount_loc = .; \ + ftrace_graph_stub = ftrace_stub; #endif #else -#define MCOUNT_REC() +# ifdef CONFIG_FUNCTION_TRACER +# define MCOUNT_REC() ftrace_graph_stub = ftrace_stub; +# else +# define MCOUNT_REC() +# endif #endif #ifdef CONFIG_TRACE_BRANCH_PROFILING diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index 7950a0356042..fa3ce10d0405 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c @@ -332,9 +332,14 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) return 0; } +/* + * Simply points to ftrace_stub, but with the proper protocol. + * Defined by the linker script in linux/vmlinux.lds.h + */ +extern void ftrace_graph_stub(struct ftrace_graph_ret *); + /* The callbacks that hook a function */ -trace_func_graph_ret_t ftrace_graph_return = - (trace_func_graph_ret_t)ftrace_stub; +trace_func_graph_ret_t ftrace_graph_return = ftrace_graph_stub; trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub; @@ -614,7 +619,7 @@ void unregister_ftrace_graph(struct fgraph_ops *gops) goto out; ftrace_graph_active--; - ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; + ftrace_graph_return = ftrace_graph_stub; ftrace_graph_entry = ftrace_graph_entry_stub; __ftrace_graph_entry = ftrace_graph_entry_stub; ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET); -- cgit v1.2.3 From 46f9469247c6f4697cbbf37e4b3961120bf07f29 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Mon, 18 Nov 2019 10:41:29 -0500 Subject: ftrace: Rename ftrace_graph_stub to ftrace_stub_graph The ftrace_graph_stub was created and points to ftrace_stub as a way to assign the functon graph tracer function pointer to a stub function with a different prototype than what ftrace_stub has and not trigger the C verifier. 
The ftrace_graph_stub was created via the linker script vmlinux.lds.h. Unfortunately, powerpc already uses the name ftrace_graph_stub for its internal implementation of the function graph tracer, and even though powerpc would still build, the change via the linker script broke function tracer on powerpc from working. By using the name ftrace_stub_graph, which does not exist anywhere else in the kernel, this should not be a problem. Link: https://lore.kernel.org/r/1573849732.5937.136.camel@lca.pw Fixes: b83b43ffc6e4 ("fgraph: Fix function type mismatches of ftrace_graph_return using ftrace_stub") Reorted-by: Qian Cai Signed-off-by: Steven Rostedt (VMware) --- include/asm-generic/vmlinux.lds.h | 8 ++++---- kernel/trace/fgraph.c | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 0f358be551cd..996db32c491b 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -112,7 +112,7 @@ #ifdef CONFIG_FTRACE_MCOUNT_RECORD #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY /* - * Need to also make ftrace_graph_stub point to ftrace_stub + * Need to also make ftrace_stub_graph point to ftrace_stub * so that the same stub location may have different protocols * and not mess up with C verifiers. */ @@ -120,17 +120,17 @@ __start_mcount_loc = .; \ KEEP(*(__patchable_function_entries)) \ __stop_mcount_loc = .; \ - ftrace_graph_stub = ftrace_stub; + ftrace_stub_graph = ftrace_stub; #else #define MCOUNT_REC() . = ALIGN(8); \ __start_mcount_loc = .; \ KEEP(*(__mcount_loc)) \ __stop_mcount_loc = .; \ - ftrace_graph_stub = ftrace_stub; + ftrace_stub_graph = ftrace_stub; #endif #else # ifdef CONFIG_FUNCTION_TRACER -# define MCOUNT_REC() ftrace_graph_stub = ftrace_stub; +# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub; # else # define MCOUNT_REC() # endif diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c index fa3ce10d0405..67e0c462b059 100644 --- a/kernel/trace/fgraph.c +++ b/kernel/trace/fgraph.c @@ -336,10 +336,10 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) * Simply points to ftrace_stub, but with the proper protocol. * Defined by the linker script in linux/vmlinux.lds.h */ -extern void ftrace_graph_stub(struct ftrace_graph_ret *); +extern void ftrace_stub_graph(struct ftrace_graph_ret *); /* The callbacks that hook a function */ -trace_func_graph_ret_t ftrace_graph_return = ftrace_graph_stub; +trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph; trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub; @@ -619,7 +619,7 @@ void unregister_ftrace_graph(struct fgraph_ops *gops) goto out; ftrace_graph_active--; - ftrace_graph_return = ftrace_graph_stub; + ftrace_graph_return = ftrace_stub_graph; ftrace_graph_entry = ftrace_graph_entry_stub; __ftrace_graph_entry = ftrace_graph_entry_stub; ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET); -- cgit v1.2.3 From b96f86534fa3102fd07246cf8b6cb5cc96788597 Mon Sep 17 00:00:00 2001 From: Dexuan Cui Date: Tue, 19 Nov 2019 23:16:04 -0800 Subject: x86/hyperv: Implement hv_is_hibernation_supported() The API will be used by the hv_balloon and hv_vmbus drivers. Balloon up/down and hot-add of memory must not be active if the user wants the Linux VM to support hibernation, because they are incompatible with hibernation according to Hyper-V team, e.g. 
upon suspend the balloon VSP doesn't save any info about the ballooned-out pages (if any); so, after Linux resumes, Linux balloon VSC expects that the VSP will return the pages if Linux is under memory pressure, but the VSP will never do that, since the VSP thinks it never stole the pages from the VM. So, if the user wants Linux VM to support hibernation, Linux must forbid balloon up/down and hot-add, and the only functionality of the balloon VSC driver is reporting the VM's memory pressure to the host. Ideally, when Linux detects that the user wants it to support hibernation, the balloon VSC should tell the VSP that it does not support ballooning and hot-add. However, the current version of the VSP requires the VSC should support these capabilities, otherwise the capability negotiation fails and the VSC can not load at all, so with the later changes to the VSC driver, Linux VM still reports to the VSP that the VSC supports these capabilities, but the VSC ignores the VSP's requests of balloon up/down and hot add, and reports an error to the VSP, when applicable. BTW, in the future the balloon VSP driver will allow the VSC to not support the capabilities of balloon up/down and hot add. The ACPI S4 state is not a must for hibernation to work, because Linux is able to hibernate as long as the system can shut down. However in practice we decide to artificially use the presence of the virtual ACPI S4 state as an indicator of the user's intent of using hibernation, because Linux VM must find a way to know if the user wants to use the hibernation feature or not. By default, Hyper-V does not enable the virtual ACPI S4 state; on recent Hyper-V hosts (e.g. RS5, 19H1), the administrator is able to enable the state for a VM by WMI commands. Once all the vmbus and VSC patches for the hibernation feature are accepted, an extra patch will be submitted to forbid hibernation if the virtual ACPI S4 state is absent, i.e. hv_is_hibernation_supported() is false. Signed-off-by: Dexuan Cui Reviewed-by: Michael Kelley Acked-by: Thomas Gleixner Signed-off-by: Sasha Levin --- arch/x86/hyperv/hv_init.c | 7 +++++++ include/asm-generic/mshyperv.h | 2 ++ 2 files changed, 9 insertions(+) (limited to 'include/asm-generic') diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index b2daf0ebdb19..7552fe196d83 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -7,6 +7,7 @@ * Author : K. Y. 
Srinivasan */ +#include #include #include #include @@ -439,3 +440,9 @@ bool hv_is_hyperv_initialized(void) return hypercall_msr.enable; } EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized); + +bool hv_is_hibernation_supported(void) +{ + return acpi_sleep_state_supported(ACPI_STATE_S4); +} +EXPORT_SYMBOL_GPL(hv_is_hibernation_supported); diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index 18d8e2d8210f..b3f1082cc435 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -166,10 +166,12 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, void hyperv_report_panic(struct pt_regs *regs, long err); void hyperv_report_panic_msg(phys_addr_t pa, size_t size); bool hv_is_hyperv_initialized(void); +bool hv_is_hibernation_supported(void); void hyperv_cleanup(void); void hv_setup_sched_clock(void *sched_clock); #else /* CONFIG_HYPERV */ static inline bool hv_is_hyperv_initialized(void) { return false; } +static inline bool hv_is_hibernation_supported(void) { return false; } static inline void hyperv_cleanup(void) {} #endif /* CONFIG_HYPERV */ -- cgit v1.2.3 From a1b39bae16a62ce4aae02d958224f19316d98b24 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Fri, 25 Oct 2019 08:10:37 +0200 Subject: asm-generic: Make msi.h a mandatory include/asm header msi.h is generic for all architectures except x86, which has its own version. Enabling MSI by adding msi.h to every architecture's Kbuild is just an additional step which doesn't need to be done. Make msi.h mandatory in the asm-generic/Kbuild so we don't have to do it for each architecture. Suggested-by: Christoph Hellwig Link: https://lore.kernel.org/r/c991669e29a79b1a8e28c3b4b3a125801a693de8.1571983829.git.michal.simek@xilinx.com Tested-by: Paul Walmsley # build only, rv32/rv64 Signed-off-by: Michal Simek Signed-off-by: Bjorn Helgaas Reviewed-by: Masahiro Yamada Acked-by: Waiman Long Acked-by: Paul Walmsley # arch/riscv --- arch/arc/include/asm/Kbuild | 1 - arch/arm/include/asm/Kbuild | 1 - arch/arm64/include/asm/Kbuild | 1 - arch/mips/include/asm/Kbuild | 1 - arch/powerpc/include/asm/Kbuild | 1 - arch/riscv/include/asm/Kbuild | 1 - arch/sparc/include/asm/Kbuild | 1 - include/asm-generic/Kbuild | 1 + 8 files changed, 1 insertion(+), 7 deletions(-) (limited to 'include/asm-generic') diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 393d4f5e1450..1b505694691e 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -17,7 +17,6 @@ generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += mmiowb.h -generic-y += msi.h generic-y += parport.h generic-y += percpu.h generic-y += preempt.h diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 68ca86f85eb7..fa579b23b4df 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -12,7 +12,6 @@ generic-y += local.h generic-y += local64.h generic-y += mm-arch-hooks.h generic-y += mmiowb.h -generic-y += msi.h generic-y += parport.h generic-y += preempt.h generic-y += seccomp.h diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 98a5405c8558..bd23f87d6c55 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -16,7 +16,6 @@ generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += mmiowb.h -generic-y += msi.h generic-y += qrwlock.h generic-y += qspinlock.h generic-y += serial.h diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild 
index c8b595c60910..61b0fc2026e6 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -13,7 +13,6 @@ generic-y += irq_work.h generic-y += local64.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h -generic-y += msi.h generic-y += parport.h generic-y += percpu.h generic-y += preempt.h diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 64870c7be4a3..17726f2e46de 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -10,4 +10,3 @@ generic-y += local64.h generic-y += mcs_spinlock.h generic-y += preempt.h generic-y += vtime.h -generic-y += msi.h diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 16970f246860..1efaeddf1e4b 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -22,7 +22,6 @@ generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mm-arch-hooks.h -generic-y += msi.h generic-y += percpu.h generic-y += preempt.h generic-y += sections.h diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index b6212164847b..62de2eb2773d 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild @@ -18,7 +18,6 @@ generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += mmiowb.h generic-y += module.h -generic-y += msi.h generic-y += preempt.h generic-y += serial.h generic-y += trace_clock.h diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index adff14fcb8e4..ddfee1bd9dc1 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -4,4 +4,5 @@ # (This file is not included when SRCARCH=um since UML borrows several # asm headers from the host architecutre.) +mandatory-y += msi.h mandatory-y += simd.h -- cgit v1.2.3 From b08861d10bbeaae4d592d5cc00b2420e2e7ba3ac Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Sat, 30 Nov 2019 17:51:10 -0800 Subject: asm-generic/tlb: stub out pud_free_tlb() if nopud ... ... independent of __ARCH_HAS_4LEVEL_HACK This came up when removing __ARCH_HAS_5LEVEL_HACK for ARC as code bloat. With this patch we see the following code reduction | bloat-o-meter2 vmlinux-B-elide-ARCH_USE_5LEVEL_HACK vmlinux-C-elide-pud_free_tlb | add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-104 (-104) | function old new delta | free_pgd_range 656 552 -104 | Total: Before=4137276, After=4137172, chg -1.000000% Note: The primary change is alternate defintion for pud_free_tlb() but while there also removed empty stubs for __pud_free_tlb, which is anyhow called only from pud_free_tlb() Link: http://lkml.kernel.org/r/20191016162400.14796-3-vgupta@synopsys.com Signed-off-by: Vineet Gupta Acked-by: Kirill A. Shutemov Acked-by: Linus Torvalds Cc: "Aneesh Kumar K . 
V" Cc: Arnd Bergmann Cc: Nick Piggin Cc: Peter Zijlstra Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/4level-fixup.h | 1 - include/asm-generic/pgtable-nopud.h | 2 +- include/asm-generic/tlb.h | 2 -- 3 files changed, 1 insertion(+), 4 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h index e3667c9a33a5..c86cf7cb4bba 100644 --- a/include/asm-generic/4level-fixup.h +++ b/include/asm-generic/4level-fixup.h @@ -30,7 +30,6 @@ #undef pud_free_tlb #define pud_free_tlb(tlb, x, addr) do { } while (0) #define pud_free(mm, x) do { } while (0) -#define __pud_free_tlb(tlb, x, addr) do { } while (0) #undef pud_addr_end #define pud_addr_end(addr, end) (end) diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index c77a1d301155..d3776cb494c0 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h @@ -59,7 +59,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) */ #define pud_alloc_one(mm, address) NULL #define pud_free(mm, x) do { } while (0) -#define __pud_free_tlb(tlb, x, a) do { } while (0) +#define pud_free_tlb(tlb, x, a) do { } while (0) #undef pud_addr_end #define pud_addr_end(addr, end) (end) diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 04c0644006fd..5e0c2d01e656 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -584,7 +584,6 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm } while (0) #endif -#ifndef __ARCH_HAS_4LEVEL_HACK #ifndef pud_free_tlb #define pud_free_tlb(tlb, pudp, address) \ do { \ @@ -594,7 +593,6 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm __pud_free_tlb(tlb, pudp, address); \ } while (0) #endif -#endif #ifndef __ARCH_HAS_5LEVEL_HACK #ifndef p4d_free_tlb -- cgit v1.2.3 From bffd9723477a8459eb7cbdd7f1a82fde83df46e6 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Sat, 30 Nov 2019 17:51:13 -0800 Subject: asm-generic/tlb: stub out p4d_free_tlb() if nop4d ... ... independent of __ARCH_HAS_5LEVEL_HACK This came up when removing __ARCH_HAS_5LEVEL_HACK for ARC as code bloat. With this patch we see the following code reduction | bloat-o-meter2 vmlinux-C-elide-pud_free_tlb vmlinux-D-elide-p4d_free_tlb | add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-104 (-104) | function old new delta | free_pgd_range 552 422 -130 | Total: Before=4137172, After=4137042, chg -1.000000% Link: http://lkml.kernel.org/r/20191016162400.14796-4-vgupta@synopsys.com Signed-off-by: Vineet Gupta Acked-by: Kirill A. Shutemov Acked-by: Linus Torvalds Cc: "Aneesh Kumar K . 
V" Cc: Arnd Bergmann Cc: Nick Piggin Cc: Peter Zijlstra Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/5level-fixup.h | 1 - include/asm-generic/pgtable-nop4d.h | 2 +- include/asm-generic/tlb.h | 2 -- 3 files changed, 1 insertion(+), 4 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h index f6947da70d71..4c74b1c1d13b 100644 --- a/include/asm-generic/5level-fixup.h +++ b/include/asm-generic/5level-fixup.h @@ -51,7 +51,6 @@ static inline int p4d_present(p4d_t p4d) #undef p4d_free_tlb #define p4d_free_tlb(tlb, x, addr) do { } while (0) #define p4d_free(mm, x) do { } while (0) -#define __p4d_free_tlb(tlb, x, addr) do { } while (0) #undef p4d_addr_end #define p4d_addr_end(addr, end) (end) diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h index aebab905e6cd..ce2cbb3c380f 100644 --- a/include/asm-generic/pgtable-nop4d.h +++ b/include/asm-generic/pgtable-nop4d.h @@ -50,7 +50,7 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) */ #define p4d_alloc_one(mm, address) NULL #define p4d_free(mm, x) do { } while (0) -#define __p4d_free_tlb(tlb, x, a) do { } while (0) +#define p4d_free_tlb(tlb, x, a) do { } while (0) #undef p4d_addr_end #define p4d_addr_end(addr, end) (end) diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 5e0c2d01e656..05dddc17522b 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -594,7 +594,6 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm } while (0) #endif -#ifndef __ARCH_HAS_5LEVEL_HACK #ifndef p4d_free_tlb #define p4d_free_tlb(tlb, pudp, address) \ do { \ @@ -603,7 +602,6 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm __p4d_free_tlb(tlb, pudp, address); \ } while (0) #endif -#endif #endif /* CONFIG_MMU */ -- cgit v1.2.3 From 3d14f1110a5c015e816e8e78ccec6b5c90d2d44e Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Sat, 30 Nov 2019 17:51:16 -0800 Subject: asm-generic/tlb: stub out pmd_free_tlb() if nopmd This came up when removing __ARCH_HAS_5LEVEL_HACK for ARC as code bloat. With this patch we see the following code reduction. | bloat-o-meter2 vmlinux-E-elide-p?d_clear_bad vmlinux-F-elide-pmd_free_tlb | add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-112 (-112) | function old new delta | free_pgd_range 422 310 -112 | Total: Before=4137042, After=4136930, chg -1.000000% Note that pmd folding can be tricky: In 2-level setup (where pmd is conceptually folded) most pmd routines are valid and refer to upper levels. In this patch we can, but see next patch for example where we can't Link: http://lkml.kernel.org/r/20191016162400.14796-5-vgupta@synopsys.com Signed-off-by: Vineet Gupta Acked-by: Kirill A. Shutemov Cc: "Aneesh Kumar K . 
V" Cc: Arnd Bergmann Cc: Nick Piggin Cc: Peter Zijlstra Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/pgtable-nopmd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h index b85b8271a73d..0d9b28cba16d 100644 --- a/include/asm-generic/pgtable-nopmd.h +++ b/include/asm-generic/pgtable-nopmd.h @@ -60,7 +60,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { } -#define __pmd_free_tlb(tlb, x, a) do { } while (0) +#define pmd_free_tlb(tlb, x, a) do { } while (0) #undef pmd_addr_end #define pmd_addr_end(addr, end) (end) -- cgit v1.2.3 From f2400abc782dc38a1fee9cfc13589d31f1a0404f Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Sat, 30 Nov 2019 17:51:20 -0800 Subject: asm-generic/mm: stub out p{4,u}d_clear_bad() if __PAGETABLE_P{4,U}D_FOLDED This came up when removing __ARCH_HAS_5LEVEL_HACK for ARC as code bloat. With this patch we see the following code reduction. | bloat-o-meter2 vmlinux-D-elide-p4d_free_tlb vmlinux-E-elide-p?d_clear_bad | add/remove: 0/2 grow/shrink: 0/0 up/down: 0/-40 (-40) | function old new delta | pud_clear_bad 20 - -20 | p4d_clear_bad 20 - -20 | Total: Before=4136930, After=4136890, chg -1.000000% Link: http://lkml.kernel.org/r/20191016162400.14796-6-vgupta@synopsys.com Signed-off-by: Vineet Gupta Acked-by: Kirill A. Shutemov Cc: Arnd Bergmann Cc: Will Deacon Cc: "Aneesh Kumar K . V" Cc: Nick Piggin Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/pgtable.h | 11 +++++++++++ mm/pgtable-generic.c | 9 +++++++++ 2 files changed, 20 insertions(+) (limited to 'include/asm-generic') diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 818691846c90..9cdcbc7c0b7b 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -558,8 +558,19 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) * Do the tests inline, but report and clear the bad entry in mm/memory.c. */ void pgd_clear_bad(pgd_t *); + +#ifndef __PAGETABLE_P4D_FOLDED void p4d_clear_bad(p4d_t *); +#else +#define p4d_clear_bad(p4d) do { } while (0) +#endif + +#ifndef __PAGETABLE_PUD_FOLDED void pud_clear_bad(pud_t *); +#else +#define pud_clear_bad(p4d) do { } while (0) +#endif + void pmd_clear_bad(pmd_t *); static inline int pgd_none_or_clear_bad(pgd_t *pgd) diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index 532c29276fce..3d7c01e76efc 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c @@ -24,18 +24,27 @@ void pgd_clear_bad(pgd_t *pgd) pgd_clear(pgd); } +#ifndef __PAGETABLE_P4D_FOLDED void p4d_clear_bad(p4d_t *p4d) { p4d_ERROR(*p4d); p4d_clear(p4d); } +#endif +#ifndef __PAGETABLE_PUD_FOLDED void pud_clear_bad(pud_t *pud) { pud_ERROR(*pud); pud_clear(pud); } +#endif +/* + * Note that the pmd variant below can't be stub'ed out just as for p4d/pud + * above. 
pmd folding is special and typically pmd_* macros refer to upper + * level even when folded + */ void pmd_clear_bad(pmd_t *pmd) { pmd_ERROR(*pmd); -- cgit v1.2.3 From bf1a12a8095615c9486f5463ca473d2d69ff6952 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 30 Nov 2019 17:51:29 -0800 Subject: mm: move the backup x_devmap() functions to asm-generic/pgtable.h The asm-generic/pgtable.h include file appears to be the correct place for the backup x_devmap() inline functions. Moving them here is also necessary if we want to include x_devmap() in the [pmd|pud]_unstable functions. So move the x_devmap() functions to asm-generic/pgtable.h Link: http://lkml.kernel.org/r/20191115115808.21181-1-thomas_os@shipmail.org Signed-off-by: Thomas Hellstrom Cc: Arnd Bergmann Cc: "Kirill A. Shutemov" Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/pgtable.h | 15 +++++++++++++++ include/linux/mm.h | 15 --------------- 2 files changed, 15 insertions(+), 15 deletions(-) (limited to 'include/asm-generic') diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 9cdcbc7c0b7b..3127f9028f54 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -914,6 +914,21 @@ static inline int pud_write(pud_t pud) } #endif /* pud_write */ +#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) +static inline int pmd_devmap(pmd_t pmd) +{ + return 0; +} +static inline int pud_devmap(pud_t pud) +{ + return 0; +} +static inline int pgd_devmap(pgd_t pgd) +{ + return 0; +} +#endif + #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) diff --git a/include/linux/mm.h b/include/linux/mm.h index b5b2523c80af..06b51d8728ec 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -564,21 +564,6 @@ int vma_is_stack_for_current(struct vm_area_struct *vma); struct mmu_gather; struct inode; -#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) -static inline int pmd_devmap(pmd_t pmd) -{ - return 0; -} -static inline int pud_devmap(pud_t pud) -{ - return 0; -} -static inline int pgd_devmap(pgd_t pgd) -{ - return 0; -} -#endif - /* * FIXME: take this include out, include page-flags.h in * files which need it (119 of them) -- cgit v1.2.3 From 625110b5e9dae9074d8a7e67dd07f821a053eed7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 30 Nov 2019 17:51:32 -0800 Subject: mm/memory.c: fix a huge pud insertion race during faulting A huge pud page can theoretically be faulted in racing with pmd_alloc() in __handle_mm_fault(). That will lead to pmd_alloc() returning an invalid pmd pointer. Fix this by adding a pud_trans_unstable() function similar to pmd_trans_unstable() and check whether the pud is really stable before using the pmd pointer. Race: Thread 1: Thread 2: Comment create_huge_pud() Fallback - not taken. create_huge_pud() Taken. pmd_alloc() Returns an invalid pointer. This will result in user-visible huge page data corruption. Note that this was caught during a code audit rather than a real experienced problem. It looks to me like the only implementation that currently creates huge pud pagetable entries is dev_dax_huge_fault() which doesn't appear to care much about private (COW) mappings or write-tracking which is, I believe, a prerequisite for create_huge_pud() falling back on thread 1, but not in thread 2. 
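Condensed, the failure mode and the fix in __handle_mm_fault() look like this (abbreviated from the patch below, error handling elided):

	retry_pud:
		if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma))
			ret = create_huge_pud(&vmf);	/* may install a huge pud */
		...
		vmf.pmd = pmd_alloc(mm, vmf.pud, address);
		/* If thread 2 installed a huge pud meanwhile, pud_none() was
		 * false inside pmd_alloc(), so no pmd page was allocated and
		 * the returned pointer aims into the huge mapping itself. */
		if (pud_trans_unstable(vmf.pud))
			goto retry_pud;		/* redo the pud-level handling */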
Link: http://lkml.kernel.org/r/20191115115808.21181-2-thomas_os@shipmail.org
Fixes: a00cc7d9dd93 ("mm, x86: add support for PUD-sized transparent hugepages")
Signed-off-by: Thomas Hellstrom
Acked-by: Kirill A. Shutemov
Cc: Arnd Bergmann
Cc: Matthew Wilcox
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-generic/pgtable.h | 25 +++++++++++++++++++++++++
 mm/memory.c                   |  6 ++++++
 2 files changed, 31 insertions(+)

(limited to 'include/asm-generic')

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 3127f9028f54..798ea36a0549 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -938,6 +938,31 @@ static inline int pud_trans_huge(pud_t pud)
 }
 #endif
 
+/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
+static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
+{
+	pud_t pudval = READ_ONCE(*pud);
+
+	if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+		return 1;
+	if (unlikely(pud_bad(pudval))) {
+		pud_clear_bad(pud);
+		return 1;
+	}
+	return 0;
+}
+
+/* See pmd_trans_unstable for discussion. */
+static inline int pud_trans_unstable(pud_t *pud)
+{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+	return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
+#else
+	return 0;
+#endif
+}
+
 #ifndef pmd_read_atomic
 static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
 {

diff --git a/mm/memory.c b/mm/memory.c
index 62b5cce653f6..c3902201989f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4010,6 +4010,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	vmf.pud = pud_alloc(mm, p4d, address);
 	if (!vmf.pud)
 		return VM_FAULT_OOM;
+retry_pud:
 	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
 		ret = create_huge_pud(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
@@ -4036,6 +4037,11 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
 	if (!vmf.pmd)
 		return VM_FAULT_OOM;
+
+	/* Huge pud page fault raced with pmd_alloc? */
+	if (pud_trans_unstable(vmf.pud))
+		goto retry_pud;
+
 	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
 		ret = create_huge_pmd(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
--
cgit v1.2.3

From 169c474fb22d8a5e909e172f177b957546d0519d Mon Sep 17 00:00:00 2001
From: William Breathitt Gray
Date: Wed, 4 Dec 2019 16:50:57 -0800
Subject: bitops: introduce the for_each_set_clump8 macro

Patch series "Introduce the for_each_set_clump8 macro", v18.

While adding GPIO get_multiple/set_multiple callback support for various
drivers, I noticed a recurring looping pattern that would be useful to
standardize as a macro.

This patchset introduces the for_each_set_clump8 macro and utilizes it in
several GPIO drivers. The for_each_set_clump8 macro facilitates a
for-loop syntax that iterates over a memory region one entire group of
set bits at a time.

For example, suppose you would like to iterate over a 32-bit integer 8
bits at a time, skipping over 8-bit groups with no set bit, where
XXXXXXXX represents the current 8-bit group:

    Example:     10111110 00000000 11111111 00110011
    First loop:  10111110 00000000 11111111 XXXXXXXX
    Second loop: 10111110 00000000 XXXXXXXX 00110011
    Third loop:  XXXXXXXX 00000000 11111111 00110011

Each iteration of the loop returns the next 8-bit group that has at least
one set bit.
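For illustration, a hypothetical caller (not part of this patch) could
walk the example value above like so:

	/* hypothetical snippet; assumes <linux/bitops.h> and <linux/printk.h> */
	unsigned long bits[] = { 0xBE00FF33UL };	/* the example above */
	unsigned long clump;
	unsigned long start;

	for_each_set_clump8(start, clump, bits, 32)
		pr_info("clump 0x%02lX at bit offset %lu\n", clump, start);

This would print clumps 0x33, 0xFF and 0xBE at bit offsets 0, 8 and 24
respectively; the all-zero group at offset 16 is skipped.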
The for_each_set_clump8 macro has four parameters:

	* start: set to the bit offset of the current clump
	* clump: set to the current clump value
	* bits: bitmap to search within
	* size: bitmap size in number of bits

In this version of the patchset, the for_each_set_clump macro has been
reimplemented and simplified based on the suggestions provided by Rasmus
Villemoes and Andy Shevchenko in the version 4 submission. In particular,
the function of the for_each_set_clump macro has been restricted to
handle only 8-bit clumps; the drivers that use the for_each_set_clump
macro only handle 8-bit ports, so a generic for_each_set_clump
implementation is not necessary. Thus, a solution for large clumps (i.e.
those larger than the width of a bitmap word) can be postponed until a
driver appears that actually requires such a generic for_each_set_clump
implementation.

For what it's worth, a semi-generic for_each_set_clump (i.e. for clumps
smaller than the width of a bitmap word) can be implemented by simply
replacing the hardcoded '8' and '0xFF' instances with respective
variables. I have not yet had a need for such an implementation, and
since it falls short of a true generic for_each_set_clump function, I
have decided to forgo such an implementation for now.

In addition, the bitmap_get_value8 and bitmap_set_value8 functions are
introduced to get and set 8-bit values respectively. Their use is based
on the behavior suggested in the patchset version 4 review.

This patch (of 14):

This macro iterates over each 8-bit group of bits (clump) with set bits
within a bitmap memory region. For each iteration, "start" is set to the
bit offset of the found clump, while the respective clump value is stored
to the location pointed by "clump". Additionally, the bitmap_get_value8
and bitmap_set_value8 functions are introduced to respectively get and
set an 8-bit value in a bitmap memory region.

[gustavo@embeddedor.com: fix potential sign-extension overflow]
  Link: http://lkml.kernel.org/r/20191015184657.GA26541@embeddedor
[akpm@linux-foundation.org: s/ULL/UL/, per Joe]
[vilhelm.gray@gmail.com: add for_each_set_clump8 documentation]
  Link: http://lkml.kernel.org/r/20191016161825.301082-1-vilhelm.gray@gmail.com
Link: http://lkml.kernel.org/r/893c3b4f03266c9496137cc98ac2b1bd27f92c73.1570641097.git.vilhelm.gray@gmail.com
Signed-off-by: William Breathitt Gray
Signed-off-by: Gustavo A. R. Silva
Suggested-by: Andy Shevchenko
Suggested-by: Rasmus Villemoes
Suggested-by: Lukas Wunner
Tested-by: Andy Shevchenko
Cc: Arnd Bergmann
Cc: Linus Walleij
Cc: Bartosz Golaszewski
Cc: Masahiro Yamada
Cc: Geert Uytterhoeven
Cc: Phil Reid
Cc: Geert Uytterhoeven
Cc: Mathias Duckeck
Cc: Morten Hein Tiljeset
Cc: Sean Nyekjaer
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/asm-generic/bitops/find.h | 17 +++++++++++++++++
 include/linux/bitmap.h            | 35 +++++++++++++++++++++++++++++++++++
 include/linux/bitops.h            | 12 ++++++++++++
 lib/find_bit.c                    | 14 ++++++++++++++
 4 files changed, 78 insertions(+)

(limited to 'include/asm-generic')

diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 8a1ee10014de..9fdf21302fdf 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -80,4 +80,21 @@ extern unsigned long find_first_zero_bit(const unsigned long *addr,
 
 #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
+/**
+ * find_next_clump8 - find next 8-bit clump with set bits in a memory region
+ * @clump: location to store copy of found clump
+ * @addr: address to base the search on
+ * @size: bitmap size in number of bits
+ * @offset: bit offset at which to start searching
+ *
+ * Returns the bit offset for the next set clump; the found clump value is
+ * copied to the location pointed by @clump. If no bits are set, returns @size.
+ */
+extern unsigned long find_next_clump8(unsigned long *clump,
+				      const unsigned long *addr,
+				      unsigned long size, unsigned long offset);
+
+#define find_first_clump8(clump, bits, size) \
+	find_next_clump8((clump), (bits), (size), 0)
+
 #endif /*_ASM_GENERIC_BITOPS_FIND_H_ */

diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 29fc933df3bf..9f046609e809 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -66,6 +66,8 @@
 * bitmap_allocate_region(bitmap, pos, order)	Allocate specified bit region
 * bitmap_from_arr32(dst, buf, nbits)		Copy nbits from u32[] buf to dst
 * bitmap_to_arr32(buf, src, nbits)		Copy nbits from buf to u32[] dst
+ * bitmap_get_value8(map, start)		Get 8bit value from map at start
+ * bitmap_set_value8(map, value, start)	Set 8bit value to map at start
 *
 * Note, bitmap_zero() and bitmap_fill() operate over the region of
 * unsigned longs, that is, bits behind bitmap till the unsigned long
@@ -489,6 +491,39 @@ static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
 	dst[1] = mask >> 32;
 }
 
+/**
+ * bitmap_get_value8 - get an 8-bit value within a memory region
+ * @map: address to the bitmap memory region
+ * @start: bit offset of the 8-bit value; must be a multiple of 8
+ *
+ * Returns the 8-bit value located at the @start bit offset within the @map
+ * memory region.
+ */
+static inline unsigned long bitmap_get_value8(const unsigned long *map,
+					      unsigned long start)
+{
+	const size_t index = BIT_WORD(start);
+	const unsigned long offset = start % BITS_PER_LONG;
+
+	return (map[index] >> offset) & 0xFF;
+}
+
+/**
+ * bitmap_set_value8 - set an 8-bit value within a memory region
+ * @map: address to the bitmap memory region
+ * @value: the 8-bit value; values wider than 8 bits may clobber bitmap
+ * @start: bit offset of the 8-bit value; must be a multiple of 8
+ */
+static inline void bitmap_set_value8(unsigned long *map, unsigned long value,
+				     unsigned long start)
+{
+	const size_t index = BIT_WORD(start);
+	const unsigned long offset = start % BITS_PER_LONG;
+
+	map[index] &= ~(0xFFUL << offset);
+	map[index] |= value << offset;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __LINUX_BITMAP_H */

diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index c94a9ff9f082..e479067c202c 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -47,6 +47,18 @@ extern unsigned long __sw_hweight64(__u64 w);
 	     (bit) < (size); \
 	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
 
+/**
+ * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
+ * @start: bit offset to start search and to store the current iteration offset
+ * @clump: location to store copy of current 8-bit clump
+ * @bits: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_clump8(start, clump, bits, size) \
+	for ((start) = find_first_clump8(&(clump), (bits), (size)); \
+	     (start) < (size); \
+	     (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
+
 static inline int get_bitmask_order(unsigned int count)
 {
 	int order;

diff --git a/lib/find_bit.c b/lib/find_bit.c
index 5c51eb45178a..e35a76b291e6 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -214,3 +214,17 @@ EXPORT_SYMBOL(find_next_bit_le);
 #endif
 
 #endif /* __BIG_ENDIAN */
+
+unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr,
+			       unsigned long size, unsigned long offset)
+{
+	offset = find_next_bit(addr, size, offset);
+	if (offset == size)
+		return size;
+
+	offset = round_down(offset, 8);
+	*clump = bitmap_get_value8(addr, offset);
+
+	return offset;
+}
+EXPORT_SYMBOL(find_next_clump8);
--
cgit v1.2.3

From f949286c668aed5aa24acdb5838be9cfd9513bd3 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Wed, 4 Dec 2019 16:54:32 -0800
Subject: mm: remove __ARCH_HAS_4LEVEL_HACK and include/asm-generic/4level-fixup.h

There are no architectures that use include/asm-generic/4level-fixup.h,
therefore it can be removed along with the __ARCH_HAS_4LEVEL_HACK define.

Link: http://lkml.kernel.org/r/1572938135-31886-14-git-send-email-rppt@kernel.org
Signed-off-by: Mike Rapoport
Cc: Anatoly Pugachev
Cc: Anton Ivanov
Cc: Arnd Bergmann
Cc: "David S. Miller"
Cc: Geert Uytterhoeven
Cc: Greentime Hu
Cc: Greg Ungerer
Cc: Helge Deller
Cc: "James E.J. Bottomley"
Cc: Jeff Dike
Cc: "Kirill A. Shutemov"
Shutemov" Cc: Mark Salter Cc: Matt Turner Cc: Michal Simek Cc: Peter Rosin Cc: Richard Weinberger Cc: Rolf Eike Beer Cc: Russell King Cc: Russell King Cc: Sam Creasey Cc: Vincent Chen Cc: Vineet Gupta Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/4level-fixup.h | 39 -------------------------------------- include/linux/mm.h | 10 +++++----- mm/memory.c | 8 -------- 3 files changed, 5 insertions(+), 52 deletions(-) delete mode 100644 include/asm-generic/4level-fixup.h (limited to 'include/asm-generic') diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h deleted file mode 100644 index c86cf7cb4bba..000000000000 --- a/include/asm-generic/4level-fixup.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _4LEVEL_FIXUP_H -#define _4LEVEL_FIXUP_H - -#define __ARCH_HAS_4LEVEL_HACK -#define __PAGETABLE_PUD_FOLDED 1 - -#define PUD_SHIFT PGDIR_SHIFT -#define PUD_SIZE PGDIR_SIZE -#define PUD_MASK PGDIR_MASK -#define PTRS_PER_PUD 1 - -#define pud_t pgd_t - -#define pmd_alloc(mm, pud, address) \ - ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \ - NULL: pmd_offset(pud, address)) - -#define pud_offset(pgd, start) (pgd) -#define pud_none(pud) 0 -#define pud_bad(pud) 0 -#define pud_present(pud) 1 -#define pud_ERROR(pud) do { } while (0) -#define pud_clear(pud) pgd_clear(pud) -#define pud_val(pud) pgd_val(pud) -#define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd) -#define pud_page(pud) pgd_page(pud) -#define pud_page_vaddr(pud) pgd_page_vaddr(pud) - -#undef pud_free_tlb -#define pud_free_tlb(tlb, x, addr) do { } while (0) -#define pud_free(mm, x) do { } while (0) - -#undef pud_addr_end -#define pud_addr_end(addr, end) (end) - -#include - -#endif diff --git a/include/linux/mm.h b/include/linux/mm.h index 8b0ef04b6d15..c97ea3b694e6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1838,12 +1838,12 @@ static inline void mm_dec_nr_ptes(struct mm_struct *mm) {} int __pte_alloc(struct mm_struct *mm, pmd_t *pmd); int __pte_alloc_kernel(pmd_t *pmd); +#if defined(CONFIG_MMU) + /* - * The following ifdef needed to get the 4level-fixup.h header to work. - * Remove it when 4level-fixup.h has been removed. + * The following ifdef needed to get the 5level-fixup.h header to work. + * Remove it when 5level-fixup.h has been removed. */ -#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK) - #ifndef __ARCH_HAS_5LEVEL_HACK static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) @@ -1865,7 +1865,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? 
 		NULL: pmd_offset(pud, address);
 }
-#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+#endif /* CONFIG_MMU */
 
 #if USE_SPLIT_PTE_PTLOCKS
 #if ALLOC_SPLIT_PTLOCKS

diff --git a/mm/memory.c b/mm/memory.c
index e455160e0f75..606da187d1de 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4197,19 +4197,11 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 	smp_wmb(); /* See comment in __pte_alloc */
 
 	ptl = pud_lock(mm, pud);
-#ifndef __ARCH_HAS_4LEVEL_HACK
 	if (!pud_present(*pud)) {
 		mm_inc_nr_pmds(mm);
 		pud_populate(mm, pud, new);
 	} else	/* Another has populated it */
 		pmd_free(mm, new);
-#else
-	if (!pgd_present(*pud)) {
-		mm_inc_nr_pmds(mm);
-		pgd_populate(mm, pud, new);
-	} else /* Another has populated it */
-		pmd_free(mm, new);
-#endif /* __ARCH_HAS_4LEVEL_HACK */
 	spin_unlock(ptl);
 	return 0;
 }
--
cgit v1.2.3
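For reference, with the 4level hack gone, generic MM code allocates
through every page-table level unconditionally; a minimal sketch of the
allocation chain (mirroring __handle_mm_fault(), error handling omitted):

	pgd_t *pgd = pgd_offset(mm, address);
	p4d_t *p4d = p4d_alloc(mm, pgd, address);
	pud_t *pud = pud_alloc(mm, p4d, address);
	pmd_t *pmd = pmd_alloc(mm, pud, address);

On architectures with folded levels, the corresponding p?d_alloc() calls
collapse at compile time to the level above, which is what the fixup
headers used to emulate by hand.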