author    Stephen Rothwell <sfr@canb.auug.org.au>  2015-04-10 20:31:42 +1000
committer Stephen Rothwell <sfr@canb.auug.org.au>  2015-04-10 20:31:42 +1000
commit    cb9b9519c1b7e8b3ced34b22bffb090592a6dbb1 (patch)
tree      2cd6919dc97025b1e33ae12b75494524968ec28c
parent    b7995a0dfc540e3a9beacb5cfe0e896506323ae3 (diff)
parent    836ea414d24bfa10ffffc37a80973429f1c27e24 (diff)

Merge branch 'akpm/master'
-rw-r--r--  .gitignore                                     |   1
-rw-r--r--  Documentation/ABI/testing/sysfs-class-bdi      |   8
-rw-r--r--  Documentation/spi/spidev_test.c                |   4
-rw-r--r--  MAINTAINERS                                    |   1
-rw-r--r--  arch/mips/configs/ip32_defconfig               |   3
-rw-r--r--  arch/mips/include/asm/mach-ip32/mc146818rtc.h  |  36
-rw-r--r--  arch/mips/sgi-ip32/ip32-platform.c             |  46
-rw-r--r--  arch/mips/sgi-ip32/ip32-reset.c                | 131
-rw-r--r--  arch/powerpc/oprofile/cell/spu_task_sync.c     |  13
-rw-r--r--  arch/unicore32/kernel/fpu-ucf64.c              |   4
-rw-r--r--  arch/x86/Kconfig                               |   2
-rw-r--r--  arch/x86/include/asm/mtrr.h                    |   7
-rw-r--r--  arch/x86/include/uapi/asm/mtrr.h               |  12
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c             | 192
-rw-r--r--  arch/x86/mm/pat.c                              |   4
-rw-r--r--  arch/x86/mm/pgtable.c                          |  53
-rw-r--r--  drivers/oprofile/buffer_sync.c                 |  30
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c                   |   4
-rw-r--r--  drivers/rtc/rtc-s5m.c                          |  10
-rw-r--r--  drivers/w1/w1_int.c                            |   3
-rw-r--r--  fs/exec.c                                      |   6
-rw-r--r--  kernel/fork.c                                  |  20
-rw-r--r--  kernel/sys.c                                   |  47
-rw-r--r--  lib/Kconfig                                    |   5
-rw-r--r--  mm/backing-dev.c                               |  35
-rw-r--r--  security/tomoyo/util.c                         |  13
26 files changed, 401 insertions(+), 289 deletions(-)
diff --git a/.gitignore b/.gitignore
index acb6afe6b7a3..4ad4a98b884b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,6 +24,7 @@
*.order
*.elf
*.bin
+*.tar
*.gz
*.bz2
*.lzma
diff --git a/Documentation/ABI/testing/sysfs-class-bdi b/Documentation/ABI/testing/sysfs-class-bdi
index d773d5697cf5..3187a18af6da 100644
--- a/Documentation/ABI/testing/sysfs-class-bdi
+++ b/Documentation/ABI/testing/sysfs-class-bdi
@@ -53,3 +53,11 @@ stable_pages_required (read-only)
If set, the backing device requires that all pages comprising a write
request must not be changed until writeout is complete.
+
+strictlimit (read-write)
+
+ Forces per-BDI checks for the share of the given device in the write-back
+ cache even before the global background dirty limit is reached. This is
+ useful in situations where the global limit is much higher than is
+ affordable for a given, relatively slow (or untrusted) device. Turning
+ strictlimit on has no visible effect if max_ratio is equal to 100%.
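For reference, a minimal userspace sketch of flipping this knob; the bdi name
"8:0" below is only an illustration, the real entry under /sys/class/bdi/
depends on the device:

    /* Enable strictlimit for one backing device (sketch). */
    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/class/bdi/8:0/strictlimit";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return 1;
            }
            fputs("1\n", f);        /* write "0" to turn the check back off */
            fclose(f);
            return 0;
    }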
diff --git a/Documentation/spi/spidev_test.c b/Documentation/spi/spidev_test.c
index 94f574b0fdb2..135b3f592b83 100644
--- a/Documentation/spi/spidev_test.c
+++ b/Documentation/spi/spidev_test.c
@@ -80,7 +80,7 @@ static void hex_dump(const void *src, size_t length, size_t line_size, char *pre
* Unescape - process hexadecimal escape character
* converts shell input "\x23" -> 0x23
*/
-int unespcape(char *_dst, char *_src, size_t len)
+static int unescape(char *_dst, char *_src, size_t len)
{
int ret = 0;
char *src = _src;
@@ -304,7 +304,7 @@ int main(int argc, char *argv[])
size = strlen(input_tx+1);
tx = malloc(size);
rx = malloc(size);
- size = unespcape((char *)tx, input_tx, size);
+ size = unescape((char *)tx, input_tx, size);
transfer(fd, tx, rx, size);
free(rx);
free(tx);
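The renamed helper turns shell-style hex escapes in the transmit string into
raw bytes. A standalone sketch of that conversion (an illustration of the
idea, not the spidev_test implementation itself):

    #include <stdio.h>
    #include <stdlib.h>

    /* Convert "\xNN" escapes in src into bytes, e.g. "\x23" -> 0x23. */
    static size_t unescape_hex(char *dst, const char *src)
    {
            size_t n = 0;

            while (*src) {
                    if (src[0] == '\\' && src[1] == 'x' && src[2] && src[3]) {
                            char hex[3] = { src[2], src[3], '\0' };

                            dst[n++] = (char)strtol(hex, NULL, 16);
                            src += 4;
                    } else {
                            dst[n++] = *src++;
                    }
            }
            return n;
    }

    int main(void)
    {
            char out[64];
            size_t len = unescape_hex(out, "\\x48\\x49!");

            printf("%zu bytes: %.*s\n", len, (int)len, out); /* 3 bytes: HI! */
            return 0;
    }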
diff --git a/MAINTAINERS b/MAINTAINERS
index 94572496fcec..8de88df771fa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1226,6 +1226,7 @@ F: arch/arm/mach-orion5x/ts78xx-*
ARM/Mediatek SoC support
M: Matthias Brugger <matthias.bgg@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/mt6*
F: arch/arm/boot/dts/mt8*
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 70ffe9b55829..fe48220157a9 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -105,7 +105,8 @@ CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
# CONFIG_RTC_INTF_SYSFS is not set
# CONFIG_RTC_INTF_PROC is not set
-CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_DS1685_FAMILY=y
+CONFIG_RTC_DRV_DS1685=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/mips/include/asm/mach-ip32/mc146818rtc.h b/arch/mips/include/asm/mach-ip32/mc146818rtc.h
deleted file mode 100644
index 6b6bab43d5c1..000000000000
--- a/arch/mips/include/asm/mach-ip32/mc146818rtc.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998, 2001, 03 by Ralf Baechle
- * Copyright (C) 2000 Harald Koerfgen
- *
- * RTC routines for IP32 style attached Dallas chip.
- */
-#ifndef __ASM_MACH_IP32_MC146818RTC_H
-#define __ASM_MACH_IP32_MC146818RTC_H
-
-#include <asm/ip32/mace.h>
-
-#define RTC_PORT(x) (0x70 + (x))
-
-static unsigned char CMOS_READ(unsigned long addr)
-{
- return mace->isa.rtc[addr << 8];
-}
-
-static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
-{
- mace->isa.rtc[addr << 8] = data;
-}
-
-/*
- * FIXME: Do it right. For now just assume that no one lives in 20th century
- * and no O2 user in 22th century ;-)
- */
-#define mc146818_decode_year(year) ((year) + 2000)
-
-#define RTC_ALWAYS_BCD 0
-
-#endif /* __ASM_MACH_IP32_MC146818RTC_H */
diff --git a/arch/mips/sgi-ip32/ip32-platform.c b/arch/mips/sgi-ip32/ip32-platform.c
index 94191a19588d..0134db2ad0a8 100644
--- a/arch/mips/sgi-ip32/ip32-platform.c
+++ b/arch/mips/sgi-ip32/ip32-platform.c
@@ -8,10 +8,13 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
+#include <linux/rtc/ds1685.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
+extern void ip32_prepare_poweroff(void);
+
#define MACEISA_SERIAL1_OFFS offsetof(struct sgi_mace, isa.serial1)
#define MACEISA_SERIAL2_OFFS offsetof(struct sgi_mace, isa.serial2)
@@ -89,18 +92,47 @@ static __init int sgio2btns_devinit(void)
device_initcall(sgio2btns_devinit);
-static struct resource sgio2_cmos_rsrc[] = {
+#define MACE_RTC_RES_START (MACE_BASE + offsetof(struct sgi_mace, isa.rtc))
+#define MACE_RTC_RES_END (MACE_RTC_RES_START + 32767)
+
+static struct resource ip32_rtc_resources[] = {
{
- .start = 0x70,
- .end = 0x71,
- .flags = IORESOURCE_IO
+ .start = MACEISA_RTC_IRQ,
+ .end = MACEISA_RTC_IRQ,
+ .flags = IORESOURCE_IRQ
+ }, {
+ .start = MACE_RTC_RES_START,
+ .end = MACE_RTC_RES_END,
+ .flags = IORESOURCE_MEM,
}
};
-static __init int sgio2_cmos_devinit(void)
+/* RTC registers on IP32 are each padded by 256 bytes (0x100). */
+static struct ds1685_rtc_platform_data
+ip32_rtc_platform_data[] = {
+ {
+ .regstep = 0x100,
+ .bcd_mode = true,
+ .no_irq = false,
+ .uie_unsupported = false,
+ .alloc_io_resources = true,
+ .plat_prepare_poweroff = ip32_prepare_poweroff,
+ },
+};
+
+struct platform_device ip32_rtc_device = {
+ .name = "rtc-ds1685",
+ .id = -1,
+ .dev = {
+ .platform_data = ip32_rtc_platform_data,
+ },
+ .num_resources = ARRAY_SIZE(ip32_rtc_resources),
+ .resource = ip32_rtc_resources,
+};
+
+static int __init sgio2_rtc_devinit(void)
{
- return IS_ERR(platform_device_register_simple("rtc_cmos", -1,
- sgio2_cmos_rsrc, 1));
+ return platform_device_register(&ip32_rtc_device);
}
-device_initcall(sgio2_cmos_devinit);
+device_initcall(sgio2_rtc_devinit);
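On the driver side, platform data attached this way is normally picked up in
probe() via dev_get_platdata(). A hedged sketch of that pattern (the probe
body is illustrative only, not the actual rtc-ds1685 driver):

    #include <linux/ioport.h>
    #include <linux/platform_device.h>
    #include <linux/rtc/ds1685.h>

    static int example_rtc_probe(struct platform_device *pdev)
    {
            const struct ds1685_rtc_platform_data *pdata =
                    dev_get_platdata(&pdev->dev);
            struct resource *mem;

            if (!pdata)
                    return -ENODEV; /* board code supplied no platform data */

            mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!mem)
                    return -EINVAL;

            dev_info(&pdev->dev, "regstep=%u mem=%pR\n",
                     (unsigned int)pdata->regstep, mem);
            return 0;
    }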
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 44b3470a0bbb..8bd415c8729f 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -11,10 +11,11 @@
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/delay.h>
-#include <linux/ds17287rtc.h>
+#include <linux/rtc/ds1685.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
@@ -33,53 +34,40 @@
#define POWERDOWN_FREQ (HZ / 4)
#define PANIC_FREQ (HZ / 8)
-static struct timer_list power_timer, blink_timer, debounce_timer;
-static int has_panicked, shuting_down;
+extern struct platform_device ip32_rtc_device;
-static void ip32_machine_restart(char *command) __noreturn;
-static void ip32_machine_halt(void) __noreturn;
-static void ip32_machine_power_off(void) __noreturn;
+static struct timer_list power_timer, blink_timer;
+static int has_panicked, shutting_down;
-static void ip32_machine_restart(char *cmd)
+static __noreturn void ip32_poweroff(void *data)
{
- crime->control = CRIME_CONTROL_HARD_RESET;
- while (1);
-}
+ void (*poweroff_func)(struct platform_device *) =
+ symbol_get(ds1685_rtc_poweroff);
+
+#ifdef CONFIG_MODULES
+ /* If the first __symbol_get failed, our module wasn't loaded. */
+ if (!poweroff_func) {
+ request_module("rtc-ds1685");
+ poweroff_func = symbol_get(ds1685_rtc_poweroff);
+ }
+#endif
-static inline void ip32_machine_halt(void)
-{
- ip32_machine_power_off();
-}
+ if (!poweroff_func)
+ pr_emerg("RTC not available for power-off. Spinning forever ...\n");
+ else {
+ (*poweroff_func)((struct platform_device *)data);
+ symbol_put(ds1685_rtc_poweroff);
+ }
-static void ip32_machine_power_off(void)
-{
- unsigned char reg_a, xctrl_a, xctrl_b;
-
- disable_irq(MACEISA_RTC_IRQ);
- reg_a = CMOS_READ(RTC_REG_A);
-
- /* setup for kickstart & wake-up (DS12287 Ref. Man. p. 19) */
- reg_a &= ~DS_REGA_DV2;
- reg_a |= DS_REGA_DV1;
-
- CMOS_WRITE(reg_a | DS_REGA_DV0, RTC_REG_A);
- wbflush();
- xctrl_b = CMOS_READ(DS_B1_XCTRL4B)
- | DS_XCTRL4B_ABE | DS_XCTRL4B_KFE;
- CMOS_WRITE(xctrl_b, DS_B1_XCTRL4B);
- xctrl_a = CMOS_READ(DS_B1_XCTRL4A) & ~DS_XCTRL4A_IFS;
- CMOS_WRITE(xctrl_a, DS_B1_XCTRL4A);
- wbflush();
- /* adios amigos... */
- CMOS_WRITE(xctrl_a | DS_XCTRL4A_PAB, DS_B1_XCTRL4A);
- CMOS_WRITE(reg_a, RTC_REG_A);
- wbflush();
- while (1);
+ unreachable();
}
-static void power_timeout(unsigned long data)
+static void ip32_machine_restart(char *cmd) __noreturn;
+static void ip32_machine_restart(char *cmd)
{
- ip32_machine_power_off();
+ msleep(20);
+ crime->control = CRIME_CONTROL_HARD_RESET;
+ unreachable();
}
static void blink_timeout(unsigned long data)
@@ -89,44 +77,27 @@ static void blink_timeout(unsigned long data)
mod_timer(&blink_timer, jiffies + data);
}
-static void debounce(unsigned long data)
+static void ip32_machine_halt(void)
{
- unsigned char reg_a, reg_c, xctrl_a;
-
- reg_c = CMOS_READ(RTC_INTR_FLAGS);
- reg_a = CMOS_READ(RTC_REG_A);
- CMOS_WRITE(reg_a | DS_REGA_DV0, RTC_REG_A);
- wbflush();
- xctrl_a = CMOS_READ(DS_B1_XCTRL4A);
- if ((xctrl_a & DS_XCTRL4A_IFS) || (reg_c & RTC_IRQF )) {
- /* Interrupt still being sent. */
- debounce_timer.expires = jiffies + 50;
- add_timer(&debounce_timer);
-
- /* clear interrupt source */
- CMOS_WRITE(xctrl_a & ~DS_XCTRL4A_IFS, DS_B1_XCTRL4A);
- CMOS_WRITE(reg_a & ~DS_REGA_DV0, RTC_REG_A);
- return;
- }
- CMOS_WRITE(reg_a & ~DS_REGA_DV0, RTC_REG_A);
-
- if (has_panicked)
- ip32_machine_restart(NULL);
+ ip32_poweroff(&ip32_rtc_device);
+}
- enable_irq(MACEISA_RTC_IRQ);
+static void power_timeout(unsigned long data)
+{
+ ip32_poweroff(&ip32_rtc_device);
}
-static inline void ip32_power_button(void)
+void ip32_prepare_poweroff(void)
{
if (has_panicked)
return;
- if (shuting_down || kill_cad_pid(SIGINT, 1)) {
+ if (shutting_down || kill_cad_pid(SIGINT, 1)) {
/* No init process or button pressed twice. */
- ip32_machine_power_off();
+ ip32_poweroff(&ip32_rtc_device);
}
- shuting_down = 1;
+ shutting_down = 1;
blink_timer.data = POWERDOWN_FREQ;
blink_timeout(POWERDOWN_FREQ);
@@ -136,27 +107,6 @@ static inline void ip32_power_button(void)
add_timer(&power_timer);
}
-static irqreturn_t ip32_rtc_int(int irq, void *dev_id)
-{
- unsigned char reg_c;
-
- reg_c = CMOS_READ(RTC_INTR_FLAGS);
- if (!(reg_c & RTC_IRQF)) {
- printk(KERN_WARNING
- "%s: RTC IRQ without RTC_IRQF\n", __func__);
- }
- /* Wait until interrupt goes away */
- disable_irq_nosync(MACEISA_RTC_IRQ);
- init_timer(&debounce_timer);
- debounce_timer.function = debounce;
- debounce_timer.expires = jiffies + 50;
- add_timer(&debounce_timer);
-
- printk(KERN_DEBUG "Power button pressed\n");
- ip32_power_button();
- return IRQ_HANDLED;
-}
-
static int panic_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
@@ -190,15 +140,12 @@ static __init int ip32_reboot_setup(void)
_machine_restart = ip32_machine_restart;
_machine_halt = ip32_machine_halt;
- pm_power_off = ip32_machine_power_off;
+ pm_power_off = ip32_machine_halt;
init_timer(&blink_timer);
blink_timer.function = blink_timeout;
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
- if (request_irq(MACEISA_RTC_IRQ, ip32_rtc_int, 0, "rtc", NULL))
- panic("Can't allocate MACEISA RTC IRQ");
-
return 0;
}
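The symbol_get()/request_module()/symbol_put() sequence in ip32_poweroff() is
a general pattern for calling into an optionally modular piece of code. A
reduced sketch, assuming some exported symbol example_hook() provided by a
module "example-module" (both names are placeholders):

    #include <linux/kmod.h>
    #include <linux/module.h>

    extern void example_hook(void);  /* exported by the optional module */

    static void call_optional_hook(void)
    {
            void (*hook)(void) = symbol_get(example_hook);

    #ifdef CONFIG_MODULES
            if (!hook) {
                    /* Not built in and not yet loaded: try to load it. */
                    request_module("example-module");
                    hook = symbol_get(example_hook);
            }
    #endif
            if (!hook)
                    return;

            hook();
            symbol_put(example_hook); /* drop the module reference */
    }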
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 1c27831df1ac..ed7b0977072a 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -22,6 +22,7 @@
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/file.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/numa.h>
@@ -322,18 +323,20 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
unsigned long app_cookie = 0;
unsigned int my_offset = 0;
struct vm_area_struct *vma;
+ struct file *exe_file;
struct mm_struct *mm = spu->mm;
if (!mm)
goto out;
- down_read(&mm->mmap_sem);
-
- if (mm->exe_file) {
- app_cookie = fast_get_dcookie(&mm->exe_file->f_path);
- pr_debug("got dcookie for %pD\n", mm->exe_file);
+ exe_file = get_mm_exe_file(mm);
+ if (exe_file) {
+ app_cookie = fast_get_dcookie(&exe_file->f_path);
+ pr_debug("got dcookie for %pD\n", exe_file);
+ fput(exe_file);
}
+ down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
continue;
diff --git a/arch/unicore32/kernel/fpu-ucf64.c b/arch/unicore32/kernel/fpu-ucf64.c
index 282a60ac82ba..a53343a90ca2 100644
--- a/arch/unicore32/kernel/fpu-ucf64.c
+++ b/arch/unicore32/kernel/fpu-ucf64.c
@@ -90,8 +90,8 @@ void ucf64_exchandler(u32 inst, u32 fpexc, struct pt_regs *regs)
tmp &= ~(FPSCR_CON);
exc &= ~(FPSCR_CMPINSTR_BIT | FPSCR_CON);
} else {
- pr_debug(KERN_ERR "UniCore-F64 Error: unhandled exceptions\n");
- pr_debug(KERN_ERR "UniCore-F64 FPSCR 0x%08x INST 0x%08x\n",
+ pr_debug("UniCore-F64 Error: unhandled exceptions\n");
+ pr_debug("UniCore-F64 FPSCR 0x%08x INST 0x%08x\n",
cff(FPSCR), inst);
ucf64_raise_sigfpe(0, regs);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bd5ecb4de842..780cbe3834bc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -100,7 +100,7 @@ config X86
select IRQ_FORCED_THREADING
select HAVE_BPF_JIT if X86_64
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
- select HAVE_ARCH_HUGE_VMAP if X86_64 || (X86_32 && X86_PAE)
+ select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
select ARCH_HAS_SG_CHAIN
select CLKEVT_I8253
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index f768f6298419..da8dff10f632 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -31,7 +31,7 @@
* arch_phys_wc_add and arch_phys_wc_del.
*/
# ifdef CONFIG_MTRR
-extern u8 mtrr_type_lookup(u64 addr, u64 end);
+extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform);
extern void mtrr_save_fixed_ranges(void *);
extern void mtrr_save_state(void);
extern int mtrr_add(unsigned long base, unsigned long size,
@@ -50,12 +50,13 @@ extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
extern int amd_special_default_mtrr(void);
extern int phys_wc_to_mtrr_index(int handle);
# else
-static inline u8 mtrr_type_lookup(u64 addr, u64 end)
+static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
{
/*
* Return no-MTRRs:
*/
- return 0xff;
+ *uniform = 1;
+ return MTRR_TYPE_INVALID;
}
#define mtrr_save_fixed_ranges(arg) do {} while (0)
#define mtrr_save_state() do {} while (0)
diff --git a/arch/x86/include/uapi/asm/mtrr.h b/arch/x86/include/uapi/asm/mtrr.h
index d0acb658c8f4..0bc86c6fcae0 100644
--- a/arch/x86/include/uapi/asm/mtrr.h
+++ b/arch/x86/include/uapi/asm/mtrr.h
@@ -88,6 +88,10 @@ struct mtrr_state_type {
mtrr_type def_type;
};
+/* Bit fields for enabled in struct mtrr_state_type */
+#define MTRR_STATE_MTRR_FIXED_ENABLED 0x01
+#define MTRR_STATE_MTRR_ENABLED 0x02
+
#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
@@ -103,7 +107,7 @@ struct mtrr_state_type {
#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
-/* These are the region types */
+/* MTRR memory types, which are defined in SDM */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB 1
/*#define MTRR_TYPE_ 2*/
@@ -113,5 +117,11 @@ struct mtrr_state_type {
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7
+/*
+ * Invalid MTRR memory type. mtrr_type_lookup() returns this value when
+ * MTRRs are disabled. Note, this value is allocated from the reserved
+ * values (0x7-0xff) of the MTRR memory types.
+ */
+#define MTRR_TYPE_INVALID 0xff
#endif /* _UAPI_ASM_X86_MTRR_H */
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 7d74f7b3c6ba..a83f27a89cd3 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -102,59 +102,75 @@ static int check_type_overlap(u8 *prev, u8 *curr)
return 0;
}
-/*
- * Error/Semi-error returns:
- * 0xFF - when MTRR is not enabled
- * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
- * corresponds only to [start:*partial_end].
- * Caller has to lookup again for [*partial_end:end].
+/**
+ * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
+ *
+ * MTRR fixed entries are divided into the following address ranges:
+ * 0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
+ * 0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
+ * 0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
+ *
+ * Return Values:
+ * MTRR_TYPE_(type) - Matched memory type
+ * MTRR_TYPE_INVALID - Unmatched or fixed entries are disabled
+ */
+static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
+{
+ int idx;
+
+ if (start >= 0x100000)
+ return MTRR_TYPE_INVALID;
+
+ if (!(mtrr_state.have_fixed) ||
+ !(mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED))
+ return MTRR_TYPE_INVALID;
+
+ if (start < 0x80000) { /* 0x0 - 0x7FFFF */
+ idx = 0;
+ idx += (start >> 16);
+ return mtrr_state.fixed_ranges[idx];
+
+ } else if (start < 0xC0000) { /* 0x80000 - 0xBFFFF */
+ idx = 1 * 8;
+ idx += ((start - 0x80000) >> 14);
+ return mtrr_state.fixed_ranges[idx];
+ }
+
+ /* 0xC0000 - 0xFFFFF */
+ idx = 3 * 8;
+ idx += ((start - 0xC0000) >> 12);
+ return mtrr_state.fixed_ranges[idx];
+}
+
+/**
+ * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
+ *
+ * Return Value:
+ * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
+ *
+ * Output Arguments:
+ * repeat - Set to 1 when [start:end] spanned across MTRR range and type
+ * returned corresponds only to [start:*partial_end]. Caller has
+ * to lookup again for [*partial_end:end].
+ * uniform - Set to 1 when MTRR covers the region uniformly, i.e. the region
+ * is fully covered by a single MTRR entry or the default type.
*/
-static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
+static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
+ int *repeat, u8 *uniform)
{
int i;
u64 base, mask;
u8 prev_match, curr_match;
*repeat = 0;
- if (!mtrr_state_set)
- return 0xFF;
-
- if (!mtrr_state.enabled)
- return 0xFF;
+ *uniform = 1;
/* Make end inclusive end, instead of exclusive */
end--;
- /* Look in fixed ranges. Just return the type as per start */
- if (mtrr_state.have_fixed && (start < 0x100000)) {
- int idx;
-
- if (start < 0x80000) {
- idx = 0;
- idx += (start >> 16);
- return mtrr_state.fixed_ranges[idx];
- } else if (start < 0xC0000) {
- idx = 1 * 8;
- idx += ((start - 0x80000) >> 14);
- return mtrr_state.fixed_ranges[idx];
- } else if (start < 0x1000000) {
- idx = 3 * 8;
- idx += ((start - 0xC0000) >> 12);
- return mtrr_state.fixed_ranges[idx];
- }
- }
-
- /*
- * Look in variable ranges
- * Look of multiple ranges matching this address and pick type
- * as per MTRR precedence
- */
- if (!(mtrr_state.enabled & 2))
- return mtrr_state.def_type;
-
- prev_match = 0xFF;
+ prev_match = MTRR_TYPE_INVALID;
for (i = 0; i < num_var_ranges; ++i) {
- unsigned short start_state, end_state;
+ unsigned short start_state, end_state, inclusive;
if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
continue;
@@ -166,20 +182,22 @@ static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
start_state = ((start & mask) == (base & mask));
end_state = ((end & mask) == (base & mask));
+ inclusive = ((start < base) && (end > base));
- if (start_state != end_state) {
+ if ((start_state != end_state) || inclusive) {
/*
* We have start:end spanning across an MTRR.
- * We split the region into
- * either
- * (start:mtrr_end) (mtrr_end:end)
- * or
- * (start:mtrr_start) (mtrr_start:end)
+ * We split the region into either
+ * - start_state:1
+ * (start:mtrr_end) (mtrr_end:end)
+ * - end_state:1 or inclusive:1
+ * (start:mtrr_start) (mtrr_start:end)
* depending on kind of overlap.
* Return the type for first region and a pointer to
* the start of second region so that caller will
* lookup again on the second region.
- * Note: This way we handle multiple overlaps as well.
+ * Note: This way we handle overlaps with multiple
+ * entries and the default type properly.
*/
if (start_state)
*partial_end = base + get_mtrr_size(mask);
@@ -193,59 +211,95 @@ static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
end = *partial_end - 1; /* end is inclusive */
*repeat = 1;
+ *uniform = 0;
}
- if ((start & mask) != (base & mask))
+ if (!start_state)
continue;
curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
- if (prev_match == 0xFF) {
+ if (prev_match == MTRR_TYPE_INVALID) {
prev_match = curr_match;
continue;
}
+ *uniform = 0;
if (check_type_overlap(&prev_match, &curr_match))
return curr_match;
}
- if (mtrr_tom2) {
- if (start >= (1ULL<<32) && (end < mtrr_tom2))
- return MTRR_TYPE_WRBACK;
- }
-
- if (prev_match != 0xFF)
+ if (prev_match != MTRR_TYPE_INVALID)
return prev_match;
return mtrr_state.def_type;
}
-/*
- * Returns the effective MTRR type for the region
- * Error return:
- * 0xFF - when MTRR is not enabled
+/**
+ * mtrr_type_lookup - look up memory type in MTRR
+ *
+ * Return Values:
+ * MTRR_TYPE_(type) - The effective MTRR type for the region
+ * MTRR_TYPE_INVALID - MTRR is disabled
+ *
+ * Output Argument:
+ * uniform - Set to 1 when MTRR covers the region uniformly, i.e. the region
+ * is fully covered by a single MTRR entry or the default type.
*/
-u8 mtrr_type_lookup(u64 start, u64 end)
+u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
{
- u8 type, prev_type;
+ u8 type, prev_type, is_uniform, dummy;
int repeat;
u64 partial_end;
- type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+ *uniform = 1;
+
+ if (!mtrr_state_set)
+ return MTRR_TYPE_INVALID;
+
+ if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
+ return MTRR_TYPE_INVALID;
+
+ /*
+ * Look up the fixed ranges first, which take priority over
+ * the variable ranges.
+ */
+ type = mtrr_type_lookup_fixed(start, end);
+ if (type != MTRR_TYPE_INVALID) {
+ *uniform = 0;
+ return type;
+ }
+
+ /*
+ * Look up the variable ranges. Look of multiple ranges matching
+ * this address and pick type as per MTRR precedence.
+ */
+ type = mtrr_type_lookup_variable(start, end, &partial_end,
+ &repeat, &is_uniform);
/*
* Common path is with repeat = 0.
* However, we can have cases where [start:end] spans across some
- * MTRR range. Do repeated lookups for that case here.
+ * MTRR ranges and/or the default type. Do repeated lookups for
+ * that case here.
*/
while (repeat) {
prev_type = type;
start = partial_end;
- type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+ is_uniform = 0;
+
+ type = mtrr_type_lookup_variable(start, end, &partial_end,
+ &repeat, &dummy);
- if (check_type_overlap(&prev_type, &type))
+ if (check_type_overlap(&prev_type, &type)) {
+ *uniform = 0;
return type;
+ }
}
+ if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2))
+ return MTRR_TYPE_WRBACK;
+
+ *uniform = is_uniform;
return type;
}
@@ -347,7 +401,9 @@ static void __init print_mtrr_state(void)
mtrr_attrib_to_str(mtrr_state.def_type));
if (mtrr_state.have_fixed) {
pr_debug("MTRR fixed ranges %sabled:\n",
- mtrr_state.enabled & 1 ? "en" : "dis");
+ ((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
+ (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
+ "en" : "dis");
print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
for (i = 0; i < 2; ++i)
print_fixed(0x80000 + i * 0x20000, 0x04000,
@@ -360,7 +416,7 @@ static void __init print_mtrr_state(void)
print_fixed_last();
}
pr_debug("MTRR variable ranges %sabled:\n",
- mtrr_state.enabled & 2 ? "en" : "dis");
+ mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;
for (i = 0; i < num_var_ranges; ++i) {
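A worked example of the fixed-range index arithmetic introduced above: the
address 0x9C000 falls into the 16KB-granular 0x80000-0xBFFFF window, so
idx = 8 + ((0x9C000 - 0x80000) >> 14) = 8 + 7 = 15. A small userspace sketch
that reproduces only this arithmetic (no MTRR state is consulted):

    #include <stdio.h>

    /* Mirror of the index math in mtrr_type_lookup_fixed(). */
    static int fixed_range_index(unsigned long long start)
    {
            if (start >= 0x100000)
                    return -1;                      /* not a fixed range */
            if (start < 0x80000)                    /* eight 64KB ranges */
                    return start >> 16;
            if (start < 0xC0000)                    /* sixteen 16KB ranges */
                    return 8 + ((start - 0x80000) >> 14);
            return 24 + ((start - 0xC0000) >> 12);  /* sixty-four 4KB ranges */
    }

    int main(void)
    {
            unsigned long long samples[] = { 0x10000, 0x9C000, 0xF0000 };
            int i;

            for (i = 0; i < 3; i++)
                    printf("0x%llx -> fixed_ranges[%d]\n",
                           samples[i], fixed_range_index(samples[i]));
            return 0;
    }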
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 35af6771a95a..372ad422c2c3 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -267,9 +267,9 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
* request is for WB.
*/
if (req_type == _PAGE_CACHE_MODE_WB) {
- u8 mtrr_type;
+ u8 mtrr_type, uniform;
- mtrr_type = mtrr_type_lookup(start, end);
+ mtrr_type = mtrr_type_lookup(start, end, &uniform);
if (mtrr_type != MTRR_TYPE_WRBACK)
return _PAGE_CACHE_MODE_UC_MINUS;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 0b97d2c75df3..3d6edea8087e 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -563,16 +563,22 @@ void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
}
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+/**
+ * pud_set_huge - setup kernel PUD mapping
+ *
+ * MTRR can override PAT memory types with 4KB granularity. Therefore,
+ * it only sets up a huge page when the range is mapped uniformly by MTRR
+ * (i.e. the range is fully covered by a single MTRR entry or the default
+ * type) or the MTRR memory type is WB.
+ *
+ * Return 1 on success, and 0 when no PUD was set.
+ */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
- u8 mtrr;
+ u8 mtrr, uniform;
- /*
- * Do not use a huge page when the range is covered by non-WB type
- * of MTRRs.
- */
- mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE);
- if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+ mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
+ if ((!uniform) && (mtrr != MTRR_TYPE_WRBACK))
return 0;
prot = pgprot_4k_2_large(prot);
@@ -584,17 +590,26 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
return 1;
}
+/**
+ * pmd_set_huge - setup kernel PMD mapping
+ *
+ * MTRR can override PAT memory types with 4KB granularity. Therefore,
+ * it only sets up a huge page when the range is mapped uniformly by MTRR
+ * (i.e. the range is fully covered by a single MTRR entry or the default
+ * type) or the MTRR memory type is WB.
+ *
+ * Return 1 on success, and 0 when no PMD was set.
+ */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
- u8 mtrr;
+ u8 mtrr, uniform;
- /*
- * Do not use a huge page when the range is covered by non-WB type
- * of MTRRs.
- */
- mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE);
- if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+ mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
+ if ((!uniform) && (mtrr != MTRR_TYPE_WRBACK)) {
+ pr_warn("pmd_set_huge: requesting [mem %#010llx-%#010llx], which spans more than a single MTRR entry\n",
+ addr, addr + PMD_SIZE);
return 0;
+ }
prot = pgprot_4k_2_large(prot);
@@ -605,6 +620,11 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
return 1;
}
+/**
+ * pud_clear_huge - clear kernel PUD mapping when it is set
+ *
+ * Return 1 on success, and 0 when no PUD map was found.
+ */
int pud_clear_huge(pud_t *pud)
{
if (pud_large(*pud)) {
@@ -615,6 +635,11 @@ int pud_clear_huge(pud_t *pud)
return 0;
}
+/**
+ * pmd_clear_huge - clear kernel PMD mapping when it is set
+ *
+ * Return 1 on success, and 0 when no PMD map was found.
+ */
int pmd_clear_huge(pmd_t *pmd)
{
if (pmd_large(*pmd)) {
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index d93b2b6b1f7a..82f7000a285d 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -21,6 +21,7 @@
* objects.
*/
+#include <linux/file.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
@@ -224,10 +225,18 @@ static inline unsigned long fast_get_dcookie(struct path *path)
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
unsigned long cookie = NO_COOKIE;
+ struct file *exe_file;
- if (mm && mm->exe_file)
- cookie = fast_get_dcookie(&mm->exe_file->f_path);
+ if (!mm)
+ goto done;
+
+ exe_file = get_mm_exe_file(mm);
+ if (!exe_file)
+ goto done;
+ cookie = fast_get_dcookie(&exe_file->f_path);
+ fput(exe_file);
+done:
return cookie;
}
@@ -236,6 +245,8 @@ static unsigned long get_exec_dcookie(struct mm_struct *mm)
* pair that can then be added to the global event buffer. We make
* sure to do this lookup before a mm->mmap modification happens so
* we don't lose track.
+ *
+ * The caller must ensure the mm is not nil (ie: not a kernel thread).
*/
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
@@ -243,6 +254,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
unsigned long cookie = NO_COOKIE;
struct vm_area_struct *vma;
+ down_read(&mm->mmap_sem);
for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
if (addr < vma->vm_start || addr >= vma->vm_end)
@@ -262,6 +274,7 @@ lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
if (!vma)
cookie = INVALID_COOKIE;
+ up_read(&mm->mmap_sem);
return cookie;
}
@@ -402,20 +415,9 @@ static void release_mm(struct mm_struct *mm)
{
if (!mm)
return;
- up_read(&mm->mmap_sem);
mmput(mm);
}
-
-static struct mm_struct *take_tasks_mm(struct task_struct *task)
-{
- struct mm_struct *mm = get_task_mm(task);
- if (mm)
- down_read(&mm->mmap_sem);
- return mm;
-}
-
-
static inline int is_code(unsigned long val)
{
return val == ESCAPE_CODE;
@@ -532,7 +534,7 @@ void sync_buffer(int cpu)
new = (struct task_struct *)val;
oldmm = mm;
release_mm(oldmm);
- mm = take_tasks_mm(new);
+ mm = get_task_mm(new);
if (mm != oldmm)
cookie = get_exec_dcookie(mm);
add_user_ctx_switch(new, cookie);
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index b283a1a573b3..35efd3f75b18 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -37,9 +37,9 @@
#include "rtc-at91rm9200.h"
#define at91_rtc_read(field) \
- __raw_readl(at91_rtc_regs + field)
+ readl_relaxed(at91_rtc_regs + field)
#define at91_rtc_write(field, val) \
- __raw_writel((val), at91_rtc_regs + field)
+ writel_relaxed((val), at91_rtc_regs + field)
#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 64baad379174..8c70d785ba73 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -92,7 +92,7 @@ struct s5m_rtc_info {
struct regmap *regmap;
struct rtc_device *rtc_dev;
int irq;
- int device_type;
+ enum sec_device_type device_type;
int rtc_24hr_mode;
const struct s5m_rtc_reg_config *regs;
};
@@ -668,7 +668,7 @@ static int s5m_rtc_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- switch (pdata->device_type) {
+ switch (platform_get_device_id(pdev)->driver_data) {
case S2MPS14X:
case S2MPS13X:
regmap_cfg = &s2mps14_rtc_regmap_config;
@@ -686,7 +686,9 @@ static int s5m_rtc_probe(struct platform_device *pdev)
alarm_irq = S5M8767_IRQ_RTCA1;
break;
default:
- dev_err(&pdev->dev, "Device type is not supported by RTC driver\n");
+ dev_err(&pdev->dev,
+ "Device type %lu is not supported by RTC driver\n",
+ platform_get_device_id(pdev)->driver_data);
return -ENODEV;
}
@@ -706,7 +708,7 @@ static int s5m_rtc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
info->s5m87xx = s5m87xx;
- info->device_type = s5m87xx->device_type;
+ info->device_type = platform_get_device_id(pdev)->driver_data;
if (s5m87xx->irq_data) {
info->irq = regmap_irq_get_virq(s5m87xx->irq_data, alarm_irq);
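The probe() now keys off platform_get_device_id(pdev)->driver_data, which only
works if the driver's id table carries the device type. A reduced sketch of
that pattern (the names and types are illustrative, not the actual rtc-s5m
tables):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    enum { EXAMPLE_TYPE_A, EXAMPLE_TYPE_B };

    static const struct platform_device_id example_rtc_id[] = {
            { "example-rtc-a", EXAMPLE_TYPE_A },
            { "example-rtc-b", EXAMPLE_TYPE_B },
            { },
    };
    MODULE_DEVICE_TABLE(platform, example_rtc_id);

    static int example_rtc_probe(struct platform_device *pdev)
    {
            unsigned long type = platform_get_device_id(pdev)->driver_data;

            dev_info(&pdev->dev, "device type %lu\n", type);
            return 0;
    }

    static struct platform_driver example_rtc_driver = {
            .driver   = { .name = "example-rtc" },
            .probe    = example_rtc_probe,
            .id_table = example_rtc_id,
    };
    module_platform_driver(example_rtc_driver);

    MODULE_LICENSE("GPL");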
diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
index 47249a30eae3..20f766afa4c7 100644
--- a/drivers/w1/w1_int.c
+++ b/drivers/w1/w1_int.c
@@ -91,8 +91,7 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
err = device_register(&dev->dev);
if (err) {
pr_err("Failed to register master device. err=%d\n", err);
- memset(dev, 0, sizeof(struct w1_master));
- kfree(dev);
+ put_device(&dev->dev);
dev = NULL;
}
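The w1 change follows a general driver-model rule: once device_register() has
been called, the embedded struct device owns the allocation, so a failure must
be handled with put_device() (which ends up in the release() callback), never
with kfree(). A hedged sketch of that pattern with placeholder names:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct example_master {
            struct device dev;
            /* driver-private state ... */
    };

    static void example_release(struct device *dev)
    {
            kfree(container_of(dev, struct example_master, dev));
    }

    static struct example_master *example_alloc(struct device *parent)
    {
            struct example_master *m = kzalloc(sizeof(*m), GFP_KERNEL);
            int err;

            if (!m)
                    return NULL;

            m->dev.parent = parent;
            m->dev.release = example_release;
            dev_set_name(&m->dev, "example0");

            err = device_register(&m->dev);
            if (err) {
                    put_device(&m->dev);    /* not kfree(m) */
                    return NULL;
            }
            return m;
    }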
diff --git a/fs/exec.c b/fs/exec.c
index 314e8d83a351..02bfd980a40c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1082,7 +1082,13 @@ int flush_old_exec(struct linux_binprm * bprm)
if (retval)
goto out;
+ /*
+ * Must be called _before_ exec_mmap() as bprm->mm is
+ * not visible until then. This also enables the update
+ * to be lockless.
+ */
set_mm_exe_file(bprm->mm, bprm->file);
+
/*
* Release all of the old mmap stuff
*/
diff --git a/kernel/fork.c b/kernel/fork.c
index 18761247a5c3..18c44fb263b7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -711,15 +711,22 @@ EXPORT_SYMBOL_GPL(mmput);
*
* This changes mm's executable file (shown as symlink /proc/[pid]/exe).
*
- * Main users are mmput(), sys_execve() and sys_prctl(PR_SET_MM_MAP/EXE_FILE).
- * Callers prevent concurrent invocations: in mmput() nobody alive left,
- * in execve task is single-threaded, prctl holds mmap_sem exclusively.
+ * Main users are mmput() and sys_execve(). Callers prevent concurrent
+ * invocations: in mmput() nobody alive left, in execve task is single
+ * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
+ * mm->exe_file, but does so without using set_mm_exe_file() in order
+ * to avoid the need for any locks.
*/
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
- struct file *old_exe_file = rcu_dereference_protected(mm->exe_file,
- !atomic_read(&mm->mm_users) || current->in_execve ||
- lockdep_is_held(&mm->mmap_sem));
+ struct file *old_exe_file;
+
+ /*
+ * It is safe to dereference the exe_file without RCU as
+ * this function is only called if nobody else can access
+ * this mm -- see comment above for justification.
+ */
+ old_exe_file = rcu_dereference_raw(mm->exe_file);
if (new_exe_file)
get_file(new_exe_file);
@@ -745,6 +752,7 @@ struct file *get_mm_exe_file(struct mm_struct *mm)
rcu_read_unlock();
return exe_file;
}
+EXPORT_SYMBOL(get_mm_exe_file);
/**
* get_task_mm - acquire a reference to the task's mm
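With get_mm_exe_file() exported, code outside core mm can take a reference to
a task's executable without touching mmap_sem, which is exactly what the
oprofile and tomoyo hunks in this merge do. A reduced sketch of the access
pattern (the pr_info() is illustrative):

    #include <linux/file.h>
    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/sched.h>

    static void report_exe(struct task_struct *task)
    {
            struct mm_struct *mm = get_task_mm(task);
            struct file *exe_file;

            if (!mm)
                    return;         /* kernel thread: no mm, no exe file */

            exe_file = get_mm_exe_file(mm);
            if (exe_file) {
                    pr_info("%s runs %pD\n", task->comm, exe_file);
                    fput(exe_file); /* drop the file reference */
            }
            mmput(mm);
    }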
diff --git a/kernel/sys.c b/kernel/sys.c
index 3be344902316..a4e372b798a5 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1649,14 +1649,13 @@ SYSCALL_DEFINE1(umask, int, mask)
return mask;
}
-static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd)
+static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
struct fd exe;
+ struct file *old_exe, *exe_file;
struct inode *inode;
int err;
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
-
exe = fdget(fd);
if (!exe.file)
return -EBADF;
@@ -1680,15 +1679,22 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd)
/*
* Forbid mm->exe_file change if old file still mapped.
*/
+ exe_file = get_mm_exe_file(mm);
err = -EBUSY;
- if (mm->exe_file) {
+ if (exe_file) {
struct vm_area_struct *vma;
- for (vma = mm->mmap; vma; vma = vma->vm_next)
- if (vma->vm_file &&
- path_equal(&vma->vm_file->f_path,
- &mm->exe_file->f_path))
- goto exit;
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (!vma->vm_file)
+ continue;
+ if (path_equal(&vma->vm_file->f_path,
+ &exe_file->f_path))
+ goto exit_err;
+ }
+
+ up_read(&mm->mmap_sem);
+ fput(exe_file);
}
/*
@@ -1702,10 +1708,18 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd)
goto exit;
err = 0;
- set_mm_exe_file(mm, exe.file); /* this grabs a reference to exe.file */
+ /* set the new file, lockless */
+ get_file(exe.file);
+ old_exe = xchg(&mm->exe_file, exe.file);
+ if (old_exe)
+ fput(old_exe);
exit:
fdput(exe);
return err;
+exit_err:
+ up_read(&mm->mmap_sem);
+ fput(exe_file);
+ goto exit;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
@@ -1840,10 +1854,9 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
}
- down_write(&mm->mmap_sem);
if (prctl_map.exe_fd != (u32)-1)
- error = prctl_set_mm_exe_file_locked(mm, prctl_map.exe_fd);
- downgrade_write(&mm->mmap_sem);
+ error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
+ down_read(&mm->mmap_sem);
if (error)
goto out;
@@ -1909,12 +1922,8 @@ static int prctl_set_mm(int opt, unsigned long addr,
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- if (opt == PR_SET_MM_EXE_FILE) {
- down_write(&mm->mmap_sem);
- error = prctl_set_mm_exe_file_locked(mm, (unsigned int)addr);
- up_write(&mm->mmap_sem);
- return error;
- }
+ if (opt == PR_SET_MM_EXE_FILE)
+ return prctl_set_mm_exe_file(mm, (unsigned int)addr);
if (addr >= TASK_SIZE || addr < mmap_min_addr)
return -EINVAL;
diff --git a/lib/Kconfig b/lib/Kconfig
index b039e70fd31d..d1bef760d5a1 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -18,9 +18,8 @@ config HAVE_ARCH_BITREVERSE
default n
depends on BITREVERSE
help
- This option provides an config for the architecture which have instruction
- can do bitreverse operation, we use the hardware instruction if the architecture
- have this capability.
+ This option enables the use of hardware bit-reversal instructions on
+ architectures which support such operations.
config RATIONAL
bool
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6dc4580df2af..a00a2e819a78 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -219,11 +219,46 @@ static ssize_t stable_pages_required_show(struct device *dev,
}
static DEVICE_ATTR_RO(stable_pages_required);
+static ssize_t strictlimit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct backing_dev_info *bdi = dev_get_drvdata(dev);
+ unsigned int val;
+ ssize_t ret;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ switch (val) {
+ case 0:
+ bdi->capabilities &= ~BDI_CAP_STRICTLIMIT;
+ break;
+ case 1:
+ bdi->capabilities |= BDI_CAP_STRICTLIMIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return count;
+}
+static ssize_t strictlimit_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct backing_dev_info *bdi = dev_get_drvdata(dev);
+
+ return snprintf(page, PAGE_SIZE-1, "%d\n",
+ !!(bdi->capabilities & BDI_CAP_STRICTLIMIT));
+}
+static DEVICE_ATTR_RW(strictlimit);
+
static struct attribute *bdi_dev_attrs[] = {
&dev_attr_read_ahead_kb.attr,
&dev_attr_min_ratio.attr,
&dev_attr_max_ratio.attr,
&dev_attr_stable_pages_required.attr,
+ &dev_attr_strictlimit.attr,
NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 2952ba576fb9..b974a6997d7f 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -948,15 +948,18 @@ bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
*/
const char *tomoyo_get_exe(void)
{
+ struct file *exe_file;
+ const char *cp;
struct mm_struct *mm = current->mm;
- const char *cp = NULL;
if (!mm)
return NULL;
- down_read(&mm->mmap_sem);
- if (mm->exe_file)
- cp = tomoyo_realpath_from_path(&mm->exe_file->f_path);
- up_read(&mm->mmap_sem);
+ exe_file = get_mm_exe_file(mm);
+ if (!exe_file)
+ return NULL;
+
+ cp = tomoyo_realpath_from_path(&exe_file->f_path);
+ fput(exe_file);
return cp;
}