-rw-r--r--  Documentation/kprobes.txt | 588
-rw-r--r--  Documentation/video4linux/bttv/Insmod-options | 3
-rw-r--r--  arch/arm26/mm/fault.c | 17
-rw-r--r--  arch/cris/mm/fault.c | 6
-rw-r--r--  arch/frv/mm/fault.c | 6
-rw-r--r--  arch/m68k/mm/fault.c | 6
-rw-r--r--  arch/parisc/mm/fault.c | 12
-rw-r--r--  arch/ppc64/kernel/head.S | 2
-rw-r--r--  arch/ppc64/kernel/machine_kexec.c | 12
-rw-r--r--  arch/ppc64/kernel/mpic.c | 4
-rw-r--r--  arch/ppc64/kernel/mpic.h | 2
-rw-r--r--  arch/ppc64/kernel/xics.c | 31
-rw-r--r--  arch/sh64/mm/fault.c | 6
-rw-r--r--  arch/x86_64/mm/fault.c | 6
-rw-r--r--  drivers/acpi/dispatcher/dswload.c | 6
-rw-r--r--  drivers/acpi/osl.c | 6
-rw-r--r--  drivers/acpi/pci_link.c | 7
-rw-r--r--  drivers/ide/ide-probe.c | 16
-rw-r--r--  drivers/infiniband/include/ib_cm.h | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 5
-rw-r--r--  drivers/md/bitmap.c | 75
-rw-r--r--  drivers/md/dm-raid1.c | 2
-rw-r--r--  drivers/md/md.c | 4
-rw-r--r--  drivers/md/raid1.c | 29
-rw-r--r--  drivers/media/video/bttv-cards.c | 8
-rw-r--r--  drivers/media/video/bttv-driver.c | 28
-rw-r--r--  include/asm-ppc64/machdep.h | 2
-rw-r--r--  include/asm-ppc64/xics.h | 2
-rw-r--r--  include/linux/raid/bitmap.h | 1
-rw-r--r--  kernel/sys.c | 2
-rw-r--r--  security/keys/keyctl.c | 11
-rw-r--r--  security/keys/request_key.c | 2
32 files changed, 782 insertions(+), 128 deletions(-)
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
new file mode 100644
index 000000000000..0541fe1de704
--- /dev/null
+++ b/Documentation/kprobes.txt
@@ -0,0 +1,588 @@
+Title : Kernel Probes (Kprobes)
+Authors : Jim Keniston <jkenisto@us.ibm.com>
+ : Prasanna S Panchamukhi <prasanna@in.ibm.com>
+
+CONTENTS
+
+1. Concepts: Kprobes, Jprobes, Return Probes
+2. Architectures Supported
+3. Configuring Kprobes
+4. API Reference
+5. Kprobes Features and Limitations
+6. Probe Overhead
+7. TODO
+8. Kprobes Example
+9. Jprobes Example
+10. Kretprobes Example
+
+1. Concepts: Kprobes, Jprobes, Return Probes
+
+Kprobes enables you to dynamically break into any kernel routine and
+collect debugging and performance information non-disruptively. You
+can trap at almost any kernel code address, specifying a handler
+routine to be invoked when the breakpoint is hit.
+
+There are currently three types of probes: kprobes, jprobes, and
+kretprobes (also called return probes). A kprobe can be inserted
+on virtually any instruction in the kernel. A jprobe is inserted at
+the entry to a kernel function, and provides convenient access to the
+function's arguments. A return probe fires when a specified function
+returns.
+
+In the typical case, Kprobes-based instrumentation is packaged as
+a kernel module. The module's init function installs ("registers")
+one or more probes, and the exit function unregisters them. A
+registration function such as register_kprobe() specifies where
+the probe is to be inserted and what handler is to be called when
+the probe is hit.
+
+The next three subsections explain how the different types of
+probes work. They explain certain things that you'll need to
+know in order to make the best use of Kprobes -- e.g., the
+difference between a pre_handler and a post_handler, and how
+to use the maxactive and nmissed fields of a kretprobe. But
+if you're in a hurry to start using Kprobes, you can skip ahead
+to section 2.
+
+1.1 How Does a Kprobe Work?
+
+When a kprobe is registered, Kprobes makes a copy of the probed
+instruction and replaces the first byte(s) of the probed instruction
+with a breakpoint instruction (e.g., int3 on i386 and x86_64).
+
+When a CPU hits the breakpoint instruction, a trap occurs, the CPU's
+registers are saved, and control passes to Kprobes via the
+notifier_call_chain mechanism. Kprobes executes the "pre_handler"
+associated with the kprobe, passing the handler the addresses of the
+kprobe struct and the saved registers.
+
+Next, Kprobes single-steps its copy of the probed instruction.
+(It would be simpler to single-step the actual instruction in place,
+but then Kprobes would have to temporarily remove the breakpoint
+instruction. This would open a small time window when another CPU
+could sail right past the probepoint.)
+
+After the instruction is single-stepped, Kprobes executes the
+"post_handler," if any, that is associated with the kprobe.
+Execution then continues with the instruction following the probepoint.
+
+1.2 How Does a Jprobe Work?
+
+A jprobe is implemented using a kprobe that is placed on a function's
+entry point. It employs a simple mirroring principle to allow
+seamless access to the probed function's arguments. The jprobe
+handler routine should have the same signature (arg list and return
+type) as the function being probed, and must always end by calling
+the Kprobes function jprobe_return().
+
+Here's how it works. When the probe is hit, Kprobes makes a copy of
+the saved registers and a generous portion of the stack (see below).
+Kprobes then points the saved instruction pointer at the jprobe's
+handler routine, and returns from the trap. As a result, control
+passes to the handler, which is presented with the same register and
+stack contents as the probed function. When it is done, the handler
+calls jprobe_return(), which traps again to restore the original stack
+contents and processor state and switch to the probed function.
+
+By convention, the callee owns its arguments, so gcc may produce code
+that unexpectedly modifies that portion of the stack. This is why
+Kprobes saves a copy of the stack and restores it after the jprobe
+handler has run. Up to MAX_STACK_SIZE bytes are copied -- e.g.,
+64 bytes on i386.
+
+Note that the probed function's args may be passed on the stack
+or in registers (e.g., for x86_64 or for an i386 fastcall function).
+The jprobe will work in either case, so long as the handler's
+prototype matches that of the probed function.
+
+1.3 How Does a Return Probe Work?
+
+When you call register_kretprobe(), Kprobes establishes a kprobe at
+the entry to the function. When the probed function is called and this
+probe is hit, Kprobes saves a copy of the return address, and replaces
+the return address with the address of a "trampoline." The trampoline
+is an arbitrary piece of code -- typically just a nop instruction.
+At boot time, Kprobes registers a kprobe at the trampoline.
+
+When the probed function executes its return instruction, control
+passes to the trampoline and that probe is hit. Kprobes' trampoline
+handler calls the user-specified handler associated with the kretprobe,
+then sets the saved instruction pointer to the saved return address,
+and that's where execution resumes upon return from the trap.
+
+While the probed function is executing, its return address is
+stored in an object of type kretprobe_instance. Before calling
+register_kretprobe(), the user sets the maxactive field of the
+kretprobe struct to specify how many instances of the specified
+function can be probed simultaneously. register_kretprobe()
+pre-allocates the indicated number of kretprobe_instance objects.
+
+For example, if the function is non-recursive and is called with a
+spinlock held, maxactive = 1 should be enough. If the function is
+non-recursive and can never relinquish the CPU (e.g., via a semaphore
+or preemption), NR_CPUS should be enough. If maxactive <= 0, it is
+set to a default value. If CONFIG_PREEMPT is enabled, the default
+is max(10, 2*NR_CPUS). Otherwise, the default is NR_CPUS.
+
+It's not a disaster if you set maxactive too low; you'll just miss
+some probes. In the kretprobe struct, the nmissed field is set to
+zero when the return probe is registered, and is incremented every
+time the probed function is entered but there is no kretprobe_instance
+object available for establishing the return probe.
+
+2. Architectures Supported
+
+Kprobes, jprobes, and return probes are implemented on the following
+architectures:
+
+- i386
+- x86_64 (AMD-64, EM64T)
+- ppc64
+- ia64 (Support for probes on certain instruction types is still in progress.)
+- sparc64 (Return probes not yet implemented.)
+
+3. Configuring Kprobes
+
+When configuring the kernel using make menuconfig/xconfig/oldconfig,
+ensure that CONFIG_KPROBES is set to "y". Under "Kernel hacking",
+look for "Kprobes". You may have to enable "Kernel debugging"
+(CONFIG_DEBUG_KERNEL) before you can enable Kprobes.
+
+You may also want to ensure that CONFIG_KALLSYMS and perhaps even
+CONFIG_KALLSYMS_ALL are set to "y", since kallsyms_lookup_name()
+is a handy, version-independent way to find a function's address.
+
+If you need to insert a probe in the middle of a function, you may find
+it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO),
+so you can use "objdump -d -l vmlinux" to see the source-to-object
+code mapping.
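+
+For reference, a probe-friendly configuration along these lines might
+contain the following fragment in its .config (a sketch only; option
+availability varies with kernel version and architecture):
+
+	CONFIG_DEBUG_KERNEL=y
+	CONFIG_KPROBES=y
+	CONFIG_KALLSYMS=y
+	CONFIG_KALLSYMS_ALL=y
+	CONFIG_DEBUG_INFO=y
+
+With CONFIG_DEBUG_INFO set, the source-to-object mapping for a function
+of interest can then be examined with, for example:
+
+	objdump -d -l vmlinux | less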
+
+4. API Reference
+
+The Kprobes API includes a "register" function and an "unregister"
+function for each type of probe. Here are terse, mini-man-page
+specifications for these functions and the associated probe handlers
+that you'll write. See the latter half of this document for examples.
+
+4.1 register_kprobe
+
+#include <linux/kprobes.h>
+int register_kprobe(struct kprobe *kp);
+
+Sets a breakpoint at the address kp->addr. When the breakpoint is
+hit, Kprobes calls kp->pre_handler. After the probed instruction
+is single-stepped, Kprobes calls kp->post_handler. If a fault
+occurs during execution of kp->pre_handler or kp->post_handler,
+or during single-stepping of the probed instruction, Kprobes calls
+kp->fault_handler. Any or all handlers can be NULL.
+
+register_kprobe() returns 0 on success, or a negative errno otherwise.
+
+User's pre-handler (kp->pre_handler):
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+int pre_handler(struct kprobe *p, struct pt_regs *regs);
+
+Called with p pointing to the kprobe associated with the breakpoint,
+and regs pointing to the struct containing the registers saved when
+the breakpoint was hit. Return 0 here unless you're a Kprobes geek.
+
+User's post-handler (kp->post_handler):
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+void post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags);
+
+p and regs are as described for the pre_handler. flags always seems
+to be zero.
+
+User's fault-handler (kp->fault_handler):
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+int fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr);
+
+p and regs are as described for the pre_handler. trapnr is the
+architecture-specific trap number associated with the fault (e.g.,
+on i386, 13 for a general protection fault or 14 for a page fault).
+Returns 1 if it successfully handled the exception.
+
+4.2 register_jprobe
+
+#include <linux/kprobes.h>
+int register_jprobe(struct jprobe *jp)
+
+Sets a breakpoint at the address jp->kp.addr, which must be the address
+of the first instruction of a function. When the breakpoint is hit,
+Kprobes runs the handler whose address is jp->entry.
+
+The handler should have the same arg list and return type as the probed
+function; and just before it returns, it must call jprobe_return().
+(The handler never actually returns, since jprobe_return() returns
+control to Kprobes.) If the probed function is declared asmlinkage,
+fastcall, or anything else that affects how args are passed, the
+handler's declaration must match.
+
+register_jprobe() returns 0 on success, or a negative errno otherwise.
+
+4.3 register_kretprobe
+
+#include <linux/kprobes.h>
+int register_kretprobe(struct kretprobe *rp);
+
+Establishes a return probe for the function whose address is
+rp->kp.addr. When that function returns, Kprobes calls rp->handler.
+You must set rp->maxactive appropriately before you call
+register_kretprobe(); see "How Does a Return Probe Work?" for details.
+
+register_kretprobe() returns 0 on success, or a negative errno
+otherwise.
+
+User's return-probe handler (rp->handler):
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+int kretprobe_handler(struct kretprobe_instance *ri, struct pt_regs *regs);
+
+regs is as described for kprobe.pre_handler. ri points to the
+kretprobe_instance object, of which the following fields may be
+of interest:
+- ret_addr: the return address
+- rp: points to the corresponding kretprobe object
+- task: points to the corresponding task struct
+The handler's return value is currently ignored.
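+
+As an illustration only -- this is a hypothetical sketch, not part of
+the Kprobes API -- a handler might report some of these fields. The
+handler name is made up, and the sketch assumes the includes shown
+above plus <linux/sched.h> for the task struct:
+
+static int report_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+	/* ri->rp->kp.addr is the probed function; ri->ret_addr is the
+	 * address the probed function is about to return to. */
+	printk("%s (pid %d): probe at %p returning to %p\n",
+		ri->task->comm, ri->task->pid, ri->rp->kp.addr, ri->ret_addr);
+	return 0;
+}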
+
+4.4 unregister_*probe
+
+#include <linux/kprobes.h>
+void unregister_kprobe(struct kprobe *kp);
+void unregister_jprobe(struct jprobe *jp);
+void unregister_kretprobe(struct kretprobe *rp);
+
+Removes the specified probe. The unregister function can be called
+at any time after the probe has been registered.
+
+5. Kprobes Features and Limitations
+
+As of Linux v2.6.12, Kprobes allows multiple probes at the same
+address. Currently, however, there cannot be multiple jprobes on
+the same function at the same time.
+
+In general, you can install a probe anywhere in the kernel.
+In particular, you can probe interrupt handlers. Known exceptions
+are discussed in this section.
+
+For obvious reasons, it's a bad idea to install a probe in
+the code that implements Kprobes (mostly kernel/kprobes.c and
+arch/*/kernel/kprobes.c). A patch in the v2.6.13 timeframe instructs
+Kprobes to reject such requests.
+
+If you install a probe in an inline-able function, Kprobes makes
+no attempt to chase down all inline instances of the function and
+install probes there. gcc may inline a function without being asked,
+so keep this in mind if you're not seeing the probe hits you expect.
+
+A probe handler can modify the environment of the probed function
+-- e.g., by modifying kernel data structures, or by modifying the
+contents of the pt_regs struct (which are restored to the registers
+upon return from the breakpoint). So Kprobes can be used, for example,
+to install a bug fix or to inject faults for testing. Kprobes, of
+course, has no way to distinguish the deliberately injected faults
+from the accidental ones. Don't drink and probe.
+
+Kprobes makes no attempt to prevent probe handlers from stepping on
+each other -- e.g., probing printk() and then calling printk() from a
+probe handler. As of Linux v2.6.12, if a probe handler hits a probe,
+that second probe's handlers won't be run in that instance.
+
+In Linux v2.6.12 and previous versions, Kprobes' data structures are
+protected by a single lock that is held during probe registration and
+unregistration and while handlers are run. Thus, no two handlers
+can run simultaneously. To improve scalability on SMP systems,
+this restriction will probably be removed soon, in which case
+multiple handlers (or multiple instances of the same handler) may
+run concurrently on different CPUs. Code your handlers accordingly.
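+
+For example, a handler that counts probe hits could keep the count in
+an atomic_t rather than in a bare integer, so that it stays correct if
+handlers ever do run on several CPUs at once. This is a minimal,
+hypothetical sketch (the names are made up; it assumes the module
+skeleton from the examples later in this document, plus <asm/atomic.h>):
+
+static atomic_t nhits = ATOMIC_INIT(0);
+
+static int counting_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	atomic_inc(&nhits);	/* safe even with concurrent handlers */
+	return 0;
+}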
+
+Kprobes does not use semaphores or allocate memory except during
+registration and unregistration.
+
+Probe handlers are run with preemption disabled. Depending on the
+architecture, handlers may also run with interrupts disabled. In any
+case, your handler should not yield the CPU (e.g., by attempting to
+acquire a semaphore).
+
+Since a return probe is implemented by replacing the return
+address with the trampoline's address, stack backtraces and calls
+to __builtin_return_address() will typically yield the trampoline's
+address instead of the real return address for kretprobed functions.
+(As far as we can tell, __builtin_return_address() is used only
+for instrumentation and error reporting.)
+
+If the number of times a function is called does not match the
+number of times it returns, registering a return probe on that
+function may produce undesirable results. We have the do_exit()
+and do_execve() cases covered. do_fork() is not an issue. We're
+unaware of other specific cases where this could be a problem.
+
+6. Probe Overhead
+
+On a typical CPU in use in 2005, a kprobe hit takes 0.5 to 1.0
+microseconds to process. Specifically, a benchmark that hits the same
+probepoint repeatedly, firing a simple handler each time, reports 1-2
+million hits per second, depending on the architecture. A jprobe or
+return-probe hit typically takes 50-75% longer than a kprobe hit.
+When you have a return probe set on a function, adding a kprobe at
+the entry to that function adds essentially no overhead.
+
+Here are sample overhead figures (in usec) for different architectures.
+k = kprobe; j = jprobe; r = return probe; kr = kprobe + return probe
+on same function; jr = jprobe + return probe on same function
+
+i386: Intel Pentium M, 1495 MHz, 2957.31 bogomips
+k = 0.57 usec; j = 1.00; r = 0.92; kr = 0.99; jr = 1.40
+
+x86_64: AMD Opteron 246, 1994 MHz, 3971.48 bogomips
+k = 0.49 usec; j = 0.76; r = 0.80; kr = 0.82; jr = 1.07
+
+ppc64: POWER5 (gr), 1656 MHz (SMT disabled, 1 virtual CPU per physical CPU)
+k = 0.77 usec; j = 1.31; r = 1.26; kr = 1.45; jr = 1.99
+
+7. TODO
+
+a. SystemTap (http://sourceware.org/systemtap): Work in progress
+to provide a simplified programming interface for probe-based
+instrumentation.
+b. Improved SMP scalability: Currently, work is in progress to handle
+multiple kprobes in parallel.
+c. Kernel return probes for sparc64.
+d. Support for other architectures.
+e. User-space probes.
+
+8. Kprobes Example
+
+Here's a sample kernel module showing the use of kprobes to dump a
+stack trace and selected i386 registers when do_fork() is called.
+----- cut here -----
+/* kprobe-example.c */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/kallsyms.h>
+#include <linux/sched.h>
+
+/*For each probe you need to allocate a kprobe structure*/
+static struct kprobe kp;
+
+/*kprobe pre_handler: called just before the probed instruction is executed*/
+int handler_pre(struct kprobe *p, struct pt_regs *regs)
+{
+ printk("pre_handler: p->addr=0x%p, eip=%lx, eflags=0x%lx\n",
+ p->addr, regs->eip, regs->eflags);
+ dump_stack();
+ return 0;
+}
+
+/*kprobe post_handler: called after the probed instruction is executed*/
+void handler_post(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
+{
+ printk("post_handler: p->addr=0x%p, eflags=0x%lx\n",
+ p->addr, regs->eflags);
+}
+
+/* fault_handler: this is called if an exception is generated for any
+ * instruction within the pre- or post-handler, or when Kprobes
+ * single-steps the probed instruction.
+ */
+int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
+{
+ printk("fault_handler: p->addr=0x%p, trap #%dn",
+ p->addr, trapnr);
+ /* Return 0 because we don't handle the fault. */
+ return 0;
+}
+
+int init_module(void)
+{
+ int ret;
+ kp.pre_handler = handler_pre;
+ kp.post_handler = handler_post;
+ kp.fault_handler = handler_fault;
+ kp.addr = (kprobe_opcode_t*) kallsyms_lookup_name("do_fork");
+ /* register the kprobe now */
+ if (!kp.addr) {
+ printk("Couldn't find %s to plant kprobe\n", "do_fork");
+ return -1;
+ }
+ if ((ret = register_kprobe(&kp)) < 0) {
+ printk("register_kprobe failed, returned %d\n", ret);
+ return -1;
+ }
+ printk("kprobe registered\n");
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_kprobe(&kp);
+ printk("kprobe unregistered\n");
+}
+
+MODULE_LICENSE("GPL");
+----- cut here -----
+
+You can build the kernel module, kprobe-example.ko, using the following
+Makefile:
+----- cut here -----
+obj-m := kprobe-example.o
+KDIR := /lib/modules/$(shell uname -r)/build
+PWD := $(shell pwd)
+default:
+ $(MAKE) -C $(KDIR) SUBDIRS=$(PWD) modules
+clean:
+ rm -f *.mod.c *.ko *.o
+----- cut here -----
+
+$ make
+$ su -
+...
+# insmod kprobe-example.ko
+
+You will see the trace data in /var/log/messages and on the console
+whenever do_fork() is invoked to create a new process.
+
+9. Jprobes Example
+
+Here's a sample kernel module showing the use of jprobes to dump
+the arguments of do_fork().
+----- cut here -----
+/*jprobe-example.c */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uio.h>
+#include <linux/kprobes.h>
+#include <linux/kallsyms.h>
+
+/*
+ * Jumper probe for do_fork.
+ * Mirror principle enables access to arguments of the probed routine
+ * from the probe handler.
+ */
+
+/* Proxy routine having the same arguments as actual do_fork() routine */
+long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
+ struct pt_regs *regs, unsigned long stack_size,
+ int __user * parent_tidptr, int __user * child_tidptr)
+{
+ printk("jprobe: clone_flags=0x%lx, stack_size=0x%lx, regs=0x%p\n",
+ clone_flags, stack_size, regs);
+ /* Always end with a call to jprobe_return(). */
+ jprobe_return();
+ /*NOTREACHED*/
+ return 0;
+}
+
+static struct jprobe my_jprobe = {
+ .entry = (kprobe_opcode_t *) jdo_fork
+};
+
+int init_module(void)
+{
+ int ret;
+ my_jprobe.kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
+ if (!my_jprobe.kp.addr) {
+ printk("Couldn't find %s to plant jprobe\n", "do_fork");
+ return -1;
+ }
+
+ if ((ret = register_jprobe(&my_jprobe)) < 0) {
+ printk("register_jprobe failed, returned %d\n", ret);
+ return -1;
+ }
+ printk("Planted jprobe at %p, handler addr %p\n",
+ my_jprobe.kp.addr, my_jprobe.entry);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_jprobe(&my_jprobe);
+ printk("jprobe unregistered\n");
+}
+
+MODULE_LICENSE("GPL");
+----- cut here -----
+
+Build and insert the kernel module as shown in the above kprobe
+example. You will see the trace data in /var/log/messages and on
+the console whenever do_fork() is invoked to create a new process.
+(Some messages may be suppressed if syslogd is configured to
+eliminate duplicate messages.)
+
+10. Kretprobes Example
+
+Here's a sample kernel module showing the use of return probes to
+report failed calls to sys_open().
+----- cut here -----
+/*kretprobe-example.c*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kprobes.h>
+#include <linux/kallsyms.h>
+
+static const char *probed_func = "sys_open";
+
+/* Return-probe handler: If the probed function fails, log the return value. */
+static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ /* Substitute the appropriate register name for your architecture --
+  * e.g., regs->rax for x86_64, regs->gpr[3] for ppc64. */
+ int retval = (int) regs->eax;
+ if (retval < 0) {
+ printk("%s returns %d\n", probed_func, retval);
+ }
+ return 0;
+}
+
+static struct kretprobe my_kretprobe = {
+ .handler = ret_handler,
+ /* Probe up to 20 instances concurrently. */
+ .maxactive = 20
+};
+
+int init_module(void)
+{
+ int ret;
+ my_kretprobe.kp.addr =
+ (kprobe_opcode_t *) kallsyms_lookup_name(probed_func);
+ if (!my_kretprobe.kp.addr) {
+ printk("Couldn't find %s to plant return probe\n", probed_func);
+ return -1;
+ }
+ if ((ret = register_kretprobe(&my_kretprobe)) < 0) {
+ printk("register_kretprobe failed, returned %d\n", ret);
+ return -1;
+ }
+ printk("Planted return probe at %p\n", my_kretprobe.kp.addr);
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ unregister_kretprobe(&my_kretprobe);
+ printk("kretprobe unregistered\n");
+ /* nmissed > 0 suggests that maxactive was set too low. */
+ printk("Missed probing %d instances of %s\n",
+ my_kretprobe.nmissed, probed_func);
+}
+
+MODULE_LICENSE("GPL");
+----- cut here -----
+
+Build and insert the kernel module as shown in the above kprobe
+example. You will see the trace data in /var/log/messages and on the
+console whenever sys_open() returns a negative value. (Some messages
+may be suppressed if syslogd is configured to eliminate duplicate
+messages.)
+
+For additional information on Kprobes, refer to the following URLs:
+http://www-106.ibm.com/developerworks/library/l-kprobes.html?ca=dgr-lnxw42Kprobe
+http://www.redhat.com/magazine/005mar05/features/kprobes/
diff --git a/Documentation/video4linux/bttv/Insmod-options b/Documentation/video4linux/bttv/Insmod-options
index 7bb5a50b0779..fc94ff235ffa 100644
--- a/Documentation/video4linux/bttv/Insmod-options
+++ b/Documentation/video4linux/bttv/Insmod-options
@@ -44,6 +44,9 @@ bttv.o
push used by bttv. bttv will disable overlay
by default on this hardware to avoid crashes.
With this insmod option you can override this.
+ no_overlay=1 Disable overlay. Use this option on broken
+ hardware that doesn't support PCI2PCI direct
+ transfers.
automute=0/1 Automatically mutes the sound if there is
no TV signal, on by default. You might try
to disable this if you have bad input signal
diff --git a/arch/arm26/mm/fault.c b/arch/arm26/mm/fault.c
index dacca8bb7744..bd6f2db608b7 100644
--- a/arch/arm26/mm/fault.c
+++ b/arch/arm26/mm/fault.c
@@ -176,12 +176,12 @@ survive:
* Handle the "normal" cases first - successful and sigbus
*/
switch (fault) {
- case 2:
+ case VM_FAULT_MAJOR:
tsk->maj_flt++;
return fault;
- case 1:
+ case VM_FAULT_MINOR:
tsk->min_flt++;
- case 0:
+ case VM_FAULT_SIGBUS:
return fault;
}
@@ -226,14 +226,11 @@ int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
/*
* Handle the "normal" case first
*/
- if (fault > 0)
+ switch (fault) {
+ case VM_FAULT_MINOR:
+ case VM_FAULT_MAJOR:
return 0;
-
- /*
- * We had some memory, but were unable to
- * successfully fix up this page fault.
- */
- if (fault == 0){
+ case VM_FAULT_SIGBUS:
goto do_sigbus;
}
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index fe1cc36b5aca..934c51078cce 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -284,13 +284,13 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
*/
switch (handle_mm_fault(mm, vma, address, writeaccess & 1)) {
- case 1:
+ case VM_FAULT_MINOR:
tsk->min_flt++;
break;
- case 2:
+ case VM_FAULT_MAJOR:
tsk->maj_flt++;
break;
- case 0:
+ case VM_FAULT_SIGBUS:
goto do_sigbus;
default:
goto out_of_memory;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 41d02ac48233..8b3eb50c5105 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -163,13 +163,13 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
* the fault.
*/
switch (handle_mm_fault(mm, vma, ear0, write)) {
- case 1:
+ case VM_FAULT_MINOR:
current->min_flt++;
break;
- case 2:
+ case VM_FAULT_MAJOR:
current->maj_flt++;
break;
- case 0:
+ case VM_FAULT_SIGBUS:
goto do_sigbus;
default:
goto out_of_memory;
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index ac48b6d2aff6..aec15270d334 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -160,13 +160,13 @@ good_area:
printk("handle_mm_fault returns %d\n",fault);
#endif
switch (fault) {
- case 1:
+ case VM_FAULT_MINOR:
current->min_flt++;
break;
- case 2:
+ case VM_FAULT_MAJOR:
current->maj_flt++;
break;
- case 0:
+ case VM_FAULT_SIGBUS:
goto bus_err;
default:
goto out_of_memory;
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index eaa701479f5f..0ad945d4c0a4 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -178,17 +178,17 @@ good_area:
*/
switch (handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0)) {
- case 1:
+ case VM_FAULT_MINOR:
++current->min_flt;
break;
- case 2:
+ case VM_FAULT_MAJOR:
++current->maj_flt;
break;
- case 0:
+ case VM_FAULT_SIGBUS:
/*
- * We ran out of memory, or some other thing happened
- * to us that made us unable to handle the page fault
- * gracefully.
+ * We hit a shared mapping outside of the file, or some
+ * other thing happened to us that made us unable to
+ * handle the page fault gracefully.
*/
goto bad_area;
default:
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 74fc3bc68604..784f56d4684c 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -2071,7 +2071,7 @@ _GLOBAL(hmt_start_secondary)
blr
#endif
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)
+#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
_GLOBAL(smp_release_cpus)
/* All secondary cpus are spinning on a common
* spinloop, release them all now so they can start
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c
index fdb2fc649d72..4775f12a013c 100644
--- a/arch/ppc64/kernel/machine_kexec.c
+++ b/arch/ppc64/kernel/machine_kexec.c
@@ -185,7 +185,7 @@ void kexec_copy_flush(struct kimage *image)
void kexec_smp_down(void *arg)
{
if (ppc_md.cpu_irq_down)
- ppc_md.cpu_irq_down();
+ ppc_md.cpu_irq_down(1);
local_irq_disable();
kexec_smp_wait();
@@ -232,7 +232,7 @@ static void kexec_prepare_cpus(void)
/* after we tell the others to go down */
if (ppc_md.cpu_irq_down)
- ppc_md.cpu_irq_down();
+ ppc_md.cpu_irq_down(0);
put_cpu();
@@ -243,15 +243,19 @@ static void kexec_prepare_cpus(void)
static void kexec_prepare_cpus(void)
{
+ extern void smp_release_cpus(void);
/*
* move the secondarys to us so that we can copy
* the new kernel 0-0x100 safely
*
* do this if kexec in setup.c ?
+ *
+ * We need to release the cpus if we are ever going from an
+ * UP to an SMP kernel.
*/
- smp_relase_cpus();
+ smp_release_cpus();
if (ppc_md.cpu_irq_down)
- ppc_md.cpu_irq_down();
+ ppc_md.cpu_irq_down(0);
local_irq_disable();
}
diff --git a/arch/ppc64/kernel/mpic.c b/arch/ppc64/kernel/mpic.c
index e8fbab1df37f..cc262a05ddb4 100644
--- a/arch/ppc64/kernel/mpic.c
+++ b/arch/ppc64/kernel/mpic.c
@@ -794,10 +794,10 @@ void mpic_setup_this_cpu(void)
/*
* XXX: someone who knows mpic should check this.
- * do we need to eoi the ipi here (see xics comments)?
+ * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
* or can we reset the mpic in the new kernel?
*/
-void mpic_teardown_this_cpu(void)
+void mpic_teardown_this_cpu(int secondary)
{
struct mpic *mpic = mpic_primary;
unsigned long flags;
diff --git a/arch/ppc64/kernel/mpic.h b/arch/ppc64/kernel/mpic.h
index 99fbbc9a084c..ca78a7f10528 100644
--- a/arch/ppc64/kernel/mpic.h
+++ b/arch/ppc64/kernel/mpic.h
@@ -256,7 +256,7 @@ extern unsigned int mpic_irq_get_priority(unsigned int irq);
extern void mpic_setup_this_cpu(void);
/* Clean up for kexec (or cpu offline or ...) */
-extern void mpic_teardown_this_cpu(void);
+extern void mpic_teardown_this_cpu(int secondary);
/* Request IPIs on primary mpic */
extern void mpic_request_ipis(void);
diff --git a/arch/ppc64/kernel/xics.c b/arch/ppc64/kernel/xics.c
index 677c4450984a..d9dc6f28d050 100644
--- a/arch/ppc64/kernel/xics.c
+++ b/arch/ppc64/kernel/xics.c
@@ -647,29 +647,30 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
}
}
-void xics_teardown_cpu(void)
+void xics_teardown_cpu(int secondary)
{
int cpu = smp_processor_id();
- int status;
ops->cppr_info(cpu, 0x00);
iosync();
/*
- * we need to EOI the IPI if we got here from kexec down IPI
- *
- * xics doesn't care if we duplicate an EOI as long as we
- * don't EOI and raise priority.
- *
- * probably need to check all the other interrupts too
- * should we be flagging idle loop instead?
- * or creating some task to be scheduled?
+ * Some machines need to have at least one cpu in the GIQ,
+ * so leave the master cpu in the group.
*/
- ops->xirr_info_set(cpu, XICS_IPI);
-
- status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
- (1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
- WARN_ON(status != 0);
+ if (secondary) {
+ /*
+ * we need to EOI the IPI if we got here from kexec down IPI
+ *
+ * probably need to check all the other interrupts too
+ * should we be flagging idle loop instead?
+ * or creating some task to be scheduled?
+ */
+ ops->xirr_info_set(cpu, XICS_IPI);
+ rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
+ (1UL << interrupt_server_size) - 1 -
+ default_distrib_server, 0);
+ }
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c
index a24932881dbb..f08d0eaf6497 100644
--- a/arch/sh64/mm/fault.c
+++ b/arch/sh64/mm/fault.c
@@ -223,13 +223,13 @@ good_area:
*/
survive:
switch (handle_mm_fault(mm, vma, address, writeaccess)) {
- case 1:
+ case VM_FAULT_MINOR:
tsk->min_flt++;
break;
- case 2:
+ case VM_FAULT_MAJOR:
tsk->maj_flt++;
break;
- case 0:
+ case VM_FAULT_SIGBUS:
goto do_sigbus;
default:
goto out_of_memory;
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 13792721037e..493819e543a5 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -439,13 +439,13 @@ good_area:
* the fault.
*/
switch (handle_mm_fault(mm, vma, address, write)) {
- case 1:
+ case VM_FAULT_MINOR:
tsk->min_flt++;
break;
- case 2:
+ case VM_FAULT_MAJOR:
tsk->maj_flt++;
break;
- case 0:
+ case VM_FAULT_SIGBUS:
goto do_sigbus;
default:
goto out_of_memory;
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
index 1ac197ccfc80..d11620018421 100644
--- a/drivers/acpi/dispatcher/dswload.c
+++ b/drivers/acpi/dispatcher/dswload.c
@@ -491,12 +491,6 @@ acpi_ds_load2_begin_op (
if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
(walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
(!(walk_state->op_info->flags & AML_NAMED))) {
- if ((walk_state->op_info->class == AML_CLASS_EXECUTE) ||
- (walk_state->op_info->class == AML_CLASS_CONTROL)) {
- ACPI_REPORT_WARNING ((
- "Encountered executable code at module level, [%s]\n",
- acpi_ps_get_opcode_name (walk_state->opcode)));
- }
return_ACPI_STATUS (AE_OK);
}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index bdd9f37f8101..7289da3c4db6 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -145,10 +145,14 @@ acpi_os_vprintf(const char *fmt, va_list args)
#endif
}
+extern int acpi_in_resume;
void *
acpi_os_allocate(acpi_size size)
{
- return kmalloc(size, GFP_KERNEL);
+ if (acpi_in_resume)
+ return kmalloc(size, GFP_ATOMIC);
+ else
+ return kmalloc(size, GFP_KERNEL);
}
void
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 65cea07abbc3..834c2ceff1aa 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -798,6 +798,11 @@ acpi_pci_link_resume(
return_VALUE(0);
}
+/*
+ * FIXME: this is a workaround to avoid nasty warning. It will be removed
+ * after every device calls pci_disable_device in .resume.
+ */
+int acpi_in_resume;
static int
irqrouter_resume(
struct sys_device *dev)
@@ -807,6 +812,7 @@ irqrouter_resume(
ACPI_FUNCTION_TRACE("irqrouter_resume");
+ acpi_in_resume = 1;
list_for_each(node, &acpi_link.entries) {
link = list_entry(node, struct acpi_pci_link, node);
if (!link) {
@@ -816,6 +822,7 @@ irqrouter_resume(
}
acpi_pci_link_resume(link);
}
+ acpi_in_resume = 0;
return_VALUE(0);
}
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 7df85af75371..94daf40ae323 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -960,6 +960,15 @@ static void save_match(ide_hwif_t *hwif, ide_hwif_t *new, ide_hwif_t **match)
}
#endif /* MAX_HWIFS > 1 */
+static inline int hwif_to_node(ide_hwif_t *hwif)
+{
+ if (hwif->pci_dev)
+ return pcibus_to_node(hwif->pci_dev->bus);
+ else
+ /* Add ways to determine the node of other busses here */
+ return -1;
+}
+
/*
* init request queue
*/
@@ -978,8 +987,7 @@ static int ide_init_queue(ide_drive_t *drive)
* do not.
*/
- q = blk_init_queue_node(do_ide_request, &ide_lock,
- pcibus_to_node(drive->hwif->pci_dev->bus));
+ q = blk_init_queue_node(do_ide_request, &ide_lock, hwif_to_node(hwif));
if (!q)
return 1;
@@ -1048,6 +1056,8 @@ static int init_irq (ide_hwif_t *hwif)
BUG_ON(in_interrupt());
BUG_ON(irqs_disabled());
+ BUG_ON(hwif == NULL);
+
down(&ide_cfg_sem);
hwif->hwgroup = NULL;
#if MAX_HWIFS > 1
@@ -1097,7 +1107,7 @@ static int init_irq (ide_hwif_t *hwif)
spin_unlock_irq(&ide_lock);
} else {
hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL,
- pcibus_to_node(hwif->drives[0].hwif->pci_dev->bus));
+ hwif_to_node(hwif->drives[0].hwif));
if (!hwgroup)
goto out_up;
diff --git a/drivers/infiniband/include/ib_cm.h b/drivers/infiniband/include/ib_cm.h
index e5d74a730a70..da650115e79a 100644
--- a/drivers/infiniband/include/ib_cm.h
+++ b/drivers/infiniband/include/ib_cm.h
@@ -169,7 +169,8 @@ enum ib_cm_rej_reason {
IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = __constant_htons(21),
IB_CM_REJ_INVALID_ALT_HOP_LIMIT = __constant_htons(22),
IB_CM_REJ_INVALID_ALT_PACKET_RATE = __constant_htons(23),
- IB_CM_REJ_PORT_REDIRECT = __constant_htons(24),
+ IB_CM_REJ_PORT_CM_REDIRECT = __constant_htons(24),
+ IB_CM_REJ_PORT_REDIRECT = __constant_htons(25),
IB_CM_REJ_INVALID_MTU = __constant_htons(26),
IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = __constant_htons(27),
IB_CM_REJ_CONSUMER_DEFINED = __constant_htons(28),
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6f60abbaebd5..fa00816a3cf7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -600,9 +600,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb);
} else {
- /* unicast GID -- should be ARP reply */
+ /* unicast GID -- should be ARP or RARP reply */
- if (be16_to_cpup((u16 *) skb->data) != ETH_P_ARP) {
+ if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
+ (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
IPOIB_GID_FMT "\n",
skb->dst ? "neigh" : "dst",
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 70bca955e0de..41df4cda66e2 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -818,8 +818,7 @@ int bitmap_unplug(struct bitmap *bitmap)
return 0;
}
-static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset,
- unsigned long sectors, int in_sync);
+static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset);
/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
* the in-memory bitmap from the on-disk bitmap -- also, sets up the
* memory mapping of the bitmap file
@@ -828,7 +827,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset,
* previously kicked from the array, we mark all the bits as
* 1's in order to cause a full resync.
*/
-static int bitmap_init_from_disk(struct bitmap *bitmap, int in_sync)
+static int bitmap_init_from_disk(struct bitmap *bitmap)
{
unsigned long i, chunks, index, oldindex, bit;
struct page *page = NULL, *oldpage = NULL;
@@ -929,8 +928,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, int in_sync)
}
if (test_bit(bit, page_address(page))) {
/* if the disk bit is set, set the memory bit */
- bitmap_set_memory_bits(bitmap,
- i << CHUNK_BLOCK_SHIFT(bitmap), 1, in_sync);
+ bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap));
bit_cnt++;
}
}
@@ -1426,35 +1424,53 @@ void bitmap_close_sync(struct bitmap *bitmap)
}
}
-static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset,
- unsigned long sectors, int in_sync)
+static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset)
{
/* For each chunk covered by any of these sectors, set the
- * counter to 1 and set resync_needed unless in_sync. They should all
+ * counter to 1 and set resync_needed. They should all
* be 0 at this point
*/
- while (sectors) {
- int secs;
- bitmap_counter_t *bmc;
- spin_lock_irq(&bitmap->lock);
- bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
- if (!bmc) {
- spin_unlock_irq(&bitmap->lock);
- return;
- }
- if (! *bmc) {
- struct page *page;
- *bmc = 1 | (in_sync? 0 : NEEDED_MASK);
- bitmap_count_page(bitmap, offset, 1);
- page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
- set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
- }
+
+ int secs;
+ bitmap_counter_t *bmc;
+ spin_lock_irq(&bitmap->lock);
+ bmc = bitmap_get_counter(bitmap, offset, &secs, 1);
+ if (!bmc) {
spin_unlock_irq(&bitmap->lock);
- if (sectors > secs)
- sectors -= secs;
- else
- sectors = 0;
+ return;
+ }
+ if (! *bmc) {
+ struct page *page;
+ *bmc = 1 | NEEDED_MASK;
+ bitmap_count_page(bitmap, offset, 1);
+ page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
+ set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
}
+ spin_unlock_irq(&bitmap->lock);
+
+}
+
+/*
+ * flush out any pending updates
+ */
+void bitmap_flush(mddev_t *mddev)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+ int sleep;
+
+ if (!bitmap) /* there was no bitmap */
+ return;
+
+ /* run the daemon_work three times to ensure everything is flushed
+ * that can be
+ */
+ sleep = bitmap->daemon_sleep;
+ bitmap->daemon_sleep = 0;
+ bitmap_daemon_work(bitmap);
+ bitmap_daemon_work(bitmap);
+ bitmap_daemon_work(bitmap);
+ bitmap->daemon_sleep = sleep;
+ bitmap_update_sb(bitmap);
}
/*
@@ -1565,7 +1581,8 @@ int bitmap_create(mddev_t *mddev)
/* now that we have some pages available, initialize the in-memory
* bitmap from the on-disk bitmap */
- err = bitmap_init_from_disk(bitmap, mddev->recovery_cp == MaxSector);
+ err = bitmap_init_from_disk(bitmap);
+
if (err)
return err;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 12031c9d3f1e..b08df8b9b2ca 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1230,7 +1230,7 @@ static int __init dm_mirror_init(void)
if (r)
return r;
- _kmirrord_wq = create_workqueue("kmirrord");
+ _kmirrord_wq = create_singlethread_workqueue("kmirrord");
if (!_kmirrord_wq) {
DMERR("couldn't start kmirrord");
dm_dirty_log_exit();
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6580e0fa4a47..480f658db6f2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1798,6 +1798,8 @@ static int do_md_stop(mddev_t * mddev, int ro)
goto out;
mddev->ro = 1;
} else {
+ bitmap_flush(mddev);
+ wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
if (mddev->ro)
set_disk_ro(disk, 0);
blk_queue_make_request(mddev->queue, md_fail_request);
@@ -3484,7 +3486,6 @@ static void md_do_sync(mddev_t *mddev)
goto skip;
}
ITERATE_MDDEV(mddev2,tmp) {
- printk(".");
if (mddev2 == mddev)
continue;
if (mddev2->curr_resync &&
@@ -4007,3 +4008,4 @@ EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_print_devices);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("md");
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d3a64a04a6d8..51d9645ed09c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -893,7 +893,6 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
if (!uptodate) {
md_error(r1_bio->mddev,
conf->mirrors[r1_bio->read_disk].rdev);
- set_bit(R1BIO_Degraded, &r1_bio->state);
} else
set_bit(R1BIO_Uptodate, &r1_bio->state);
rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
@@ -918,10 +917,9 @@ static int end_sync_write(struct bio *bio, unsigned int bytes_done, int error)
mirror = i;
break;
}
- if (!uptodate) {
+ if (!uptodate)
md_error(mddev, conf->mirrors[mirror].rdev);
- set_bit(R1BIO_Degraded, &r1_bio->state);
- }
+
update_head_pos(mirror, r1_bio);
if (atomic_dec_and_test(&r1_bio->remaining)) {
@@ -1109,6 +1107,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
int i;
int write_targets = 0;
int sync_blocks;
+ int still_degraded = 0;
if (!conf->r1buf_pool)
{
@@ -1137,7 +1136,10 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
return 0;
}
- if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, mddev->degraded) &&
+ /* before building a request, check if we can skip these blocks..
+ * This call to bitmap_start_sync doesn't actually record anything
+ */
+ if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
!conf->fullsync) {
/* We can skip this block, and probably several more */
*skipped = 1;
@@ -1203,24 +1205,23 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (i == disk) {
bio->bi_rw = READ;
bio->bi_end_io = end_sync_read;
- } else if (conf->mirrors[i].rdev &&
- !conf->mirrors[i].rdev->faulty &&
- (!conf->mirrors[i].rdev->in_sync ||
- sector_nr + RESYNC_SECTORS > mddev->recovery_cp)) {
+ } else if (conf->mirrors[i].rdev == NULL ||
+ conf->mirrors[i].rdev->faulty) {
+ still_degraded = 1;
+ continue;
+ } else if (!conf->mirrors[i].rdev->in_sync ||
+ sector_nr + RESYNC_SECTORS > mddev->recovery_cp) {
bio->bi_rw = WRITE;
bio->bi_end_io = end_sync_write;
write_targets ++;
} else
+ /* no need to read or write here */
continue;
bio->bi_sector = sector_nr + conf->mirrors[i].rdev->data_offset;
bio->bi_bdev = conf->mirrors[i].rdev->bdev;
bio->bi_private = r1_bio;
}
- if (write_targets + 1 < conf->raid_disks)
- /* array degraded, can't clear bitmap */
- set_bit(R1BIO_Degraded, &r1_bio->state);
-
if (write_targets == 0) {
/* There is nowhere to write, so all non-sync
* drives must be failed - so we are finished
@@ -1243,7 +1244,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
break;
if (sync_blocks == 0) {
if (!bitmap_start_sync(mddev->bitmap, sector_nr,
- &sync_blocks, mddev->degraded) &&
+ &sync_blocks, still_degraded) &&
!conf->fullsync)
break;
if (sync_blocks < (PAGE_SIZE>>9))
diff --git a/drivers/media/video/bttv-cards.c b/drivers/media/video/bttv-cards.c
index 6c52fd0bb7df..a97b9b958ed6 100644
--- a/drivers/media/video/bttv-cards.c
+++ b/drivers/media/video/bttv-cards.c
@@ -95,7 +95,7 @@ static int __devinit pvr_boot(struct bttv *btv);
static unsigned int triton1=0;
static unsigned int vsfx=0;
static unsigned int latency = UNSET;
-static unsigned int no_overlay=-1;
+int no_overlay=-1;
static unsigned int card[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET };
static unsigned int pll[BTTV_MAX] = { [ 0 ... (BTTV_MAX-1) ] = UNSET };
@@ -4296,9 +4296,11 @@ void __devinit bttv_check_chipset(void)
printk(KERN_INFO "bttv: Host bridge needs VSFX enabled.\n");
if (pcipci_fail) {
printk(KERN_WARNING "bttv: BT848 and your chipset may not work together.\n");
- if (UNSET == no_overlay) {
- printk(KERN_WARNING "bttv: going to disable overlay.\n");
+ if (!no_overlay) {
+ printk(KERN_WARNING "bttv: overlay will be disabled.\n");
no_overlay = 1;
+ } else {
+ printk(KERN_WARNING "bttv: overlay forced. Use this option at your own risk.\n");
}
}
if (UNSET != latency)
diff --git a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c
index 67f331eeeb19..eee9322ce21b 100644
--- a/drivers/media/video/bttv-driver.c
+++ b/drivers/media/video/bttv-driver.c
@@ -1,5 +1,5 @@
/*
- $Id: bttv-driver.c,v 1.45 2005/07/20 19:43:24 mkrufky Exp $
+ $Id: bttv-driver.c,v 1.52 2005/08/04 00:55:16 mchehab Exp $
bttv - Bt848 frame grabber driver
@@ -80,6 +80,7 @@ static unsigned int irq_iswitch = 0;
static unsigned int uv_ratio = 50;
static unsigned int full_luma_range = 0;
static unsigned int coring = 0;
+extern int no_overlay;
/* API features (turn on/off stuff for testing) */
static unsigned int v4l2 = 1;
@@ -2151,6 +2152,10 @@ static int bttv_s_fmt(struct bttv_fh *fh, struct bttv *btv,
return 0;
}
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (no_overlay > 0) {
+ printk ("V4L2_BUF_TYPE_VIDEO_OVERLAY: no_overlay\n");
+ return -EINVAL;
+ }
return setup_window(fh, btv, &f->fmt.win, 1);
case V4L2_BUF_TYPE_VBI_CAPTURE:
retval = bttv_switch_type(fh,f->type);
@@ -2224,9 +2229,11 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
/* others */
cap->type = VID_TYPE_CAPTURE|
VID_TYPE_TUNER|
- VID_TYPE_OVERLAY|
VID_TYPE_CLIPPING|
VID_TYPE_SCALES;
+ if (no_overlay <= 0)
+ cap->type |= VID_TYPE_OVERLAY;
+
cap->maxwidth = bttv_tvnorms[btv->tvnorm].swidth;
cap->maxheight = bttv_tvnorms[btv->tvnorm].sheight;
cap->minwidth = 48;
@@ -2302,6 +2309,11 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
struct video_window *win = arg;
struct v4l2_window w2;
+ if (no_overlay > 0) {
+ printk ("VIDIOCSWIN: no_overlay\n");
+ return -EINVAL;
+ }
+
w2.field = V4L2_FIELD_ANY;
w2.w.left = win->x;
w2.w.top = win->y;
@@ -2577,10 +2589,12 @@ static int bttv_do_ioctl(struct inode *inode, struct file *file,
cap->version = BTTV_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_VIDEO_OVERLAY |
V4L2_CAP_VBI_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
+ if (no_overlay <= 0)
+ cap->capabilities |= V4L2_CAP_VIDEO_OVERLAY;
+
if (bttv_tvcards[btv->c.type].tuner != UNSET &&
bttv_tvcards[btv->c.type].tuner != TUNER_ABSENT)
cap->capabilities |= V4L2_CAP_TUNER;
@@ -3076,7 +3090,7 @@ static struct file_operations bttv_fops =
static struct video_device bttv_video_template =
{
.name = "UNSET",
- .type = VID_TYPE_CAPTURE|VID_TYPE_TUNER|VID_TYPE_OVERLAY|
+ .type = VID_TYPE_CAPTURE|VID_TYPE_TUNER|
VID_TYPE_CLIPPING|VID_TYPE_SCALES,
.hardware = VID_HARDWARE_BT848,
.fops = &bttv_fops,
@@ -3756,6 +3770,12 @@ static void bttv_unregister_video(struct bttv *btv)
/* register video4linux devices */
static int __devinit bttv_register_video(struct bttv *btv)
{
+ if (no_overlay <= 0) {
+ bttv_video_template.type |= VID_TYPE_OVERLAY;
+ } else {
+ printk("bttv: Overlay support disabled.\n");
+ }
+
/* video */
btv->video_dev = vdev_init(btv, &bttv_video_template, "video");
if (NULL == btv->video_dev)
diff --git a/include/asm-ppc64/machdep.h b/include/asm-ppc64/machdep.h
index f0c1d2d92672..f0ef06375947 100644
--- a/include/asm-ppc64/machdep.h
+++ b/include/asm-ppc64/machdep.h
@@ -84,7 +84,7 @@ struct machdep_calls {
void (*init_IRQ)(void);
int (*get_irq)(struct pt_regs *);
- void (*cpu_irq_down)(void);
+ void (*cpu_irq_down)(int secondary);
/* PCI stuff */
void (*pcibios_fixup)(void);
diff --git a/include/asm-ppc64/xics.h b/include/asm-ppc64/xics.h
index 0c45e14e26ca..1092af55d707 100644
--- a/include/asm-ppc64/xics.h
+++ b/include/asm-ppc64/xics.h
@@ -17,7 +17,7 @@
void xics_init_IRQ(void);
int xics_get_irq(struct pt_regs *);
void xics_setup_cpu(void);
-void xics_teardown_cpu(void);
+void xics_teardown_cpu(int secondary);
void xics_cause_IPI(int cpu);
void xics_request_IPIs(void);
void xics_migrate_irqs_away(void);
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
index 6213e976eade..4bf1659f8aa8 100644
--- a/include/linux/raid/bitmap.h
+++ b/include/linux/raid/bitmap.h
@@ -248,6 +248,7 @@ struct bitmap {
/* these are used only by md/bitmap */
int bitmap_create(mddev_t *mddev);
+void bitmap_flush(mddev_t *mddev);
void bitmap_destroy(mddev_t *mddev);
int bitmap_active(struct bitmap *bitmap);
diff --git a/kernel/sys.c b/kernel/sys.c
index 000e81ad2c1d..0bcaed6560ac 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -404,7 +404,6 @@ void kernel_halt(void)
{
notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
system_state = SYSTEM_HALT;
- device_suspend(PMSG_SUSPEND);
device_shutdown();
printk(KERN_EMERG "System halted.\n");
machine_halt();
@@ -415,7 +414,6 @@ void kernel_power_off(void)
{
notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
system_state = SYSTEM_POWER_OFF;
- device_suspend(PMSG_SUSPEND);
device_shutdown();
printk(KERN_EMERG "Power down.\n");
machine_power_off();
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index fea262860ea0..a6516a64b297 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -49,9 +49,6 @@ asmlinkage long sys_add_key(const char __user *_type,
goto error;
type[31] = '\0';
- if (!type[0])
- goto error;
-
ret = -EPERM;
if (type[0] == '.')
goto error;
@@ -144,6 +141,10 @@ asmlinkage long sys_request_key(const char __user *_type,
goto error;
type[31] = '\0';
+ ret = -EPERM;
+ if (type[0] == '.')
+ goto error;
+
/* pull the description into kernel space */
ret = -EFAULT;
dlen = strnlen_user(_description, PAGE_SIZE - 1);
@@ -362,7 +363,7 @@ long keyctl_revoke_key(key_serial_t id)
key_put(key);
error:
- return 0;
+ return ret;
} /* end keyctl_revoke_key() */
@@ -685,6 +686,8 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
goto can_read_key2;
ret = PTR_ERR(skey);
+ if (ret == -EAGAIN)
+ ret = -EACCES;
goto error2;
}
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index dfcd983af1fd..90c1506d007c 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -405,7 +405,7 @@ struct key *request_key_and_link(struct key_type *type,
key_user_put(user);
/* link the new key into the appropriate keyring */
- if (!PTR_ERR(key))
+ if (!IS_ERR(key))
request_key_link(key, dest_keyring);
}