author     Stephen Rothwell <sfr@canb.auug.org.au>  2010-01-18 16:01:12 +1100
committer  Stephen Rothwell <sfr@canb.auug.org.au>  2010-01-18 16:01:12 +1100
commit     95621e07c2901747c9835edc38fd1fffd67f8b83 (patch)
tree       842cc5d30f8326d99027c4441149682a9c819d89 /kernel
parent     4c1fc035018f9f34f36185d7f4580307ea25b010 (diff)
parent     5c438978c338f64a5f42dbf280be1d90961279f9 (diff)
Merge remote branch 'trivial/for-next'
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c              2
-rw-r--r--  kernel/ksysfs.c             2
-rw-r--r--  kernel/sched_cpupri.c       2
-rw-r--r--  kernel/trace/ring_buffer.c  4
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 5feed232be9d..78f7f86aa238 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -398,7 +398,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
 	skb_get(skb);
 	err = netlink_unicast(audit_sock, skb, audit_nlk_pid, 0);
 	if (err < 0) {
-		BUG_ON(err != -ECONNREFUSED); /* Shoudn't happen */
+		BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
 		printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
 		audit_log_lost("auditd dissapeared\n");
 		audit_pid = 0;
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 3feaf5a74514..ac08efca54c3 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -33,7 +33,7 @@ static ssize_t uevent_seqnum_show(struct kobject *kobj,
 }
 KERNEL_ATTR_RO(uevent_seqnum);
 
-/* uevent helper program, used during early boo */
+/* uevent helper program, used during early boot */
 static ssize_t uevent_helper_show(struct kobject *kobj,
 				  struct kobj_attribute *attr, char *buf)
 {
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 597b33099dfa..3db4b1a0e921 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -58,7 +58,7 @@ static int convert_prio(int prio)
  * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
  *
  * Note: This function returns the recommended CPUs as calculated during the
- * current invokation. By the time the call returns, the CPUs may have in
+ * current invocation. By the time the call returns, the CPUs may have in
  * fact changed priorities any number of times. While not ideal, it is not
  * an issue of correctness since the normal rebalancer logic will correct
  * any discrepancies created by racing against the uncertainty of the current
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index edefe3b2801b..7dab5a9ee100 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2539,7 +2539,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * @buffer: The ring buffer to enable writes
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
@@ -2575,7 +2575,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * @cpu: The CPU to enable.
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {