From afdb7023b849cffda679fcec324ff592d7b24a51 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Thu, 3 Apr 2008 16:40:48 +0300 Subject: x86: __show_registers() and __show_regs() API unification Currently the low-level function to dump user-passed registers on i386 is called __show_registers() whereas on x86-64 it's called __show_regs(). Unify the API to simplify porting of kmemcheck to x86-64. Cc: Vegard Nossum Signed-off-by: Pekka Enberg Signed-off-by: Ingo Molnar --- arch/x86/kernel/process_32.c | 4 ++-- arch/x86/kernel/process_64.c | 12 ++++++++---- arch/x86/kernel/traps_32.c | 2 +- arch/x86/kernel/traps_64.c | 2 +- include/asm-x86/kdebug.h | 3 +-- 5 files changed, 13 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 6d5483356e74..2e8edce73b53 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -194,7 +194,7 @@ void cpu_idle(void) } } -void __show_registers(struct pt_regs *regs, int all) +void __show_regs(struct pt_regs *regs, int all) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; unsigned long d0, d1, d2, d3, d6, d7; @@ -255,7 +255,7 @@ void __show_registers(struct pt_regs *regs, int all) void show_regs(struct pt_regs *regs) { - __show_registers(regs, 1); + __show_regs(regs, 1); show_trace(NULL, regs, ®s->sp, regs->bp); } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index ac54ff56df80..93ea156bba6d 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -180,7 +180,7 @@ void cpu_idle(void) } /* Prints also some state that isn't saved in the pt_regs */ -void __show_regs(struct pt_regs * regs) +void __show_regs(struct pt_regs * regs, int all) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; unsigned long d0, d1, d2, d3, d6, d7; @@ -219,13 +219,17 @@ void __show_regs(struct pt_regs * regs) rdmsrl(MSR_GS_BASE, gs); rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); + printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", + fs,fsindex,gs,gsindex,shadowgs); + + if (!all) + return; + cr0 = read_cr0(); cr2 = read_cr2(); cr3 = read_cr3(); cr4 = read_cr4(); - printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", - fs,fsindex,gs,gsindex,shadowgs); printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); @@ -242,7 +246,7 @@ void __show_regs(struct pt_regs * regs) void show_regs(struct pt_regs *regs) { printk("CPU %d:", smp_processor_id()); - __show_regs(regs); + __show_regs(regs, 1); show_trace(NULL, regs, (void *)(regs + 1), regs->bp); } diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index 08d752de4eee..a4739a818baa 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c @@ -330,7 +330,7 @@ void show_registers(struct pt_regs *regs) int i; print_modules(); - __show_registers(regs, 0); + __show_regs(regs, 0); printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)", TASK_COMM_LEN, current->comm, task_pid_nr(current), diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index adff76ea97c4..552506ce497f 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c @@ -470,7 +470,7 @@ void show_registers(struct pt_regs *regs) sp = regs->sp; ip = (u8 *) regs->ip - code_prologue; printk("CPU %d ", cpu); - __show_regs(regs); + __show_regs(regs, 1); printk("Process %s (pid: %d, threadinfo %p, task %p)\n", cur->comm, cur->pid, task_thread_info(cur), cur); diff --git 
a/include/asm-x86/kdebug.h b/include/asm-x86/kdebug.h index 96651bb59ba1..fe1fbdec1e1c 100644 --- a/include/asm-x86/kdebug.h +++ b/include/asm-x86/kdebug.h @@ -27,10 +27,9 @@ extern void printk_address(unsigned long address, int reliable); extern void die(const char *, struct pt_regs *,long); extern int __must_check __die(const char *, struct pt_regs *, long); extern void show_registers(struct pt_regs *regs); -extern void __show_registers(struct pt_regs *, int all); extern void show_trace(struct task_struct *t, struct pt_regs *regs, unsigned long *sp, unsigned long bp); -extern void __show_regs(struct pt_regs *regs); +extern void __show_regs(struct pt_regs *regs, int all); extern void show_regs(struct pt_regs *regs); extern unsigned long oops_begin(void); extern void oops_end(unsigned long, struct pt_regs *, int signr); -- cgit v1.2.3 From 5995e9b7ec563126bb93eec05cfca7b8d58f26d5 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Tue, 20 May 2008 11:15:43 +0200 Subject: x86: add save_stack_trace_bp() for tracing from a specific stack frame This will help kmemcheck (and possibly other debugging tools) since we can now simply pass regs->bp to the stack tracer instead of specifying the number of stack frames to skip, which is unreliable if gcc decides to inline functions, etc. Note that this makes the API incomplete for other architectures, but I expect that those can be updated lazily, e.g. when they need it. Cc: Arjan van de Ven Signed-off-by: Vegard Nossum --- arch/x86/kernel/stacktrace.c | 7 +++++++ include/linux/stacktrace.h | 1 + 2 files changed, 8 insertions(+) diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index c28c342c162f..97202adc504c 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -75,6 +75,13 @@ void save_stack_trace(struct stack_trace *trace) trace->entries[trace->nr_entries++] = ULONG_MAX; } +void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp) +{ + dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace); diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 5da9794b2d78..2a3cbe878774 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -9,6 +9,7 @@ struct stack_trace { }; extern void save_stack_trace(struct stack_trace *trace); +extern void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp); extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); -- cgit v1.2.3 From a2d8c355cab2599bd369aa30746442990b73d3f4 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Fri, 13 Jun 2008 15:31:11 +0200 Subject: stacktrace: add forward-declaration struct task_struct This is needed if the header is to be free-standing. 
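Taken together, the two stacktrace changes above let a trap handler capture a trace starting from a known frame pointer instead of guessing how many frames to skip. A hypothetical caller (the function name and buffer size are invented for illustration, not part of the series) might look like this:

	#include <linux/kernel.h>
	#include <linux/ptrace.h>
	#include <linux/stacktrace.h>

	static void report_fault_backtrace(struct pt_regs *regs)
	{
		unsigned long entries[32];
		struct stack_trace trace = {
			.nr_entries  = 0,
			.max_entries = ARRAY_SIZE(entries),
			.entries     = entries,
			.skip        = 0,	/* bp already points at the frame we want */
		};

		/* Start unwinding at the faulting frame, not at this helper. */
		save_stack_trace_bp(&trace, regs->bp);
		print_stack_trace(&trace, 0);
	}
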
Signed-off-by: Vegard Nossum --- include/linux/stacktrace.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 2a3cbe878774..519ad2d8f092 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -2,6 +2,8 @@ #define __LINUX_STACKTRACE_H #ifdef CONFIG_STACKTRACE +struct task_struct; + struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; -- cgit v1.2.3 From c9506812f317bca0edcbc717c8fdabdd1d0a264b Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Fri, 9 May 2008 20:32:44 +0200 Subject: slab: move struct kmem_cache to headers Move the SLAB struct kmem_cache definition to like with SLUB so kmemcheck can access ->ctor and ->flags. Cc: Ingo Molnar Cc: Christoph Lameter Cc: Andrew Morton Signed-off-by: Pekka Enberg Signed-off-by: Vegard Nossum --- include/linux/slab_def.h | 81 ++++++++++++++++++++++++++++++++++++++++++++++++ mm/slab.c | 81 ------------------------------------------------ 2 files changed, 81 insertions(+), 81 deletions(-) diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 39c3a5eb8ebe..37b483754d14 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -15,6 +15,87 @@ #include /* kmalloc_sizes.h needs L1_CACHE_BYTES */ #include +/* + * struct kmem_cache + * + * manages a cache. + */ + +struct kmem_cache { +/* 1) per-cpu data, touched during every alloc/free */ + struct array_cache *array[NR_CPUS]; +/* 2) Cache tunables. Protected by cache_chain_mutex */ + unsigned int batchcount; + unsigned int limit; + unsigned int shared; + + unsigned int buffer_size; + u32 reciprocal_buffer_size; +/* 3) touched by every alloc & free from the backend */ + + unsigned int flags; /* constant flags */ + unsigned int num; /* # of objs per slab */ + +/* 4) cache_grow/shrink */ + /* order of pgs per slab (2^n) */ + unsigned int gfporder; + + /* force GFP flags, e.g. GFP_DMA */ + gfp_t gfpflags; + + size_t colour; /* cache colouring range */ + unsigned int colour_off; /* colour offset */ + struct kmem_cache *slabp_cache; + unsigned int slab_size; + unsigned int dflags; /* dynamic flags */ + + /* constructor func */ + void (*ctor)(struct kmem_cache *, void *); + +/* 5) cache creation/removal */ + const char *name; + struct list_head next; + +/* 6) statistics */ +#ifdef CONFIG_DEBUG_SLAB + unsigned long num_active; + unsigned long num_allocations; + unsigned long high_mark; + unsigned long grown; + unsigned long reaped; + unsigned long errors; + unsigned long max_freeable; + unsigned long node_allocs; + unsigned long node_frees; + unsigned long node_overflow; + atomic_t allochit; + atomic_t allocmiss; + atomic_t freehit; + atomic_t freemiss; + + /* + * If debugging is enabled, then the allocator can add additional + * fields and/or padding to every object. buffer_size contains the total + * object size including these internal fields, the following two + * variables contain the offset to the user object and its size. + */ + int obj_offset; + int obj_size; +#endif /* CONFIG_DEBUG_SLAB */ + + /* + * We put nodelists[] at the end of kmem_cache, because we want to size + * this array to nr_node_ids slots instead of MAX_NUMNODES + * (see kmem_cache_init()) + * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache + * is statically defined, so we reserve the max number of nodes. + */ + struct kmem_list3 *nodelists[MAX_NUMNODES]; + /* + * Do not add fields after nodelists[] + */ +}; + /* Size description struct for general caches. 
*/ struct cache_sizes { size_t cs_size; diff --git a/mm/slab.c b/mm/slab.c index 06236e4ddc1b..7a464e6e392d 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -371,87 +371,6 @@ static void kmem_list3_init(struct kmem_list3 *parent) MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ } while (0) -/* - * struct kmem_cache - * - * manages a cache. - */ - -struct kmem_cache { -/* 1) per-cpu data, touched during every alloc/free */ - struct array_cache *array[NR_CPUS]; -/* 2) Cache tunables. Protected by cache_chain_mutex */ - unsigned int batchcount; - unsigned int limit; - unsigned int shared; - - unsigned int buffer_size; - u32 reciprocal_buffer_size; -/* 3) touched by every alloc & free from the backend */ - - unsigned int flags; /* constant flags */ - unsigned int num; /* # of objs per slab */ - -/* 4) cache_grow/shrink */ - /* order of pgs per slab (2^n) */ - unsigned int gfporder; - - /* force GFP flags, e.g. GFP_DMA */ - gfp_t gfpflags; - - size_t colour; /* cache colouring range */ - unsigned int colour_off; /* colour offset */ - struct kmem_cache *slabp_cache; - unsigned int slab_size; - unsigned int dflags; /* dynamic flags */ - - /* constructor func */ - void (*ctor)(struct kmem_cache *, void *); - -/* 5) cache creation/removal */ - const char *name; - struct list_head next; - -/* 6) statistics */ -#if STATS - unsigned long num_active; - unsigned long num_allocations; - unsigned long high_mark; - unsigned long grown; - unsigned long reaped; - unsigned long errors; - unsigned long max_freeable; - unsigned long node_allocs; - unsigned long node_frees; - unsigned long node_overflow; - atomic_t allochit; - atomic_t allocmiss; - atomic_t freehit; - atomic_t freemiss; -#endif -#if DEBUG - /* - * If debugging is enabled, then the allocator can add additional - * fields and/or padding to every object. buffer_size contains the total - * object size including these internal fields, the following two - * variables contain the offset to the user object and its size. - */ - int obj_offset; - int obj_size; -#endif - /* - * We put nodelists[] at the end of kmem_cache, because we want to size - * this array to nr_node_ids slots instead of MAX_NUMNODES - * (see kmem_cache_init()) - * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache - * is statically defined, so we reserve the max number of nodes. - */ - struct kmem_list3 *nodelists[MAX_NUMNODES]; - /* - * Do not add fields after nodelists[] - */ -}; - #define CFLGS_OFF_SLAB (0x80000000UL) #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) -- cgit v1.2.3 From 4b34d433198f97523756b267a236c803b13f3ae0 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Wed, 21 May 2008 22:53:13 +0200 Subject: tasklets: new tasklet scheduling function Rationale: kmemcheck needs to be able to schedule a tasklet without touching any dynamically allocated memory _at_ _all_ (since that would lead to a recursive page fault). This tasklet is used for writing the error reports to the kernel log. The new scheduling function avoids touching any other tasklets by inserting the new tasklist as the head of the "tasklet_hi" list instead of on the tail. Also don't wake up the softirq thread lest the scheduler access some tracked memory and we go down with a recursive page fault. In this case, we'd better just wait for the maximum time of 1/HZ for the message to appear. 
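As a rough usage sketch (the function and variable names below are invented, not taken from the patch), a subsystem that must not allocate memory in its fault path could report errors like this:

	#include <linux/interrupt.h>

	static void error_report_work(unsigned long data)
	{
		/* Drain a statically sized error queue and printk() it. */
	}

	static DECLARE_TASKLET(error_report_tasklet, error_report_work, 0);

	void queue_error_report(void)
	{
		/*
		 * Must be called with interrupts disabled; the low-level
		 * helper BUG()s otherwise.  Inserts at the head of the
		 * tasklet_hi list, touches no other tasklets and does not
		 * wake ksoftirqd.
		 */
		tasklet_hi_schedule_first(&error_report_tasklet);
	}
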
Signed-off-by: Vegard Nossum --- include/linux/interrupt.h | 14 ++++++++++++++ kernel/softirq.c | 11 +++++++++++ 2 files changed, 25 insertions(+) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f1fc7470d26c..ce0598b1e225 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -377,6 +377,20 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t) __tasklet_hi_schedule(t); } +extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); + +/* + * This version avoids touching any other tasklets. Needed for kmemcheck + * in order not to take any page faults while enqueueing this tasklet; + * consider VERY carefully whether you really need this or + * tasklet_hi_schedule()... + */ +static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) + __tasklet_hi_schedule_first(t); +} + static inline void tasklet_disable_nosync(struct tasklet_struct *t) { diff --git a/kernel/softirq.c b/kernel/softirq.c index 36e061740047..44cf21f8cf51 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -393,6 +393,17 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) EXPORT_SYMBOL(__tasklet_hi_schedule); +void __tasklet_hi_schedule_first(struct tasklet_struct *t) +{ + BUG_ON(!irqs_disabled()); + + t->next = __get_cpu_var(tasklet_hi_vec).head; + __get_cpu_var(tasklet_hi_vec).head = t; + __raise_softirq_irqoff(TASKLET_SOFTIRQ); +} + +EXPORT_SYMBOL(__tasklet_hi_schedule_first); + static void tasklet_action(struct softirq_action *a) { struct tasklet_struct *list; -- cgit v1.2.3 From 3994d3f08b618f9c40af1d402c3e4ecec946b5dc Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Mon, 28 Apr 2008 22:47:29 +0300 Subject: kmemcheck: add Vegard and Pekka to MAINTAINERS Acked-by: Vegard Nossum Signed-off-by: Pekka Enberg Signed-off-by: Ingo Molnar --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 9d4304266043..880b8ca0ce4c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2413,6 +2413,14 @@ M: jason.wessel@windriver.com L: kgdb-bugreport@lists.sourceforge.net S: Maintained +KMEMCHECK +P: Vegard Nossum +M: vegardno@ifi.uio.no +P Pekka Enberg +M: penberg@cs.helsinki.fi +L: linux-kernel@vger.kernel.org +S: Maintained + KPROBES P: Ananth N Mavinakayanahalli M: ananth@in.ibm.com -- cgit v1.2.3 From 385e31b9eae0528bada07d16a189f3f40df23961 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Fri, 4 Apr 2008 00:51:41 +0200 Subject: kmemcheck: add the kmemcheck core General description: kmemcheck is a patch to the linux kernel that detects use of uninitialized memory. It does this by trapping every read and write to memory that was allocated dynamically (e.g. using kmalloc()). If a memory address is read that has not previously been written to, a message is printed to the kernel log. (Thanks to Andi Kleen for the set_memory_4k() solution.) 
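A minimal example of the class of bug being detected (illustrative code only, not part of the patch set):

	#include <linux/errno.h>
	#include <linux/slab.h>

	struct foo {
		int a;
		int b;
	};

	int read_before_write(void)
	{
		struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
		int val;

		if (!f)
			return -ENOMEM;

		f->a = 1;	/* ->a is now initialized */
		val = f->b;	/* ->b was never written: kmemcheck reports this read */
		kfree(f);

		return val;
	}
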
Signed-off-by: Vegard Nossum Signed-off-by: Pekka Enberg Signed-off-by: Ingo Molnar --- Documentation/kmemcheck.txt | 135 +++++++++++ arch/x86/mm/Makefile | 2 + arch/x86/mm/kmemcheck/Makefile | 3 + arch/x86/mm/kmemcheck/error.c | 215 +++++++++++++++++ arch/x86/mm/kmemcheck/error.h | 15 ++ arch/x86/mm/kmemcheck/kmemcheck.c | 477 ++++++++++++++++++++++++++++++++++++++ arch/x86/mm/kmemcheck/opcode.c | 70 ++++++ arch/x86/mm/kmemcheck/opcode.h | 9 + arch/x86/mm/kmemcheck/pte.c | 22 ++ arch/x86/mm/kmemcheck/pte.h | 10 + arch/x86/mm/kmemcheck/shadow.c | 174 ++++++++++++++ arch/x86/mm/kmemcheck/shadow.h | 16 ++ arch/x86/mm/kmemcheck/smp.c | 79 +++++++ arch/x86/mm/kmemcheck/smp.h | 23 ++ arch/x86/mm/kmemcheck/string.c | 91 ++++++++ include/asm-x86/kmemcheck.h | 36 +++ include/asm-x86/pgtable.h | 4 +- include/asm-x86/pgtable_32.h | 6 + include/linux/kmemcheck.h | 21 ++ include/linux/mm_types.h | 4 + init/main.c | 2 + kernel/sysctl.c | 12 + 22 files changed, 1424 insertions(+), 2 deletions(-) create mode 100644 Documentation/kmemcheck.txt create mode 100644 arch/x86/mm/kmemcheck/Makefile create mode 100644 arch/x86/mm/kmemcheck/error.c create mode 100644 arch/x86/mm/kmemcheck/error.h create mode 100644 arch/x86/mm/kmemcheck/kmemcheck.c create mode 100644 arch/x86/mm/kmemcheck/opcode.c create mode 100644 arch/x86/mm/kmemcheck/opcode.h create mode 100644 arch/x86/mm/kmemcheck/pte.c create mode 100644 arch/x86/mm/kmemcheck/pte.h create mode 100644 arch/x86/mm/kmemcheck/shadow.c create mode 100644 arch/x86/mm/kmemcheck/shadow.h create mode 100644 arch/x86/mm/kmemcheck/smp.c create mode 100644 arch/x86/mm/kmemcheck/smp.h create mode 100644 arch/x86/mm/kmemcheck/string.c create mode 100644 include/asm-x86/kmemcheck.h create mode 100644 include/linux/kmemcheck.h diff --git a/Documentation/kmemcheck.txt b/Documentation/kmemcheck.txt new file mode 100644 index 000000000000..843a63c4180f --- /dev/null +++ b/Documentation/kmemcheck.txt @@ -0,0 +1,135 @@ +Contents +======== + + 1. How to use + 2. Technical description + 3. Changes to the slab allocators + 4. Problems + 5. Parameters + 6. Future enhancements + + +How to use (IMPORTANT) +====================== + +Always remember this: kmemcheck _will_ give false positives. So don't enable +it and spam the mailing list with its reports; you are not going to be heard, +and it will make people's skins thicker for when the real errors are found. + +Instead, I encourage maintainers and developers to find errors in _their_ +_own_ code. And if you find false positives, you can try to work around them, +try to figure out if it's a real bug or not, or simply ignore them. Most +developers know their own code and will quickly and efficiently determine the +root cause of a kmemcheck report. This is therefore also the most efficient +way to work with kmemcheck. + +If you still want to run kmemcheck to inspect others' code, the rule of thumb +should be: If it's not obvious (to you), don't tell us about it either. Most +likely the code is correct and you'll only waste our time. If you can work +out the error, please do send the maintainer a heads up and/or a patch, but +don't expect him/her to fix something that wasn't wrong in the first place. + + +Technical description +===================== + +kmemcheck works by marking memory pages non-present. This means that whenever +somebody attempts to access the page, a page fault is generated. The page +fault handler notices that the page was in fact only hidden, and so it calls +on the kmemcheck code to make further investigations. 
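In code terms, "hiding" and "showing" a tracked page amounts to toggling the present bit of its PTE and flushing the TLB entry; condensed here from the kmemcheck_hide_addr()/kmemcheck_show_addr() helpers added later in this series (error handling omitted):

	static void hide_page(unsigned long address)
	{
		pte_t *pte = kmemcheck_pte_lookup(address);

		if (pte) {
			/* Clear the present bit so the next access faults. */
			set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
			__flush_tlb_one(address);
		}
	}

	static void show_page(unsigned long address)
	{
		pte_t *pte = kmemcheck_pte_lookup(address);

		if (pte) {
			/* Restore the present bit so the faulting insn can run. */
			set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
			__flush_tlb_one(address);
		}
	}
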
+ +When the investigations are completed, kmemcheck "shows" the page by marking +it present (as it would be under normal circumstances). This way, the +interrupted code can continue as usual. + +But after the instruction has been executed, we should hide the page again, so +that we can catch the next access too! Now kmemcheck makes use of a debugging +feature of the processor, namely single-stepping. When the processor has +finished the one instruction that generated the memory access, a debug +exception is raised. From here, we simply hide the page again and continue +execution, this time with the single-stepping feature turned off. + + +Changes to the slab allocators +============================== + +kmemcheck requires some assistance from the memory allocator in order to work. +The memory allocator needs to + +1. Tell kmemcheck about newly allocated pages and pages that are about to + be freed. This allows kmemcheck to set up and tear down the shadow memory + for the pages in question. The shadow memory stores the status of each byte + in the allocation proper, e.g. whether it is initialized or uninitialized. +2. Tell kmemcheck which parts of memory should be marked uninitialized. There + are actually a few more states, such as "not yet allocated" and "recently + freed". + +If a slab cache is set up using the SLAB_NOTRACK flag, it will never return +memory that can take page faults because of kmemcheck. + +If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still +request memory with the __GFP_NOTRACK flag. This does not prevent the page +faults from occurring, however, but marks the object in question as being +initialized so that no warnings will ever be produced for this object. + +Currently, the SLAB and SLUB allocators are supported by kmemcheck. + + +Problems +======== + +The most prominent problem seems to be that of bit-fields. kmemcheck can only +track memory with byte granularity. Therefore, when gcc generates code to +access only one bit in a bit-field, there is really no way for kmemcheck to +know which of the other bits will be used or thrown away. Consequently, there +may be bogus warnings for bit-field accesses. There is some experimental +support to detect this automatically, though it is probably better to work +around this by explicitly initializing whole bit-fields at once. + +Some allocations are used for DMA. As DMA doesn't go through the paging +mechanism, we have absolutely no way to detect DMA writes. This means that +spurious warnings may be seen on access to DMA memory. DMA allocations should +be annotated with the __GFP_NOTRACK flag or allocated from caches marked +SLAB_NOTRACK to work around this problem. + + +Parameters +========== + +In addition to enabling CONFIG_KMEMCHECK before the kernel is compiled, the +parameter kmemcheck=1 must be passed to the kernel when it is started in order +to actually do the tracking. So by default, there is only a very small +(probably negligible) overhead for enabling the config option. + +Similarly, kmemcheck may be turned on or off at run-time using, respectively: + +echo 1 > /proc/sys/kernel/kmemcheck + and +echo 0 > /proc/sys/kernel/kmemcheck + +Note that this is a lazy setting; once turned off, the old allocations will +still have to take a single page fault exception before tracking is turned off +for that particular page. Enabling kmemcheck on will only enable tracking for +allocations made from that point onwards. 
+ +The default mode is the one-shot mode, where only the first error is reported +before kmemcheck is disabled. This mode can be enabled by passing kmemcheck=2 +to the kernel at boot, or running + +echo 2 > /proc/sys/kernel/kmemcheck + +when the kernel is already running. + + +Future enhancements +=================== + +There is already some preliminary support for catching use-after-free errors. +What still needs to be done is delaying kfree() so that memory is not +reallocated immediately after freeing it. [Suggested by Pekka Enberg.] + +It should be possible to allow SMP systems by duplicating the page tables for +each processor in the system. This is probably extremely difficult, however. +[Suggested by Ingo Molnar.] + +Support for instruction set extensions like XMM, SSE2, etc. diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index b7b3e4c7cfc9..1f19d3e3abe1 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -8,6 +8,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o obj-$(CONFIG_HIGHMEM) += highmem_32.o +obj-$(CONFIG_KMEMCHECK) += kmemcheck/ + ifeq ($(CONFIG_X86_32),y) obj-$(CONFIG_NUMA) += discontig_32.o else diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile new file mode 100644 index 000000000000..f888b5c934be --- /dev/null +++ b/arch/x86/mm/kmemcheck/Makefile @@ -0,0 +1,3 @@ +obj-y := error.o kmemcheck.o opcode.o pte.o shadow.o string.o + +obj-$(CONFIG_KMEMCHECK_USE_SMP) += smp.o diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c new file mode 100644 index 000000000000..9261f9c48740 --- /dev/null +++ b/arch/x86/mm/kmemcheck/error.c @@ -0,0 +1,215 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "shadow.h" + +enum kmemcheck_error_type { + KMEMCHECK_ERROR_INVALID_ACCESS, + KMEMCHECK_ERROR_BUG, +}; + +#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT) + +struct kmemcheck_error { + enum kmemcheck_error_type type; + + union { + /* KMEMCHECK_ERROR_INVALID_ACCESS */ + struct { + /* Kind of access that caused the error */ + enum kmemcheck_shadow state; + /* Address and size of the erroneous read */ + unsigned long address; + unsigned int size; + }; + }; + + struct pt_regs regs; + struct stack_trace trace; + unsigned long trace_entries[32]; + + /* We compress it to a char. */ + unsigned char shadow_copy[SHADOW_COPY_SIZE]; +}; + +/* + * Create a ring queue of errors to output. We can't call printk() directly + * from the kmemcheck traps, since this may call the console drivers and + * result in a recursive fault. + */ +static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE]; +static unsigned int error_count; +static unsigned int error_rd; +static unsigned int error_wr; +static unsigned int error_missed_count; + +static struct kmemcheck_error *error_next_wr(void) +{ + struct kmemcheck_error *e; + + if (error_count == ARRAY_SIZE(error_fifo)) { + ++error_missed_count; + return NULL; + } + + e = &error_fifo[error_wr]; + if (++error_wr == ARRAY_SIZE(error_fifo)) + error_wr = 0; + ++error_count; + return e; +} + +static struct kmemcheck_error *error_next_rd(void) +{ + struct kmemcheck_error *e; + + if (error_count == 0) + return NULL; + + e = &error_fifo[error_rd]; + if (++error_rd == ARRAY_SIZE(error_fifo)) + error_rd = 0; + --error_count; + return e; +} + +static void do_wakeup(unsigned long); +static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0); + +/* + * Save the context of an error report. 
+ */ +void kmemcheck_error_save(enum kmemcheck_shadow state, + unsigned long address, unsigned int size, struct pt_regs *regs) +{ + static unsigned long prev_ip; + + struct kmemcheck_error *e; + enum shadow *shadow_copy; + + /* Don't report several adjacent errors from the same EIP. */ + if (regs->ip == prev_ip) + return; + prev_ip = regs->ip; + + e = error_next_wr(); + if (!e) + return; + + e->type = KMEMCHECK_ERROR_INVALID_ACCESS; + + e->state = state; + e->address = address; + e->size = size; + + /* Save regs */ + memcpy(&e->regs, regs, sizeof(*regs)); + + /* Save stack trace */ + e->trace.nr_entries = 0; + e->trace.entries = e->trace_entries; + e->trace.max_entries = ARRAY_SIZE(e->trace_entries); + e->trace.skip = 0; + save_stack_trace_bp(&e->trace, regs->bp); + + /* Round address down to nearest 16 bytes */ + shadow_copy = kmemcheck_shadow_lookup(address + & ~(SHADOW_COPY_SIZE - 1)); + BUG_ON(!shadow_copy); + + memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE); + + tasklet_hi_schedule_first(&kmemcheck_tasklet); +} + +/* + * Save the context of a kmemcheck bug. + */ +void kmemcheck_error_save_bug(struct pt_regs *regs) +{ + struct kmemcheck_error *e; + + e = error_next_wr(); + if (!e) + return; + + e->type = KMEMCHECK_ERROR_BUG; + + memcpy(&e->regs, regs, sizeof(*regs)); + + e->trace.nr_entries = 0; + e->trace.entries = e->trace_entries; + e->trace.max_entries = ARRAY_SIZE(e->trace_entries); + e->trace.skip = 1; + save_stack_trace(&e->trace); + + tasklet_hi_schedule_first(&kmemcheck_tasklet); +} + +void kmemcheck_error_recall(void) +{ + static const char *desc[] = { + [KMEMCHECK_SHADOW_UNALLOCATED] = "unallocated", + [KMEMCHECK_SHADOW_UNINITIALIZED] = "uninitialized", + [KMEMCHECK_SHADOW_INITIALIZED] = "initialized", + [KMEMCHECK_SHADOW_FREED] = "freed", + }; + + static const char short_desc[] = { + [KMEMCHECK_SHADOW_UNALLOCATED] = 'a', + [KMEMCHECK_SHADOW_UNINITIALIZED] = 'u', + [KMEMCHECK_SHADOW_INITIALIZED] = 'i', + [KMEMCHECK_SHADOW_FREED] = 'f', + }; + + struct kmemcheck_error *e; + unsigned int i; + + e = error_next_rd(); + if (!e) + return; + + switch (e->type) { + case KMEMCHECK_ERROR_INVALID_ACCESS: + printk(KERN_ERR "kmemcheck: Caught %d-bit read " + "from %s memory (%p)\n", + e->size, e->state < ARRAY_SIZE(desc) ? 
+ desc[e->state] : "(invalid shadow state)", + (void *) e->address); + + printk(KERN_INFO); + for (i = 0; i < SHADOW_COPY_SIZE; ++i) { + if (e->shadow_copy[i] < ARRAY_SIZE(short_desc)) + printk("%c", short_desc[e->shadow_copy[i]]); + else + printk("?"); + } + printk("\n"); + printk(KERN_INFO "%*c\n", + 1 + (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^'); + break; + case KMEMCHECK_ERROR_BUG: + printk(KERN_EMERG "kmemcheck: Fatal error\n"); + break; + } + + __show_regs(&e->regs, 1); + print_stack_trace(&e->trace, 0); +} + +static void do_wakeup(unsigned long data) +{ + while (error_count > 0) + kmemcheck_error_recall(); + + if (error_missed_count > 0) { + printk(KERN_WARNING "kmemcheck: Lost %d error reports because " + "the queue was too small\n", error_missed_count); + error_missed_count = 0; + } +} diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h new file mode 100644 index 000000000000..0efc2e8d0a20 --- /dev/null +++ b/arch/x86/mm/kmemcheck/error.h @@ -0,0 +1,15 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H +#define ARCH__X86__MM__KMEMCHECK__ERROR_H + +#include + +#include "shadow.h" + +void kmemcheck_error_save(enum kmemcheck_shadow state, + unsigned long address, unsigned int size, struct pt_regs *regs); + +void kmemcheck_error_save_bug(struct pt_regs *regs); + +void kmemcheck_error_recall(void); + +#endif diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c new file mode 100644 index 000000000000..0c0201b17f2b --- /dev/null +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -0,0 +1,477 @@ +/** + * kmemcheck - a heavyweight memory checker for the linux kernel + * Copyright (C) 2007, 2008 Vegard Nossum + * (With a lot of help from Ingo Molnar and Pekka Enberg.) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2) as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "error.h" +#include "opcode.h" +#include "pte.h" +#include "shadow.h" +#include "smp.h" + +void __init kmemcheck_init(void) +{ + printk(KERN_INFO "kmemcheck: \"Bugs, beware!\"\n"); + + kmemcheck_smp_init(); + +#if defined(CONFIG_SMP) && !defined(CONFIG_KMEMCHECK_USE_SMP) + /* Limit SMP to use a single CPU. We rely on the fact that this code + * runs before SMP is set up. */ + if (setup_max_cpus > 1) { + printk(KERN_INFO + "kmemcheck: Limiting number of CPUs to 1.\n"); + setup_max_cpus = 1; + } +#endif +} + +#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT +int kmemcheck_enabled = 0; +#endif + +#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT +int kmemcheck_enabled = 1; +#endif + +#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT +int kmemcheck_enabled = 2; +#endif + +/* + * We need to parse the kmemcheck= option before any memory is allocated. 
+ */ +static int __init param_kmemcheck(char *str) +{ + if (!str) + return -EINVAL; + + sscanf(str, "%d", &kmemcheck_enabled); + return 0; +} + +early_param("kmemcheck", param_kmemcheck); + +int kmemcheck_show_addr(unsigned long address) +{ + pte_t *pte; + + pte = kmemcheck_pte_lookup(address); + if (!pte) + return 0; + + set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); + __flush_tlb_one(address); + return 1; +} + +int kmemcheck_hide_addr(unsigned long address) +{ + pte_t *pte; + + pte = kmemcheck_pte_lookup(address); + if (!pte) + return 0; + + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); + __flush_tlb_one(address); + return 1; +} + +struct kmemcheck_context { + bool busy; + int balance; + + unsigned long addr1; + unsigned long addr2; + unsigned long flags; +}; + +static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context); + +bool kmemcheck_active(struct pt_regs *regs) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + + return data->balance > 0; +} + +/* + * Called from the #PF handler. + */ +void kmemcheck_show(struct pt_regs *regs) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + int n; + + BUG_ON(!irqs_disabled()); + + kmemcheck_pause_allbutself(); + + if (unlikely(data->balance != 0)) { + kmemcheck_show_addr(data->addr1); + kmemcheck_show_addr(data->addr2); + kmemcheck_error_save_bug(regs); + data->balance = 0; + kmemcheck_resume(); + return; + } + + n = 0; + n += kmemcheck_show_addr(data->addr1); + n += kmemcheck_show_addr(data->addr2); + + /* None of the addresses actually belonged to kmemcheck. Note that + * this is not an error. */ + if (n == 0) { + kmemcheck_resume(); + return; + } + + ++data->balance; + + /* + * The IF needs to be cleared as well, so that the faulting + * instruction can run "uninterrupted". Otherwise, we might take + * an interrupt and start executing that before we've had a chance + * to hide the page again. + * + * NOTE: In the rare case of multiple faults, we must not override + * the original flags: + */ + if (!(regs->flags & X86_EFLAGS_TF)) + data->flags = regs->flags; + + regs->flags |= X86_EFLAGS_TF; + regs->flags &= ~X86_EFLAGS_IF; +} + +/* + * Called from the #DB handler. 
+ */ +void kmemcheck_hide(struct pt_regs *regs) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + int n; + + BUG_ON(!irqs_disabled()); + + if (data->balance == 0) { + kmemcheck_resume(); + return; + } + + if (unlikely(data->balance != 1)) { + kmemcheck_show_addr(data->addr1); + kmemcheck_show_addr(data->addr2); + kmemcheck_error_save_bug(regs); + data->addr1 = 0; + data->addr2 = 0; + data->balance = 0; + + if (!(data->flags & X86_EFLAGS_TF)) + regs->flags &= ~X86_EFLAGS_TF; + if (data->flags & X86_EFLAGS_IF) + regs->flags |= X86_EFLAGS_IF; + kmemcheck_resume(); + return; + } + + n = 0; + if (kmemcheck_enabled) { + n += kmemcheck_hide_addr(data->addr1); + n += kmemcheck_hide_addr(data->addr2); + } else { + n += kmemcheck_show_addr(data->addr1); + n += kmemcheck_show_addr(data->addr2); + } + + if (n == 0) { + kmemcheck_resume(); + return; + } + + --data->balance; + + data->addr1 = 0; + data->addr2 = 0; + + if (!(data->flags & X86_EFLAGS_TF)) + regs->flags &= ~X86_EFLAGS_TF; + if (data->flags & X86_EFLAGS_IF) + regs->flags |= X86_EFLAGS_IF; + kmemcheck_resume(); +} + +void kmemcheck_show_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) { + unsigned long address; + pte_t *pte; + unsigned int level; + + address = (unsigned long) page_address(&p[i]); + pte = lookup_address(address, &level); + BUG_ON(!pte); + BUG_ON(level != PG_LEVEL_4K); + + set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN)); + __flush_tlb_one(address); + } +} + +bool kmemcheck_page_is_tracked(struct page *p) +{ + /* This will also check the "hidden" flag of the PTE. */ + return kmemcheck_pte_lookup((unsigned long) page_address(p)); +} + +void kmemcheck_hide_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + set_memory_4k((unsigned long) page_address(p), n); + + for (i = 0; i < n; ++i) { + unsigned long address; + pte_t *pte; + unsigned int level; + + address = (unsigned long) page_address(&p[i]); + pte = lookup_address(address, &level); + BUG_ON(!pte); + BUG_ON(level != PG_LEVEL_4K); + + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); + set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN)); + __flush_tlb_one(address); + } +} + +/* + * Check that an access does not span across two different pages, because + * that will mess up our shadow lookup. + */ +static bool check_page_boundary(struct pt_regs *regs, + unsigned long addr, unsigned int size) +{ + if (size == 8) + return false; + if (size == 16 && (addr & PAGE_MASK) == ((addr + 1) & PAGE_MASK)) + return false; + if (size == 32 && (addr & PAGE_MASK) == ((addr + 3) & PAGE_MASK)) + return false; +#ifdef CONFIG_X86_64 + if (size == 64 && (addr & PAGE_MASK) == ((addr + 7) & PAGE_MASK)) + return false; +#endif + + /* + * XXX: The addr/size data is also really interesting if this + * case ever triggers. We should make a separate class of errors + * for this case. 
-Vegard + */ + kmemcheck_error_save_bug(regs); + return true; +} + +static void kmemcheck_read(struct pt_regs *regs, + unsigned long address, unsigned int size) +{ + void *shadow; + enum kmemcheck_shadow status; + + shadow = kmemcheck_shadow_lookup(address); + if (!shadow) + return; + + if (check_page_boundary(regs, address, size)) + return; + + status = kmemcheck_shadow_test(shadow, size); + if (status == KMEMCHECK_SHADOW_INITIALIZED) + return; + + if (kmemcheck_enabled) + kmemcheck_error_save(status, address, size, regs); + + if (kmemcheck_enabled == 2) + kmemcheck_enabled = 0; + + /* Don't warn about it again. */ + kmemcheck_shadow_set(shadow, size); +} + +static void kmemcheck_write(struct pt_regs *regs, + unsigned long address, unsigned int size) +{ + void *shadow; + + shadow = kmemcheck_shadow_lookup(address); + if (!shadow) + return; + + if (check_page_boundary(regs, address, size)) + return; + + kmemcheck_shadow_set(shadow, size); +} + +enum kmemcheck_method { + KMEMCHECK_READ, + KMEMCHECK_WRITE, +}; + +void kmemcheck_access(struct pt_regs *regs, + unsigned long fallback_address, enum kmemcheck_method fallback_method) +{ + const uint8_t *insn; + const uint8_t *insn_primary; + unsigned int size; + + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + + /* Recursive fault -- ouch. */ + if (data->busy) { + kmemcheck_show_addr(fallback_address); + kmemcheck_error_save_bug(regs); + return; + } + + data->busy = true; + + insn = (const uint8_t *) regs->ip; + insn_primary = kmemcheck_opcode_get_primary(insn); + + size = kmemcheck_opcode_get_size(insn); + + switch (insn_primary[0]) { +#ifdef CONFIG_KMEMCHECK_BITOPS_OK + /* AND, OR, XOR */ + /* + * Unfortunately, these instructions have to be excluded from + * our regular checking since they access only some (and not + * all) bits. This clears out "bogus" bitfield-access warnings. + */ + case 0x80: + case 0x81: + case 0x82: + case 0x83: + switch ((insn_primary[1] >> 3) & 7) { + /* OR */ + case 1: + /* AND */ + case 4: + /* XOR */ + case 6: + kmemcheck_write(regs, fallback_address, size); + data->addr1 = fallback_address; + data->addr2 = 0; + data->busy = false; + return; + + /* ADD */ + case 0: + /* ADC */ + case 2: + /* SBB */ + case 3: + /* SUB */ + case 5: + /* CMP */ + case 7: + break; + } + break; +#endif + + /* MOVS, MOVSB, MOVSW, MOVSD */ + case 0xa4: + case 0xa5: + /* These instructions are special because they take two + * addresses, but we only get one page fault. */ + kmemcheck_read(regs, regs->si, size); + kmemcheck_write(regs, regs->di, size); + data->addr1 = regs->si; + data->addr2 = regs->di; + data->busy = false; + return; + + /* CMPS, CMPSB, CMPSW, CMPSD */ + case 0xa6: + case 0xa7: + kmemcheck_read(regs, regs->si, size); + kmemcheck_read(regs, regs->di, size); + data->addr1 = regs->si; + data->addr2 = regs->di; + data->busy = false; + return; + } + + /* If the opcode isn't special in any way, we use the data from the + * page fault handler to determine the address and type of memory + * access. 
*/ + switch (fallback_method) { + case KMEMCHECK_READ: + kmemcheck_read(regs, fallback_address, size); + data->addr1 = fallback_address; + data->addr2 = 0; + data->busy = false; + return; + case KMEMCHECK_WRITE: + kmemcheck_write(regs, fallback_address, size); + data->addr1 = fallback_address; + data->addr2 = 0; + data->busy = false; + return; + } +} + +bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, + unsigned long error_code) +{ + pte_t *pte; + unsigned int level; + + pte = lookup_address(address, &level); + if (!pte) + return false; + if (level != PG_LEVEL_4K) + return false; + if (!pte_hidden(*pte)) + return false; + + if (error_code & 2) + kmemcheck_access(regs, address, KMEMCHECK_WRITE); + else + kmemcheck_access(regs, address, KMEMCHECK_READ); + + kmemcheck_show(regs); + return true; +} diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c new file mode 100644 index 000000000000..be0c8b7be0d3 --- /dev/null +++ b/arch/x86/mm/kmemcheck/opcode.c @@ -0,0 +1,70 @@ +#include + +#include "opcode.h" + +static bool opcode_is_prefix(uint8_t b) +{ + return + /* Group 1 */ + b == 0xf0 || b == 0xf2 || b == 0xf3 + /* Group 2 */ + || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26 + || b == 0x64 || b == 0x65 || b == 0x2e || b == 0x3e + /* Group 3 */ + || b == 0x66 + /* Group 4 */ + || b == 0x67; +} + +static bool opcode_is_rex_prefix(uint8_t b) +{ + return (b & 0xf0) == 0x40; +} + +/* This is a VERY crude opcode decoder. We only need to find the size of the + * load/store that caused our #PF and this should work for all the opcodes + * that we care about. Moreover, the ones who invented this instruction set + * should be shot. */ +unsigned int kmemcheck_opcode_get_size(const uint8_t *op) +{ + /* Default operand size */ + int operand_size_override = 32; + + /* prefixes */ + for (; opcode_is_prefix(*op); ++op) { + if (*op == 0x66) + operand_size_override = 16; + } + +#ifdef CONFIG_X86_64 + /* REX prefix */ + if (opcode_is_rex_prefix(*op)) { + if (*op & 0x08) + return 64; + ++op; + } +#endif + + /* escape opcode */ + if (*op == 0x0f) { + ++op; + + if (*op == 0xb6) + return 8; + if (*op == 0xb7) + return 16; + } + + return (*op & 1) ? 
operand_size_override : 8; +} + +const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op) +{ + /* skip prefixes */ + while (opcode_is_prefix(*op)) + ++op; + if (opcode_is_rex_prefix(*op)) + ++op; + return op; +} + diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h new file mode 100644 index 000000000000..a19b8fa37660 --- /dev/null +++ b/arch/x86/mm/kmemcheck/opcode.h @@ -0,0 +1,9 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H +#define ARCH__X86__MM__KMEMCHECK__OPCODE_H + +#include + +unsigned int kmemcheck_opcode_get_size(const uint8_t *op); +const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op); + +#endif diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c new file mode 100644 index 000000000000..4ead26eeaf96 --- /dev/null +++ b/arch/x86/mm/kmemcheck/pte.c @@ -0,0 +1,22 @@ +#include + +#include + +#include "pte.h" + +pte_t *kmemcheck_pte_lookup(unsigned long address) +{ + pte_t *pte; + unsigned int level; + + pte = lookup_address(address, &level); + if (!pte) + return NULL; + if (level != PG_LEVEL_4K) + return NULL; + if (!pte_hidden(*pte)) + return NULL; + + return pte; +} + diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h new file mode 100644 index 000000000000..9f5966456492 --- /dev/null +++ b/arch/x86/mm/kmemcheck/pte.h @@ -0,0 +1,10 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H +#define ARCH__X86__MM__KMEMCHECK__PTE_H + +#include + +#include + +pte_t *kmemcheck_pte_lookup(unsigned long address); + +#endif diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c new file mode 100644 index 000000000000..07ed3d619d72 --- /dev/null +++ b/arch/x86/mm/kmemcheck/shadow.c @@ -0,0 +1,174 @@ +#include + +#include +#include + +#include "pte.h" +#include "shadow.h" + +/* + * Return the shadow address for the given address. Returns NULL if the + * address is not tracked. + * + * We need to be extremely careful not to follow any invalid pointers, + * because this function can be called for *any* possible address. + */ +void *kmemcheck_shadow_lookup(unsigned long address) +{ + pte_t *pte; + struct page *page; + + if (!virt_addr_valid(address)) + return NULL; + + pte = kmemcheck_pte_lookup(address); + if (!pte) + return NULL; + + page = virt_to_page(address); + if (!page->shadow) + return NULL; + return page->shadow + (address & (PAGE_SIZE - 1)); +} + +static void mark_shadow(void *address, unsigned int n, + enum kmemcheck_shadow status) +{ + void *shadow; + + shadow = kmemcheck_shadow_lookup((unsigned long) address); + if (!shadow) + return; + __memset(shadow, status, n); +} + +void kmemcheck_mark_unallocated(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED); +} + +void kmemcheck_mark_uninitialized(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED); +} + +/* + * Fill the shadow memory of the given address such that the memory at that + * address is marked as being initialized. 
+ */ +void kmemcheck_mark_initialized(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED); +} + +void kmemcheck_mark_freed(void *address, unsigned int n) +{ + mark_shadow(address, n, KMEMCHECK_SHADOW_FREED); +} + +void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) + kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE); +} + +void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; ++i) + kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE); +} + +enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size) +{ + uint8_t *x; + + x = shadow; + +#ifdef CONFIG_KMEMCHECK_PARTIAL_OK + /* + * Make sure _some_ bytes are initialized. Gcc frequently generates + * code to access neighboring bytes. + */ + switch (size) { +#ifdef CONFIG_X86_64 + case 64: + if (x[7] == KMEMCHECK_SHADOW_INITIALIZED) + return x[7]; + if (x[6] == KMEMCHECK_SHADOW_INITIALIZED) + return x[6]; + if (x[5] == KMEMCHECK_SHADOW_INITIALIZED) + return x[5]; + if (x[4] == KMEMCHECK_SHADOW_INITIALIZED) + return x[4]; +#endif + case 32: + if (x[3] == KMEMCHECK_SHADOW_INITIALIZED) + return x[3]; + if (x[2] == KMEMCHECK_SHADOW_INITIALIZED) + return x[2]; + case 16: + if (x[1] == KMEMCHECK_SHADOW_INITIALIZED) + return x[1]; + case 8: + if (x[0] == KMEMCHECK_SHADOW_INITIALIZED) + return x[0]; + } +#else + switch (size) { +#ifdef CONFIG_X86_64 + case 64: + if (x[7] != KMEMCHECK_SHADOW_INITIALIZED) + return x[7]; + if (x[6] != KMEMCHECK_SHADOW_INITIALIZED) + return x[6]; + if (x[5] != KMEMCHECK_SHADOW_INITIALIZED) + return x[5]; + if (x[4] != KMEMCHECK_SHADOW_INITIALIZED) + return x[4]; +#endif + case 32: + if (x[3] != KMEMCHECK_SHADOW_INITIALIZED) + return x[3]; + if (x[2] != KMEMCHECK_SHADOW_INITIALIZED) + return x[2]; + case 16: + if (x[1] != KMEMCHECK_SHADOW_INITIALIZED) + return x[1]; + case 8: + if (x[0] != KMEMCHECK_SHADOW_INITIALIZED) + return x[0]; + } +#endif + + return x[0]; +} + +void kmemcheck_shadow_set(void *shadow, unsigned int size) +{ + uint8_t *x; + + x = shadow; + + switch (size) { +#ifdef CONFIG_X86_64 + case 64: + x[7] = KMEMCHECK_SHADOW_INITIALIZED; + x[6] = KMEMCHECK_SHADOW_INITIALIZED; + x[5] = KMEMCHECK_SHADOW_INITIALIZED; + x[4] = KMEMCHECK_SHADOW_INITIALIZED; +#endif + case 32: + x[3] = KMEMCHECK_SHADOW_INITIALIZED; + x[2] = KMEMCHECK_SHADOW_INITIALIZED; + case 16: + x[1] = KMEMCHECK_SHADOW_INITIALIZED; + case 8: + x[0] = KMEMCHECK_SHADOW_INITIALIZED; + } + + return; +} diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h new file mode 100644 index 000000000000..af46d9ab9d86 --- /dev/null +++ b/arch/x86/mm/kmemcheck/shadow.h @@ -0,0 +1,16 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H +#define ARCH__X86__MM__KMEMCHECK__SHADOW_H + +enum kmemcheck_shadow { + KMEMCHECK_SHADOW_UNALLOCATED, + KMEMCHECK_SHADOW_UNINITIALIZED, + KMEMCHECK_SHADOW_INITIALIZED, + KMEMCHECK_SHADOW_FREED, +}; + +void *kmemcheck_shadow_lookup(unsigned long address); + +enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size); +void kmemcheck_shadow_set(void *shadow, unsigned int size); + +#endif diff --git a/arch/x86/mm/kmemcheck/smp.c b/arch/x86/mm/kmemcheck/smp.c new file mode 100644 index 000000000000..c4ff615b16db --- /dev/null +++ b/arch/x86/mm/kmemcheck/smp.c @@ -0,0 +1,79 @@ +#include +#include +#include + +#include + +#include "smp.h" + +static spinlock_t nmi_spinlock; + +static atomic_t nmi_wait; 
+static atomic_t nmi_resume; +static atomic_t paused; + +static int nmi_notifier(struct notifier_block *self, + unsigned long val, void *data) +{ + if (val != DIE_NMI_IPI || !atomic_read(&nmi_wait)) + return NOTIFY_DONE; + + atomic_inc(&paused); + + /* Pause until the fault has been handled */ + while (!atomic_read(&nmi_resume)) + cpu_relax(); + + atomic_dec(&paused); + + return NOTIFY_STOP; +} + +static struct notifier_block nmi_nb = { + .notifier_call = &nmi_notifier, +}; + +void kmemcheck_smp_init(void) +{ + int err; + + err = register_die_notifier(&nmi_nb); + BUG_ON(err); +} + +void kmemcheck_pause_allbutself(void) +{ + int cpus; + cpumask_t mask = cpu_online_map; + + spin_lock(&nmi_spinlock); + + cpus = num_online_cpus() - 1; + + atomic_set(&paused, 0); + atomic_set(&nmi_wait, 1); + atomic_set(&nmi_resume, 0); + + cpu_clear(safe_smp_processor_id(), mask); + if (!cpus_empty(mask)) + send_IPI_mask(mask, NMI_VECTOR); + + while (atomic_read(&paused) != cpus) + cpu_relax(); + + atomic_set(&nmi_wait, 0); +} + +void kmemcheck_resume(void) +{ + int cpus; + + cpus = num_online_cpus() - 1; + + atomic_set(&nmi_resume, 1); + + while (atomic_read(&paused) != 0) + cpu_relax(); + + spin_unlock(&nmi_spinlock); +} diff --git a/arch/x86/mm/kmemcheck/smp.h b/arch/x86/mm/kmemcheck/smp.h new file mode 100644 index 000000000000..dc65f16e3ac6 --- /dev/null +++ b/arch/x86/mm/kmemcheck/smp.h @@ -0,0 +1,23 @@ +#ifndef ARCH__X86__MM__KMEMCHECK__SMP_H +#define ARCH__X86__MM__KMEMCHECK__SMP_H + +#ifdef CONFIG_KMEMCHECK_USE_SMP +void kmemcheck_smp_init(void); + +void kmemcheck_pause_allbutself(void); +void kmemcheck_resume(void); +#else +static inline void kmemcheck_smp_init(void) +{ +} + +static inline void kmemcheck_pause_allbutself(void) +{ +} + +static inline void kmemcheck_resume(void) +{ +} +#endif + +#endif diff --git a/arch/x86/mm/kmemcheck/string.c b/arch/x86/mm/kmemcheck/string.c new file mode 100644 index 000000000000..0d21d227ecba --- /dev/null +++ b/arch/x86/mm/kmemcheck/string.c @@ -0,0 +1,91 @@ +#include +#include +#include +#include +#include +#include + +#include "shadow.h" +#include "smp.h" + +/* + * A faster implementation of memset() when tracking is enabled where the + * whole memory area is within a single page. + */ +static void memset_one_page(void *s, int c, size_t n) +{ + unsigned long addr; + void *x; + unsigned long flags; + + addr = (unsigned long) s; + + x = kmemcheck_shadow_lookup(addr); + if (!x) { + /* The page isn't being tracked. */ + __memset(s, c, n); + return; + } + + /* While we are not guarding the page in question, nobody else + * should be able to change them. */ + local_irq_save(flags); + + kmemcheck_pause_allbutself(); + kmemcheck_show_addr(addr); + __memset(s, c, n); + __memset(x, KMEMCHECK_SHADOW_INITIALIZED, n); + if (kmemcheck_enabled) + kmemcheck_hide_addr(addr); + kmemcheck_resume(); + + local_irq_restore(flags); +} + +/* + * A faster implementation of memset() when tracking is enabled. We cannot + * assume that all pages within the range are tracked, so copying has to be + * split into page-sized (or smaller, for the ends) chunks. 
+ */ +void *kmemcheck_memset(void *s, int c, size_t n) +{ + unsigned long addr; + unsigned long start_page, start_offset; + unsigned long end_page, end_offset; + unsigned long i; + + if (!n) + return s; + + if (!slab_is_available()) { + __memset(s, c, n); + return s; + } + + addr = (unsigned long) s; + + start_page = addr & PAGE_MASK; + end_page = (addr + n) & PAGE_MASK; + + if (start_page == end_page) { + /* The entire area is within the same page. Good, we only + * need one memset(). */ + memset_one_page(s, c, n); + return s; + } + + start_offset = addr & ~PAGE_MASK; + end_offset = (addr + n) & ~PAGE_MASK; + + /* Clear the head, body, and tail of the memory area. */ + if (start_offset < PAGE_SIZE) + memset_one_page(s, c, PAGE_SIZE - start_offset); + for (i = start_page + PAGE_SIZE; i < end_page; i += PAGE_SIZE) + memset_one_page((void *) i, c, PAGE_SIZE); + if (end_offset > 0) + memset_one_page((void *) end_page, c, end_offset); + + return s; +} + +EXPORT_SYMBOL(kmemcheck_memset); diff --git a/include/asm-x86/kmemcheck.h b/include/asm-x86/kmemcheck.h new file mode 100644 index 000000000000..f625398a3612 --- /dev/null +++ b/include/asm-x86/kmemcheck.h @@ -0,0 +1,36 @@ +#ifndef ASM_X86_KMEMCHECK_H +#define ASM_X86_KMEMCHECK_H + +#include +#include + +#ifdef CONFIG_KMEMCHECK +bool kmemcheck_active(struct pt_regs *regs); + +void kmemcheck_show(struct pt_regs *regs); +void kmemcheck_hide(struct pt_regs *regs); + +bool kmemcheck_fault(struct pt_regs *regs, + unsigned long address, unsigned long error_code); +#else +static inline bool kmemcheck_active(struct pt_regs *regs) +{ + return false; +} + +static inline void kmemcheck_show(struct pt_regs *regs) +{ +} + +static inline void kmemcheck_hide(struct pt_regs *regs) +{ +} + +static inline bool kmemcheck_fault(struct pt_regs *regs, + unsigned long address, unsigned long error_code) +{ + return false; +} +#endif /* CONFIG_KMEMCHECK */ + +#endif diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h index 97c271b2910b..0333c351f9a7 100644 --- a/include/asm-x86/pgtable.h +++ b/include/asm-x86/pgtable.h @@ -16,7 +16,7 @@ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ #define _PAGE_BIT_UNUSED1 9 /* available for programmer */ #define _PAGE_BIT_UNUSED2 10 -#define _PAGE_BIT_UNUSED3 11 +#define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ @@ -36,9 +36,9 @@ #define _PAGE_GLOBAL (_AC(1, L)<<_PAGE_BIT_GLOBAL) /* Global TLB entry */ #define _PAGE_UNUSED1 (_AC(1, L)<<_PAGE_BIT_UNUSED1) #define _PAGE_UNUSED2 (_AC(1, L)<<_PAGE_BIT_UNUSED2) -#define _PAGE_UNUSED3 (_AC(1, L)<<_PAGE_BIT_UNUSED3) #define _PAGE_PAT (_AC(1, L)<<_PAGE_BIT_PAT) #define _PAGE_PAT_LARGE (_AC(1, L)<<_PAGE_BIT_PAT_LARGE) +#define _PAGE_HIDDEN (_AC(1, L)<<_PAGE_BIT_HIDDEN) #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX) diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h index 32ca03109a4c..d7248132caf4 100644 --- a/include/asm-x86/pgtable_32.h +++ b/include/asm-x86/pgtable_32.h @@ -85,6 +85,12 @@ extern unsigned long pg0[]; #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) +#ifdef CONFIG_KMEMCHECK +#define pte_hidden(x) ((x).pte_low & (_PAGE_HIDDEN)) +#else +#define pte_hidden(x) 0 +#endif + /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ #define pmd_none(x) (!(unsigned long)pmd_val((x))) #define pmd_present(x) 
(pmd_val((x)) & _PAGE_PRESENT) diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h new file mode 100644 index 000000000000..3831c700bb8d --- /dev/null +++ b/include/linux/kmemcheck.h @@ -0,0 +1,21 @@ +#ifndef LINUX_KMEMCHECK_H +#define LINUX_KMEMCHECK_H + +#include + +#ifdef CONFIG_KMEMCHECK +extern int kmemcheck_enabled; + +void kmemcheck_init(void); + +int kmemcheck_show_addr(unsigned long address); +int kmemcheck_hide_addr(unsigned long address); +#else +#define kmemcheck_enabled 0 + +static inline void kmemcheck_init(void) +{ +} +#endif /* CONFIG_KMEMCHECK */ + +#endif /* LINUX_KMEMCHECK_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 02a27ae78539..f54e52add213 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -94,6 +94,10 @@ struct page { #ifdef CONFIG_CGROUP_MEM_RES_CTLR unsigned long page_cgroup; #endif + +#ifdef CONFIG_KMEMCHECK + void *shadow; +#endif }; /* diff --git a/init/main.c b/init/main.c index f7fb20021d48..39a9ec4d3e3a 100644 --- a/init/main.c +++ b/init/main.c @@ -60,6 +60,7 @@ #include #include #include +#include #include #include @@ -779,6 +780,7 @@ static void __init do_pre_smp_initcalls(void) { extern int spawn_ksoftirqd(void); + kmemcheck_init(); migration_init(); spawn_ksoftirqd(); if (!nosoftlockup) diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 29116652dca8..69a32c137e8b 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -813,6 +814,17 @@ static struct ctl_table kern_table[] = { .child = key_sysctls, }, #endif +#ifdef CONFIG_KMEMCHECK + { + .ctl_name = CTL_UNNUMBERED, + .procname = "kmemcheck", + .data = &kmemcheck_enabled, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif + /* * NOTE: do not add new entries to this table unless you have read * Documentation/sysctl/ctl_unnumbered.txt -- cgit v1.2.3 From 787ecfaa503dc63ff1831ddc74b15dad49bace1d Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Fri, 4 Apr 2008 00:53:23 +0200 Subject: x86: add hooks for kmemcheck The hooks that we modify are: - Page fault handler (to handle kmemcheck faults) - Debug exception handler (to hide pages after single-stepping the instruction that caused the page fault) Also redefine memset() to use the optimized version if kmemcheck is enabled. (Thanks to Pekka Enberg for minimizing the impact on the page fault handler.) Signed-off-by: Vegard Nossum Signed-off-by: Pekka Enberg Signed-off-by: Ingo Molnar --- arch/x86/kernel/traps_32.c | 9 +++++++++ arch/x86/mm/fault.c | 18 +++++++++++++++--- include/asm-x86/string_32.h | 8 ++++++++ 3 files changed, 32 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index a4739a818baa..4ad88dd06678 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c @@ -57,6 +57,7 @@ #include #include #include +#include #include "mach_traps.h" @@ -906,6 +907,14 @@ void __kprobes do_debug(struct pt_regs *regs, long error_code) get_debugreg(condition, 6); + /* Catch kmemcheck conditions first of all! */ + if (condition & DR_STEP) { + if (kmemcheck_active(regs)) { + kmemcheck_hide(regs); + return; + } + } + /* * The processor cleared BTF, so don't mark that we need it set. 
*/ diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 8bcb6f40ccb6..3717195c4b9f 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -33,6 +33,7 @@ #include #include #include +#include #include /* @@ -604,6 +605,13 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) si_code = SEGV_MAPERR; + /* + * Detect and handle instructions that would cause a page fault for + * both a tracked kernel page and a userspace page. + */ + if(kmemcheck_active(regs)) + kmemcheck_hide(regs); + if (notify_page_fault(regs)) return; @@ -625,9 +633,13 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) #else if (unlikely(address >= TASK_SIZE64)) { #endif - if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && - vmalloc_fault(address) >= 0) - return; + if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { + if (vmalloc_fault(address) >= 0) + return; + + if (kmemcheck_fault(regs, address, error_code)) + return; + } /* Can handle a stale RO->RW TLB */ if (spurious_fault(address, error_code)) diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h index b49369ad9a61..fade1855736b 100644 --- a/include/asm-x86/string_32.h +++ b/include/asm-x86/string_32.h @@ -262,6 +262,14 @@ __asm__ __volatile__( \ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \ __memset((s),(c),(count))) +/* If kmemcheck is enabled, our best bet is a custom memset() that disables + * checking in order to save a whole lot of (unnecessary) page faults. */ +#ifdef CONFIG_KMEMCHECK +void *kmemcheck_memset(void *s, int c, size_t n); +#undef memset +#define memset(s, c, n) kmemcheck_memset((s), (c), (n)) +#endif + /* * find the first occurrence of byte 'c', or 1 past the area if none */ -- cgit v1.2.3 From 862849a36e6087faac6349de0b1bcc66ff98411b Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Fri, 4 Apr 2008 11:16:06 +0300 Subject: x86: add hooks for kmemcheck on x86_64 This makes the necessary changes for kmemcheck to work on x86_64. Signed-off-by: Pekka Enberg Signed-off-by: Ingo Molnar Signed-off-by: Vegard Nossum --- arch/x86/kernel/traps_64.c | 9 +++++++++ include/asm-x86/pgtable_64.h | 6 ++++++ include/asm-x86/string_64.h | 1 + 3 files changed, 16 insertions(+) diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index 552506ce497f..97a8f52c3ffe 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c @@ -53,6 +53,7 @@ #include #include #include +#include asmlinkage void divide_error(void); asmlinkage void debug(void); @@ -911,6 +912,14 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs, get_debugreg(condition, 6); + /* Catch kmemcheck conditions first of all! */ + if (condition & DR_STEP) { + if (kmemcheck_active(regs)) { + kmemcheck_hide(regs); + return; + } + } + /* * The processor cleared BTF, so don't mark that we need it set. */ diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 1cc50d22d735..7836ccc28cf2 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h @@ -169,6 +169,12 @@ static inline int pmd_bad(pmd_t pmd) #define pte_none(x) (!pte_val((x))) #define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE)) +#ifdef CONFIG_KMEMCHECK +#define pte_hidden(x) (pte_val((x)) & (_PAGE_HIDDEN)) +#else +#define pte_hidden(x) 0 +#endif + #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? 
*/ #define pte_page(x) pfn_to_page(pte_pfn((x))) #define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT) diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h index 52b5ab383395..49874fdb2c51 100644 --- a/include/asm-x86/string_64.h +++ b/include/asm-x86/string_64.h @@ -45,6 +45,7 @@ extern void *__memcpy(void *to, const void *from, size_t len); #define __HAVE_ARCH_MEMSET void *memset(void *s, int c, size_t n); +void *__memset(void *s, int c, size_t n); #define __HAVE_ARCH_MEMMOVE void *memmove(void *dest, const void *src, size_t count); -- cgit v1.2.3 From e6df1035b1b488cafde1e69f1a25f2706c3ac1f7 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sat, 31 May 2008 15:56:17 +0200 Subject: kmemcheck: add mm functions With kmemcheck enabled, the slab allocator needs to do this: 1. Tell kmemcheck to allocate the shadow memory which stores the status of each byte in the allocation proper, e.g. whether it is initialized or uninitialized. 2. Tell kmemcheck which parts of memory that should be marked uninitialized. There are actually a few more states, such as "not yet allocated" and "recently freed". If a slab cache is set up using the SLAB_NOTRACK flag, it will never return memory that can take page faults because of kmemcheck. If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still request memory with the __GFP_NOTRACK flag. This does not prevent the page faults from occuring, however, but marks the object in question as being initialized so that no warnings will ever be produced for this object. Parts of this patch were contributed by Pekka Enberg but merged for atomicity. Signed-off-by: Vegard Nossum Signed-off-by: Pekka Enberg Signed-off-by: Ingo Molnar --- arch/x86/kernel/process.c | 2 +- include/linux/gfp.h | 3 +- include/linux/kmemcheck.h | 48 +++++++++++++++++++++++ include/linux/slab.h | 7 ++++ kernel/fork.c | 16 ++++---- mm/Makefile | 2 +- mm/kmemcheck.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 164 insertions(+), 11 deletions(-) create mode 100644 mm/kmemcheck.c diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index ba370dc8685b..d61d452db5ea 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -42,7 +42,7 @@ void arch_task_cache_init(void) task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size, __alignof__(union thread_xstate), - SLAB_PANIC, NULL); + SLAB_PANIC | SLAB_NOTRACK, NULL); } static void do_nothing(void *unused) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index b414be387180..7c1db877d36c 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -50,8 +50,9 @@ struct vm_area_struct; #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */ #define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */ +#define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */ -#define __GFP_BITS_SHIFT 21 /* Room for 21 __GFP_FOO bits */ +#define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /* This equals 0, but use constants in case they ever change */ diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h index 3831c700bb8d..bc02c3fe5d8c 100644 --- a/include/linux/kmemcheck.h +++ b/include/linux/kmemcheck.h @@ -8,6 +8,27 @@ extern int kmemcheck_enabled; void kmemcheck_init(void); +/* The slab-related functions. 
*/ +void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, + struct page *page, int order); +void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order); +void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size); +void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size); + +void kmemcheck_show_pages(struct page *p, unsigned int n); +void kmemcheck_hide_pages(struct page *p, unsigned int n); + +bool kmemcheck_page_is_tracked(struct page *p); + +void kmemcheck_mark_unallocated(void *address, unsigned int n); +void kmemcheck_mark_uninitialized(void *address, unsigned int n); +void kmemcheck_mark_initialized(void *address, unsigned int n); +void kmemcheck_mark_freed(void *address, unsigned int n); + +void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n); +void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n); + int kmemcheck_show_addr(unsigned long address); int kmemcheck_hide_addr(unsigned long address); #else @@ -16,6 +37,33 @@ int kmemcheck_hide_addr(unsigned long address); static inline void kmemcheck_init(void) { } + +static inline void +kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, + struct page *page, int order) +{ +} + +static inline void +kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order) +{ +} + +static inline void +kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size) +{ +} + +static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object, + size_t size) +{ +} + +static inline bool kmemcheck_page_is_tracked(struct page *p) +{ + return false; +} #endif /* CONFIG_KMEMCHECK */ #endif /* LINUX_KMEMCHECK_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h index c2ad35016599..a47900aac5cc 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -34,6 +34,13 @@ # define SLAB_DEBUG_OBJECTS 0x00000000UL #endif +/* Don't track use of uninitialized memory */ +#ifdef CONFIG_KMEMCHECK +# define SLAB_NOTRACK 0x00800000UL +#else +# define SLAB_NOTRACK 0x00000000UL +#endif + /* The following flags affect the page allocator grouping pages by mobility */ #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ diff --git a/kernel/fork.c b/kernel/fork.c index 19908b26cf80..25c2aa3294f5 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -150,7 +150,7 @@ void __init fork_init(unsigned long mempages) /* create a slab on which task_structs can be allocated */ task_struct_cachep = kmem_cache_create("task_struct", sizeof(struct task_struct), - ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL); + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL); #endif /* do the arch specific task caches init */ @@ -1415,23 +1415,23 @@ void __init proc_caches_init(void) { sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU, - sighand_ctor); + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU| + SLAB_NOTRACK, sighand_ctor); signal_cachep = kmem_cache_create("signal_cache", sizeof(struct signal_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); files_cachep = kmem_cache_create("files_cache", sizeof(struct files_struct), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); fs_cachep = kmem_cache_create("fs_cache", sizeof(struct fs_struct), 0, 
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); vm_area_cachep = kmem_cache_create("vm_area_struct", sizeof(struct vm_area_struct), 0, - SLAB_PANIC, NULL); + SLAB_PANIC|SLAB_NOTRACK, NULL); mm_cachep = kmem_cache_create("mm_struct", sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL); } /* diff --git a/mm/Makefile b/mm/Makefile index 18c143b3c46c..4801918f63ed 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -27,10 +27,10 @@ obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o obj-$(CONFIG_SLOB) += slob.o obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_SLUB) += slub.o +obj-$(CONFIG_KMEMCHECK) += kmemcheck.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_FS_XIP) += filemap_xip.o obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_SMP) += allocpercpu.o obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o - diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c new file mode 100644 index 000000000000..4efdf1ef545b --- /dev/null +++ b/mm/kmemcheck.c @@ -0,0 +1,97 @@ +#include +#include +#include +#include + +void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, + struct page *page, int order) +{ + struct page *shadow; + int pages; + int i; + + pages = 1 << order; + + /* + * With kmemcheck enabled, we need to allocate a memory area for the + * shadow bits as well. + */ + shadow = alloc_pages_node(node, flags, order); + if (!shadow) { + if (printk_ratelimit()) + printk(KERN_ERR "kmemcheck: failed to allocate " + "shadow bitmap\n"); + return; + } + + for(i = 0; i < pages; ++i) + page[i].shadow = page_address(&shadow[i]); + + /* + * Mark it as non-present for the MMU so that our accesses to + * this memory will trigger a page fault and let us analyze + * the memory accesses. + */ + kmemcheck_hide_pages(page, pages); + + /* + * Objects from caches that have a constructor don't get + * cleared when they're allocated, so we need to do it here. + */ + if (s->ctor) + kmemcheck_mark_uninitialized_pages(page, pages); + else + kmemcheck_mark_unallocated_pages(page, pages); +} + +void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order) +{ + struct page *shadow; + int pages; + int i; + + pages = 1 << order; + + kmemcheck_show_pages(page, pages); + + shadow = virt_to_page(page[0].shadow); + + for(i = 0; i < pages; ++i) + page[i].shadow = NULL; + + __free_pages(shadow, order); +} + +void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size) +{ + if (gfpflags & __GFP_ZERO) + return; + if (s->flags & SLAB_NOTRACK) + return; + + if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) { + /* + * Allow notracked objects to be allocated from + * tracked caches. Note however that these objects + * will still get page faults on access, they just + * won't ever be flagged as uninitialized. If page + * faults are not acceptable, the slab cache itself + * should be marked NOTRACK. + */ + kmemcheck_mark_initialized(object, size); + } else if (!s->ctor) { + /* + * New objects should be marked uninitialized before + * they're returned to the called. + */ + kmemcheck_mark_uninitialized(object, size); + } +} + +void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size) +{ + /* TODO: RCU freeing is unsupported for now; hide false positives. 
*/ + if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU)) + kmemcheck_mark_freed(object, size); +} -- cgit v1.2.3 From 18fd427debcf37c06917b55295df682fd05fee76 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Fri, 4 Apr 2008 00:54:48 +0200 Subject: slub: add hooks for kmemcheck Parts of this patch were contributed by Pekka Enberg but merged for atomicity. Cc: Christoph Lameter Signed-off-by: Vegard Nossum Signed-off-by: Pekka Enberg Signed-off-by: Ingo Molnar --- mm/slub.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 0987d1cd943c..def86b4d4010 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -23,6 +23,7 @@ #include #include #include +#include /* * Lock order: @@ -174,7 +175,7 @@ static inline void ClearSlabDebug(struct page *page) SLAB_TRACE | SLAB_DESTROY_BY_RCU) #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ - SLAB_CACHE_DMA) + SLAB_CACHE_DMA | SLAB_NOTRACK) #ifndef ARCH_KMALLOC_MINALIGN #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) @@ -1122,6 +1123,10 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK); } + + if (kmemcheck_enabled && !(s->flags & SLAB_NOTRACK)) + kmemcheck_alloc_shadow(s, flags, node, page, compound_order(page)); + page->objects = oo_objects(oo); mod_zone_page_state(page_zone(page), (s->flags & SLAB_RECLAIM_ACCOUNT) ? @@ -1195,6 +1200,9 @@ static void __free_slab(struct kmem_cache *s, struct page *page) ClearSlabDebug(page); } + if (kmemcheck_page_is_tracked(page) && !(s->flags & SLAB_NOTRACK)) + kmemcheck_free_shadow(s, page, compound_order(page)); + mod_zone_page_state(page_zone(page), (s->flags & SLAB_RECLAIM_ACCOUNT) ? NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, @@ -1645,6 +1653,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, if (unlikely((gfpflags & __GFP_ZERO) && object)) memset(object, 0, c->objsize); + kmemcheck_slab_alloc(s, gfpflags, object, c->objsize); return object; } @@ -1749,6 +1758,7 @@ static __always_inline void slab_free(struct kmem_cache *s, local_irq_save(flags); c = get_cpu_slab(s, smp_processor_id()); + kmemcheck_slab_free(s, object, c->objsize); debug_check_no_locks_freed(object, c->objsize); if (!(s->flags & SLAB_DEBUG_OBJECTS)) debug_check_no_obj_freed(object, s->objsize); @@ -2600,7 +2610,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags) if (!s || !text || !kmem_cache_open(s, flags, text, realsize, ARCH_KMALLOC_MINALIGN, - SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) { + SLAB_CACHE_DMA|SLAB_NOTRACK|__SYSFS_ADD_DEFERRED, + NULL)) { kfree(s); kfree(text); goto unlock_out; @@ -4298,6 +4309,8 @@ static char *create_unique_id(struct kmem_cache *s) *p++ = 'a'; if (s->flags & SLAB_DEBUG_FREE) *p++ = 'F'; + if (!(s->flags & SLAB_NOTRACK)) + *p++ = 't'; if (p != name + 1) *p++ = '-'; p += sprintf(p, "%07d", s->size); -- cgit v1.2.3 From 30532cb3c49a2a9fed94127aab26003c52398a51 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Fri, 9 May 2008 20:35:53 +0200 Subject: slab: add hooks for kmemcheck We now have SLAB support for kmemcheck! This means that it doesn't matter whether one chooses SLAB or SLUB, or indeed whether Linus chooses to chuck SLAB or SLUB.. 
;-) Cc: Ingo Molnar Cc: Christoph Lameter Cc: Andrew Morton Signed-off-by: Pekka Enberg Signed-off-by: Vegard Nossum --- mm/slab.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/mm/slab.c b/mm/slab.c index 7a464e6e392d..d1e2785d723b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -111,6 +111,7 @@ #include #include #include +#include #include #include @@ -176,13 +177,13 @@ SLAB_STORE_USER | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ - SLAB_DEBUG_OBJECTS) + SLAB_DEBUG_OBJECTS | SLAB_NOTRACK) #else # define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ SLAB_CACHE_DMA | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ - SLAB_DEBUG_OBJECTS) + SLAB_DEBUG_OBJECTS | SLAB_NOTRACK) #endif /* @@ -1611,6 +1612,10 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid) NR_SLAB_UNRECLAIMABLE, nr_pages); for (i = 0; i < nr_pages; i++) __SetPageSlab(page + i); + + if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) + kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder); + return page_address(page); } @@ -1623,6 +1628,9 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr) struct page *page = virt_to_page(addr); const unsigned long nr_freed = i; + if (kmemcheck_page_is_tracked(page) && !(cachep->flags & SLAB_NOTRACK)) + kmemcheck_free_shadow(cachep, page, cachep->gfporder); + if (cachep->flags & SLAB_RECLAIM_ACCOUNT) sub_zone_page_state(page_zone(page), NR_SLAB_RECLAIMABLE, nr_freed); @@ -3337,6 +3345,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, local_irq_restore(save_flags); ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); + if (likely(ptr)) + kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep)); + if (unlikely((flags & __GFP_ZERO) && ptr)) memset(ptr, 0, obj_size(cachep)); @@ -3391,6 +3402,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); prefetchw(objp); + if (likely(objp)) + kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep)); + if (unlikely((flags & __GFP_ZERO) && objp)) memset(objp, 0, obj_size(cachep)); @@ -3506,6 +3520,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp) check_irq_off(); objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); + kmemcheck_slab_free(cachep, objp, obj_size(cachep)); + /* * Skip calling cache_free_alien() when the platform is not numa. * This will avoid cache misses that happen while accessing slabp (which -- cgit v1.2.3 From 233784e4a3a8f3a584787764dae677342138b31e Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Fri, 4 Apr 2008 00:51:41 +0200 Subject: kmemcheck: enable in the x86 Kconfig let it rip! 
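To give a feel for what the option catches once it is enabled, here is a small made-up example (not part of the patch): with kmemcheck compiled in and active, the read of the never-written field below hits a hidden slab page, faults, and gets reported as a use of uninitialized memory.

    #include <linux/slab.h>

    struct demo {
            int a;
            int b;
    };

    static int demo_sum(void)
    {
            struct demo *d = kmalloc(sizeof(*d), GFP_KERNEL);
            int sum;

            if (!d)
                    return -ENOMEM;

            d->a = 1;               /* d->b is deliberately left unwritten */
            sum = d->a + d->b;      /* kmemcheck reports this read of d->b */

            kfree(d);
            return sum;
    }
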
Signed-off-by: Pekka Enberg Signed-off-by: Ingo Molnar Signed-off-by: Vegard Nossum --- arch/x86/Kconfig.debug | 108 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 18363374d51a..eeeb5225778b 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -246,6 +246,114 @@ config DEFAULT_IO_DELAY_TYPE default IO_DELAY_TYPE_NONE endif +menuconfig KMEMCHECK + bool "kmemcheck: trap use of uninitialized memory" + depends on X86 + depends on !X86_USE_3DNOW + depends on SLUB || SLAB + depends on !CC_OPTIMIZE_FOR_SIZE + depends on !DEBUG_PAGEALLOC + select FRAME_POINTER + select STACKTRACE + default n + help + This option enables tracing of dynamically allocated kernel memory + to see if memory is used before it has been given an initial value. + Be aware that this requires half of your memory for bookkeeping and + will insert extra code at *every* read and write to tracked memory + thus slow down the kernel code (but user code is unaffected). + + The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable + or enable kmemcheck at boot-time. If the kernel is started with + kmemcheck=0, the large memory and CPU overhead is not incurred. + +choice + prompt "kmemcheck: default mode at boot" + depends on KMEMCHECK + default KMEMCHECK_ONESHOT_BY_DEFAULT + help + This option controls the default behaviour of kmemcheck when the + kernel boots and no kmemcheck= parameter is given. + +config KMEMCHECK_DISABLED_BY_DEFAULT + bool "disabled" + depends on KMEMCHECK + +config KMEMCHECK_ENABLED_BY_DEFAULT + bool "enabled" + depends on KMEMCHECK + +config KMEMCHECK_ONESHOT_BY_DEFAULT + bool "one-shot" + depends on KMEMCHECK + help + In one-shot mode, only the first error detected is reported before + kmemcheck is disabled. + +endchoice + +config KMEMCHECK_USE_SMP + bool "kmemcheck: use multiple CPUs" + depends on KMEMCHECK + depends on SMP + default n + help + This option will prevent kmemcheck from disabling all but one CPU + on boot. This means that whenever a page fault is taken, all the + other CPUs in the system are halted. This is potentially extremely + expensive, depending on the number of CPUs in the system (the more + the worse). + + The upside is that kmemcheck can be compiled into the kernel with + very little overhead by default if kmemcheck is disabled at run- + time. + + If you want to compile a kernel specifically for the purpose of + playing with kmemcheck, you should say n here. If you want a normal + kernel with the possibility of enabling kmemcheck without + recompiling, you should say y here. + +config KMEMCHECK_QUEUE_SIZE + int "kmemcheck: error queue size" + depends on KMEMCHECK + default 64 + help + Select the maximum number of errors to store in the queue. This + queue will be emptied once every second, so this is effectively a + limit on how many reports to print in one go. Note however, that + if the number of errors occuring between two bursts is larger than + this number, the extra error reports will get lost. + +config KMEMCHECK_SHADOW_COPY_SHIFT + int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)" + depends on KMEMCHECK + range 2 8 + default 6 + help + Select the number of shadow bytes to save along with each entry of + the queue. These bytes indicate what parts of an allocation are + initialized, uninitialized, etc. and will be displayed when an + error is detected to help the debugging of a particular problem. 
+ +config KMEMCHECK_PARTIAL_OK + bool "kmemcheck: allow partially uninitialized memory" + depends on KMEMCHECK + default y + help + This option works around certain GCC optimizations that produce + 32-bit reads from 16-bit variables where the upper 16 bits are + thrown away afterwards. This may of course also hide some real + bugs. + +config KMEMCHECK_BITOPS_OK + bool "kmemcheck: allow bit-field manipulation" + depends on KMEMCHECK + default n + help + This option silences warnings that would be generated for bit-field + accesses where not all the bits are initialized at the same time. + This may also hide some real bugs. + config DEBUG_BOOT_PARAMS bool "Debug boot parameters" depends on DEBUG_KERNEL -- cgit v1.2.3 From 1e85e220bd9cb79d9956ce71d5c197b260b3500a Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Tue, 1 Jul 2008 10:45:51 +0200 Subject: kmemcheck: fix sparse warnings This patch adds some missing header files (with declarations) and makes one function static. Signed-off-by: Vegard Nossum --- arch/x86/mm/kmemcheck/error.c | 1 + arch/x86/mm/kmemcheck/kmemcheck.c | 2 +- arch/x86/mm/kmemcheck/shadow.c | 1 + include/linux/kmemcheck.h | 1 + 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c index 9261f9c48740..56410c63b465 100644 --- a/arch/x86/mm/kmemcheck/error.c +++ b/arch/x86/mm/kmemcheck/error.c @@ -6,6 +6,7 @@ #include #include +#include "error.h" #include "shadow.h" enum kmemcheck_error_type { diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 0c0201b17f2b..342151f0d781 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -348,7 +348,7 @@ enum kmemcheck_method { KMEMCHECK_WRITE, }; -void kmemcheck_access(struct pt_regs *regs, +static void kmemcheck_access(struct pt_regs *regs, unsigned long fallback_address, enum kmemcheck_method fallback_method) { const uint8_t *insn; diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c index 07ed3d619d72..0cb144f14b60 100644 --- a/arch/x86/mm/kmemcheck/shadow.c +++ b/arch/x86/mm/kmemcheck/shadow.c @@ -1,3 +1,4 @@ +#include #include #include diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h index bc02c3fe5d8c..b2d83efaabc6 100644 --- a/include/linux/kmemcheck.h +++ b/include/linux/kmemcheck.h @@ -1,6 +1,7 @@ #ifndef LINUX_KMEMCHECK_H #define LINUX_KMEMCHECK_H +#include #include #ifdef CONFIG_KMEMCHECK -- cgit v1.2.3 From 9ae6ef1ca46de6bfc35c7414b14efeac6c95c5e3 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Tue, 1 Jul 2008 10:47:02 +0200 Subject: softirq: raise the right softirq Without this patch, tasklet_hi_schedule_first() would raise the wrong softirq and we relied upon somebody else using tasklet_hi_schedule() in order to get our tasklet scheduled. 
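As a hypothetical illustration (the function and names below are made up, and it assumes the __tasklet_hi_schedule_first() helper used by this series is declared in <linux/interrupt.h>): a tasklet queued this way lands on tasklet_hi_vec, which is only drained when HI_SOFTIRQ runs, so raising TASKLET_SOFTIRQ instead left the handler sitting on the list until an unrelated tasklet_hi_schedule() call happened to come along.

    #include <linux/interrupt.h>
    #include <linux/kernel.h>

    static void demo_report(unsigned long data)
    {
            printk(KERN_INFO "deferred report runs from HI_SOFTIRQ context\n");
    }
    static DECLARE_TASKLET(demo_tasklet, demo_report, 0);

    static void demo_queue(void)
    {
            /* Called with interrupts disabled, as kmemcheck does from the fault path */
            __tasklet_hi_schedule_first(&demo_tasklet);
    }
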
Signed-off-by: Vegard Nossum --- kernel/softirq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/softirq.c b/kernel/softirq.c index 44cf21f8cf51..23dc8891ce47 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -399,7 +399,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) t->next = __get_cpu_var(tasklet_hi_vec).head; __get_cpu_var(tasklet_hi_vec).head = t; - __raise_softirq_irqoff(TASKLET_SOFTIRQ); + __raise_softirq_irqoff(HI_SOFTIRQ); } EXPORT_SYMBOL(__tasklet_hi_schedule_first); -- cgit v1.2.3 From ee53ed996bb2b50ed36471c160be2c9547f95052 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Tue, 1 Jul 2008 11:48:26 +0200 Subject: kmemcheck: use the proper comment style Signed-off-by: Vegard Nossum --- arch/x86/mm/kmemcheck/kmemcheck.c | 24 ++++++++++++++++-------- arch/x86/mm/kmemcheck/opcode.c | 6 ++++-- arch/x86/mm/kmemcheck/string.c | 12 ++++++++---- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 342151f0d781..37949c3a5859 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -39,8 +39,10 @@ void __init kmemcheck_init(void) kmemcheck_smp_init(); #if defined(CONFIG_SMP) && !defined(CONFIG_KMEMCHECK_USE_SMP) - /* Limit SMP to use a single CPU. We rely on the fact that this code - * runs before SMP is set up. */ + /* + * Limit SMP to use a single CPU. We rely on the fact that this code + * runs before SMP is set up. + */ if (setup_max_cpus > 1) { printk(KERN_INFO "kmemcheck: Limiting number of CPUs to 1.\n"); @@ -144,8 +146,10 @@ void kmemcheck_show(struct pt_regs *regs) n += kmemcheck_show_addr(data->addr1); n += kmemcheck_show_addr(data->addr2); - /* None of the addresses actually belonged to kmemcheck. Note that - * this is not an error. */ + /* + * None of the addresses actually belonged to kmemcheck. Note that + * this is not an error. + */ if (n == 0) { kmemcheck_resume(); return; @@ -414,8 +418,10 @@ static void kmemcheck_access(struct pt_regs *regs, /* MOVS, MOVSB, MOVSW, MOVSD */ case 0xa4: case 0xa5: - /* These instructions are special because they take two - * addresses, but we only get one page fault. */ + /* + * These instructions are special because they take two + * addresses, but we only get one page fault. + */ kmemcheck_read(regs, regs->si, size); kmemcheck_write(regs, regs->di, size); data->addr1 = regs->si; @@ -434,9 +440,11 @@ static void kmemcheck_access(struct pt_regs *regs, return; } - /* If the opcode isn't special in any way, we use the data from the + /* + * If the opcode isn't special in any way, we use the data from the * page fault handler to determine the address and type of memory - * access. */ + * access. + */ switch (fallback_method) { case KMEMCHECK_READ: kmemcheck_read(regs, fallback_address, size); diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c index be0c8b7be0d3..194aeee366a9 100644 --- a/arch/x86/mm/kmemcheck/opcode.c +++ b/arch/x86/mm/kmemcheck/opcode.c @@ -21,10 +21,12 @@ static bool opcode_is_rex_prefix(uint8_t b) return (b & 0xf0) == 0x40; } -/* This is a VERY crude opcode decoder. We only need to find the size of the +/* + * This is a VERY crude opcode decoder. We only need to find the size of the * load/store that caused our #PF and this should work for all the opcodes * that we care about. Moreover, the ones who invented this instruction set - * should be shot. */ + * should be shot. 
+ */ unsigned int kmemcheck_opcode_get_size(const uint8_t *op) { /* Default operand size */ diff --git a/arch/x86/mm/kmemcheck/string.c b/arch/x86/mm/kmemcheck/string.c index 0d21d227ecba..1a62bf0479fa 100644 --- a/arch/x86/mm/kmemcheck/string.c +++ b/arch/x86/mm/kmemcheck/string.c @@ -27,8 +27,10 @@ static void memset_one_page(void *s, int c, size_t n) return; } - /* While we are not guarding the page in question, nobody else - * should be able to change them. */ + /* + * While we are not guarding the page in question, nobody else + * should be able to change them. + */ local_irq_save(flags); kmemcheck_pause_allbutself(); @@ -68,8 +70,10 @@ void *kmemcheck_memset(void *s, int c, size_t n) end_page = (addr + n) & PAGE_MASK; if (start_page == end_page) { - /* The entire area is within the same page. Good, we only - * need one memset(). */ + /* + * The entire area is within the same page. Good, we only + * need one memset(). + */ memset_one_page(s, c, n); return s; } -- cgit v1.2.3 From fbd013adab160d9ee596647d41d7298a61e3f44d Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sat, 12 Jul 2008 14:11:33 +0200 Subject: kmemcheck: fix use of uninitialized spinlock Without this, a "bad spinlock magic" message will appear quite early during boot (the first use of this spinlock). Signed-off-by: Vegard Nossum Signed-off-by: Ingo Molnar --- arch/x86/mm/kmemcheck/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/mm/kmemcheck/smp.c b/arch/x86/mm/kmemcheck/smp.c index cd17ddfda082..62b604992c63 100644 --- a/arch/x86/mm/kmemcheck/smp.c +++ b/arch/x86/mm/kmemcheck/smp.c @@ -7,7 +7,7 @@ #include "smp.h" #include -static spinlock_t nmi_spinlock; +static DEFINE_SPINLOCK(nmi_spinlock); static atomic_t nmi_wait; static atomic_t nmi_resume; -- cgit v1.2.3 From 872d08e2995ec8badd8eadc2b04967ba1bf9ed75 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sat, 12 Jul 2008 23:10:47 +0200 Subject: kmemcheck: constrain tracking to non-debugged caches kmemcheck really cannot be used in conjunction with SLAB debugging or SLUB debugging. SLAB debugging is controlled by a config option, so we can express the (inverse) dependency there, while SLUB debugging may be toggled for the different caches at run-time.
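The run-time half of this rule is the mm/slub.c hunk below; read as a single predicate it might look like the following sketch (the helper name is made up, and it assumes placement in mm/slub.c, where DEBUG_DEFAULT_FLAGS is defined):

    static inline bool kmemcheck_cache_wants_shadow(struct kmem_cache *s)
    {
            /*
             * Allocate a shadow only if kmemcheck is enabled and the cache
             * requested neither NOTRACK nor any of the SLUB debug flags.
             */
            return kmemcheck_enabled &&
                    !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS));
    }
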
Signed-off-by: Vegard Nossum Signed-off-by: Ingo Molnar --- arch/x86/Kconfig.debug | 2 +- mm/slub.c | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index eeeb5225778b..baad73207cdb 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -250,7 +250,7 @@ menuconfig KMEMCHECK bool "kmemcheck: trap use of uninitialized memory" depends on X86 depends on !X86_USE_3DNOW - depends on SLUB || SLAB + depends on SLUB || (SLAB && !DEBUG_SLAB) depends on !CC_OPTIMIZE_FOR_SIZE depends on !DEBUG_PAGEALLOC select FRAME_POINTER diff --git a/mm/slub.c b/mm/slub.c index af9298ded96b..e1e37bc9bb73 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1124,8 +1124,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK); } - if (kmemcheck_enabled && !(s->flags & SLAB_NOTRACK)) + if (kmemcheck_enabled + && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) + { kmemcheck_alloc_shadow(s, flags, node, page, compound_order(page)); + } page->objects = oo_objects(oo); mod_zone_page_state(page_zone(page), -- cgit v1.2.3 From 16984967d4849a3a850b52033df75d790a7b1ee9 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sat, 12 Jul 2008 23:13:52 +0200 Subject: kmemcheck: mark SMP support BROKEN It was recently discovered that our SMP support is in fact broken; if the NMI watchdog is enabled, the machine will hang at boot, with the message "Testing NMI watchdog". Most likely we're getting an extra NMI (or missing one) and as a result we might be waiting for a "phantom" CPU, i.e. one that doesn't exist in the first place, because we bumped the counter one time too many or one time too few. Signed-off-by: Vegard Nossum Signed-off-by: Ingo Molnar --- arch/x86/Kconfig.debug | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index baad73207cdb..17ac253d09a1 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -296,6 +296,7 @@ config KMEMCHECK_USE_SMP bool "kmemcheck: use multiple CPUs" depends on KMEMCHECK depends on SMP + depends on BROKEN default n help This option will prevent kmemcheck from disabling all but one CPU -- cgit v1.2.3 From a3ffadbe796c8b251ec602b621206f980eb1f0dd Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sat, 12 Jul 2008 23:18:43 +0200 Subject: kmemcheck: use capital Y/N in kconfig help-texts This is consistent with other kconfig help-texts. Signed-off-by: Vegard Nossum Signed-off-by: Ingo Molnar --- arch/x86/Kconfig.debug | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 17ac253d09a1..552cf57af948 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -310,9 +310,9 @@ config KMEMCHECK_USE_SMP time. If you want to compile a kernel specifically for the purpose of - playing with kmemcheck, you should say n here. If you want a normal + playing with kmemcheck, you should say N here. If you want a normal kernel with the possibility of enabling kmemcheck without - recompiling, you should say y here. + recompiling, you should say Y here. 
config KMEMCHECK_QUEUE_SIZE int "kmemcheck: error queue size" -- cgit v1.2.3 From d537cd48bde7d2c6f505fde5bf2cf27d45779227 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sat, 12 Jul 2008 23:25:09 +0200 Subject: kmemcheck: remove unnecessary tests in the slab allocator If the page is tracked, we don't need to check whether the cache supports tracking; it should never happen that a tracked page can belong to a non-tracked cache. Signed-off-by: Vegard Nossum Signed-off-by: Ingo Molnar --- mm/slab.c | 2 +- mm/slub.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/slab.c b/mm/slab.c index c549d3253445..9c44366ef054 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1628,7 +1628,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr) struct page *page = virt_to_page(addr); const unsigned long nr_freed = i; - if (kmemcheck_page_is_tracked(page) && !(cachep->flags & SLAB_NOTRACK)) + if (kmemcheck_page_is_tracked(page)) kmemcheck_free_shadow(cachep, page, cachep->gfporder); if (cachep->flags & SLAB_RECLAIM_ACCOUNT) diff --git a/mm/slub.c b/mm/slub.c index e1e37bc9bb73..dbbd455ab5e3 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1203,7 +1203,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page) ClearSlabDebug(page); } - if (kmemcheck_page_is_tracked(page) && !(s->flags & SLAB_NOTRACK)) + if (kmemcheck_page_is_tracked(page)) kmemcheck_free_shadow(s, page, compound_order(page)); mod_zone_page_state(page_zone(page), -- cgit v1.2.3 From 6409b3d382f8ec99c1ab312dd1e42f833465b9e6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 22 Jul 2008 12:20:56 +0200 Subject: kmemcheck: export kmemcheck_mark_initialized fix: Building modules, stage 2. MODPOST 345 modules ERROR: "kmemcheck_mark_initialized" [drivers/net/tokenring/3c359.ko] undefined! ERROR: "kmemcheck_mark_initialized" [drivers/net/sis900.ko] undefined! ERROR: "kmemcheck_mark_initialized" [drivers/net/sfc/sfc.ko] undefined! ERROR: "kmemcheck_mark_initialized" [drivers/net/s2io.ko] undefined! [...] 
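The references come from modules that end up calling kmemcheck_mark_initialized(), typically through inlined helpers such as the DMA mapping hook elsewhere in this series. A minimal made-up module that reproduces the link failure without the export might look like this:

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/kmemcheck.h>

    static int __init demo_init(void)
    {
            void *buf = kmalloc(64, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;

            /*
             * This reference is what modpost complains about above when
             * kmemcheck_mark_initialized() is not exported.
             */
            kmemcheck_mark_initialized(buf, 64);
            kfree(buf);
            return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
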
Signed-off-by: Ingo Molnar --- arch/x86/mm/kmemcheck/shadow.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c index 0cb144f14b60..cbb317cceac1 100644 --- a/arch/x86/mm/kmemcheck/shadow.c +++ b/arch/x86/mm/kmemcheck/shadow.c @@ -1,4 +1,5 @@ #include +#include #include #include @@ -61,6 +62,7 @@ void kmemcheck_mark_initialized(void *address, unsigned int n) { mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED); } +EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized); void kmemcheck_mark_freed(void *address, unsigned int n) { -- cgit v1.2.3 From 12f50da2e1ad0cda2fb5754298bd03b3b02eb78b Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 24 Jul 2008 16:09:32 -0700 Subject: kmemcheck: include module.h to prevent warnings kmemcheck/shadow.c needs to include to prevent the following warnings: linux-next-20080724/arch/x86/mm/kmemcheck/shadow.c:64: warning : data definition has no type or storage class linux-next-20080724/arch/x86/mm/kmemcheck/shadow.c:64: warning : type defaults to 'int' in declaration of 'EXPORT_SYMBOL_GPL' linux-next-20080724/arch/x86/mm/kmemcheck/shadow.c:64: warning : parameter names (without types) in function declaration Signed-off-by: Randy Dunlap Cc: vegardno@ifi.uio.no Cc: penberg@cs.helsinki.fi Cc: akpm Signed-off-by: Ingo Molnar --- arch/x86/mm/kmemcheck/shadow.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c index cbb317cceac1..b7b5dbaa7c7c 100644 --- a/arch/x86/mm/kmemcheck/shadow.c +++ b/arch/x86/mm/kmemcheck/shadow.c @@ -1,6 +1,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3 From 19dd5283c3b1f3f8abab705c23658403910dfe23 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sun, 20 Jul 2008 10:44:54 +0200 Subject: kmemcheck: add DMA hooks This patch hooks into the DMA API to prevent the reporting of the false positives that would otherwise be reported when memory is accessed that is also used directly by devices. Signed-off-by: Vegard Nossum --- include/asm-x86/dma-mapping.h | 2 ++ include/linux/kmemcheck.h | 16 ++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h index ad9cd6d49bfc..337c2cf5fa27 100644 --- a/include/asm-x86/dma-mapping.h +++ b/include/asm-x86/dma-mapping.h @@ -6,6 +6,7 @@ * documentation. 
*/ +#include #include #include #include @@ -105,6 +106,7 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size, struct dma_mapping_ops *ops = get_dma_ops(hwdev); BUG_ON(!valid_dma_direction(direction)); + kmemcheck_mark_initialized(ptr, size); return ops->map_single(hwdev, virt_to_phys(ptr), size, direction); } diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h index b2d83efaabc6..57bb1254cb72 100644 --- a/include/linux/kmemcheck.h +++ b/include/linux/kmemcheck.h @@ -65,6 +65,22 @@ static inline bool kmemcheck_page_is_tracked(struct page *p) { return false; } + +static inline void kmemcheck_mark_unallocated(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_initialized(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_freed(void *address, unsigned int n) +{ +} #endif /* CONFIG_KMEMCHECK */ #endif /* LINUX_KMEMCHECK_H */ -- cgit v1.2.3 From 1a3eac722554186437a3c3f86145a97f8b2baf19 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Wed, 23 Jul 2008 17:46:10 +0200 Subject: kmemcheck: work with sizes in terms of bytes instead of bits This is more useful when we want to calculate an index/offset and allows us to condense the code significantly. Signed-off-by: Vegard Nossum --- arch/x86/mm/kmemcheck/error.c | 2 +- arch/x86/mm/kmemcheck/kmemcheck.c | 10 +----- arch/x86/mm/kmemcheck/opcode.c | 12 +++---- arch/x86/mm/kmemcheck/shadow.c | 76 ++++++--------------------------------- 4 files changed, 19 insertions(+), 81 deletions(-) diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c index 56410c63b465..1b62d9000e0a 100644 --- a/arch/x86/mm/kmemcheck/error.c +++ b/arch/x86/mm/kmemcheck/error.c @@ -179,7 +179,7 @@ void kmemcheck_error_recall(void) case KMEMCHECK_ERROR_INVALID_ACCESS: printk(KERN_ERR "kmemcheck: Caught %d-bit read " "from %s memory (%p)\n", - e->size, e->state < ARRAY_SIZE(desc) ? + 8 * e->size, e->state < ARRAY_SIZE(desc) ? 
desc[e->state] : "(invalid shadow state)", (void *) e->address); diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 37949c3a5859..b5d6577bfd27 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -285,16 +285,8 @@ void kmemcheck_hide_pages(struct page *p, unsigned int n) static bool check_page_boundary(struct pt_regs *regs, unsigned long addr, unsigned int size) { - if (size == 8) + if (addr & PAGE_MASK == (addr + size - 1) & PAGE_MASK) return false; - if (size == 16 && (addr & PAGE_MASK) == ((addr + 1) & PAGE_MASK)) - return false; - if (size == 32 && (addr & PAGE_MASK) == ((addr + 3) & PAGE_MASK)) - return false; -#ifdef CONFIG_X86_64 - if (size == 64 && (addr & PAGE_MASK) == ((addr + 7) & PAGE_MASK)) - return false; -#endif /* * XXX: The addr/size data is also really interesting if this diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c index 194aeee366a9..27f709dd3bad 100644 --- a/arch/x86/mm/kmemcheck/opcode.c +++ b/arch/x86/mm/kmemcheck/opcode.c @@ -30,19 +30,19 @@ static bool opcode_is_rex_prefix(uint8_t b) unsigned int kmemcheck_opcode_get_size(const uint8_t *op) { /* Default operand size */ - int operand_size_override = 32; + int operand_size_override = 4; /* prefixes */ for (; opcode_is_prefix(*op); ++op) { if (*op == 0x66) - operand_size_override = 16; + operand_size_override = 2; } #ifdef CONFIG_X86_64 /* REX prefix */ if (opcode_is_rex_prefix(*op)) { if (*op & 0x08) - return 64; + return 8; ++op; } #endif @@ -52,12 +52,12 @@ unsigned int kmemcheck_opcode_get_size(const uint8_t *op) ++op; if (*op == 0xb6) - return 8; + return 1; if (*op == 0xb7) - return 16; + return 2; } - return (*op & 1) ? operand_size_override : 8; + return (*op & 1) ? operand_size_override : 1; } const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op) diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c index b7b5dbaa7c7c..26ea5802bb58 100644 --- a/arch/x86/mm/kmemcheck/shadow.c +++ b/arch/x86/mm/kmemcheck/shadow.c @@ -89,6 +89,7 @@ void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n) enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size) { uint8_t *x; + unsigned int i; x = shadow; @@ -97,54 +98,15 @@ enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size) * Make sure _some_ bytes are initialized. Gcc frequently generates * code to access neighboring bytes. 
*/ - switch (size) { -#ifdef CONFIG_X86_64 - case 64: - if (x[7] == KMEMCHECK_SHADOW_INITIALIZED) - return x[7]; - if (x[6] == KMEMCHECK_SHADOW_INITIALIZED) - return x[6]; - if (x[5] == KMEMCHECK_SHADOW_INITIALIZED) - return x[5]; - if (x[4] == KMEMCHECK_SHADOW_INITIALIZED) - return x[4]; -#endif - case 32: - if (x[3] == KMEMCHECK_SHADOW_INITIALIZED) - return x[3]; - if (x[2] == KMEMCHECK_SHADOW_INITIALIZED) - return x[2]; - case 16: - if (x[1] == KMEMCHECK_SHADOW_INITIALIZED) - return x[1]; - case 8: - if (x[0] == KMEMCHECK_SHADOW_INITIALIZED) - return x[0]; + for (i = 0; i < size; ++i) { + if (x[i] == KMEMCHECK_SHADOW_INITIALIZED) + return x[i]; } #else - switch (size) { -#ifdef CONFIG_X86_64 - case 64: - if (x[7] != KMEMCHECK_SHADOW_INITIALIZED) - return x[7]; - if (x[6] != KMEMCHECK_SHADOW_INITIALIZED) - return x[6]; - if (x[5] != KMEMCHECK_SHADOW_INITIALIZED) - return x[5]; - if (x[4] != KMEMCHECK_SHADOW_INITIALIZED) - return x[4]; -#endif - case 32: - if (x[3] != KMEMCHECK_SHADOW_INITIALIZED) - return x[3]; - if (x[2] != KMEMCHECK_SHADOW_INITIALIZED) - return x[2]; - case 16: - if (x[1] != KMEMCHECK_SHADOW_INITIALIZED) - return x[1]; - case 8: - if (x[0] != KMEMCHECK_SHADOW_INITIALIZED) - return x[0]; + /* All bytes must be initialized. */ + for (i = 0; i < size; ++i) { + if (x[i] != KMEMCHECK_SHADOW_INITIALIZED) + return x[i]; } #endif @@ -154,25 +116,9 @@ enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size) void kmemcheck_shadow_set(void *shadow, unsigned int size) { uint8_t *x; + unsigned int i; x = shadow; - - switch (size) { -#ifdef CONFIG_X86_64 - case 64: - x[7] = KMEMCHECK_SHADOW_INITIALIZED; - x[6] = KMEMCHECK_SHADOW_INITIALIZED; - x[5] = KMEMCHECK_SHADOW_INITIALIZED; - x[4] = KMEMCHECK_SHADOW_INITIALIZED; -#endif - case 32: - x[3] = KMEMCHECK_SHADOW_INITIALIZED; - x[2] = KMEMCHECK_SHADOW_INITIALIZED; - case 16: - x[1] = KMEMCHECK_SHADOW_INITIALIZED; - case 8: - x[0] = KMEMCHECK_SHADOW_INITIALIZED; - } - - return; + for (i = 0; i < size; ++i) + x[i] = KMEMCHECK_SHADOW_INITIALIZED; } -- cgit v1.2.3 From 86b5b776c5d0d3fb2513aca9fb7f4a04cf2289d9 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Tue, 26 Aug 2008 15:46:38 +0200 Subject: kmemcheck: allow memory accesses that cross page boundaries Signed-off-by: Vegard Nossum --- arch/x86/mm/kmemcheck/kmemcheck.c | 183 +++++++++++++++++++++++--------------- 1 file changed, 109 insertions(+), 74 deletions(-) diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index b5d6577bfd27..8b03b05c6f1a 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -107,8 +107,13 @@ struct kmemcheck_context { bool busy; int balance; - unsigned long addr1; - unsigned long addr2; + /* + * There can be at most two memory operands to an instruction, but + * each address can cross a page boundary -- so we may need up to + * four addresses that must be hidden/revealed for each fault. 
+ */ + unsigned long addr[4]; + unsigned long n_addrs; unsigned long flags; }; @@ -121,36 +126,66 @@ bool kmemcheck_active(struct pt_regs *regs) return data->balance > 0; } +/* Save an address that needs to be shown/hidden */ +static void kmemcheck_save_addr(unsigned long addr) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + + data->addr[data->n_addrs++] = addr; + + BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr)); +} + +static unsigned int kmemcheck_show_all(void) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + unsigned int i; + unsigned int n; + + n = 0; + for (i = 0; i < data->n_addrs; ++i) + n += kmemcheck_show_addr(data->addr[i]); + + return n; +} + +static unsigned int kmemcheck_hide_all(void) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + unsigned int i; + unsigned int n; + + n = 0; + for (i = 0; i < data->n_addrs; ++i) + n += kmemcheck_hide_addr(data->addr[i]); + + return n; +} + /* * Called from the #PF handler. */ void kmemcheck_show(struct pt_regs *regs) { struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); - int n; BUG_ON(!irqs_disabled()); kmemcheck_pause_allbutself(); if (unlikely(data->balance != 0)) { - kmemcheck_show_addr(data->addr1); - kmemcheck_show_addr(data->addr2); + kmemcheck_show_all(); kmemcheck_error_save_bug(regs); data->balance = 0; kmemcheck_resume(); return; } - n = 0; - n += kmemcheck_show_addr(data->addr1); - n += kmemcheck_show_addr(data->addr2); - /* * None of the addresses actually belonged to kmemcheck. Note that * this is not an error. */ - if (n == 0) { + if (kmemcheck_show_all() == 0) { kmemcheck_resume(); return; } @@ -189,11 +224,9 @@ void kmemcheck_hide(struct pt_regs *regs) } if (unlikely(data->balance != 1)) { - kmemcheck_show_addr(data->addr1); - kmemcheck_show_addr(data->addr2); + kmemcheck_show_all(); kmemcheck_error_save_bug(regs); - data->addr1 = 0; - data->addr2 = 0; + data->n_addrs = 0; data->balance = 0; if (!(data->flags & X86_EFLAGS_TF)) @@ -204,14 +237,10 @@ void kmemcheck_hide(struct pt_regs *regs) return; } - n = 0; - if (kmemcheck_enabled) { - n += kmemcheck_hide_addr(data->addr1); - n += kmemcheck_hide_addr(data->addr2); - } else { - n += kmemcheck_show_addr(data->addr1); - n += kmemcheck_show_addr(data->addr2); - } + if (kmemcheck_enabled) + n = kmemcheck_hide_all(); + else + n = kmemcheck_show_all(); if (n == 0) { kmemcheck_resume(); @@ -220,8 +249,7 @@ void kmemcheck_hide(struct pt_regs *regs) --data->balance; - data->addr1 = 0; - data->addr2 = 0; + data->n_addrs = 0; if (!(data->flags & X86_EFLAGS_TF)) regs->flags &= ~X86_EFLAGS_TF; @@ -278,44 +306,24 @@ void kmemcheck_hide_pages(struct page *p, unsigned int n) } } -/* - * Check that an access does not span across two different pages, because - * that will mess up our shadow lookup. - */ -static bool check_page_boundary(struct pt_regs *regs, +/* Access may NOT cross page boundary */ +static void kmemcheck_read_strict(struct pt_regs *regs, unsigned long addr, unsigned int size) -{ - if (addr & PAGE_MASK == (addr + size - 1) & PAGE_MASK) - return false; - - /* - * XXX: The addr/size data is also really interesting if this - * case ever triggers. We should make a separate class of errors - * for this case. 
-Vegard - */ - kmemcheck_error_save_bug(regs); - return true; -} - -static void kmemcheck_read(struct pt_regs *regs, - unsigned long address, unsigned int size) { void *shadow; enum kmemcheck_shadow status; - shadow = kmemcheck_shadow_lookup(address); + shadow = kmemcheck_shadow_lookup(addr); if (!shadow) return; - if (check_page_boundary(regs, address, size)) - return; - + kmemcheck_save_addr(addr); status = kmemcheck_shadow_test(shadow, size); if (status == KMEMCHECK_SHADOW_INITIALIZED) return; if (kmemcheck_enabled) - kmemcheck_error_save(status, address, size, regs); + kmemcheck_error_save(status, addr, size, regs); if (kmemcheck_enabled == 2) kmemcheck_enabled = 0; @@ -324,19 +332,58 @@ static void kmemcheck_read(struct pt_regs *regs, kmemcheck_shadow_set(shadow, size); } -static void kmemcheck_write(struct pt_regs *regs, - unsigned long address, unsigned int size) +/* Access may cross page boundary */ +static void kmemcheck_read(struct pt_regs *regs, + unsigned long addr, unsigned int size) +{ + unsigned long page = addr & PAGE_MASK; + unsigned long next_addr = addr + size - 1; + unsigned long next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + kmemcheck_read_strict(regs, addr, size); + return; + } + + /* + * What we do is basically to split the access across the + * two pages and handle each part separately. Yes, this means + * that we may now see reads that are 3 + 5 bytes, for + * example (and if both are uninitialized, there will be two + * reports), but it makes the code a lot simpler. + */ + kmemcheck_read_strict(regs, addr, next_page - addr); + kmemcheck_read_strict(regs, next_page, next_addr - next_page); +} + +static void kmemcheck_write_strict(struct pt_regs *regs, + unsigned long addr, unsigned int size) { void *shadow; - shadow = kmemcheck_shadow_lookup(address); + shadow = kmemcheck_shadow_lookup(addr); if (!shadow) return; - if (check_page_boundary(regs, address, size)) + kmemcheck_save_addr(addr); + kmemcheck_shadow_set(shadow, size); +} + +static void kmemcheck_write(struct pt_regs *regs, + unsigned long addr, unsigned int size) +{ + unsigned long page = addr & PAGE_MASK; + unsigned long next_addr = addr + size - 1; + unsigned long next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + kmemcheck_write_strict(regs, addr, size); return; + } - kmemcheck_shadow_set(shadow, size); + /* See comment in kmemcheck_read(). 
*/ + kmemcheck_write_strict(regs, addr, next_page - addr); + kmemcheck_write_strict(regs, next_page, next_addr - next_page); } enum kmemcheck_method { @@ -387,10 +434,7 @@ static void kmemcheck_access(struct pt_regs *regs, /* XOR */ case 6: kmemcheck_write(regs, fallback_address, size); - data->addr1 = fallback_address; - data->addr2 = 0; - data->busy = false; - return; + goto out; /* ADD */ case 0: @@ -416,20 +460,14 @@ static void kmemcheck_access(struct pt_regs *regs, */ kmemcheck_read(regs, regs->si, size); kmemcheck_write(regs, regs->di, size); - data->addr1 = regs->si; - data->addr2 = regs->di; - data->busy = false; - return; + goto out; /* CMPS, CMPSB, CMPSW, CMPSD */ case 0xa6: case 0xa7: kmemcheck_read(regs, regs->si, size); kmemcheck_read(regs, regs->di, size); - data->addr1 = regs->si; - data->addr2 = regs->di; - data->busy = false; - return; + goto out; } /* @@ -440,17 +478,14 @@ static void kmemcheck_access(struct pt_regs *regs, switch (fallback_method) { case KMEMCHECK_READ: kmemcheck_read(regs, fallback_address, size); - data->addr1 = fallback_address; - data->addr2 = 0; - data->busy = false; - return; + goto out; case KMEMCHECK_WRITE: kmemcheck_write(regs, fallback_address, size); - data->addr1 = fallback_address; - data->addr2 = 0; - data->busy = false; - return; + goto out; } + +out: + data->busy = false; } bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, -- cgit v1.2.3 From 7d9ed050ae80b40117a42c3c1541cd7662c89746 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Tue, 26 Aug 2008 21:38:02 +0200 Subject: kmemcheck: add some more documentation This addresses a review comment made by Randy Dunlap. Signed-off-by: Vegard Nossum --- arch/x86/mm/kmemcheck/string.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/mm/kmemcheck/string.c b/arch/x86/mm/kmemcheck/string.c index 1a62bf0479fa..5776f6177dae 100644 --- a/arch/x86/mm/kmemcheck/string.c +++ b/arch/x86/mm/kmemcheck/string.c @@ -48,6 +48,10 @@ static void memset_one_page(void *s, int c, size_t n) * A faster implementation of memset() when tracking is enabled. We cannot * assume that all pages within the range are tracked, so copying has to be * split into page-sized (or smaller, for the ends) chunks. + * + * This function is NOT supposed to be used directly by modules; instead, + * when kmemcheck is enabled, memset() is defined as a macro which will call + * kmemcheck_memset(); for that reason is the function exported. */ void *kmemcheck_memset(void *s, int c, size_t n) { -- cgit v1.2.3 From 69fc3b188186260679d76870ec7e63fd8abaf305 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Tue, 26 Aug 2008 15:26:13 +0200 Subject: kmemcheck: add some comments Signed-off-by: Vegard Nossum --- mm/kmemcheck.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c index 4efdf1ef545b..eaa41b802611 100644 --- a/mm/kmemcheck.c +++ b/mm/kmemcheck.c @@ -65,8 +65,14 @@ void kmemcheck_free_shadow(struct kmem_cache *s, struct page *page, int order) void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, size_t size) { + /* + * Has already been memset(), which initializes the shadow for us + * as well. + */ if (gfpflags & __GFP_ZERO) return; + + /* No need to initialize the shadow of a non-tracked slab. 
*/ if (s->flags & SLAB_NOTRACK) return; -- cgit v1.2.3 From c29fc328e8d56c755ca2daa7558ac9940dd7e892 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Tue, 26 Aug 2008 15:25:00 +0200 Subject: kmemcheck: save memory contents on use of uninitialized memory Signed-off-by: Vegard Nossum --- arch/x86/Kconfig.debug | 2 +- arch/x86/mm/kmemcheck/error.c | 23 ++++++++++++++++++----- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index dc111a642323..0d618abacfb7 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -357,7 +357,7 @@ config KMEMCHECK_SHADOW_COPY_SHIFT int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)" depends on KMEMCHECK range 2 8 - default 6 + default 5 help Select the number of shadow bytes to save along with each entry of the queue. These bytes indicate what parts of an allocation are diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c index 1b62d9000e0a..0066517811e2 100644 --- a/arch/x86/mm/kmemcheck/error.c +++ b/arch/x86/mm/kmemcheck/error.c @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -36,6 +37,7 @@ struct kmemcheck_error { /* We compress it to a char. */ unsigned char shadow_copy[SHADOW_COPY_SIZE]; + unsigned char memory_copy[SHADOW_COPY_SIZE]; }; /* @@ -91,7 +93,8 @@ void kmemcheck_error_save(enum kmemcheck_shadow state, static unsigned long prev_ip; struct kmemcheck_error *e; - enum shadow *shadow_copy; + void *shadow_copy; + void *memory_copy; /* Don't report several adjacent errors from the same EIP. */ if (regs->ip == prev_ip) @@ -125,6 +128,11 @@ void kmemcheck_error_save(enum kmemcheck_shadow state, memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE); + kmemcheck_show_addr(address); + memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1)); + memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE); + kmemcheck_hide_addr(address); + tasklet_hi_schedule_first(&kmemcheck_tasklet); } @@ -183,16 +191,21 @@ void kmemcheck_error_recall(void) desc[e->state] : "(invalid shadow state)", (void *) e->address); + printk(KERN_INFO); + for (i = 0; i < SHADOW_COPY_SIZE; ++i) + printk("%02x", e->memory_copy[i]); + printk("\n"); + printk(KERN_INFO); for (i = 0; i < SHADOW_COPY_SIZE; ++i) { if (e->shadow_copy[i] < ARRAY_SIZE(short_desc)) - printk("%c", short_desc[e->shadow_copy[i]]); + printk(" %c", short_desc[e->shadow_copy[i]]); else - printk("?"); + printk(" ?"); } printk("\n"); - printk(KERN_INFO "%*c\n", - 1 + (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^'); + printk(KERN_INFO "%*c\n", 2 + 2 + * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^'); break; case KMEMCHECK_ERROR_BUG: printk(KERN_EMERG "kmemcheck: Fatal error\n"); -- cgit v1.2.3 From db4b0f1e8ae7c9ffde8bef2eb3dcd4fad5988ac1 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Tue, 26 Aug 2008 21:43:09 +0200 Subject: kmemcheck: implement REP MOVS/STOS emulation It turns out that certains CPUs (so far we've seen the pattern only in P4 machines) will single-step a REP MOVS/STOS instruction only after RCX/ECX/CX has reached zero. This means that prior to this patch, kmemcheck would only detect the very first such instruction on those machines. Note that only REP MOVS/STOS instructions seem to be affected by this; REP CMPS, REP LODS, etc. are fine. It's NOT possible to do a "real" emulation of the instruction. We might access a kernel address in %esi and a userspace address in %edi. 
And if we try to access userspace from kmemcheck (which is called from the page fault handler), we could simply take another fault, this time on the userspace address. My final hack turned out like this (I'm quite proud of it): Instead of emulating the _whole_ REP MOVS/STOS, we only emulate the REP part. That is, on #PF, we increment %eip by one, which means that when the instruction is restarted, the CPU executes it as an ordinary MOVS/STOS (and what it will give us next is the #DB straight afterwards). Now, in the #DB, we check the flag that says "was this really a REP instruction?" and if it was, we start counting down %ecx and rewinding %eip each time until %ecx is 0. Each time we return to the original instruction and let the CPU execute it natively. When %ecx is 0, we turn off single-stepping and hide the pages again. What's also neat about this, although it sounds slow, is that we actually only modify the page tables at the beginning and the end of a whole REP MOVS/STOS sequence, so this should in theory be faster on non-P4s as well (we skip TWO TLB flushes and don't need the #PF in-between each iteration of the loop either). Signed-off-by: Vegard Nossum --- arch/x86/kernel/traps_32.c | 8 +-- arch/x86/kernel/traps_64.c | 8 +-- arch/x86/mm/kmemcheck/kmemcheck.c | 133 +++++++++++++++++++++++++++++++++++++- arch/x86/mm/kmemcheck/opcode.c | 34 +++++++--- arch/x86/mm/kmemcheck/opcode.h | 3 +- include/asm-x86/kmemcheck.h | 6 ++ 6 files changed, 170 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c index 65829b035a74..fdbf2b484caf 100644 --- a/arch/x86/kernel/traps_32.c +++ b/arch/x86/kernel/traps_32.c @@ -898,12 +898,8 @@ void __kprobes do_debug(struct pt_regs *regs, long error_code) get_debugreg(condition, 6); /* Catch kmemcheck conditions first of all! */ - if (condition & DR_STEP) { - if (kmemcheck_active(regs)) { - kmemcheck_hide(regs); - return; - } - } + if (condition & DR_STEP && kmemcheck_trap(regs)) + return; /* * The processor cleared BTF, so don't mark that we need it set. diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c index dde8c88bc91e..3eac0de1ba87 100644 --- a/arch/x86/kernel/traps_64.c +++ b/arch/x86/kernel/traps_64.c @@ -904,12 +904,8 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs, get_debugreg(condition, 6); /* Catch kmemcheck conditions first of all! */ - if (condition & DR_STEP) { - if (kmemcheck_active(regs)) { - kmemcheck_hide(regs); - return; - } - } + if (condition & DR_STEP && kmemcheck_trap(regs)) + return; /* * The processor cleared BTF, so don't mark that we need it set. diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 8b03b05c6f1a..366a63890981 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -115,6 +115,21 @@ struct kmemcheck_context { unsigned long addr[4]; unsigned long n_addrs; unsigned long flags; + + /* + * The address of the REP prefix if we are currently emulating a + * REP instruction; otherwise 0. + */ + const uint8_t *rep; + + /* The address of the REX prefix. */ + const uint8_t *rex; + + /* Address of the primary instruction opcode. */ + const uint8_t *insn; + + /* Data size of the instruction that caused a fault. */ + unsigned int size; }; static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context); @@ -237,6 +252,12 @@ void kmemcheck_hide(struct pt_regs *regs) return; } + if (data->rep) { + /* Save state and take it up later. 
*/ + regs->ip = (unsigned long) data->rep; + data->rep = NULL; + } + if (kmemcheck_enabled) n = kmemcheck_hide_all(); else @@ -394,6 +415,8 @@ enum kmemcheck_method { static void kmemcheck_access(struct pt_regs *regs, unsigned long fallback_address, enum kmemcheck_method fallback_method) { + const uint8_t *rep_prefix; + const uint8_t *rex_prefix; const uint8_t *insn; const uint8_t *insn_primary; unsigned int size; @@ -412,7 +435,56 @@ static void kmemcheck_access(struct pt_regs *regs, insn = (const uint8_t *) regs->ip; insn_primary = kmemcheck_opcode_get_primary(insn); - size = kmemcheck_opcode_get_size(insn); + kmemcheck_opcode_decode(insn, &rep_prefix, &rex_prefix, &size); + + if (rep_prefix && *rep_prefix == 0xf3) { + /* + * Due to an incredibly silly Intel bug, REP MOVS and + * REP STOS instructions may generate just one single- + * stepping trap on Pentium 4 CPUs. Other CPUs, including + * AMDs, seem to generate traps after each repetition. + * + * What we do is really a very ugly hack; we increment the + * instruction pointer before returning so that the next + * time around we'll hit an ordinary MOVS or STOS + * instruction. Now, in the debug exception, we know that + * the instruction is really a REP MOVS/STOS, so instead + * of clearing the single-stepping flag, we just continue + * single-stepping the instruction until we're done. + * + * We currently don't handle REP MOVS/STOS instructions + * which have other (additional) instruction prefixes in + * front of REP, so we BUG on those. + */ + switch (insn_primary[0]) { + /* REP MOVS */ + case 0xa4: + case 0xa5: + BUG_ON(regs->ip != (unsigned long) rep_prefix); + + kmemcheck_read(regs, regs->si, size); + kmemcheck_write(regs, regs->di, size); + data->rep = rep_prefix; + data->rex = rex_prefix; + data->insn = insn_primary; + data->size = size; + regs->ip = (unsigned long) data->rep + 1; + goto out; + + /* REP STOS */ + case 0xaa: + case 0xab: + BUG_ON(regs->ip != (unsigned long) rep_prefix); + + kmemcheck_write(regs, regs->di, size); + data->rep = rep_prefix; + data->rex = rex_prefix; + data->insn = insn_primary; + data->size = size; + regs->ip = (unsigned long) data->rep + 1; + goto out; + } + } switch (insn_primary[0]) { #ifdef CONFIG_KMEMCHECK_BITOPS_OK @@ -510,3 +582,62 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, kmemcheck_show(regs); return true; } + +bool kmemcheck_trap(struct pt_regs *regs) +{ + struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context); + unsigned long cx; +#ifdef CONFIG_X86_64 + uint32_t ecx; +#endif + + if (!kmemcheck_active(regs)) + return false; + + if (!data->rep) { + kmemcheck_hide(regs); + return true; + } + + /* + * We're emulating a REP MOVS/STOS instruction. Are we done yet? + * Of course, 64-bit needs to handle CX/ECX/RCX differently... + */ +#ifdef CONFIG_X86_64 + if (data->rex && data->rex[0] & 0x08) { + cx = regs->cx - 1; + regs->cx = cx; + } else { + /* Without REX, 64-bit wants to use %ecx by default. */ + ecx = regs->cx - 1; + cx = ecx; + regs->cx = (regs->cx & ~((1UL << 32) - 1)) | ecx; + } +#else + cx = regs->cx - 1; + regs->cx = cx; +#endif + if (cx) { + data->n_addrs = 0; + + switch (data->insn[0]) { + case 0xa4: + case 0xa5: + kmemcheck_read(regs, regs->si, data->size); + kmemcheck_write(regs, regs->di, data->size); + break; + case 0xaa: + case 0xab: + kmemcheck_write(regs, regs->di, data->size); + break; + } + + /* Without the REP prefix, we have to do this ourselves... */ + regs->ip = (unsigned long) data->rep + 1; + return true; + } + + /* We're done. 
*/ + kmemcheck_hide(regs); + return true; +} diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c index 27f709dd3bad..88a9662e19aa 100644 --- a/arch/x86/mm/kmemcheck/opcode.c +++ b/arch/x86/mm/kmemcheck/opcode.c @@ -27,22 +27,35 @@ static bool opcode_is_rex_prefix(uint8_t b) * that we care about. Moreover, the ones who invented this instruction set * should be shot. */ -unsigned int kmemcheck_opcode_get_size(const uint8_t *op) +void kmemcheck_opcode_decode(const uint8_t *op, + const uint8_t **rep_prefix, const uint8_t **rex_prefix, + unsigned int *size) { /* Default operand size */ int operand_size_override = 4; + *rep_prefix = NULL; + /* prefixes */ for (; opcode_is_prefix(*op); ++op) { + if (*op == 0xf2 || *op == 0xf3) + *rep_prefix = op; if (*op == 0x66) operand_size_override = 2; } + *rex_prefix = NULL; + #ifdef CONFIG_X86_64 /* REX prefix */ if (opcode_is_rex_prefix(*op)) { - if (*op & 0x08) - return 8; + *rex_prefix = op; + + if (*op & 0x08) { + *size = 8; + return; + } + ++op; } #endif @@ -51,13 +64,18 @@ unsigned int kmemcheck_opcode_get_size(const uint8_t *op) if (*op == 0x0f) { ++op; - if (*op == 0xb6) - return 1; - if (*op == 0xb7) - return 2; + if (*op == 0xb6) { + *size = 1; + return; + } + + if (*op == 0xb7) { + *size = 2; + return; + } } - return (*op & 1) ? operand_size_override : 1; + *size = (*op & 1) ? operand_size_override : 1; } const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op) diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h index a19b8fa37660..f744d8e7eb45 100644 --- a/arch/x86/mm/kmemcheck/opcode.h +++ b/arch/x86/mm/kmemcheck/opcode.h @@ -3,7 +3,8 @@ #include -unsigned int kmemcheck_opcode_get_size(const uint8_t *op); +void kmemcheck_opcode_decode(const uint8_t *op, + const uint8_t **rep_pfx, const uint8_t **rex_pfx, unsigned int *size); const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op); #endif diff --git a/include/asm-x86/kmemcheck.h b/include/asm-x86/kmemcheck.h index f625398a3612..ed01518f297e 100644 --- a/include/asm-x86/kmemcheck.h +++ b/include/asm-x86/kmemcheck.h @@ -12,6 +12,7 @@ void kmemcheck_hide(struct pt_regs *regs); bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code); +bool kmemcheck_trap(struct pt_regs *regs); #else static inline bool kmemcheck_active(struct pt_regs *regs) { @@ -31,6 +32,11 @@ static inline bool kmemcheck_fault(struct pt_regs *regs, { return false; } + +static inline bool kmemcheck_trap(struct pt_regs *regs) +{ + return false; +} #endif /* CONFIG_KMEMCHECK */ #endif -- cgit v1.2.3 From 6189ce7dbf3bba7984be375b97f3319b24639197 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sun, 7 Sep 2008 18:38:19 +0200 Subject: kmemcheck: hide/show pages in each iteration of a REP instruction In certain cases when the REP MOVS/STOS instruction crossed a page boundary, the previous page would not be hidden. Thus, subsequent accesses to that page would not be detected, and we would fail to update the shadow state. However, if another multi-operand instruction happened to get a page fault (with this page in its other operand), kmemcheck would notice that this page is ours and hide it again afterwards. This would lead to correct execution, except for the possibly huge number of false-positive warnings that were reported because we failed to update the shadow state. So for now, we take the slow, but safe route of hiding/showing the pages in question on each iteration of the REP instruction. 
Signed-off-by: Vegard Nossum --- arch/x86/mm/kmemcheck/kmemcheck.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 366a63890981..427e160da188 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -618,7 +618,7 @@ bool kmemcheck_trap(struct pt_regs *regs) regs->cx = cx; #endif if (cx) { - data->n_addrs = 0; + kmemcheck_hide(regs); switch (data->insn[0]) { case 0xa4: @@ -632,8 +632,7 @@ bool kmemcheck_trap(struct pt_regs *regs) break; } - /* Without the REP prefix, we have to do this ourselves... */ - regs->ip = (unsigned long) data->rep + 1; + kmemcheck_show(regs); return true; } -- cgit v1.2.3 From 2c364c7a32f85ca76732278591c495a8d89614a0 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sun, 7 Sep 2008 18:52:15 +0200 Subject: kmemcheck: rip out the optimized memset() Although a good idea in principle, the task is really too complex to be implemented in an elegant manner. It's too much code to keep around, so let's just drop it to keep the code simple. (Correctness is always more important and this was a premature optimization to begin with.) Signed-off-by: Vegard Nossum --- arch/x86/mm/kmemcheck/Makefile | 2 +- arch/x86/mm/kmemcheck/shadow.c | 2 +- arch/x86/mm/kmemcheck/string.c | 99 ------------------------------------------ include/asm-x86/string_32.h | 8 ---- include/asm-x86/string_64.h | 1 - 5 files changed, 2 insertions(+), 110 deletions(-) delete mode 100644 arch/x86/mm/kmemcheck/string.c diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile index f888b5c934be..138a6ad2945e 100644 --- a/arch/x86/mm/kmemcheck/Makefile +++ b/arch/x86/mm/kmemcheck/Makefile @@ -1,3 +1,3 @@ -obj-y := error.o kmemcheck.o opcode.o pte.o shadow.o string.o +obj-y := error.o kmemcheck.o opcode.o pte.o shadow.o obj-$(CONFIG_KMEMCHECK_USE_SMP) += smp.o diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c index 26ea5802bb58..196dddc70cf6 100644 --- a/arch/x86/mm/kmemcheck/shadow.c +++ b/arch/x86/mm/kmemcheck/shadow.c @@ -42,7 +42,7 @@ static void mark_shadow(void *address, unsigned int n, shadow = kmemcheck_shadow_lookup((unsigned long) address); if (!shadow) return; - __memset(shadow, status, n); + memset(shadow, status, n); } void kmemcheck_mark_unallocated(void *address, unsigned int n) diff --git a/arch/x86/mm/kmemcheck/string.c b/arch/x86/mm/kmemcheck/string.c deleted file mode 100644 index 5776f6177dae..000000000000 --- a/arch/x86/mm/kmemcheck/string.c +++ /dev/null @@ -1,99 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include "shadow.h" -#include "smp.h" - -/* - * A faster implementation of memset() when tracking is enabled where the - * whole memory area is within a single page. - */ -static void memset_one_page(void *s, int c, size_t n) -{ - unsigned long addr; - void *x; - unsigned long flags; - - addr = (unsigned long) s; - - x = kmemcheck_shadow_lookup(addr); - if (!x) { - /* The page isn't being tracked. */ - __memset(s, c, n); - return; - } - - /* - * While we are not guarding the page in question, nobody else - * should be able to change them. - */ - local_irq_save(flags); - - kmemcheck_pause_allbutself(); - kmemcheck_show_addr(addr); - __memset(s, c, n); - __memset(x, KMEMCHECK_SHADOW_INITIALIZED, n); - if (kmemcheck_enabled) - kmemcheck_hide_addr(addr); - kmemcheck_resume(); - - local_irq_restore(flags); -} - -/* - * A faster implementation of memset() when tracking is enabled. 
We cannot - * assume that all pages within the range are tracked, so copying has to be - * split into page-sized (or smaller, for the ends) chunks. - * - * This function is NOT supposed to be used directly by modules; instead, - * when kmemcheck is enabled, memset() is defined as a macro which will call - * kmemcheck_memset(); for that reason is the function exported. - */ -void *kmemcheck_memset(void *s, int c, size_t n) -{ - unsigned long addr; - unsigned long start_page, start_offset; - unsigned long end_page, end_offset; - unsigned long i; - - if (!n) - return s; - - if (!slab_is_available()) { - __memset(s, c, n); - return s; - } - - addr = (unsigned long) s; - - start_page = addr & PAGE_MASK; - end_page = (addr + n) & PAGE_MASK; - - if (start_page == end_page) { - /* - * The entire area is within the same page. Good, we only - * need one memset(). - */ - memset_one_page(s, c, n); - return s; - } - - start_offset = addr & ~PAGE_MASK; - end_offset = (addr + n) & ~PAGE_MASK; - - /* Clear the head, body, and tail of the memory area. */ - if (start_offset < PAGE_SIZE) - memset_one_page(s, c, PAGE_SIZE - start_offset); - for (i = start_page + PAGE_SIZE; i < end_page; i += PAGE_SIZE) - memset_one_page((void *) i, c, PAGE_SIZE); - if (end_offset > 0) - memset_one_page((void *) end_page, c, end_offset); - - return s; -} - -EXPORT_SYMBOL(kmemcheck_memset); diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h index a68be2408e72..193578cd1fd9 100644 --- a/include/asm-x86/string_32.h +++ b/include/asm-x86/string_32.h @@ -315,14 +315,6 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, (count)) \ : __memset((s), (c), (count))) -/* If kmemcheck is enabled, our best bet is a custom memset() that disables - * checking in order to save a whole lot of (unnecessary) page faults. */ -#ifdef CONFIG_KMEMCHECK -void *kmemcheck_memset(void *s, int c, size_t n); -#undef memset -#define memset(s, c, n) kmemcheck_memset((s), (c), (n)) -#endif - /* * find the first occurrence of byte 'c', or 1 past the area if none */ diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h index 49874fdb2c51..52b5ab383395 100644 --- a/include/asm-x86/string_64.h +++ b/include/asm-x86/string_64.h @@ -45,7 +45,6 @@ extern void *__memcpy(void *to, const void *from, size_t len); #define __HAVE_ARCH_MEMSET void *memset(void *s, int c, size_t n); -void *__memset(void *s, int c, size_t n); #define __HAVE_ARCH_MEMMOVE void *memmove(void *dest, const void *src, size_t count); -- cgit v1.2.3 From 6c580f8230aa435a06c9051cfce023321665fcfe Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sun, 7 Sep 2008 19:31:16 +0200 Subject: kmemcheck: rip out SMP code The code is BROKEN and useless. If somebody finds a way to make it work, the history is there and the code may be reintroduced. Per-CPU page tables is probably the right way to do this properly. 
Signed-off-by: Vegard Nossum --- arch/x86/Kconfig.debug | 22 ----------- arch/x86/mm/kmemcheck/Makefile | 2 - arch/x86/mm/kmemcheck/kmemcheck.c | 22 ++--------- arch/x86/mm/kmemcheck/smp.c | 80 --------------------------------------- arch/x86/mm/kmemcheck/smp.h | 23 ----------- 5 files changed, 3 insertions(+), 146 deletions(-) delete mode 100644 arch/x86/mm/kmemcheck/smp.c delete mode 100644 arch/x86/mm/kmemcheck/smp.h diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 0d618abacfb7..91aa7cef327e 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -320,28 +320,6 @@ config KMEMCHECK_ONESHOT_BY_DEFAULT endchoice -config KMEMCHECK_USE_SMP - bool "kmemcheck: use multiple CPUs" - depends on KMEMCHECK - depends on SMP - depends on BROKEN - default n - help - This option will prevent kmemcheck from disabling all but one CPU - on boot. This means that whenever a page fault is taken, all the - other CPUs in the system are halted. This is potentially extremely - expensive, depending on the number of CPUs in the system (the more - the worse). - - The upside is that kmemcheck can be compiled into the kernel with - very little overhead by default if kmemcheck is disabled at run- - time. - - If you want to compile a kernel specifically for the purpose of - playing with kmemcheck, you should say N here. If you want a normal - kernel with the possibility of enabling kmemcheck without - recompiling, you should say Y here. - config KMEMCHECK_QUEUE_SIZE int "kmemcheck: error queue size" depends on KMEMCHECK diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile index 138a6ad2945e..4666b7a778be 100644 --- a/arch/x86/mm/kmemcheck/Makefile +++ b/arch/x86/mm/kmemcheck/Makefile @@ -1,3 +1 @@ obj-y := error.o kmemcheck.o opcode.o pte.o shadow.o - -obj-$(CONFIG_KMEMCHECK_USE_SMP) += smp.o diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 427e160da188..eef8c6ab9f35 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -30,15 +30,11 @@ #include "opcode.h" #include "pte.h" #include "shadow.h" -#include "smp.h" void __init kmemcheck_init(void) { printk(KERN_INFO "kmemcheck: \"Bugs, beware!\"\n"); - kmemcheck_smp_init(); - -#if defined(CONFIG_SMP) && !defined(CONFIG_KMEMCHECK_USE_SMP) /* * Limit SMP to use a single CPU. We rely on the fact that this code * runs before SMP is set up. @@ -48,7 +44,6 @@ void __init kmemcheck_init(void) "kmemcheck: Limiting number of CPUs to 1.\n"); setup_max_cpus = 1; } -#endif } #ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT @@ -186,13 +181,10 @@ void kmemcheck_show(struct pt_regs *regs) BUG_ON(!irqs_disabled()); - kmemcheck_pause_allbutself(); - if (unlikely(data->balance != 0)) { kmemcheck_show_all(); kmemcheck_error_save_bug(regs); data->balance = 0; - kmemcheck_resume(); return; } @@ -200,10 +192,8 @@ void kmemcheck_show(struct pt_regs *regs) * None of the addresses actually belonged to kmemcheck. Note that * this is not an error. 
*/ - if (kmemcheck_show_all() == 0) { - kmemcheck_resume(); + if (kmemcheck_show_all() == 0) return; - } ++data->balance; @@ -233,10 +223,8 @@ void kmemcheck_hide(struct pt_regs *regs) BUG_ON(!irqs_disabled()); - if (data->balance == 0) { - kmemcheck_resume(); + if (data->balance == 0) return; - } if (unlikely(data->balance != 1)) { kmemcheck_show_all(); @@ -248,7 +236,6 @@ void kmemcheck_hide(struct pt_regs *regs) regs->flags &= ~X86_EFLAGS_TF; if (data->flags & X86_EFLAGS_IF) regs->flags |= X86_EFLAGS_IF; - kmemcheck_resume(); return; } @@ -263,10 +250,8 @@ void kmemcheck_hide(struct pt_regs *regs) else n = kmemcheck_show_all(); - if (n == 0) { - kmemcheck_resume(); + if (n == 0) return; - } --data->balance; @@ -276,7 +261,6 @@ void kmemcheck_hide(struct pt_regs *regs) regs->flags &= ~X86_EFLAGS_TF; if (data->flags & X86_EFLAGS_IF) regs->flags |= X86_EFLAGS_IF; - kmemcheck_resume(); } void kmemcheck_show_pages(struct page *p, unsigned int n) diff --git a/arch/x86/mm/kmemcheck/smp.c b/arch/x86/mm/kmemcheck/smp.c deleted file mode 100644 index 62b604992c63..000000000000 --- a/arch/x86/mm/kmemcheck/smp.c +++ /dev/null @@ -1,80 +0,0 @@ -#include -#include -#include - -#include - -#include "smp.h" -#include - -static DEFINE_SPINLOCK(nmi_spinlock); - -static atomic_t nmi_wait; -static atomic_t nmi_resume; -static atomic_t paused; - -static int nmi_notifier(struct notifier_block *self, - unsigned long val, void *data) -{ - if (val != DIE_NMI_IPI || !atomic_read(&nmi_wait)) - return NOTIFY_DONE; - - atomic_inc(&paused); - - /* Pause until the fault has been handled */ - while (!atomic_read(&nmi_resume)) - cpu_relax(); - - atomic_dec(&paused); - - return NOTIFY_STOP; -} - -static struct notifier_block nmi_nb = { - .notifier_call = &nmi_notifier, -}; - -void kmemcheck_smp_init(void) -{ - int err; - - err = register_die_notifier(&nmi_nb); - BUG_ON(err); -} - -void kmemcheck_pause_allbutself(void) -{ - int cpus; - cpumask_t mask = cpu_online_map; - - spin_lock(&nmi_spinlock); - - cpus = num_online_cpus() - 1; - - atomic_set(&paused, 0); - atomic_set(&nmi_wait, 1); - atomic_set(&nmi_resume, 0); - - cpu_clear(safe_smp_processor_id(), mask); - if (!cpus_empty(mask)) - send_IPI_mask(mask, NMI_VECTOR); - - while (atomic_read(&paused) != cpus) - cpu_relax(); - - atomic_set(&nmi_wait, 0); -} - -void kmemcheck_resume(void) -{ - int cpus; - - cpus = num_online_cpus() - 1; - - atomic_set(&nmi_resume, 1); - - while (atomic_read(&paused) != 0) - cpu_relax(); - - spin_unlock(&nmi_spinlock); -} diff --git a/arch/x86/mm/kmemcheck/smp.h b/arch/x86/mm/kmemcheck/smp.h deleted file mode 100644 index dc65f16e3ac6..000000000000 --- a/arch/x86/mm/kmemcheck/smp.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef ARCH__X86__MM__KMEMCHECK__SMP_H -#define ARCH__X86__MM__KMEMCHECK__SMP_H - -#ifdef CONFIG_KMEMCHECK_USE_SMP -void kmemcheck_smp_init(void); - -void kmemcheck_pause_allbutself(void); -void kmemcheck_resume(void); -#else -static inline void kmemcheck_smp_init(void) -{ -} - -static inline void kmemcheck_pause_allbutself(void) -{ -} - -static inline void kmemcheck_resume(void) -{ -} -#endif - -#endif -- cgit v1.2.3 From 4b7f562ea9736b8b111ffeb60a7311075ce76849 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 8 Sep 2008 16:50:43 +0200 Subject: kmemcheck: build fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix: arch/x86/mm/kmemcheck/kmemcheck.c: In function ‘kmemcheck_init’: arch/x86/mm/kmemcheck/kmemcheck.c:42: error: ‘setup_max_cpus’ undeclared (first use in this function) 
arch/x86/mm/kmemcheck/kmemcheck.c:42: error: (Each undeclared identifier is reported only once arch/x86/mm/kmemcheck/kmemcheck.c:42: error: for each function it appears in.) Signed-off-by: Ingo Molnar --- arch/x86/mm/kmemcheck/kmemcheck.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index eef8c6ab9f35..2c259e432aa3 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -35,6 +35,7 @@ void __init kmemcheck_init(void) { printk(KERN_INFO "kmemcheck: \"Bugs, beware!\"\n"); +#ifdef CONFIG_SMP /* * Limit SMP to use a single CPU. We rely on the fact that this code * runs before SMP is set up. @@ -44,6 +45,7 @@ void __init kmemcheck_init(void) "kmemcheck: Limiting number of CPUs to 1.\n"); setup_max_cpus = 1; } +#endif } #ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT -- cgit v1.2.3 From 9228010f12bdbabef9393ed202653da9611acc66 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Fri, 12 Sep 2008 21:47:25 +0200 Subject: kmemcheck: hide/show pages in each iteration of a REP instruction #2 I made a mistake when fixing the REP handling the first time around; after the previous patch, we would rewind the instruction pointer too far (back to the REP prefix), and we would get another page fault. This patch is admittedly not the most elegant I've seen, but it will do the trick for now. Now we rewind only back to the instruction proper, and single-step that over and over again, as was originally intended. Signed-off-by: Vegard Nossum --- arch/x86/mm/kmemcheck/kmemcheck.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 2c259e432aa3..426beca599f3 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -604,7 +604,11 @@ bool kmemcheck_trap(struct pt_regs *regs) regs->cx = cx; #endif if (cx) { + unsigned long rep = (unsigned long) data->rep; kmemcheck_hide(regs); + /* Without the REP prefix, we have to do this ourselves... */ + data->rep = (void *) rep; + regs->ip = rep + 1; switch (data->insn[0]) { case 0xa4: -- cgit v1.2.3 From f5e7e97f133e8295be0d049d811914c111b1775c Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Thu, 11 Sep 2008 17:31:07 +0200 Subject: kmemcheck: lazy checking for MOVS instructions This patch adds the support for lazy (as opposed to eager) checking for [REP] MOVS instructions (mostly used in memcpy()). This means that if both the source and destination addresses are tracked by kmemcheck, we copy the shadow memory instead of checking that it is initialized. In this way, we get rid of a few more false positives. Signed-off-by: Vegard Nossum --- arch/x86/mm/kmemcheck/kmemcheck.c | 123 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 117 insertions(+), 6 deletions(-) diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 426beca599f3..5a08a70a9ddf 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -393,6 +393,120 @@ static void kmemcheck_write(struct pt_regs *regs, kmemcheck_write_strict(regs, next_page, next_addr - next_page); } +/* + * Copying is hard. We have two addresses, each of which may be split across + * a page (and each page will have different shadow addresses). 
+ */ +static void kmemcheck_copy(struct pt_regs *regs, + unsigned long src_addr, unsigned long dst_addr, unsigned int size) +{ + uint8_t shadow[8]; + enum kmemcheck_shadow status; + + unsigned long page; + unsigned long next_addr; + unsigned long next_page; + + uint8_t *x; + unsigned int i; + unsigned int n; + + BUG_ON(size > sizeof(shadow)); + + page = src_addr & PAGE_MASK; + next_addr = src_addr + size - 1; + next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + /* Same page */ + x = kmemcheck_shadow_lookup(src_addr); + if (x) { + kmemcheck_save_addr(src_addr); + for (i = 0; i < size; ++i) + shadow[i] = x[i]; + } else { + for (i = 0; i < size; ++i) + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } else { + n = next_page - src_addr; + BUG_ON(n > sizeof(shadow)); + + /* First page */ + x = kmemcheck_shadow_lookup(src_addr); + if (x) { + kmemcheck_save_addr(src_addr); + for (i = 0; i < n; ++i) + shadow[i] = x[i]; + } else { + /* Not tracked */ + for (i = 0; i < n; ++i) + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + + /* Second page */ + x = kmemcheck_shadow_lookup(next_page); + if (x) { + kmemcheck_save_addr(next_page); + for (i = n; i < size; ++i) + shadow[i] = x[i - n]; + } else { + /* Not tracked */ + for (i = n; i < size; ++i) + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + + page = dst_addr & PAGE_MASK; + next_addr = dst_addr + size - 1; + next_page = next_addr & PAGE_MASK; + + if (likely(page == next_page)) { + /* Same page */ + x = kmemcheck_shadow_lookup(dst_addr); + if (x) { + kmemcheck_save_addr(dst_addr); + for (i = 0; i < size; ++i) { + x[i] = shadow[i]; + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + } else { + n = next_page - dst_addr; + BUG_ON(n > sizeof(shadow)); + + /* First page */ + x = kmemcheck_shadow_lookup(dst_addr); + if (x) { + kmemcheck_save_addr(dst_addr); + for (i = 0; i < n; ++i) { + x[i] = shadow[i]; + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + + /* Second page */ + x = kmemcheck_shadow_lookup(next_page); + if (x) { + kmemcheck_save_addr(next_page); + for (i = n; i < size; ++i) { + x[i - n] = shadow[i]; + shadow[i] = KMEMCHECK_SHADOW_INITIALIZED; + } + } + } + + status = kmemcheck_shadow_test(shadow, size); + if (status == KMEMCHECK_SHADOW_INITIALIZED) + return; + + if (kmemcheck_enabled) + kmemcheck_error_save(status, src_addr, size, regs); + + if (kmemcheck_enabled == 2) + kmemcheck_enabled = 0; +} + enum kmemcheck_method { KMEMCHECK_READ, KMEMCHECK_WRITE, @@ -448,8 +562,7 @@ static void kmemcheck_access(struct pt_regs *regs, case 0xa5: BUG_ON(regs->ip != (unsigned long) rep_prefix); - kmemcheck_read(regs, regs->si, size); - kmemcheck_write(regs, regs->di, size); + kmemcheck_copy(regs, regs->si, regs->di, size); data->rep = rep_prefix; data->rex = rex_prefix; data->insn = insn_primary; @@ -516,8 +629,7 @@ static void kmemcheck_access(struct pt_regs *regs, * These instructions are special because they take two * addresses, but we only get one page fault. 
*/ - kmemcheck_read(regs, regs->si, size); - kmemcheck_write(regs, regs->di, size); + kmemcheck_copy(regs, regs->si, regs->di, size); goto out; /* CMPS, CMPSB, CMPSW, CMPSD */ @@ -613,8 +725,7 @@ bool kmemcheck_trap(struct pt_regs *regs) switch (data->insn[0]) { case 0xa4: case 0xa5: - kmemcheck_read(regs, regs->si, data->size); - kmemcheck_write(regs, regs->di, data->size); + kmemcheck_copy(regs, regs->si, regs->di, data->size); break; case 0xaa: case 0xab: -- cgit v1.2.3 From a7fff94fffaa0cecb923d6c1bb9b7691f608b49a Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sun, 21 Sep 2008 11:09:25 +0200 Subject: Revert "kmemcheck: use set_memory_4k() instead of disabling PSE" This reverts commit 10142e6c6778e201216f68bb378e139de7ee2551. Unfortunately, set_memory_4k() does not work in atomic context (well, when irqs are disabled), because it needs to flush TLBs on all CPUs. While this shouldn't be a problem on uniprocessor, the pagetable API does not distinguish between SMP and non-SMP and never allows changing the page tables when irqs are disabled. Conflicts: arch/x86/kernel/cpu/common.c arch/x86/kernel/kmemcheck.c arch/x86/mm/fault.c arch/x86/mm/init_32.c include/asm-x86/kmemcheck.h Signed-off-by: Vegard Nossum --- arch/x86/kernel/cpu/common.c | 7 +++++++ arch/x86/kernel/head_32.S | 2 +- arch/x86/mm/init_32.c | 2 +- mm/kmemcheck.c | 8 ++++++++ 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 80ab20d4fa39..d4300f29b524 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -628,6 +628,13 @@ void __init early_cpu_init(void) early_cpu_detect(); validate_pat_support(&boot_cpu_data); + +#ifdef CONFIG_KMEMCHECK + /* + * We need 4K granular PTEs for kmemcheck: + */ + setup_clear_cpu_cap(X86_FEATURE_PSE); +#endif } /* Make sure %fs is initialized properly in idle threads */ diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index a7010c3a377a..3c3b7e66294f 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -60,7 +60,7 @@ LOW_PAGES = 1<<(32-PAGE_SHIFT_asm) * pagetables from above the 16MB DMA limit, so we'll have to set * up pagetables 16MB more (worst-case): */ -#ifdef CONFIG_DEBUG_PAGEALLOC +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) LOW_PAGES = LOW_PAGES + 0x1000000 #endif diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index d37f29376b0c..4e8555aa1858 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -118,7 +118,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) pte_t *page_table = NULL; if (after_init_bootmem) { -#ifdef CONFIG_DEBUG_PAGEALLOC +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); #endif if (!page_table) diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c index eaa41b802611..ffe5a8cc1e9b 100644 --- a/mm/kmemcheck.c +++ b/mm/kmemcheck.c @@ -10,6 +10,14 @@ void kmemcheck_alloc_shadow(struct kmem_cache *s, gfp_t flags, int node, int pages; int i; +#ifdef CONFIG_X86_64 + /* XXX: x86_64 doesn't honour PSE capabilities, so we need the call + * to set_memory_4k(). However, that one wants to flush all CPUs, + * which doesn't work when irqs are disabled. 
Temporary hack: */ + if (irqs_disabled()) + return; +#endif + pages = 1 << order; /* -- cgit v1.2.3 From 9f24b3ed3058f022dad24f8304b9ad53adf5aa71 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sun, 21 Sep 2008 12:21:46 +0200 Subject: x86: use REP MOVS instruction for memcpy if kmemcheck is enabled Since the REP MOVS instruction carries both operands, it means that we can copy the shadow bits instead of reporting the load eagerly. Signed-off-by: Vegard Nossum --- arch/x86/Makefile | 5 +++++ include/asm-x86/string_32.h | 8 ++++++++ include/asm-x86/string_64.h | 8 ++++++++ 3 files changed, 21 insertions(+) diff --git a/arch/x86/Makefile b/arch/x86/Makefile index f5631da585b6..8a955ce46861 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -80,6 +80,11 @@ else KBUILD_CFLAGS += $(stackp-y) endif +# Don't unroll struct assignments with kmemcheck enabled +ifeq ($(CONFIG_KMEMCHECK),y) + KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy) +endif + # Stackpointer is addressed different for 32 bit and 64 bit x86 sp-$(CONFIG_X86_32) := esp sp-$(CONFIG_X86_64) := rsp diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h index 193578cd1fd9..d283008a5f77 100644 --- a/include/asm-x86/string_32.h +++ b/include/asm-x86/string_32.h @@ -177,10 +177,18 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) * No 3D Now! */ +#ifndef CONFIG_KMEMCHECK #define memcpy(t, f, n) \ (__builtin_constant_p((n)) \ ? __constant_memcpy((t), (f), (n)) \ : __memcpy((t), (f), (n))) +#else +/* + * kmemcheck becomes very happy if we use the REP instructions unconditionally, + * because it means that we know both memory operands in advance. + */ +#define memcpy(t, f, n) __memcpy((t), (f), (n)) +#endif #endif diff --git a/include/asm-x86/string_64.h b/include/asm-x86/string_64.h index 52b5ab383395..c5897524e6a2 100644 --- a/include/asm-x86/string_64.h +++ b/include/asm-x86/string_64.h @@ -27,6 +27,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t function. */ #define __HAVE_ARCH_MEMCPY 1 +#ifndef CONFIG_KMEMCHECK #if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 extern void *memcpy(void *to, const void *from, size_t len); #else @@ -42,6 +43,13 @@ extern void *__memcpy(void *to, const void *from, size_t len); __ret; \ }) #endif +#else +/* + * kmemcheck becomes very happy if we use the REP instructions unconditionally, + * because it means that we know both memory operands in advance. + */ +#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len)) +#endif #define __HAVE_ARCH_MEMSET void *memset(void *s, int c, size_t n); -- cgit v1.2.3 From c9f1cb5d7507c67cc53c8c9932b2c79343228c8c Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sun, 5 Oct 2008 19:22:30 +0200 Subject: kmemcheck: use set_memory_4k() on x86_64 only x86_32 already disables PSE capabilities and is fine. We need this to avoid the BUG in cache-flushing SMP call. 
Signed-off-by: Vegard Nossum Signed-off-by: Ingo Molnar --- arch/x86/mm/kmemcheck/kmemcheck.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index 5a08a70a9ddf..d649aa76ed3c 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -295,7 +295,9 @@ void kmemcheck_hide_pages(struct page *p, unsigned int n) { unsigned int i; +#ifdef CONFIG_X86_64 set_memory_4k((unsigned long) page_address(p), n); +#endif for (i = 0; i < n; ++i) { unsigned long address; -- cgit v1.2.3 From c1f93ec18664295caea7aa2f2ceda021a25b96bc Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sun, 5 Oct 2008 19:25:04 +0200 Subject: kmemcheck: fix crash in PnP BIOS calls Ingo Molnar reported this crash: > PnPBIOS: Scanning system for PnP BIOS support... > PnPBIOS: Found PnP BIOS installation structure at 0xc00fc550 > PnPBIOS: PnP BIOS version 1.0, entry 0xf0000:0xc580, dseg 0xf0000 > BUG: unable to handle kernel paging request at 0000c6ef It turns out that BIOS calls are made with a different code segment. So when kmemcheck tries to dereference the EIP/RIP (using the kernel data segment register), we get the unhandled page fault. I think we can solve this by verifying (in the page fault handler) that the faulting code is using the kernel CS. Signed-off-by: Vegard Nossum Signed-off-by: Ingo Molnar --- arch/x86/mm/kmemcheck/kmemcheck.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c index d649aa76ed3c..bd739a4fa339 100644 --- a/arch/x86/mm/kmemcheck/kmemcheck.c +++ b/arch/x86/mm/kmemcheck/kmemcheck.c @@ -666,6 +666,17 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address, pte_t *pte; unsigned int level; + /* + * XXX: Is it safe to assume that memory accesses from virtual 86 + * mode or non-kernel code segments will _never_ access kernel + * memory (e.g. tracked pages)? For now, we need this to avoid + * invoking kmemcheck for PnP BIOS calls. + */ + if (regs->flags & X86_VM_MASK) + return false; + if (regs->cs != __KERNEL_CS) + return false; + pte = lookup_address(address, &level); if (!pte) return false; -- cgit v1.2.3 From f2d5bcd8284c28cf559f06dc0d12472648c33878 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Mon, 6 Oct 2008 14:49:06 +0200 Subject: kmemcheck: tag warning printks To help things like Ingo's auto-QA or kerneloops pick them up. Signed-off-by: Vegard Nossum Cc: "Pekka Enberg" Signed-off-by: Ingo Molnar --- arch/x86/mm/kmemcheck/error.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c index 0066517811e2..5ec9f5a93f47 100644 --- a/arch/x86/mm/kmemcheck/error.c +++ b/arch/x86/mm/kmemcheck/error.c @@ -185,7 +185,7 @@ void kmemcheck_error_recall(void) switch (e->type) { case KMEMCHECK_ERROR_INVALID_ACCESS: - printk(KERN_ERR "kmemcheck: Caught %d-bit read " + printk(KERN_ERR "WARNING: kmemcheck: Caught %d-bit read " "from %s memory (%p)\n", 8 * e->size, e->state < ARRAY_SIZE(desc) ? desc[e->state] : "(invalid shadow state)", @@ -208,7 +208,7 @@ void kmemcheck_error_recall(void) * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^'); break; case KMEMCHECK_ERROR_BUG: - printk(KERN_EMERG "kmemcheck: Fatal error\n"); + printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n"); break; } -- cgit v1.2.3
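
For reference, the following stand-alone user-space sketch (not taken from any of the patches above) models the operand-size decoding rules that the patched kmemcheck_opcode_decode() applies: legacy prefixes are skipped while noting a REP prefix (0xf2/0xf3) and the 0x66 operand-size override, a REX.W prefix forces a 64-bit operand, the two-byte MOVZX opcodes (0x0f 0xb6/0xb7) have fixed sizes, and otherwise bit 0 of the primary opcode selects byte versus full operand size. The helper names and the test bytes below are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Legacy prefixes: LOCK/REPNE/REP, segment overrides, operand/address size. */
static bool is_prefix(uint8_t b)
{
	return b == 0xf0 || b == 0xf2 || b == 0xf3
		|| b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
		|| b == 0x64 || b == 0x65 || b == 0x66 || b == 0x67;
}

/* REX prefixes only exist in 64-bit mode. */
static bool is_rex(uint8_t b)
{
	return (b & 0xf0) == 0x40;
}

/* Mirrors the decode logic of the patched kmemcheck_opcode_decode(). */
static void decode(const uint8_t *op, const uint8_t **rep,
		   const uint8_t **rex, unsigned int *size)
{
	unsigned int operand_size = 4; /* default operand size */

	*rep = NULL;
	for (; is_prefix(*op); ++op) {
		if (*op == 0xf2 || *op == 0xf3)
			*rep = op;
		if (*op == 0x66)
			operand_size = 2;
	}

	*rex = NULL;
	if (is_rex(*op)) {
		*rex = op;
		if (*op & 0x08) { /* REX.W selects a 64-bit operand */
			*size = 8;
			return;
		}
		++op;
	}

	if (*op == 0x0f) { /* two-byte opcodes: MOVZX has fixed sizes */
		++op;
		if (*op == 0xb6) { *size = 1; return; }
		if (*op == 0xb7) { *size = 2; return; }
	}

	/* Bit 0 of the primary opcode: byte (0) vs. full operand size (1). */
	*size = (*op & 1) ? operand_size : 1;
}

int main(void)
{
	static const uint8_t rep_movsq[] = { 0xf3, 0x48, 0xa5 }; /* REP MOVSQ */
	const uint8_t *rep, *rex;
	unsigned int size;

	decode(rep_movsq, &rep, &rex, &size);
	printf("rep=%d rex=%d size=%u\n", rep != NULL, rex != NULL, size);
	return 0;
}

Running it on the 0xf3 0x48 0xa5 byte sequence (REP MOVSQ) should report a REP prefix, a REX prefix, and an 8-byte operand size, which is what the kernel-side decoder would feed into the REP MOVS/STOS emulation described above.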