diff options
author | Stephen Rothwell <sfr@canb.auug.org.au> | 2008-09-04 12:04:20 +1000 |
---|---|---|
committer | Stephen Rothwell <sfr@canb.auug.org.au> | 2008-09-04 12:04:20 +1000 |
commit | f095316e21107fc5b28cccdd7ebc141750ac5921 (patch) | |
tree | d083f1dcad9218ceac618377bc6d119c00cedbb6 /include/asm-x86 | |
parent | 35284746d3a0cff04dcfa13a2afd5e872de78783 (diff) | |
parent | 6f8ea076573ba5b0fd63de20031fb9f70d085be8 (diff) |
Merge commit 'ftrace/auto-ftrace-next'
Conflicts:
kernel/module.c
Diffstat (limited to 'include/asm-x86')
-rw-r--r-- | include/asm-x86/ftrace.h | 10 | ||||
-rw-r--r-- | include/asm-x86/irqflags.h | 56 | ||||
-rw-r--r-- | include/asm-x86/paravirt.h | 5 |
3 files changed, 71 insertions, 0 deletions
diff --git a/include/asm-x86/ftrace.h b/include/asm-x86/ftrace.h index be0e004ad148..1bb6f9bbe1ab 100644 --- a/include/asm-x86/ftrace.h +++ b/include/asm-x86/ftrace.h @@ -7,6 +7,16 @@ #ifndef __ASSEMBLY__ extern void mcount(void); + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* + * call mcount is "e8 <4 byte offset>" + * The addr points to the 4 byte offset and the caller of this + * function wants the pointer to e8. Simply subtract one. + */ + return addr - 1; +} #endif #endif /* CONFIG_FTRACE */ diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h index 424acb48cd61..13980f82fd69 100644 --- a/include/asm-x86/irqflags.h +++ b/include/asm-x86/irqflags.h @@ -51,6 +51,61 @@ static inline void native_halt(void) #endif +#ifdef CONFIG_X86_64 +/* + * Only returns from a trap or exception to a NMI context (intra-privilege + * level near return) to the same SS and CS segments. Should be used + * upon trap or exception return when nested over a NMI context so no iret is + * issued. It takes care of modifying the eflags, rsp and returning to the + * previous function. + * + * The stack, at that point, looks like : + * + * 0(rsp) RIP + * 8(rsp) CS + * 16(rsp) EFLAGS + * 24(rsp) RSP + * 32(rsp) SS + * + * Upon execution : + * Copy EIP to the top of the return stack + * Update top of return stack address + * Pop eflags into the eflags register + * Make the return stack current + * Near return (popping the return address from the return stack) + */ +#define NATIVE_INTERRUPT_RETURN_NMI_SAFE pushq %rax; \ + movq %rsp, %rax; \ + movq 24+8(%rax), %rsp; \ + pushq 0+8(%rax); \ + pushq 16+8(%rax); \ + movq (%rax), %rax; \ + popfq; \ + ret +#else +/* + * Protected mode only, no V8086. Implies that protected mode must + * be entered before NMIs or MCEs are enabled. Only returns from a trap or + * exception to a NMI context (intra-privilege level far return). Should be used + * upon trap or exception return when nested over a NMI context so no iret is + * issued. + * + * The stack, at that point, looks like : + * + * 0(esp) EIP + * 4(esp) CS + * 8(esp) EFLAGS + * + * Upon execution : + * Copy the stack eflags to top of stack + * Pop eflags into the eflags register + * Far return: pop EIP and CS into their register, and additionally pop EFLAGS. + */ +#define NATIVE_INTERRUPT_RETURN_NMI_SAFE pushl 8(%esp); \ + popfl; \ + lret $4 +#endif + #ifdef CONFIG_PARAVIRT #include <asm/paravirt.h> #else @@ -109,6 +164,7 @@ static inline unsigned long __raw_local_irq_save(void) #define ENABLE_INTERRUPTS(x) sti #define DISABLE_INTERRUPTS(x) cli +#define INTERRUPT_RETURN_NMI_SAFE NATIVE_INTERRUPT_RETURN_NMI_SAFE #ifdef CONFIG_X86_64 #define SWAPGS swapgs diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index 8d6ae2f760d0..d5808653db92 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h @@ -175,6 +175,7 @@ struct pv_cpu_ops { /* Normal iret. Jump to this with the standard iret stack frame set up. */ void (*iret)(void); + void (*nmi_return)(void); void (*swapgs)(void); @@ -1577,6 +1578,10 @@ static inline unsigned long __raw_local_irq_save(void) PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret)) +#define INTERRUPT_RETURN_NMI_SAFE \ + PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_nmi_return), CLBR_NONE, \ + jmp *%cs:pv_cpu_ops+PV_CPU_nmi_return) + #define DISABLE_INTERRUPTS(clobbers) \ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \ PV_SAVE_REGS; \ |