Diffstat (limited to 'arch/um/kernel')
-rw-r--r--   arch/um/kernel/exec.c        |   4
-rw-r--r--   arch/um/kernel/irq.c         | 171
-rw-r--r--   arch/um/kernel/process.c     |   2
-rw-r--r--   arch/um/kernel/skas/clone.c  |  28
-rw-r--r--   arch/um/kernel/skas/mmu.c    |  87
-rw-r--r--   arch/um/kernel/time.c        |  17
-rw-r--r--   arch/um/kernel/tlb.c         |  14
-rw-r--r--   arch/um/kernel/um_arch.c     |   5
8 files changed, 185 insertions, 143 deletions
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index e8fd5d540b05..4d8498100341 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -26,9 +26,7 @@ void flush_thread(void)
 
 	arch_flush_thread(&current->thread.arch);
 
-	ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data);
-	ret = ret || unmap(&current->mm->context.id, STUB_END,
-			   host_task_size - STUB_END, 1, &data);
+	ret = unmap(&current->mm->context.id, 0, TASK_SIZE, 1, &data);
 	if (ret) {
 		printk(KERN_ERR "flush_thread - clearing address space failed, "
 		       "err = %d\n", ret);
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 3741d2380060..82af5191e73d 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -20,7 +20,7 @@
 #include <os.h>
 #include <irq_user.h>
 #include <irq_kern.h>
-#include <as-layout.h>
+#include <linux/time-internal.h>
 
 extern void free_irqs(void);
 
@@ -38,6 +38,12 @@ struct irq_reg {
 	bool active;
 	bool pending;
 	bool wakeup;
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+	bool pending_on_resume;
+	void (*timetravel_handler)(int, int, void *,
+				   struct time_travel_event *);
+	struct time_travel_event event;
+#endif
 };
 
 struct irq_entry {
@@ -51,6 +57,7 @@ struct irq_entry {
 static DEFINE_SPINLOCK(irq_lock);
 static LIST_HEAD(active_fds);
 static DECLARE_BITMAP(irqs_allocated, NR_IRQS);
+static bool irqs_suspended;
 
 static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
 {
@@ -74,9 +81,65 @@ static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
 	}
 }
 
-void sigio_handler_suspend(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+static void irq_event_handler(struct time_travel_event *ev)
 {
-	/* nothing */
+	struct irq_reg *reg = container_of(ev, struct irq_reg, event);
+
+	/* do nothing if suspended - just to cause a wakeup */
+	if (irqs_suspended)
+		return;
+
+	generic_handle_irq(reg->irq);
+}
+
+static bool irq_do_timetravel_handler(struct irq_entry *entry,
+				      enum um_irq_type t)
+{
+	struct irq_reg *reg = &entry->reg[t];
+
+	if (!reg->timetravel_handler)
+		return false;
+
+	/* prevent nesting - we'll get it again later when we SIGIO ourselves */
+	if (reg->pending_on_resume)
+		return true;
+
+	reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);
+
+	if (!reg->event.pending)
+		return false;
+
+	if (irqs_suspended)
+		reg->pending_on_resume = true;
+	return true;
+}
+#else
+static bool irq_do_timetravel_handler(struct irq_entry *entry,
+				      enum um_irq_type t)
+{
+	return false;
+}
+#endif
+
+static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
+			      struct uml_pt_regs *regs)
+{
+	struct irq_reg *reg = &entry->reg[t];
+
+	if (!reg->events)
+		return;
+
+	if (os_epoll_triggered(idx, reg->events) <= 0)
+		return;
+
+	if (irq_do_timetravel_handler(entry, t))
+		return;
+
+	if (irqs_suspended)
+		return;
+
+	irq_io_loop(reg, regs);
 }
 
 void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
@@ -84,6 +147,9 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 	struct irq_entry *irq_entry;
 	int n, i;
 
+	if (irqs_suspended && !um_irq_timetravel_handler_used())
+		return;
+
 	while (1) {
 		/* This is now lockless - epoll keeps back-referencesto the irqs
 		 * which have trigger it so there is no need to walk the irq
@@ -105,19 +171,13 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 
 			irq_entry = os_epoll_get_data_pointer(i);
 
-			for (t = 0; t < NUM_IRQ_TYPES; t++) {
-				int events = irq_entry->reg[t].events;
-
-				if (!events)
-					continue;
-
-				if (os_epoll_triggered(i, events) > 0)
-					irq_io_loop(&irq_entry->reg[t], regs);
-			}
+			for (t = 0; t < NUM_IRQ_TYPES; t++)
+				sigio_reg_handler(i, irq_entry, t, regs);
 		}
 	}
 
-	free_irqs();
+	if (!irqs_suspended)
+		free_irqs();
 }
 
 static struct irq_entry *get_irq_entry_by_fd(int fd)
@@ -169,7 +229,9 @@ static void update_or_free_irq_entry(struct irq_entry *entry)
 		free_irq_entry(entry, false);
 }
 
-static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id)
+static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
+		       void (*timetravel_handler)(int, int, void *,
+						  struct time_travel_event *))
 {
 	struct irq_entry *irq_entry;
 	int err, events = os_event_mask(type);
@@ -206,6 +268,13 @@ static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id)
 	irq_entry->reg[type].active = true;
 	irq_entry->reg[type].events = events;
 
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+	if (um_irq_timetravel_handler_used()) {
+		irq_entry->reg[type].timetravel_handler = timetravel_handler;
+		irq_entry->reg[type].event.fn = irq_event_handler;
+	}
+#endif
+
 	WARN_ON(!update_irq_entry(irq_entry));
 	spin_unlock_irqrestore(&irq_lock, flags);
 
@@ -339,9 +408,12 @@ void um_free_irq(int irq, void *dev)
 }
 EXPORT_SYMBOL(um_free_irq);
 
-int um_request_irq(int irq, int fd, enum um_irq_type type,
-		   irq_handler_t handler, unsigned long irqflags,
-		   const char *devname, void *dev_id)
+static int
+_um_request_irq(int irq, int fd, enum um_irq_type type,
+		irq_handler_t handler, unsigned long irqflags,
+		const char *devname, void *dev_id,
+		void (*timetravel_handler)(int, int, void *,
+					   struct time_travel_event *))
 {
 	int err;
 
@@ -360,7 +432,7 @@ int um_request_irq(int irq, int fd, enum um_irq_type type,
 		return -ENOSPC;
 
 	if (fd != -1) {
-		err = activate_fd(irq, fd, type, dev_id);
+		err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
 		if (err)
 			goto error;
 	}
@@ -374,20 +446,41 @@ error:
 	clear_bit(irq, irqs_allocated);
 	return err;
 }
+
+int um_request_irq(int irq, int fd, enum um_irq_type type,
+		   irq_handler_t handler, unsigned long irqflags,
+		   const char *devname, void *dev_id)
+{
+	return _um_request_irq(irq, fd, type, handler, irqflags,
+			       devname, dev_id, NULL);
+}
 EXPORT_SYMBOL(um_request_irq);
 
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
+		      irq_handler_t handler, unsigned long irqflags,
+		      const char *devname, void *dev_id,
+		      void (*timetravel_handler)(int, int, void *,
+						 struct time_travel_event *))
+{
+	return _um_request_irq(irq, fd, type, handler, irqflags,
+			       devname, dev_id, timetravel_handler);
+}
+EXPORT_SYMBOL(um_request_irq_tt);
+#endif
+
 #ifdef CONFIG_PM_SLEEP
 void um_irqs_suspend(void)
 {
 	struct irq_entry *entry;
 	unsigned long flags;
 
-	sig_info[SIGIO] = sigio_handler_suspend;
+	irqs_suspended = true;
 
 	spin_lock_irqsave(&irq_lock, flags);
 	list_for_each_entry(entry, &active_fds, list) {
 		enum um_irq_type t;
-		bool wake = false;
+		bool clear = true;
 
 		for (t = 0; t < NUM_IRQ_TYPES; t++) {
 			if (!entry->reg[t].events)
@@ -400,13 +493,17 @@ void um_irqs_suspend(void)
 			 * any FDs that should be suspended.
 			 */
 			if (entry->reg[t].wakeup ||
-			    entry->reg[t].irq == SIGIO_WRITE_IRQ) {
-				wake = true;
+			    entry->reg[t].irq == SIGIO_WRITE_IRQ
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+			    || entry->reg[t].timetravel_handler
+#endif
+			    ) {
+				clear = false;
 				break;
 			}
 		}
 
-		if (!wake) {
+		if (clear) {
 			entry->suspended = true;
 			os_clear_fd_async(entry->fd);
 			entry->sigio_workaround =
@@ -421,7 +518,31 @@ void um_irqs_resume(void)
 	struct irq_entry *entry;
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq_lock, flags);
+
+	local_irq_save(flags);
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+	/*
+	 * We don't need to lock anything here since we're in resume
+	 * and nothing else is running, but have disabled IRQs so we
+	 * don't try anything else with the interrupt list from there.
+	 */
+	list_for_each_entry(entry, &active_fds, list) {
+		enum um_irq_type t;
+
+		for (t = 0; t < NUM_IRQ_TYPES; t++) {
+			struct irq_reg *reg = &entry->reg[t];
+
+			if (reg->pending_on_resume) {
+				irq_enter();
+				generic_handle_irq(reg->irq);
+				irq_exit();
+				reg->pending_on_resume = false;
+			}
+		}
+	}
+#endif
+
+	spin_lock(&irq_lock);
 	list_for_each_entry(entry, &active_fds, list) {
 		if (entry->suspended) {
 			int err = os_set_fd_async(entry->fd);
@@ -437,7 +558,7 @@
 	}
 	spin_unlock_irqrestore(&irq_lock, flags);
 
-	sig_info[SIGIO] = sigio_handler;
+	irqs_suspended = false;
 
 	send_sigio_to_self();
 }
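The new um_request_irq_tt() entry point lets a driver attach a time-travel handler that is called straight from the SIGIO path; that handler schedules a time_travel_event, and generic_handle_irq() is invoked once simulated time reaches it. A minimal sketch of how a caller might use it (my_handler, my_tt_handler, my_setup and the zero delay are illustrative assumptions, not part of this diff, and it presumes CONFIG_UML_TIME_TRAVEL_SUPPORT=y):

#include <linux/interrupt.h>
#include <linux/time-internal.h>
#include <irq_kern.h>

/* ordinary Linux-side interrupt handler, reached via generic_handle_irq() */
static irqreturn_t my_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/* called directly from the SIGIO path while time-travel mode is active */
static void my_tt_handler(int irq, int fd, void *dev_id,
			  struct time_travel_event *ev)
{
	/* deliver the interrupt "now" in simulated time; irq_event_handler()
	 * in the patch above then calls generic_handle_irq() for this irq */
	time_travel_add_event_rel(ev, 0);
}

static int my_setup(int my_irq, int my_fd, void *my_dev)
{
	return um_request_irq_tt(my_irq, my_fd, IRQ_READ, my_handler, 0,
				 "my-device", my_dev, my_tt_handler);
}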
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 81d508daf67c..c5011064b5dd 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -157,7 +157,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 		unsigned long arg, struct task_struct * p, unsigned long tls)
 {
 	void (*handler)(void);
-	int kthread = current->flags & PF_KTHREAD;
+	int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER);
 	int ret = 0;
 
 	p->thread = (struct thread_struct) INIT_THREAD;
diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
index bfb70c456b30..592cdb138441 100644
--- a/arch/um/kernel/skas/clone.c
+++ b/arch/um/kernel/skas/clone.c
@@ -24,29 +24,25 @@ void __attribute__ ((__section__ (".__syscall_stub")))
 stub_clone_handler(void)
 {
-	struct stub_data *data = (struct stub_data *) STUB_DATA;
+	int stack;
+	struct stub_data *data = (void *) ((unsigned long)&stack & ~(UM_KERN_PAGE_SIZE - 1));
 	long err;
 
 	err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD,
-			    STUB_DATA + UM_KERN_PAGE_SIZE / 2 - sizeof(void *));
-	if (err != 0)
-		goto out;
+			    (unsigned long)data + UM_KERN_PAGE_SIZE / 2 - sizeof(void *));
+	if (err) {
+		data->parent_err = err;
+		goto done;
+	}
 
 	err = stub_syscall4(__NR_ptrace, PTRACE_TRACEME, 0, 0, 0);
-	if (err)
-		goto out;
+	if (err) {
+		data->child_err = err;
+		goto done;
+	}
 
-	remap_stack(data->fd, data->offset);
-	goto done;
+	remap_stack_and_trap();
 
- out:
-	/*
-	 * save current result.
-	 * Parent: pid;
-	 * child: retcode of mmap already saved and it jumps around this
-	 * assignment
-	 */
-	data->err = err;
 
 done:
 	trap_myself();
 }
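The stub no longer hard-codes STUB_DATA: since the clone call above points the child's stack into the stub's data page (data + UM_KERN_PAGE_SIZE / 2), the address of any on-stack variable can be masked down to the base of that page. A stand-alone illustration of the arithmetic (user-space demo, 4 KiB page size assumed for the example):

#include <stdio.h>

#define UM_KERN_PAGE_SIZE 0x1000UL	/* assumed 4 KiB page for the demo */

int main(void)
{
	int stack;
	/* same expression as in stub_clone_handler() above */
	unsigned long page = (unsigned long)&stack & ~(UM_KERN_PAGE_SIZE - 1);

	printf("local variable at %p -> containing page base 0x%lx\n",
	       (void *)&stack, page);
	return 0;
}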
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index d9961163da66..125df465e8ea 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -14,47 +14,6 @@
 #include <os.h>
 #include <skas.h>
 
-static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
-			 unsigned long kernel)
-{
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-
-	pgd = pgd_offset(mm, proc);
-
-	p4d = p4d_alloc(mm, pgd, proc);
-	if (!p4d)
-		goto out;
-
-	pud = pud_alloc(mm, p4d, proc);
-	if (!pud)
-		goto out_pud;
-
-	pmd = pmd_alloc(mm, pud, proc);
-	if (!pmd)
-		goto out_pmd;
-
-	pte = pte_alloc_map(mm, pmd, proc);
-	if (!pte)
-		goto out_pte;
-
-	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
-	*pte = pte_mkread(*pte);
-	return 0;
-
- out_pte:
-	pmd_free(mm, pmd);
- out_pmd:
-	pud_free(mm, pud);
- out_pud:
-	p4d_free(mm, p4d);
- out:
-	return -ENOMEM;
-}
-
 int init_new_context(struct task_struct *task, struct mm_struct *mm)
 {
 	struct mm_context *from_mm = NULL;
@@ -98,52 +57,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 	return ret;
 }
 
-void uml_setup_stubs(struct mm_struct *mm)
-{
-	int err, ret;
-
-	ret = init_stub_pte(mm, STUB_CODE,
-			    (unsigned long) __syscall_stub_start);
-	if (ret)
-		goto out;
-
-	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
-	if (ret)
-		goto out;
-
-	mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
-	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);
-
-	/* dup_mmap already holds mmap_lock */
-	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
-				      VM_READ | VM_MAYREAD | VM_EXEC |
-				      VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
-				      mm->context.stub_pages);
-	if (err) {
-		printk(KERN_ERR "install_special_mapping returned %d\n", err);
-		goto out;
-	}
-	return;
-
-out:
-	force_sigsegv(SIGSEGV);
-}
-
-void arch_exit_mmap(struct mm_struct *mm)
-{
-	pte_t *pte;
-
-	pte = virt_to_pte(mm, STUB_CODE);
-	if (pte != NULL)
-		pte_clear(mm, STUB_CODE, pte);
-
-	pte = virt_to_pte(mm, STUB_DATA);
-	if (pte == NULL)
-		return;
-
-	pte_clear(mm, STUB_DATA, pte);
-}
-
 void destroy_context(struct mm_struct *mm)
 {
 	struct mm_context *mmu = &mm->context;
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 315248b03941..e0cdb9694fb8 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -278,6 +278,7 @@ static void __time_travel_add_event(struct time_travel_event *e,
 {
 	struct time_travel_event *tmp;
 	bool inserted = false;
+	unsigned long flags;
 
 	if (e->pending)
 		return;
@@ -285,6 +286,7 @@ static void __time_travel_add_event(struct time_travel_event *e,
 	e->pending = true;
 	e->time = time;
 
+	local_irq_save(flags);
 	list_for_each_entry(tmp, &time_travel_events, list) {
 		/*
 		 * Add the new entry before one with higher time,
@@ -307,6 +309,7 @@ static void __time_travel_add_event(struct time_travel_event *e,
 	tmp = time_travel_first_event();
 	time_travel_ext_update_request(tmp->time);
 	time_travel_next_event = tmp->time;
+	local_irq_restore(flags);
 }
 
 static void time_travel_add_event(struct time_travel_event *e,
@@ -318,6 +321,12 @@ static void time_travel_add_event(struct time_travel_event *e,
 	__time_travel_add_event(e, time);
 }
 
+void time_travel_add_event_rel(struct time_travel_event *e,
+			       unsigned long long delay_ns)
+{
+	time_travel_add_event(e, time_travel_time + delay_ns);
+}
+
 void time_travel_periodic_timer(struct time_travel_event *e)
 {
 	time_travel_add_event(&time_travel_timer_event,
@@ -381,12 +390,16 @@ static void time_travel_deliver_event(struct time_travel_event *e)
 	}
 }
 
-static bool time_travel_del_event(struct time_travel_event *e)
+bool time_travel_del_event(struct time_travel_event *e)
 {
+	unsigned long flags;
+
 	if (!e->pending)
 		return false;
+	local_irq_save(flags);
 	list_del(&e->list);
 	e->pending = false;
+	local_irq_restore(flags);
 	return true;
 }
 
@@ -587,6 +600,8 @@ extern u64 time_travel_ext_req(u32 op, u64 time);
 
 /* these are empty macros so the struct/fn need not exist */
 #define time_travel_add_event(e, time) do { } while (0)
+/* externally not usable - redefine here so we can */
+#undef time_travel_del_event
 #define time_travel_del_event(e) do { } while (0)
 #endif
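time_travel_add_event_rel() schedules an event at the current simulated time plus a delay in nanoseconds, and time_travel_del_event() is now usable (and IRQ-safe) outside time.c. A minimal sketch of the calling pattern (my_event, my_callback and my_arm_timeout are illustrative names; it assumes CONFIG_UML_TIME_TRAVEL_SUPPORT=y, since the calls become empty macros otherwise):

#include <linux/time-internal.h>

static void my_callback(struct time_travel_event *ev)
{
	/* runs once simulated time reaches the scheduled point */
}

static struct time_travel_event my_event = {
	.fn = my_callback,
};

static void my_arm_timeout(void)
{
	/* re-arm: drop any pending instance, then fire 1 ms (simulated) from now */
	time_travel_del_event(&my_event);
	time_travel_add_event_rel(&my_event, 1000 * 1000);
}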
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 61776790cd67..bc38f79ca3a3 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -162,9 +162,6 @@ static int add_munmap(unsigned long addr, unsigned long len,
 	struct host_vm_op *last;
 	int ret = 0;
 
-	if ((addr >= STUB_START) && (addr < STUB_END))
-		return -EINVAL;
-
 	if (hvc->index != 0) {
 		last = &hvc->ops[hvc->index - 1];
 		if ((last->type == MUNMAP) &&
@@ -226,9 +223,6 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if ((addr >= STUB_START) && (addr < STUB_END))
-			continue;
-
 		r = pte_read(*pte);
 		w = pte_write(*pte);
 		x = pte_exec(*pte);
@@ -346,12 +340,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 
 	/* This is not an else because ret is modified above */
 	if (ret) {
+		struct mm_id *mm_idp = &current->mm->context.id;
+
 		printk(KERN_ERR "fix_range_common: failed, killing current "
 		       "process: %d\n", task_tgid_vnr(current));
-		/* We are under mmap_lock, release it such that current can terminate */
-		mmap_write_unlock(current->mm);
-		force_sig(SIGKILL);
-		do_signal(&current->thread.regs);
+		mm_idp->kill = 1;
 	}
 }
 
@@ -472,6 +465,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
 	struct mm_id *mm_id;
 
 	address &= PAGE_MASK;
+
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
 		goto kill;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 80e2660782a0..74e07e748a9b 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -249,6 +249,7 @@ void uml_finishsetup(void)
 }
 
 /* Set during early boot */
+unsigned long stub_start;
 unsigned long task_size;
 EXPORT_SYMBOL(task_size);
 
@@ -283,6 +284,10 @@ int __init linux_main(int argc, char **argv)
 		add_arg(DEFAULT_COMMAND_LINE_CONSOLE);
 
 	host_task_size = os_get_top_address();
+	/* reserve two pages for the stubs */
+	host_task_size -= 2 * PAGE_SIZE;
+	stub_start = host_task_size;
+
 	/*
 	 * TASK_SIZE needs to be PGDIR_SIZE aligned or else exit_mmap craps
 	 * out
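For reference, a tiny user-space illustration of the reservation made above (the 0xc0000000 top-of-address-space value and the page labels are assumptions for the demo; only the host_task_size -= 2 * PAGE_SIZE / stub_start = host_task_size arithmetic comes from the diff):

#include <stdio.h>

#define PAGE_SIZE 0x1000UL	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long top = 0xc0000000UL;	/* stand-in for os_get_top_address() */
	unsigned long host_task_size = top - 2 * PAGE_SIZE;
	unsigned long stub_start = host_task_size;

	printf("userspace ends at    0x%lx\n", host_task_size);
	printf("first stub page at   0x%lx\n", stub_start);
	printf("second stub page at  0x%lx\n", stub_start + PAGE_SIZE);
	return 0;
}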