blob: cd17ddfda082dfefb2a67aea55475f7482197e7d (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
|
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <mach_ipi.h>
#include "smp.h"
#include <asm/irq_vectors.h>
/*
 * Serializes pause/resume cycles: taken in kmemcheck_pause_allbutself()
 * and released in kmemcheck_resume(), so only one CPU at a time may have
 * the others parked.
 *
 * Use DEFINE_SPINLOCK() so the lock is properly initialized; a plain
 * zero-filled spinlock_t is invalid under CONFIG_DEBUG_SPINLOCK/lockdep.
 */
static DEFINE_SPINLOCK(nmi_spinlock);

/* Non-zero while other CPUs are expected to park in the NMI handler. */
static atomic_t nmi_wait;
/* Set to non-zero to release the parked CPUs. */
static atomic_t nmi_resume;
/* Count of CPUs currently spinning inside the NMI handler. */
static atomic_t paused;
/*
 * Die-notifier callback, invoked in NMI context on the CPUs that receive
 * the pause IPI from kmemcheck_pause_allbutself().
 *
 * Returns NOTIFY_DONE for events we do not own (wrong event type, or no
 * pause currently requested) and NOTIFY_STOP after the CPU has been
 * parked and released.
 *
 * NOTE(review): the inc -> spin -> dec ordering is the handshake with
 * kmemcheck_pause_allbutself()/kmemcheck_resume(); do not reorder.
 */
static int nmi_notifier(struct notifier_block *self,
	unsigned long val, void *data)
{
	/* Only react to the pause NMI, and only while a pause is armed. */
	if (val != DIE_NMI_IPI || !atomic_read(&nmi_wait))
		return NOTIFY_DONE;

	/* Tell the pausing CPU we have arrived. */
	atomic_inc(&paused);

	/* Pause until the fault has been handled */
	while (!atomic_read(&nmi_resume))
		cpu_relax();

	/* Tell the pausing CPU we are leaving the NMI handler. */
	atomic_dec(&paused);

	return NOTIFY_STOP;
}
/* Hooks nmi_notifier() into the die-notifier chain. */
static struct notifier_block nmi_nb = {
	.notifier_call = nmi_notifier,
};
/*
 * Register the die notifier that parks remote CPUs on NMI. Must be
 * called before the first kmemcheck_pause_allbutself(); registration
 * failure is fatal.
 */
void kmemcheck_smp_init(void)
{
	/* BUG_ON always evaluates its argument, so this registers too. */
	BUG_ON(register_die_notifier(&nmi_nb) != 0);
}
/*
 * Stop every other online CPU by sending it an NMI, and wait until all
 * of them have checked in at nmi_notifier(). Returns with nmi_spinlock
 * held; the caller must release the CPUs with kmemcheck_resume().
 *
 * NOTE(review): assumes the set of online CPUs does not change between
 * the num_online_cpus() snapshot and the wait loop — presumably true in
 * the contexts this is called from; verify against callers.
 */
void kmemcheck_pause_allbutself(void)
{
	int cpus;
	cpumask_t mask = cpu_online_map;

	/* Held across the whole pause; released in kmemcheck_resume(). */
	spin_lock(&nmi_spinlock);

	/* Number of remote CPUs we expect to park. */
	cpus = num_online_cpus() - 1;

	/* Arm the handshake state before any NMI can be delivered. */
	atomic_set(&paused, 0);
	atomic_set(&nmi_wait, 1);
	atomic_set(&nmi_resume, 0);

	/* NMI everyone except ourselves. */
	cpu_clear(safe_smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_IPI_mask(mask, NMI_VECTOR);

	/* Wait until every remote CPU is spinning in nmi_notifier(). */
	while (atomic_read(&paused) != cpus)
		cpu_relax();

	/* Disarm so late/unrelated NMIs fall through to NOTIFY_DONE. */
	atomic_set(&nmi_wait, 0);
}
/*
 * Release the CPUs parked by kmemcheck_pause_allbutself(), wait until
 * every one of them has left the NMI handler, then drop nmi_spinlock so
 * the next pause cycle may begin.
 *
 * Fix: the original declared and assigned a local `cpus`
 * (num_online_cpus() - 1) that was never read — dead code and a
 * -Wunused-but-set-variable warning; removed.
 */
void kmemcheck_resume(void)
{
	/* Let the parked CPUs out of their spin loop in nmi_notifier(). */
	atomic_set(&nmi_resume, 1);

	/* Wait until all of them have decremented `paused` on their way out. */
	while (atomic_read(&paused) != 0)
		cpu_relax();

	spin_unlock(&nmi_spinlock);
}
|