From dc1d628a67a8f042e711ea5accc0beedc3ef0092 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 3 Mar 2010 15:55:04 +0100
Subject: perf: Provide generic perf_sample_data initialization

This makes it easier to extend perf_sample_data and fixes a bug on arm
and sparc, which failed to set ->raw to NULL, which can cause crashes
when combined with PERF_SAMPLE_RAW.

It also optimizes PowerPC and tracepoint, because the struct
initialization is forced to zero out the whole structure.

Signed-off-by: Peter Zijlstra
Acked-by: Jean Pihet
Reviewed-by: Frederic Weisbecker
Acked-by: David S. Miller
Cc: Jamie Iles
Cc: Paul Mackerras
Cc: Stephane Eranian
Cc: stable@kernel.org
LKML-Reference: <20100304140100.315416040@chello.nl>
Signed-off-by: Ingo Molnar
---
 arch/arm/kernel/perf_event.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel/perf_event.c')

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index c54ceb3d1f97..3875d99cc40f 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -965,7 +965,7 @@ armv6pmu_handle_irq(int irq_num,
 	 */
 	armv6_pmcr_write(pmcr);
 
-	data.addr = 0;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx <= armpmu->num_events; ++idx) {
@@ -1945,7 +1945,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 */
 	regs = get_irq_regs();
 
-	data.addr = 0;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx <= armpmu->num_events; ++idx) {
--
cgit v1.2.3
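The hunks above show only the call sites; the perf_sample_data_init() helper itself is introduced outside this excerpt, since the diff is limited to arch/arm/kernel/perf_event.c. Below is a minimal sketch of what such an initializer looks like; it is an assumption reconstructed from the commit message (which only implies the ->addr and ->raw fields), not a copy of the actual helper:

	/*
	 * Sketch only: centralizes sample-data setup so that ->raw is always
	 * NULLed before the sample reaches PERF_SAMPLE_RAW output, which is
	 * the crash the commit message describes on arm and sparc.
	 */
	static inline void perf_sample_data_init(struct perf_sample_data *data,
						 u64 addr)
	{
		data->addr = addr;
		data->raw  = NULL;
	}

With the call sites converted, any field later added to perf_sample_data can be initialized in this one helper instead of in every architecture's interrupt handler.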
From ddee87f208b6229d2910dd5930c87089dc56c87e Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 25 Feb 2010 15:04:14 +0100
Subject: ARM: 5959/1: ARM: perf-events: request PMU interrupts with IRQF_NOBALANCING

If IRQ balancing is used on a multicore ARM system, PMU interrupt lines
may be relocated onto CPUs other than the one causing the counter
overflow. This can result in misattribution of events to the wrong core
and, in the case that the CPU handling the interrupt has not experienced
counter overflow, the interrupt can be disabled because the handler
returns IRQ_NONE.

This patch adds the IRQF_NOBALANCING flag to the request_irq call in
perf_event.c.

Acked-by: Jamie Iles
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/kernel/perf_event.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/arm/kernel/perf_event.c')

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index c54ceb3d1f97..b44d15948b56 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -332,7 +332,8 @@ armpmu_reserve_hardware(void)
 
 	for (i = 0; i < pmu_irqs->num_irqs; ++i) {
 		err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
-				  IRQF_DISABLED, "armpmu", NULL);
+				  IRQF_DISABLED | IRQF_NOBALANCING,
+				  "armpmu", NULL);
 		if (err) {
 			pr_warning("unable to request IRQ%d for ARM "
 				   "perf counters\n", pmu_irqs->irqs[i]);
--
cgit v1.2.3

From d10fca9f39238b07cc670b441d2b423de30359d2 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 26 Feb 2010 10:46:15 +0100
Subject: ARM: 5960/1: ARM: perf-events: fix v7 event selection mask

The event selection mask for ARMv7 cores [ARMV7_EVTSEL_MASK] is
incorrectly set to 0x7f. This means that the top bit of an event ID is
ignored, so counting branch misses (id=0x10) and ISBs (id=0x90) give
the same results.

This patch sets the event selection mask to the correct value of 0xff.

Signed-off-by: Jean Pihet
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/kernel/perf_event.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/kernel/perf_event.c')

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index b44d15948b56..c45a155a73dc 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -1625,7 +1625,7 @@ enum armv7_counters {
 /*
  * EVTSEL: Event selection reg
  */
-#define	ARMV7_EVTSEL_MASK	0x7f		/* Mask for writable bits */
+#define	ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */
 
 /*
  * SELECT: Counter selection reg
--
cgit v1.2.3
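The masking bug is easy to reproduce arithmetically: ARMv7 event IDs occupy a full byte, and a 0x7f mask discards bit 7, so the ISB event (0x90) is programmed as if it were the branch-miss event (0x10). The standalone snippet below is illustrative only, ordinary userspace C rather than the kernel code, which applies this mask to the event ID when programming the event-type register:

	#include <stdio.h>

	#define EVTSEL_MASK_OLD	0x7f	/* buggy: drops bit 7 of the event ID */
	#define EVTSEL_MASK_NEW	0xff	/* fixed: keeps the full 8-bit event ID */

	int main(void)
	{
		/* Branch misses (0x10) survive either mask. */
		printf("0x10 & 0x7f = 0x%02x\n", 0x10 & EVTSEL_MASK_OLD);	/* 0x10 */
		/* ISBs (0x90) collapse onto 0x10 with the old mask... */
		printf("0x90 & 0x7f = 0x%02x\n", 0x90 & EVTSEL_MASK_OLD);	/* 0x10 */
		/* ...and keep their identity with the corrected mask. */
		printf("0x90 & 0xff = 0x%02x\n", 0x90 & EVTSEL_MASK_NEW);	/* 0x90 */
		return 0;
	}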