author    Michal Marek <mmarek@suse.cz>  2011-06-07 15:37:51 +0200
committer Michal Marek <mmarek@suse.cz>  2011-06-07 15:37:51 +0200
commit    2e483528cebad089d0bb3f9aebb0ada22d968ffa (patch)
tree      d701405826b271e819a9a8500838cebd37b1364a /arch/arm/kernel/perf_event.c
parent    163d3fe6a2357aba7b18b938d6ae6ce9570324e4 (diff)
parent    55922c9d1b84b89cb946c777fddccb3247e7df2c (diff)
Merge commit 'v3.0-rc1' into kbuild/kbuild
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r--  arch/arm/kernel/perf_event.c | 41
1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 22e194eb8536..d53c0abc4dd3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -79,6 +79,7 @@ struct arm_pmu {
void (*write_counter)(int idx, u32 val);
void (*start)(void);
void (*stop)(void);
+ void (*reset)(void *);
const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -204,11 +205,9 @@ armpmu_event_set_period(struct perf_event *event,
static u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx)
+ int idx, int overflow)
{
- int shift = 64 - 32;
- s64 prev_raw_count, new_raw_count;
- u64 delta;
+ u64 delta, prev_raw_count, new_raw_count;
again:
prev_raw_count = local64_read(&hwc->prev_count);
@@ -218,8 +217,13 @@ again:
new_raw_count) != prev_raw_count)
goto again;
- delta = (new_raw_count << shift) - (prev_raw_count << shift);
- delta >>= shift;
+ new_raw_count &= armpmu->max_period;
+ prev_raw_count &= armpmu->max_period;
+
+ if (overflow)
+ delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
+ else
+ delta = new_raw_count - prev_raw_count;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
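
Note on the hunk above: the old code assumed 32-bit counters and recovered the delta by shifting both samples up and back down by 64 - 32 bits; the new code instead masks both samples to armpmu->max_period and, when the caller observed an overflow interrupt, accounts for the wrap explicitly. A standalone sketch of the new arithmetic, using a hypothetical counter_delta() helper (max_period is the all-ones mask for the counter width, e.g. 0xffffffff for a 32-bit counter):

#include <stdint.h>

typedef uint64_t u64;

/* Mirrors the delta computation this hunk adds to armpmu_event_update(). */
static u64 counter_delta(u64 prev, u64 new_raw, u64 max_period, int overflow)
{
	prev    &= max_period;
	new_raw &= max_period;

	if (overflow)
		/* wrapped: count up to max_period, then from 0 to new_raw */
		return max_period - prev + new_raw + 1;

	return new_raw - prev;
}

For example, counter_delta(0xfffffff0, 0x10, 0xffffffff, 1) returns 0x20: 0xf events before the wrap, the wrapping event itself, and 0x10 events after it.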
@@ -236,7 +240,7 @@ armpmu_read(struct perf_event *event)
if (hwc->idx < 0)
return;
- armpmu_event_update(event, hwc, hwc->idx);
+ armpmu_event_update(event, hwc, hwc->idx, 0);
}
static void
@@ -254,7 +258,7 @@ armpmu_stop(struct perf_event *event, int flags)
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx);
+ armpmu_event_update(event, hwc, hwc->idx, 0);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
@@ -556,11 +560,6 @@ static int armpmu_event_init(struct perf_event *event)
event->destroy = hw_perf_event_destroy;
if (!atomic_inc_not_zero(&active_events)) {
- if (atomic_read(&active_events) > armpmu->num_events) {
- atomic_dec(&active_events);
- return -ENOSPC;
- }
-
mutex_lock(&pmu_reserve_mutex);
if (atomic_read(&active_events) == 0) {
err = armpmu_reserve_hardware();
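
Note on the hunk above: with the racy capacity check dropped, what remains is the usual first-user reservation pattern: atomic_inc_not_zero() takes a fast-path reference while the hardware is already claimed, and pmu_reserve_mutex serializes the one caller that must actually reserve it. A userspace sketch of the same pattern, with hypothetical names (get_hardware(), reserve_hardware()) standing in for the armpmu functions:

#include <stdatomic.h>
#include <pthread.h>

static atomic_int active_events;
static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

/* hypothetical stand-in for armpmu_reserve_hardware() */
static int reserve_hardware(void)
{
	/* claim IRQs, program control registers, ... */
	return 0;
}

static int get_hardware(void)
{
	int err = 0;
	int cur = atomic_load(&active_events);

	/* fast path (inc_not_zero): take a reference if already reserved */
	while (cur != 0)
		if (atomic_compare_exchange_weak(&active_events, &cur, cur + 1))
			return 0;

	/* slow path: the first user reserves under the mutex */
	pthread_mutex_lock(&reserve_mutex);
	if (atomic_load(&active_events) == 0)
		err = reserve_hardware();
	if (!err)
		atomic_fetch_add(&active_events, 1);
	pthread_mutex_unlock(&reserve_mutex);

	return err;
}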
@@ -624,6 +623,19 @@ static struct pmu pmu = {
#include "perf_event_v6.c"
#include "perf_event_v7.c"
+/*
+ * Ensure the PMU has sane values out of reset.
+ * This requires SMP to be available, so exists as a separate initcall.
+ */
+static int __init
+armpmu_reset(void)
+{
+ if (armpmu && armpmu->reset)
+ return on_each_cpu(armpmu->reset, NULL, 1);
+ return 0;
+}
+arch_initcall(armpmu_reset);
+
static int __init
init_hw_perf_events(void)
{
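
Note on the hunk above: the reset member added to struct arm_pmu earlier in this diff has the void (*)(void *) signature precisely so it can be handed to on_each_cpu(), which runs the callback once on every online CPU with the NULL info argument; per the comment, this needs the secondary CPUs to be up, hence the separate initcall. A minimal sketch of the shape such a callback takes, with hypothetical register names (the real per-version implementations belong in the perf_event_v6/v7/xscale includes above):

#include <stdint.h>

/* hypothetical MMIO view of the PMU registers, for illustration only */
static volatile uint32_t *pmu_regs;
#define PMU_CTRL     0   /* hypothetical: global enable/reset control */
#define PMU_INTENCLR 1   /* hypothetical: interrupt-enable clear */

/*
 * Matches the void (*)(void *) type on_each_cpu() expects. It runs
 * once per CPU in IPI context with interrupts disabled, so it must
 * not sleep; info is the NULL passed by armpmu_reset() above.
 */
static void example_pmu_reset(void *info)
{
	(void)info;
	pmu_regs[PMU_CTRL] = 0;        /* hypothetical: stop the counters */
	pmu_regs[PMU_INTENCLR] = ~0u;  /* hypothetical: mask overflow IRQs */
}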
@@ -729,7 +741,8 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
- while (tail && !((unsigned long)tail & 0x3))
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+ tail && !((unsigned long)tail & 0x3))
tail = user_backtrace(tail, entry);
}
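
Note on the hunk above: the walk is now additionally bounded by PERF_MAX_STACK_DEPTH, so a corrupt or cyclic user frame chain can no longer keep the sampler looping; the existing NULL and alignment tests only reject malformed pointers, not loops. A self-contained sketch of a bounded frame-pointer walk (hypothetical walk_user_stack(); MAX_DEPTH is an arbitrary stand-in for PERF_MAX_STACK_DEPTH, and the real user_backtrace() also copies each record safely out of user memory before trusting it):

#include <stddef.h>
#include <stdint.h>

#define MAX_DEPTH 127   /* arbitrary stand-in for PERF_MAX_STACK_DEPTH */

/* APCS-style frame record, as user_backtrace() reads it */
struct frame_tail {
	struct frame_tail *fp;  /* caller's frame record */
	uintptr_t sp;
	uintptr_t lr;           /* saved return address */
};

/* Records return addresses into ips[]; returns how many were taken. */
static size_t walk_user_stack(struct frame_tail *tail, uintptr_t *ips)
{
	size_t nr = 0;

	while (nr < MAX_DEPTH && tail && !((uintptr_t)tail & 0x3)) {
		ips[nr++] = tail->lr;
		if (tail->fp <= tail)   /* chain must move up the stack */
			break;
		tail = tail->fp;
	}
	return nr;
}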