author	Peter Zijlstra <peterz@infradead.org>	2020-03-05 13:38:51 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2021-06-16 11:54:56 +0200
commit	2cdbfd6567203ee9cf37012be0342ee830028f09 (patch)
tree	e45dd9b6469ef6d61238e34236bc2b61a00abbda /kernel
parent	9a2dc0e6c531d595bcdf2c66d0be131679bd02df (diff)
perf/core: Fix endless multiplex timer
commit 90c91dfb86d0ff545bd329d3ddd72c147e2ae198 upstream.

Kan and Andi reported that we fail to kill rotation when the flexible
events go empty, but the context does not.

XXX moar

Fixes: fd7d55172d1e ("perf/cgroups: Don't rotate events for cgroups unnecessarily")
Reported-by: Andi Kleen <ak@linux.intel.com>
Reported-by: Kan Liang <kan.liang@linux.intel.com>
Tested-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Wen Yang <wenyang@linux.alibaba.com>
Link: https://lkml.kernel.org/r/20200305123851.GX2596@hirez.programming.kicks-ass.net
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
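Taken together, the hunks below make rotate_necessary strictly follow the
flexible events: ctx_flexible_sched_in() sets it when flexible events are
left unscheduled, and it is now cleared when the flexible events are
scheduled out, when the context goes empty, and when a rotation candidate
is picked. A minimal user-space sketch of that lifecycle follows; struct
ctx and the nr_flexible/nr_schedulable fields are invented stand-ins for
illustration, only rotate_necessary mirrors a real perf_event_context
field:

#include <stdbool.h>
#include <stdio.h>

/* Pared-down model of the flag's lifecycle; everything here except
 * rotate_necessary is invented for illustration. */
struct ctx {
	int nr_flexible;	/* flexible events on this context */
	int nr_schedulable;	/* how many the PMU can host at once */
	bool rotate_necessary;	/* multiplex timer stays armed while set */
};

/* Counterpart of ctx_flexible_sched_in(): the flag is set only when
 * some flexible events could not be scheduled and need multiplexing. */
static void flexible_sched_in(struct ctx *ctx)
{
	if (ctx->nr_flexible > ctx->nr_schedulable)
		ctx->rotate_necessary = true;
}

/* Counterpart of the patched ctx_sched_out(): once the flexible
 * events are scheduled out, no rotation is needed until the next
 * sched-in overcommits again. Before the fix, a context kept active
 * by pinned events could hold a stale flag here indefinitely. */
static void flexible_sched_out(struct ctx *ctx)
{
	ctx->rotate_necessary = false;
}

int main(void)
{
	struct ctx ctx = { .nr_flexible = 4, .nr_schedulable = 2 };

	flexible_sched_in(&ctx);	/* overcommitted: flag set */
	printf("after sched_in:  rotate_necessary=%d\n", ctx.rotate_necessary);

	ctx.nr_flexible = 0;		/* the flexible events go empty... */
	flexible_sched_out(&ctx);	/* ...so the flag must drop with them */
	printf("after sched_out: rotate_necessary=%d\n", ctx.rotate_necessary);
	return 0;
}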
Diffstat (limited to 'kernel')
 kernel/events/core.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3be2cd57af1..2f3e892ffce3 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2086,6 +2086,7 @@ __perf_remove_from_context(struct perf_event *event,
 
 	if (!ctx->nr_events && ctx->is_active) {
 		ctx->is_active = 0;
+		ctx->rotate_necessary = 0;
 		if (ctx->task) {
 			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
 			cpuctx->task_ctx = NULL;
@@ -2952,12 +2953,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	if (!ctx->nr_active || !(is_active & EVENT_ALL))
 		return;
 
-	/*
-	 * If we had been multiplexing, no rotations are necessary, now no events
-	 * are active.
-	 */
-	ctx->rotate_necessary = 0;
-
 	perf_pmu_disable(ctx->pmu);
 	if (is_active & EVENT_PINNED) {
 		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
@@ -2967,6 +2962,13 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	if (is_active & EVENT_FLEXIBLE) {
 		list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
 			group_sched_out(event, cpuctx, ctx);
+
+		/*
+		 * Since we cleared EVENT_FLEXIBLE, also clear
+		 * rotate_necessary; it will be reset by
+		 * ctx_flexible_sched_in() when needed.
+		 */
+		ctx->rotate_necessary = 0;
 	}
 	perf_pmu_enable(ctx->pmu);
 }
@@ -3705,6 +3707,12 @@ ctx_event_to_rotate(struct perf_event_context *ctx)
 		event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree),
				      typeof(*event), group_node);
 	}
 
+	/*
+	 * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
+	 * finds there are unschedulable events, it will set it again.
+	 */
+	ctx->rotate_necessary = 0;
+
 	return event;
 }
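For context, the flag's consumer is perf_rotate_context(), driven by the
multiplex hrtimer, which rearms only while rotation is still reported as
necessary; a stale flag therefore meant an endless timer. The sketch below
models only the shape of that decision under invented names (struct pctx
and struct cpuctx are pared down; only rotate_necessary mirrors the real
field), not the kernel's actual code:

#include <stdbool.h>
#include <stdio.h>

/* Invented, pared-down contexts for illustration. */
struct pctx {
	bool rotate_necessary;
};

struct cpuctx {
	struct pctx ctx;	/* CPU-wide context */
	struct pctx *task_ctx;	/* current task's context, may be NULL */
};

/* Shape of the decision in perf_rotate_context(): rotation work is
 * done, and the multiplex hrtimer rearmed, only while one of the two
 * contexts still reports rotation as necessary. With the patch, an
 * emptied flexible list can no longer leave a stale flag behind to
 * keep this returning true forever. */
static bool rotation_needed(const struct cpuctx *cpuctx)
{
	bool cpu_rotate  = cpuctx->ctx.rotate_necessary;
	bool task_rotate = cpuctx->task_ctx ?
			   cpuctx->task_ctx->rotate_necessary : false;

	return cpu_rotate || task_rotate;
}

int main(void)
{
	struct pctx task = { .rotate_necessary = false };
	struct cpuctx cpu = { .ctx = { .rotate_necessary = false },
			      .task_ctx = &task };

	printf("rearm multiplex timer: %s\n",
	       rotation_needed(&cpu) ? "yes" : "no");
	return 0;
}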