Diffstat (limited to 'kernel/trace/trace_selftest.c')
-rw-r--r--  kernel/trace/trace_selftest.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
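
The first hunk below keeps the max-trace buffers from being swapped out from under the selftest: local interrupts are disabled and ftrace_max_lock is taken around the per-CPU walk. A minimal sketch of that pattern, assuming a hypothetical walk_one_cpu() helper in place of the real trace_test_buffer_cpu() check, could look like this:

/*
 * Sketch only: walk_one_cpu() is a stand-in for the per-CPU buffer
 * check; the irq-save plus raw-spinlock bracket mirrors the hunk below.
 */
static int count_buffers_locked(struct trace_array *tr, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        raw_local_irq_save(flags);              /* no interrupts while holding a raw lock */
        __raw_spin_lock(&ftrace_max_lock);      /* block a max-trace buffer flip */

        for_each_possible_cpu(cpu) {
                ret = walk_one_cpu(tr, cpu, &cnt);      /* hypothetical helper */
                if (ret)
                        break;
        }

        __raw_spin_unlock(&ftrace_max_lock);
        raw_local_irq_restore(flags);

        if (count)
                *count = cnt;
        return ret;
}
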
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 94a01a32867f..00a47b5c4f5b 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -82,10 +82,12 @@ trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
*/
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
- unsigned long cnt = 0;
- int cpu;
- int ret = 0;
+ unsigned long flags, cnt = 0;
+ int cpu, ret = 0;

+ /* Don't allow flipping of max traces now */
+ raw_local_irq_save(flags);
+ __raw_spin_lock(&ftrace_max_lock);
for_each_possible_cpu(cpu) {
if (!head_page(tr->data[cpu]))
continue;
@@ -96,6 +98,8 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
if (ret)
break;
}
+ __raw_spin_unlock(&ftrace_max_lock);
+ raw_local_irq_restore(flags);

if (count)
*count = cnt;
@@ -406,11 +410,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
- struct completion *x = data;
-
/* Make this a RT thread, doesn't need to be too high */
+ struct sched_param param = { .sched_priority = 5 };
+ struct completion *x = data;

- rt_mutex_setprio(current, MAX_RT_PRIO - 5);
+ sched_setscheduler(current, SCHED_FIFO, &param);

/* Make it know we have a new prio */
complete(x);
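
The second hunk gives the wakeup test thread its RT priority through sched_setscheduler() rather than rt_mutex_setprio(), the hook used for rt-mutex priority-inheritance boosting. A minimal sketch of a kthread function using that call, with an illustrative function name and completion variable, might look like:

/*
 * Sketch only: the function name and completion are illustrative;
 * the sched_setscheduler() call matches the one added above.
 */
static int rt_selftest_thread(void *data)
{
        struct sched_param param = { .sched_priority = 5 };
        struct completion *started = data;

        /* Become a SCHED_FIFO task at a modest RT priority. */
        sched_setscheduler(current, SCHED_FIFO, &param);

        /* Let the creator know the new priority is in effect. */
        complete(started);

        return 0;
}
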