tracing: Disable snapshot buffer when stopping instance tracers
commit b538bf7 upstream.

It used to be that only the top level instance had a snapshot buffer (for
latency tracers like wakeup and irqsoff). Stopping a tracer in an instance
did not disable the snapshot buffer, which could have unintended
consequences if the irqsoff tracer is enabled.

Consolidate tracing_start/stop() with tracing_start/stop_tr() so that
all instances behave the same. The tracing_start/stop() functions now
just call their respective tracing_start/stop_tr() with global_trace
passed in.
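
In effect, the exported helpers become thin wrappers around the per-instance
functions, as the diff below shows:

void tracing_start(void)
{
	return tracing_start_tr(&global_trace);
}

void tracing_stop(void)
{
	return tracing_stop_tr(&global_trace);
}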

Link: https://lkml.kernel.org/r/20231205220011.041220035@goodmis.org

Cc: stable@vger.kernel.org
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Fixes: 6d9b3fa ("tracing: Move tracing_max_latency into trace_array")
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
rostedt authored and gregkh committed Dec 13, 2023
1 parent 12c48e8 commit 0486a1f
Showing 1 changed file with 34 additions and 76 deletions: kernel/trace/trace.c
@@ -2359,133 +2359,91 @@ int is_tracing_stopped(void)
 	return global_trace.stop_count;
 }
 
-/**
- * tracing_start - quick start of the tracer
- *
- * If tracing is enabled but was stopped by tracing_stop,
- * this will start the tracer back up.
- */
-void tracing_start(void)
+static void tracing_start_tr(struct trace_array *tr)
 {
 	struct trace_buffer *buffer;
 	unsigned long flags;
 
 	if (tracing_disabled)
 		return;
 
-	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-	if (--global_trace.stop_count) {
-		if (global_trace.stop_count < 0) {
+	raw_spin_lock_irqsave(&tr->start_lock, flags);
+	if (--tr->stop_count) {
+		if (WARN_ON_ONCE(tr->stop_count < 0)) {
 			/* Someone screwed up their debugging */
-			WARN_ON_ONCE(1);
-			global_trace.stop_count = 0;
+			tr->stop_count = 0;
 		}
 		goto out;
 	}
 
 	/* Prevent the buffers from switching */
-	arch_spin_lock(&global_trace.max_lock);
+	arch_spin_lock(&tr->max_lock);
 
-	buffer = global_trace.array_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	buffer = global_trace.max_buffer.buffer;
+	buffer = tr->max_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 #endif
 
-	arch_spin_unlock(&global_trace.max_lock);
-
- out:
-	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
-}
-
-static void tracing_start_tr(struct trace_array *tr)
-{
-	struct trace_buffer *buffer;
-	unsigned long flags;
-
-	if (tracing_disabled)
-		return;
-
-	/* If global, we need to also start the max tracer */
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-		return tracing_start();
-
-	raw_spin_lock_irqsave(&tr->start_lock, flags);
-
-	if (--tr->stop_count) {
-		if (tr->stop_count < 0) {
-			/* Someone screwed up their debugging */
-			WARN_ON_ONCE(1);
-			tr->stop_count = 0;
-		}
-		goto out;
-	}
-
-	buffer = tr->array_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_enable(buffer);
+	arch_spin_unlock(&tr->max_lock);
 
  out:
 	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
 /**
- * tracing_stop - quick stop of the tracer
+ * tracing_start - quick start of the tracer
  *
- * Light weight way to stop tracing. Use in conjunction with
- * tracing_start.
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
  */
-void tracing_stop(void)
+void tracing_start(void)
 {
+	return tracing_start_tr(&global_trace);
+}
+
+static void tracing_stop_tr(struct trace_array *tr)
+{
 	struct trace_buffer *buffer;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-	if (global_trace.stop_count++)
+	raw_spin_lock_irqsave(&tr->start_lock, flags);
+	if (tr->stop_count++)
 		goto out;
 
 	/* Prevent the buffers from switching */
-	arch_spin_lock(&global_trace.max_lock);
+	arch_spin_lock(&tr->max_lock);
 
-	buffer = global_trace.array_buffer.buffer;
+	buffer = tr->array_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	buffer = global_trace.max_buffer.buffer;
+	buffer = tr->max_buffer.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 #endif
 
-	arch_spin_unlock(&global_trace.max_lock);
+	arch_spin_unlock(&tr->max_lock);
 
  out:
-	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
-static void tracing_stop_tr(struct trace_array *tr)
+/**
+ * tracing_stop - quick stop of the tracer
+ *
+ * Light weight way to stop tracing. Use in conjunction with
+ * tracing_start.
+ */
+void tracing_stop(void)
 {
-	struct trace_buffer *buffer;
-	unsigned long flags;
-
-	/* If global, we need to also stop the max tracer */
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-		return tracing_stop();
-
-	raw_spin_lock_irqsave(&tr->start_lock, flags);
-	if (tr->stop_count++)
-		goto out;
-
-	buffer = tr->array_buffer.buffer;
-	if (buffer)
-		ring_buffer_record_disable(buffer);
-
- out:
-	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+	return tracing_stop_tr(&global_trace);
 }
 
 static int trace_save_cmdline(struct task_struct *tsk)
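
The start/stop pair nests through stop_count: recording is disabled on the
first stop and re-enabled only when the count returns to zero, with a
clamp-and-warn path for unbalanced starts. A minimal userspace sketch of
that counting behavior (the model_* names are hypothetical, and plain ints
stand in for the kernel's start_lock/max_lock locking and the
ring_buffer_record_enable/disable() calls):

#include <stdio.h>

static int stop_count;     /* models tr->stop_count */
static int recording = 1;  /* models the ring buffer's record state */

static void model_stop(void)
{
	if (stop_count++)	/* already stopped: just nest deeper */
		return;
	recording = 0;
}

static void model_start(void)
{
	if (--stop_count) {
		if (stop_count < 0)	/* unbalanced start: clamp, as the WARN_ON_ONCE path does */
			stop_count = 0;
		return;
	}
	recording = 1;
}

int main(void)
{
	model_stop();
	model_stop();
	model_start();
	printf("after inner start: recording=%d\n", recording);	/* prints 0 */
	model_start();
	printf("after outer start: recording=%d\n", recording);	/* prints 1 */
	return 0;
}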
