ftrace, x86: rename in_nmi variable
Impact: clean up
The in_nmi variable in x86 arch ftrace.c is a misnomer.
Andrew Morton pointed out that the in_nmi variable is incremented
by all CPUs; it can be set when another CPU is running an NMI.
Since this is actually intentional, the fix is to rename it to
what it really is: "nmi_running".
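For reference, a minimal sketch of the coordination pattern behind the
counter (simplified from the code touched by this patch; the bodies
below omit the mod_code handling and the nmi_wait_count bookkeeping):
every CPU that takes an NMI increments the shared counter, and the
code-modification path spins until it drops back to zero, so a non-zero
value only means "some CPU is running an NMI", not "this CPU is in an
NMI".

	static atomic_t nmi_running = ATOMIC_INIT(0);

	void arch_ftrace_nmi_enter(void)
	{
		/* any CPU entering an NMI raises the count */
		atomic_inc(&nmi_running);
		/* make the increment visible before reading the write flag */
		smp_mb();
	}

	void arch_ftrace_nmi_exit(void)
	{
		/* finish all NMI work before dropping the count */
		smp_wmb();
		atomic_dec(&nmi_running);
	}

	static void wait_for_nmi(void)
	{
		/* spin until no CPU is executing an NMI handler */
		while (atomic_read(&nmi_running))
			cpu_relax();
	}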
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4c68358..e3fad2e 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -82,7 +82,7 @@
* are the same as what exists.
*/
-static atomic_t in_nmi = ATOMIC_INIT(0);
+static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status; /* holds return value of text write */
static int mod_code_write; /* set when NMI should do the write */
static void *mod_code_ip; /* holds the IP to write to */
@@ -115,8 +115,8 @@
void arch_ftrace_nmi_enter(void)
{
- atomic_inc(&in_nmi);
- /* Must have in_nmi seen before reading write flag */
+ atomic_inc(&nmi_running);
+ /* Must have nmi_running seen before reading write flag */
smp_mb();
if (mod_code_write) {
ftrace_mod_code();
@@ -126,19 +126,19 @@
void arch_ftrace_nmi_exit(void)
{
- /* Finish all executions before clearing in_nmi */
+ /* Finish all executions before clearing nmi_running */
smp_wmb();
- atomic_dec(&in_nmi);
+ atomic_dec(&nmi_running);
}
static void wait_for_nmi(void)
{
- if (!atomic_read(&in_nmi))
+ if (!atomic_read(&nmi_running))
return;
do {
cpu_relax();
- } while(atomic_read(&in_nmi));
+ } while (atomic_read(&nmi_running));
nmi_wait_count++;
}
@@ -374,16 +374,16 @@
* this page for dynamic ftrace. They have been
* simplified to ignore all traces in NMI context.
*/
-static atomic_t in_nmi;
+static atomic_t nmi_running;
void arch_ftrace_nmi_enter(void)
{
- atomic_inc(&in_nmi);
+ atomic_inc(&nmi_running);
}
void arch_ftrace_nmi_exit(void)
{
- atomic_dec(&in_nmi);
+ atomic_dec(&nmi_running);
}
#endif /* !CONFIG_DYNAMIC_FTRACE */
@@ -475,7 +475,7 @@
&return_to_handler;
/* Nmi's are currently unsupported */
- if (unlikely(atomic_read(&in_nmi)))
+ if (unlikely(atomic_read(&nmi_running)))
return;
if (unlikely(atomic_read(&current->tracing_graph_pause)))