Merge branches 'tracing/fastboot', 'tracing/ftrace', 'tracing/function-graph-tracer' and 'tracing/hw-branch-tracing' into tracing/core
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 16f9487..b946ac1 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/dmar.h>
+#include <linux/ftrace.h>
#include <asm/atomic.h>
#include <asm/smp.h>
@@ -800,7 +801,7 @@
* [ if a single-CPU system runs an SMP kernel then we call the local
* interrupt as well. Thus we cannot inline the local irq ... ]
*/
-void smp_apic_timer_interrupt(struct pt_regs *regs)
+void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 60eb84e..11c65e8 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -13,6 +13,7 @@
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/ftrace.h>
#include <asm/uaccess.h>
#include <asm/io_apic.h>
#include <asm/idle.h>
@@ -47,7 +48,7 @@
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
-asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
+asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
struct irq_desc *desc;
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index a9b8560..82c6755 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -44,6 +44,7 @@
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
+ IRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
_etext = .; /* End of text section */
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 46e0544..1a614c0 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -35,6 +35,7 @@
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
+ IRQENTRY_TEXT
*(.fixup)
*(.gnu.warning)
_etext = .; /* End of text section */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index eba835a..c61fab1 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -288,6 +288,16 @@
*(.kprobes.text) \
VMLINUX_SYMBOL(__kprobes_text_end) = .;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define IRQENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__irqentry_text_start) = .; \
+ *(.irqentry.text) \
+ VMLINUX_SYMBOL(__irqentry_text_end) = .;
+#else
+#define IRQENTRY_TEXT
+#endif
+
/* Section used for early init (in .S files) */
#define HEAD_TEXT *(.head.text)
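
The IRQENTRY_TEXT macro above brackets the .irqentry.text section with two linker symbols so that code can test, at run time, whether an instruction address belongs to a hardirq entrypoint. A minimal sketch of that check follows; the helper name in_irqentry_text is illustrative and not part of this merge, while the boundary symbols are exactly the ones emitted by the linker script:

	extern char __irqentry_text_start[];
	extern char __irqentry_text_end[];

	/* Illustrative helper: true if addr lies inside .irqentry.text. */
	static inline int in_irqentry_text(unsigned long addr)
	{
		return addr >= (unsigned long)__irqentry_text_start &&
		       addr <  (unsigned long)__irqentry_text_end;
	}

print_graph_irq() in kernel/trace/trace_functions_graph.c (further down in this merge) performs this same comparison before deciding whether to emit an interrupt marker.
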
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 11cac81..44020f3 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -377,6 +377,16 @@
*/
#define __notrace_funcgraph notrace
+/*
+ * We want to know which function is an entrypoint of a hardirq.
+ * That will help us to put a marker in the output.
+ */
+#define __irq_entry __attribute__((__section__(".irqentry.text")))
+
+/* Limits of hardirq entrypoints */
+extern char __irqentry_text_start[];
+extern char __irqentry_text_end[];
+
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
/* Type of the callback handlers for tracing function graph*/
@@ -414,6 +424,7 @@
#else
#define __notrace_funcgraph
+#define __irq_entry
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
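
As the new comment says, __irq_entry only relocates the annotated function into .irqentry.text; it does not change the function's behaviour. A hedged sketch of how an architecture marks a hardirq entrypoint (the handler name below is hypothetical; the real annotations in this merge are do_IRQ() and smp_apic_timer_interrupt()):

	#include <linux/ftrace.h>

	/*
	 * Hypothetical handler: __irq_entry places it in .irqentry.text
	 * when CONFIG_FUNCTION_GRAPH_TRACER is set, and expands to nothing
	 * otherwise.
	 */
	void __irq_entry example_hardirq_handler(void)
	{
		/* handler body is unaffected by the annotation */
	}

Because recordmcount.pl now lists ".irqentry.text" among the traced sections, the mcount call sites of functions placed there are still recorded, so they remain traceable by dynamic ftrace.
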
diff --git a/include/trace/boot.h b/include/trace/boot.h
index 6b54537..088ea08 100644
--- a/include/trace/boot.h
+++ b/include/trace/boot.h
@@ -1,6 +1,10 @@
#ifndef _LINUX_TRACE_BOOT_H
#define _LINUX_TRACE_BOOT_H
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/init.h>
+
/*
* Structure which defines the trace of an initcall
* while it is called.
@@ -9,7 +13,7 @@
*/
struct boot_trace_call {
pid_t caller;
- char func[KSYM_NAME_LEN];
+ char func[KSYM_SYMBOL_LEN];
};
/*
@@ -17,7 +21,7 @@
* while it returns.
*/
struct boot_trace_ret {
- char func[KSYM_NAME_LEN];
+ char func[KSYM_SYMBOL_LEN];
int result;
unsigned long long duration; /* nsecs */
};
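
The func[] buffers grow from KSYM_NAME_LEN to KSYM_SYMBOL_LEN, which in <linux/kallsyms.h> is sized for the full "name+0xoff/0xsize [module]" string rather than the bare symbol name. A hedged sketch of the kind of caller that needs the larger buffer (the filler function is illustrative, not part of this patch):

	#include <linux/kallsyms.h>
	#include <trace/boot.h>

	/*
	 * Illustrative: sprint_symbol() may append offset/size and module
	 * name, so the destination must hold KSYM_SYMBOL_LEN bytes, not
	 * just KSYM_NAME_LEN.
	 */
	static void example_fill_call(struct boot_trace_call *call,
				      unsigned long ip)
	{
		sprint_symbol(call->func, ip);
	}
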
diff --git a/include/trace/sched.h b/include/trace/sched.h
index 9b2854a..f4549d5 100644
--- a/include/trace/sched.h
+++ b/include/trace/sched.h
@@ -30,8 +30,8 @@
TPARGS(rq, prev, next));
DECLARE_TRACE(sched_migrate_task,
- TPPROTO(struct rq *rq, struct task_struct *p, int dest_cpu),
- TPARGS(rq, p, dest_cpu));
+ TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
+ TPARGS(p, orig_cpu, dest_cpu));
DECLARE_TRACE(sched_process_free,
TPPROTO(struct task_struct *p),
diff --git a/kernel/sched.c b/kernel/sched.c
index 7729c4b..d377097 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1851,6 +1851,8 @@
clock_offset = old_rq->clock - new_rq->clock;
+ trace_sched_migrate_task(p, task_cpu(p), new_cpu);
+
#ifdef CONFIG_SCHEDSTATS
if (p->se.wait_start)
p->se.wait_start -= clock_offset;
@@ -2868,7 +2870,6 @@
|| unlikely(!cpu_active(dest_cpu)))
goto out;
- trace_sched_migrate_task(rq, p, dest_cpu);
/* force the process onto the specified CPU */
if (migrate_task(p, dest_cpu, &req)) {
/* Need to wait for migration thread (might exit: take ref). */
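
The sched_migrate_task tracepoint drops its struct rq argument and reports the origin CPU explicitly; the trace call is hoisted into the common migration path above, where task_cpu(p) still names the source CPU. A probe written against the new prototype would look roughly like this (the probe name and the registration call are assumptions based on the DECLARE_TRACE convention of this tree, not part of the patch):

	#include <trace/sched.h>

	/* Illustrative probe matching the new (p, orig_cpu, dest_cpu) prototype. */
	static void probe_sched_migrate_task(struct task_struct *p,
					     int orig_cpu, int dest_cpu)
	{
		/* e.g. account the migration of p->pid from orig_cpu to dest_cpu */
	}

	/*
	 * Registration, typically from a tracer or module init path:
	 *	register_trace_sched_migrate_task(probe_sched_migrate_task);
	 */
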
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index af60eef..4bf39fc 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -231,6 +231,49 @@
return true;
}
+static enum print_line_t
+print_graph_irq(struct trace_seq *s, unsigned long addr,
+ enum trace_type type, int cpu, pid_t pid)
+{
+ int ret;
+
+ if (addr < (unsigned long)__irqentry_text_start ||
+ addr >= (unsigned long)__irqentry_text_end)
+ return TRACE_TYPE_UNHANDLED;
+
+ if (type == TRACE_GRAPH_ENT) {
+ ret = trace_seq_printf(s, "==========> | ");
+ } else {
+ /* Cpu */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+ ret = print_graph_cpu(s, cpu);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+ /* Proc */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+ ret = print_graph_proc(s, pid);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ ret = trace_seq_printf(s, " | ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+ /* No overhead */
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+ ret = trace_seq_printf(s, " ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+ ret = trace_seq_printf(s, "<========== |\n");
+ }
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ return TRACE_TYPE_HANDLED;
+}
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
@@ -344,7 +387,7 @@
static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
- struct trace_seq *s)
+ struct trace_seq *s, pid_t pid, int cpu)
{
int i;
int ret;
@@ -357,8 +400,18 @@
return TRACE_TYPE_PARTIAL_LINE;
}
- /* No time */
- ret = trace_seq_printf(s, " | ");
+ /* Interrupt */
+ ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
+ if (ret == TRACE_TYPE_UNHANDLED) {
+ /* No time */
+ ret = trace_seq_printf(s, " | ");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ } else {
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
/* Function */
for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
@@ -410,7 +463,7 @@
if (trace_branch_is_leaf(iter, field))
return print_graph_entry_leaf(iter, field, s);
else
- return print_graph_entry_nested(field, s);
+ return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
}
@@ -474,6 +527,11 @@
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
}
+
+ ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
+ if (ret == TRACE_TYPE_PARTIAL_LINE)
+ return TRACE_TYPE_PARTIAL_LINE;
+
return TRACE_TYPE_HANDLED;
}
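
With print_graph_irq() wired into both the entry and return paths, the function graph output flags hardirq entrypoints in the duration column. Roughly, and only as an illustration (exact spacing and columns depend on the tracer flags):

	 1)   ==========> |  smp_apic_timer_interrupt() {
	                       ... nested calls, then the handler's closing brace ...
	 1)   <========== |

The "==========>" marker is printed in place of the empty duration column on the entry line, and the "<==========" line is appended once the handler returns.
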
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 0b1dc9f..fe83141 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -114,6 +114,7 @@
".text" => 1,
".sched.text" => 1,
".spinlock.text" => 1,
+ ".irqentry.text" => 1,
);
$objdump = "objdump" if ((length $objdump) == 0);