blob: e78386a0029f6972a9d18979c0bfc3ea296b89f8 [file] [log] [blame]
David S. Miller10e26722006-11-16 13:38:57 -08001#include <linux/sched.h>
2#include <linux/stacktrace.h>
3#include <linux/thread_info.h>
David S. Miller667f0ce2010-04-21 03:08:11 -07004#include <linux/ftrace.h>
Paul Gortmaker066bcac2011-07-22 13:18:16 -04005#include <linux/export.h>
David S. Miller10e26722006-11-16 13:38:57 -08006#include <asm/ptrace.h>
David S. Miller85a79352008-03-24 20:06:24 -07007#include <asm/stacktrace.h>
David S. Miller10e26722006-11-16 13:38:57 -08008
David S. Miller4f70f7a2008-08-12 18:33:56 -07009#include "kstack.h"
10
/*
 * Walk the kernel stack frames of @tp's task and record return PCs
 * into @trace.
 *
 * @tp:         thread_info of the task whose stack is walked
 * @trace:      output; entries[] is filled up to max_entries, honoring
 *              the caller-requested trace->skip count
 * @skip_sched: when true, PCs inside scheduler functions are omitted
 *              (see in_sched_functions())
 */
static void __save_stack_trace(struct thread_info *tp,
			       struct stack_trace *trace,
			       bool skip_sched)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	struct task_struct *t;
	int graph = 0;	/* depth into the graph tracer's ret_stack */
#endif

	if (tp == current_thread_info()) {
		/*
		 * Tracing ourselves: spill the register windows to the
		 * stack first, then start from the live frame pointer.
		 */
		stack_trace_flush();
		__asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
	} else {
		/*
		 * Other task: use the kernel stack pointer saved at its
		 * last context switch.
		 */
		ksp = tp->ksp;
	}

	/* sparc64 ABI: saved stack/frame pointers carry STACK_BIAS. */
	fp = ksp + STACK_BIAS;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	t = tp->task;
#endif
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		/* Stop once the frame pointer leaves a valid kernel stack. */
		if (!kstack_valid(tp, fp))
			break;

		/* A trap frame's pt_regs sits right after the stack frame. */
		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs)) {
			/* Don't walk past a trap taken from userspace. */
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			/* Plain call frame: follow saved caller PC and %fp. */
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		if (trace->skip > 0)
			trace->skip--;
		else if (!skip_sched || !in_sched_functions(pc)) {
			trace->entries[trace->nr_entries++] = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			/*
			 * The graph tracer patches return addresses to point
			 * at return_to_handler; recover the real return PC
			 * from the task's ret_stack.  (pc + 8 — presumably
			 * the sparc call + delay-slot return offset; confirm
			 * against ftrace_graph sparc support.)
			 */
			if ((pc + 8UL) == (unsigned long) &return_to_handler) {
				int index = t->curr_ret_stack;
				if (t->ret_stack && index >= graph) {
					pc = t->ret_stack[index - graph].ret;
					if (trace->nr_entries <
					    trace->max_entries)
						trace->entries[trace->nr_entries++] = pc;
					graph++;
				}
			}
#endif
		}
	} while (trace->nr_entries < trace->max_entries);
}
David S. Miller6a5726d2008-11-28 01:19:41 -080072
/* Capture the current task's kernel stack trace into @trace. */
void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current_thread_info(), trace, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
David S. Miller6a5726d2008-11-28 01:19:41 -080078
79void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
80{
81 struct thread_info *tp = task_thread_info(tsk);
82
83 __save_stack_trace(tp, trace, true);
84}
85EXPORT_SYMBOL_GPL(save_stack_trace_tsk);