/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 * Lennox Wu <lennox.wu@sunplusct.com>
 * Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 */


#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

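	/*
	 * The trap entry code saved the exception cause in regs->scause and
	 * the faulting virtual address in regs->sbadaddr (renamed stval in
	 * the privileged ISA 1.10).
	 */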
	cause = regs->scause;
	addr = regs->sbadaddr;

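	/*
	 * Kernel threads have no mm of their own, so tsk->mm can be NULL
	 * here; such faults are sent straight to no_context below.
	 */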
	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->sstatus & SR_SPIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

retry:
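	/*
	 * Look up the VMA covering addr, expanding a grows-down stack VMA
	 * to reach it if necessary.
	 */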
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

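	/*
	 * The exception cause tells us whether this was an instruction
	 * fetch, a load, or a store; check that the VMA allows that kind
	 * of access.
	 */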
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
		return;

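	/*
	 * handle_mm_fault() reported a hard failure: we are either out of
	 * memory or the access hit something that warrants a SIGBUS.
	 */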
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY);
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we got
	 * oom-killed).
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr, tsk);
	return;

vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		p4d_t *p4d, *p4d_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		if (user_mode(regs))
			goto bad_area;

		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 *
		 * Note: Use the old sptbr name instead of the current satp
		 * name to support binutils 2.29, which doesn't know about
		 * the privileged ISA 1.10 yet.
		 */
		index = pgd_index(addr);
		pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		/*
		 * Since the vmalloc area is global, it is unnecessary
		 * to copy individual PTEs.
		 */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/*
		 * Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		/*
		 * The kernel assumes that TLBs don't cache invalid
		 * entries, but in RISC-V, SFENCE.VMA specifies an
		 * ordering constraint, not a cache flush; it is
		 * necessary even after writing invalid entries.
		 * Relying on flush_tlb_fix_spurious_fault would
		 * suffice, but the extra traps reduce
		 * performance. So, eagerly SFENCE.VMA.
		 */
		local_flush_tlb_page(addr);

		return;
	}
}