
Commit 21ee33d

torvalds authored and gregkh committed
mm/fault: convert remaining simple cases to lock_mm_and_find_vma()
commit a050ba1 upstream.

This does the simple pattern conversion of alpha, arc, csky, hexagon,
loongarch, nios2, sh, sparc32, and xtensa to the lock_mm_and_find_vma()
helper. They all have the regular fault handling pattern without odd
special cases.

The remaining architectures all have something that keeps us from a
straightforward conversion: ia64 and parisc have stacks that can grow
both up as well as down (and ia64 has special address region checks).

And m68k, microblaze, openrisc, sparc64, and um end up having extra
rules about only expanding the stack down a limited amount below the
user space stack pointer. That is something that x86 used to do too
(long long ago), and it probably could just be skipped, but it still
makes the conversion less than trivial.

Note that this conversion was done manually and with the exception of
alpha without any build testing, because I have a fairly limited
cross-building environment. The cases are all simple, and I went
through the changes several times, but...

Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Samuel Mendoza-Jonas <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
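For reference, the open-coded sequence each converted handler used to carry is: take the mmap read lock, look up the VMA, and attempt a VM_GROWSDOWN stack expansion before giving up. The sketch below shows only the contract the converted callers rely on; it is not the real mm/memory.c implementation (which is gated by CONFIG_LOCK_MM_AND_FIND_VMA and additionally uses regs to avoid blocking on the lock for kernel-mode faults that have no exception-table fixup, the pattern nios2 open-coded, see below):

/*
 * Simplified sketch of the lock_mm_and_find_vma() contract -- not the
 * mm/memory.c body.
 * Success: returns the VMA with the mmap read lock still held.
 * Failure: returns NULL with the lock already released, which is why
 * the converted callers jump to a bad_area_nosemaphore label.
 */
struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
					    unsigned long addr,
					    struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	vma = find_vma(mm, addr);
	if (likely(vma && vma->vm_start <= addr))
		return vma;		/* address inside the VMA */

	/* Address below the VMA: only a grow-down stack may expand. */
	if (vma && (vma->vm_flags & VM_GROWSDOWN) &&
	    !expand_stack(vma, addr))
		return vma;

	mmap_read_unlock(mm);
	return NULL;
}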
1 parent 1f4197f commit 21ee33d

18 files changed: +45 −124 lines

arch/alpha/Kconfig (+1)

@@ -28,6 +28,7 @@ config ALPHA
 	select GENERIC_SMP_IDLE_THREAD
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_MOD_ARCH_SPECIFIC
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select ODD_RT_SIGACTION
 	select OLD_SIGSUSPEND

arch/alpha/mm/fault.c (+3, −10)

@@ -119,20 +119,12 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	flags |= FAULT_FLAG_USER;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;

 	/* Ok, we have a good vm_area for this memory access, so
 	   we can handle it.  */
- good_area:
 	si_code = SEGV_ACCERR;
 	if (cause < 0) {
 		if (!(vma->vm_flags & VM_EXEC))

@@ -189,6 +181,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 bad_area:
 	mmap_read_unlock(mm);

+bad_area_nosemaphore:
 	if (user_mode(regs))
 		goto do_sigsegv;

arch/arc/Kconfig (+1)

@@ -41,6 +41,7 @@ config ARC
 	select HAVE_PERF_EVENTS
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select OF
 	select OF_EARLY_FLATTREE

arch/arc/mm/fault.c (+3, −8)

@@ -113,15 +113,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)

 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (unlikely(address < vma->vm_start)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
-			goto bad_area;
-	}
+		goto bad_area_nosemaphore;

 	/*
 	 * vm_area is good, now check permissions for this memory access

@@ -161,6 +155,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 bad_area:
 	mmap_read_unlock(mm);

+bad_area_nosemaphore:
 	/*
 	 * Major/minor page fault accounting
 	 * (in case of retry we only land here once)

arch/csky/Kconfig (+1)

@@ -96,6 +96,7 @@ config CSKY
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
+	select LOCK_MM_AND_FIND_VMA
 	select MAY_HAVE_SPARSE_IRQ
 	select MODULES_USE_ELF_RELA if MODULES
 	select OF

arch/csky/mm/fault.c (+5, −17)

@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
 	BUG();
 }

-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
 {
 	/*
 	 * Something tried to access memory that isn't in our memory map.
 	 * Fix it, but check if it's kernel or user first.
 	 */
-	mmap_read_unlock(mm);
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
 		do_trap(regs, SIGSEGV, code, addr);

@@ -238,32 +237,21 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	if (is_write(regs))
 		flags |= FAULT_FLAG_WRITE;
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, addr);
+	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (likely(vma->vm_start <= addr))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (unlikely(expand_stack(vma, addr))) {
-		bad_area(regs, mm, code, addr);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}

 	/*
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it.
 	 */
-good_area:
 	code = SEGV_ACCERR;

 	if (unlikely(access_error(regs, vma))) {
-		bad_area(regs, mm, code, addr);
+		mmap_read_unlock(mm);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}
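Taken together, the csky hunks leave the fault path looking roughly like this (assembled from the diff above, not quoted from the resulting file). The helper's failure path arrives with the lock already dropped, while the access_error() path still holds it and must unlock before signalling:

retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		/* lookup failed: mmap lock already released */
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}

	code = SEGV_ACCERR;
	if (unlikely(access_error(regs, vma))) {
		/* lookup succeeded, so the lock is still held: drop it */
		mmap_read_unlock(mm);
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}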

arch/hexagon/Kconfig (+1)

@@ -28,6 +28,7 @@ config HEXAGON
 	select GENERIC_SMP_IDLE_THREAD
 	select STACKTRACE_SUPPORT
 	select GENERIC_CLOCKEVENTS_BROADCAST
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select GENERIC_CPU_DEVICES
 	select ARCH_WANT_LD_ORPHAN_WARN

arch/hexagon/mm/vm_fault.c (+4, −14)

@@ -57,21 +57,10 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)

 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
+	vma = lock_mm_and_find_vma(mm, address, regs);
+	if (unlikely(!vma))
+		goto bad_area_nosemaphore;

-	if (vma->vm_start <= address)
-		goto good_area;
-
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-
-	if (expand_stack(vma, address))
-		goto bad_area;
-
-good_area:
 	/* Address space is OK.  Now check access rights. */
 	si_code = SEGV_ACCERR;

@@ -140,6 +129,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 bad_area:
 	mmap_read_unlock(mm);

+bad_area_nosemaphore:
 	if (user_mode(regs)) {
 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 		return;

arch/loongarch/Kconfig (+1)

@@ -107,6 +107,7 @@ config LOONGARCH
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
 	select IRQ_FORCED_THREADING
 	select IRQ_LOONGARCH_CPU
+	select LOCK_MM_AND_FIND_VMA
 	select MMU_GATHER_MERGE_VMAS if MMU
 	select MODULES_USE_ELF_RELA if MODULES
 	select NEED_PER_CPU_EMBED_FIRST_CHUNK

arch/loongarch/mm/fault.c (+6, −10)

@@ -166,22 +166,18 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,

 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (!expand_stack(vma, address))
-		goto good_area;
+	vma = lock_mm_and_find_vma(mm, address, regs);
+	if (unlikely(!vma))
+		goto bad_area_nosemaphore;
+	goto good_area;
+
 /*
  * Something tried to access memory that isn't in our memory map..
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
 	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	do_sigsegv(regs, write, address, si_code);
 	return;

arch/nios2/Kconfig (+1)

@@ -16,6 +16,7 @@ config NIOS2
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_KGDB
 	select IRQ_DOMAIN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select OF
 	select OF_EARLY_FLATTREE

arch/nios2/mm/fault.c (+2, −15)

@@ -86,27 +86,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,

 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->ea))
-			goto bad_area_nosemaphore;
 retry:
-		mmap_read_lock(mm);
-	}
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
-good_area:
 	code = SEGV_ACCERR;

 	switch (cause) {
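nios2 was the only architecture in this batch that open-coded the non-blocking fast path for kernel faults, and the helper subsumes it. Roughly, and mirroring the removed nios2 lines rather than the exact upstream form in mm/memory.c:

	if (!mmap_read_trylock(mm)) {
		/*
		 * A kernel fault with no exception-table fixup must not
		 * sleep on a lock the kernel may already hold.
		 */
		if (regs && !user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			return NULL;
		mmap_read_lock(mm);
	}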

arch/sh/Kconfig (+1)

@@ -56,6 +56,7 @@ config SUPERH
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_FORCED_THREADING
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select NEED_SG_DMA_LENGTH
 	select NO_DMA if !MMU && !DMA_COHERENT

arch/sh/mm/fault.c (+2, −15)

@@ -439,29 +439,16 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	}

 retry:
-	mmap_read_lock(mm);
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (unlikely(!vma)) {
-		bad_area(regs, error_code, address);
-		return;
-	}
-	if (likely(vma->vm_start <= address))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, error_code, address);
-		return;
-	}
-	if (unlikely(expand_stack(vma, address))) {
-		bad_area(regs, error_code, address);
+		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}

 	/*
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
-good_area:
 	if (unlikely(access_error(error_code, vma))) {
 		bad_area_access_error(regs, error_code, address);
 		return;

arch/sparc/Kconfig (+1)

@@ -56,6 +56,7 @@ config SPARC32
 	select DMA_DIRECT_REMAP
 	select GENERIC_ATOMIC64
 	select HAVE_UID16
+	select LOCK_MM_AND_FIND_VMA
 	select OLD_SIGACTION
 	select ZONE_DMA

arch/sparc/mm/fault_32.c (+8, −24)

@@ -143,28 +143,19 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	if (pagefault_disabled() || !mm)
 		goto no_context;

+	if (!from_user && address >= PAGE_OFFSET)
+		goto no_context;
+
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

 retry:
-	mmap_read_lock(mm);
-
-	if (!from_user && address >= PAGE_OFFSET)
-		goto bad_area;
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
-good_area:
 	code = SEGV_ACCERR;
 	if (write) {
 		if (!(vma->vm_flags & VM_WRITE))

@@ -318,17 +309,9 @@ static void force_user_fault(unsigned long address, int write)

 	code = SEGV_MAPERR;

-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, NULL);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
-good_area:
+		goto bad_area_nosemaphore;
 	code = SEGV_ACCERR;
 	if (write) {
 		if (!(vma->vm_flags & VM_WRITE))

@@ -347,6 +330,7 @@ static void force_user_fault(unsigned long address, int write)
 	return;
 bad_area:
 	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
 	return;

arch/xtensa/Kconfig (+1)

@@ -49,6 +49,7 @@ config XTENSA
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_DOMAIN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select PERF_USE_VMALLOC
 	select TRACE_IRQFLAGS_SUPPORT
