x86, um: convert to saner kernel_execve() semantics

Select GENERIC_KERNEL_EXECVE on x86 and x86 UML and drop
__ARCH_WANT_KERNEL_EXECVE; all three copies of ret_from_kernel_execve
(entry_32.S, entry_64.S, arch/um/kernel/exec.c) are gone.

On 32-bit, ret_from_kernel_thread now clears PT_EAX and exits through
syscall_exit once the callback returns; on 64-bit, the kernel-thread
branch of ret_from_fork zeroes RAX(%rsp), does RESTORE_REST and jumps
to int_ret_from_sys_call.  In both cases the old do_exit() on the
kernel-thread return path is gone: a callback that comes back here has
done a successful exec and only needs to return to userspace.

On UML, the run_kernel_thread()/thread.exec_buf setjmp machinery goes
away; new_thread_handler() simply calls the callback and, since it
returns only after a successful exec, heads straight to userspace().

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
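
For reference, a tiny userspace model of the control-flow change on the
UML side; this is illustrative only, not kernel code, and every
identifier in it is invented for the demo.  The old scheme had the
thread callback escape back into the starter via setjmp()/longjmp(),
the new one has it simply return after a successful exec:

/*
 * Userspace model of the change, not part of the patch.  It contrasts
 * the old UML scheme (callback escapes through run_kernel_thread() and
 * thread.exec_buf via longjmp) with the new one (callback returns
 * normally after a successful exec).  All names are made up.
 */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf exec_buf;		/* stand-in for thread.exec_buf */

static int old_callback(void *arg)
{
	/* "execs" a process: never returns, jumps back into the starter */
	longjmp(exec_buf, 1);
}

static int new_callback(void *arg)
{
	/* "execs" a process and then just returns to the starter */
	return 0;
}

static void old_starter(int (*fn)(void *), void *arg)
{
	if (setjmp(exec_buf) != 0) {
		puts("old: go to userspace via longjmp");
		return;
	}
	fn(arg);
	puts("old: callback returned without exec, thread exits");
}

static void new_starter(int (*fn)(void *), void *arg)
{
	fn(arg);	/* returns only if the callback execed something */
	puts("new: go to userspace by plain return");
}

int main(void)
{
	old_starter(old_callback, NULL);
	new_starter(new_callback, NULL);
	return 0;
}

Built with a plain cc invocation, both starters reach their "go to
userspace" line; only the mechanism differs.
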
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 5d9ab0c..62435a0 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -26,7 +26,6 @@
 	jmp_buf *fault_catcher;
 	struct task_struct *prev_sched;
 	unsigned long temp_stack;
-	jmp_buf *exec_buf;
 	struct arch_thread arch;
 	jmp_buf switch_buf;
 	int mm_count;
@@ -54,7 +53,6 @@
 	.fault_addr		= NULL, \
 	.prev_sched		= NULL, \
 	.temp_stack		= 0, \
-	.exec_buf		= NULL, \
 	.arch			= INIT_ARCH_THREAD, \
 	.request		= { 0 } \
 }
diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
index 89b686c1..25dbd37 100644
--- a/arch/um/include/shared/os.h
+++ b/arch/um/include/shared/os.h
@@ -191,7 +191,6 @@
 extern int os_getpgrp(void);
 
 extern void init_new_thread_signals(void);
-extern int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr);
 
 extern int os_map_memory(void *virt, int fd, unsigned long long off,
 			 unsigned long len, int r, int w, int x);
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index e427301..565ca39 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -47,8 +47,3 @@
 #endif
 }
 EXPORT_SYMBOL(start_thread);
-
-void __noreturn ret_from_kernel_execve(struct pt_regs *unused)
-{
-	UML_LONGJMP(current->thread.exec_buf, 1);
-}
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index a1b50ad..94b0d8b 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -135,14 +135,10 @@
 	arg = current->thread.request.u.thread.arg;
 
 	/*
-	 * The return value is 1 if the kernel thread execs a process,
-	 * 0 if it just exits
+	 * callback returns only if the kernel thread execs a process
 	 */
-	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
-	if (n == 1)
-		userspace(&current->thread.regs.regs);
-	else
-		do_exit(0);
+	n = fn(arg);
+	userspace(&current->thread.regs.regs);
 }
 
 /* Called magically, see new_thread_handler above */
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index 307f173..a04ec16 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -244,16 +244,3 @@
 	signal(SIGWINCH, SIG_IGN);
 	signal(SIGTERM, SIG_DFL);
 }
-
-int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr)
-{
-	jmp_buf buf;
-	int n;
-
-	*jmp_ptr = &buf;
-	n = UML_SETJMP(&buf);
-	if (n != 0)
-		return n;
-	(*fn)(arg);
-	return 0;
-}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d93eb9d..45edcba 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -98,6 +98,7 @@
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select GENERIC_KERNEL_THREAD
+	select GENERIC_KERNEL_EXECVE
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS || UPROBES)
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 55d1555..16f3fc6 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -51,7 +51,6 @@
 # define __ARCH_WANT_SYS_UTIME
 # define __ARCH_WANT_SYS_WAITPID
 # define __ARCH_WANT_SYS_EXECVE
-# define __ARCH_WANT_KERNEL_EXECVE
 
 /*
  * "Conditional" syscalls
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index fe4cc30..91d2959 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -298,12 +298,20 @@
 	CFI_ENDPROC
 END(ret_from_fork)
 
-ENTRY(ret_from_kernel_execve)
-	movl %eax, %esp
-	movl $0,PT_EAX(%esp)
+ENTRY(ret_from_kernel_thread)
+	CFI_STARTPROC
+	pushl_cfi %eax
+	call schedule_tail
 	GET_THREAD_INFO(%ebp)
+	popl_cfi %eax
+	pushl_cfi $0x0202		# Reset kernel eflags
+	popfl_cfi
+	movl PT_EBP(%esp),%eax
+	call *PT_EBX(%esp)
+	movl $0,PT_EAX(%esp)
 	jmp syscall_exit
-END(ret_from_kernel_execve)
+	CFI_ENDPROC
+ENDPROC(ret_from_kernel_thread)
 
 /*
  * Interrupt exit functions should be protected against kprobes
@@ -994,21 +1002,6 @@
  */
 	.popsection
 
-ENTRY(ret_from_kernel_thread)
-	CFI_STARTPROC
-	pushl_cfi %eax
-	call schedule_tail
-	GET_THREAD_INFO(%ebp)
-	popl_cfi %eax
-	pushl_cfi $0x0202		# Reset kernel eflags
-	popfl_cfi
-	movl PT_EBP(%esp),%eax
-	call *PT_EBX(%esp)
-	call do_exit
-	ud2			# padding for call trace
-	CFI_ENDPROC
-ENDPROC(ret_from_kernel_thread)
-
 #ifdef CONFIG_XEN
 /* Xen doesn't set %esp to be precisely what the normal sysenter
    entrypoint expects, so fix it up before using the normal path. */
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 053c955..e1f98c2 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -459,15 +459,13 @@
 	jmp ret_from_sys_call			# go to the SYSRET fastpath
 
 1:
-	subq $REST_SKIP, %rsp	# move the stack pointer back
+	subq $REST_SKIP, %rsp	# leave space for volatiles
 	CFI_ADJUST_CFA_OFFSET	REST_SKIP
 	movq %rbp, %rdi
 	call *%rbx
-	# exit
-	mov %eax, %edi
-	call do_exit
-	ud2			# padding for call trace
-
+	movl $0, RAX(%rsp)
+	RESTORE_REST
+	jmp int_ret_from_sys_call
 	CFI_ENDPROC
 END(ret_from_fork)
 
@@ -1214,20 +1212,6 @@
 	jmp  2b
 	.previous
 
-ENTRY(ret_from_kernel_execve)
-	movq %rdi, %rsp
-	movl $0, RAX(%rsp)
-	// RESTORE_REST
-	movq 0*8(%rsp), %r15
-	movq 1*8(%rsp), %r14
-	movq 2*8(%rsp), %r13
-	movq 3*8(%rsp), %r12
-	movq 4*8(%rsp), %rbp
-	movq 5*8(%rsp), %rbx
-	addq $(6*8), %rsp
-	jmp int_ret_from_sys_call
-END(ret_from_kernel_execve)
-
 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(call_softirq)
 	CFI_STARTPROC
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index da85b6f..cab8eb8 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -14,6 +14,7 @@
 	def_bool y
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_KERNEL_THREAD
+	select GENERIC_KERNEL_EXECVE
 
 config 64BIT
 	bool "64-bit kernel" if SUBARCH = "x86"