core: thread_enter_user_mode(): avoid leaking register content

Prior to this patch not all registers passed to user mode were assigned
a new value. This allowed user mode to see the values of some registers
used by the Core. With this patch all general purpose registers
available in user mode are either cleared or assigned a value.
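
The approach is to stage the complete user mode register state in a
zero-initialized structure and only then assign the few registers that
carry defined values. A minimal sketch of the pattern (illustration
only, with a simplified stand-in struct and names; the real code uses
struct thread_ctx_regs and set_ctx_regs() below):

    /* Illustration only, not part of the patch */
    struct user_regs {
            unsigned long x[31];
            unsigned long sp;
            unsigned long pc;
            unsigned int cpsr;
    };

    static void prepare_user_regs(struct user_regs *regs, unsigned long a0,
                                  unsigned long user_sp,
                                  unsigned long entry_func,
                                  unsigned int spsr)
    {
            /* Clear everything so no Core or TA register content leaks */
            *regs = (struct user_regs){ };
            /* Assign only what user mode is supposed to see */
            regs->x[0] = a0;
            regs->sp = user_sp;
            regs->pc = entry_func;
            regs->cpsr = spsr;
    }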

Acked-by: Pipat Methavanitpong <pipat.methavanitpong@linaro.org>
Acked-by: Jerome Forissier <jerome@forissier.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
diff --git a/core/arch/arm/kernel/asm-defines.c b/core/arch/arm/kernel/asm-defines.c
index f255cc1..6683b9e 100644
--- a/core/arch/arm/kernel/asm-defines.c
+++ b/core/arch/arm/kernel/asm-defines.c
@@ -30,6 +30,12 @@
 	DEFINE(THREAD_SVC_REG_R5, offsetof(struct thread_svc_regs, r5));
 	DEFINE(THREAD_SVC_REG_R6, offsetof(struct thread_svc_regs, r6));
 
+	/* struct thread_ctx_regs */
+	DEFINE(THREAD_CTX_REGS_USR_SP,
+	       offsetof(struct thread_ctx_regs, usr_sp));
+	DEFINE(THREAD_CTX_REGS_PC, offsetof(struct thread_ctx_regs, pc));
+	DEFINE(THREAD_CTX_REGS_CPSR, offsetof(struct thread_ctx_regs, cpsr));
+
 	/* struct thread_core_local */
 	DEFINE(THREAD_CORE_LOCAL_R0, offsetof(struct thread_core_local, r[0]));
 	DEFINE(THREAD_CORE_LOCAL_SM_PM_CTX_PHYS,
@@ -68,14 +74,17 @@
 	DEFINE(THREAD_CTX_REGS_SP, offsetof(struct thread_ctx_regs, sp));
 	DEFINE(THREAD_CTX_REGS_X0, offsetof(struct thread_ctx_regs, x[0]));
 	DEFINE(THREAD_CTX_REGS_X1, offsetof(struct thread_ctx_regs, x[1]));
+	DEFINE(THREAD_CTX_REGS_X2, offsetof(struct thread_ctx_regs, x[2]));
 	DEFINE(THREAD_CTX_REGS_X4, offsetof(struct thread_ctx_regs, x[4]));
 	DEFINE(THREAD_CTX_REGS_X19, offsetof(struct thread_ctx_regs, x[19]));
 
 	/* struct thread_user_mode_rec */
+	DEFINE(THREAD_USER_MODE_REC_CTX_REGS_PTR,
+	       offsetof(struct thread_user_mode_rec, ctx_regs_ptr));
 	DEFINE(THREAD_USER_MODE_REC_EXIT_STATUS0_PTR,
-		offsetof(struct thread_user_mode_rec, exit_status0_ptr));
+	       offsetof(struct thread_user_mode_rec, exit_status0_ptr));
 	DEFINE(THREAD_USER_MODE_REC_X19,
-		offsetof(struct thread_user_mode_rec, x[0]));
+	       offsetof(struct thread_user_mode_rec, x[0]));
 	DEFINE(THREAD_USER_MODE_REC_SIZE, sizeof(struct thread_user_mode_rec));
 
 	/* struct thread_core_local */
diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c
index cf2d2a6..1794a39 100644
--- a/core/arch/arm/kernel/thread.c
+++ b/core/arch/arm/kernel/thread.c
@@ -1238,12 +1238,49 @@
 }
 #endif
 
+static void set_ctx_regs(struct thread_ctx_regs *regs, unsigned long a0,
+			 unsigned long a1, unsigned long a2, unsigned long a3,
+			 unsigned long user_sp, unsigned long entry_func,
+			 uint32_t spsr)
+{
+	/*
+	 * First clear all registers to avoid leaking information from
+	 * other TAs or even the Core itself.
+	 */
+	*regs = (struct thread_ctx_regs){ };
+#ifdef ARM32
+	regs->r0 = a0;
+	regs->r1 = a1;
+	regs->r2 = a2;
+	regs->r3 = a3;
+	regs->usr_sp = user_sp;
+	regs->pc = entry_func;
+	regs->cpsr = spsr;
+#endif
+#ifdef ARM64
+	regs->x[0] = a0;
+	regs->x[1] = a1;
+	regs->x[2] = a2;
+	regs->x[3] = a3;
+	regs->pc = entry_func;
+	regs->cpsr = spsr;
+	regs->x[13] = user_sp;	/* Used when running TA in Aarch32 */
+	regs->sp = user_sp;	/* Used when running TA in Aarch64 */
+	/* Set frame pointer (user stack can't be unwound past this point) */
+	regs->x[29] = 0;
+#endif
+}
+
 uint32_t thread_enter_user_mode(unsigned long a0, unsigned long a1,
 		unsigned long a2, unsigned long a3, unsigned long user_sp,
 		unsigned long entry_func, bool is_32bit,
 		uint32_t *exit_status0, uint32_t *exit_status1)
 {
-	uint32_t spsr;
+	uint32_t spsr = 0;
+	uint32_t exceptions = 0;
+	uint32_t rc = 0;
+	struct thread_ctx_regs *regs = NULL;
 
 	tee_ta_update_session_utime_resume();
 
@@ -1252,8 +1289,19 @@
 		*exit_status1 = 0xbadbadba;
 		return 0;
 	}
-	return __thread_enter_user_mode(a0, a1, a2, a3, user_sp, entry_func,
-					spsr, exit_status0, exit_status1);
+
+	exceptions = thread_mask_exceptions(THREAD_EXCP_ALL);
+	/*
+	 * We're using the per-thread location of saved context registers
+	 * for temporary storage. Now that exceptions are masked, this
+	 * storage will not be used for anything else until exceptions are
+	 * unmasked again once user mode has been entered.
+	 */
+	regs = thread_get_ctx_regs();
+	set_ctx_regs(regs, a0, a1, a2, a3, user_sp, entry_func, spsr);
+	rc = __thread_enter_user_mode(regs, exit_status0, exit_status1);
+	thread_unmask_exceptions(exceptions);
+	return rc;
 }
 
 #ifdef CFG_CORE_UNMAP_CORE_AT_EL0
diff --git a/core/arch/arm/kernel/thread_a32.S b/core/arch/arm/kernel/thread_a32.S
index 841405a..10c078a 100644
--- a/core/arch/arm/kernel/thread_a32.S
+++ b/core/arch/arm/kernel/thread_a32.S
@@ -262,11 +262,11 @@
  */
 
 /*
- * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
- *               unsigned long a2, unsigned long a3, unsigned long user_sp,
- *               unsigned long user_func, unsigned long spsr,
- *               uint32_t *exit_status0, uint32_t *exit_status1)
+ * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
+ *				     uint32_t *exit_status0,
+ *				     uint32_t *exit_status1);
  *
+ * This function depends on being called with exceptions masked.
  */
 FUNC __thread_enter_user_mode , :
 UNWIND(	.fnstart)
@@ -281,23 +281,22 @@
 	 */
 	push    {r4-r12,lr}
 
-	ldr     r4, [sp, #(10 * 0x4)]   /* user stack pointer */
-	ldr     r5, [sp, #(11 * 0x4)]   /* user function */
-	ldr     r6, [sp, #(12 * 0x4)]   /* spsr */
-
 	/*
 	 * Save old user sp and set new user sp.
 	 */
 	cps	#CPSR_MODE_SYS
-	mov	r7, sp
-	mov     sp, r4
+	mov	r4, sp
+	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
 	cps	#CPSR_MODE_SVC
-	push	{r7,r8}
+
+	push	{r1, r2, r4, r5}
 
 	/* Prepare user mode entry via eret_to_user_mode */
-	cpsid	aif
-	msr     spsr_fsxc, r6
-	mov	lr, r5
+	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
+	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
+	msr     spsr_fsxc, r4
+
+	ldm	r0, {r0-r12}
 
 	b	eret_to_user_mode
 UNWIND(	.fnend)
@@ -311,18 +310,18 @@
 FUNC thread_unwind_user_mode , :
 UNWIND(	.fnstart)
 UNWIND(	.cantunwind)
-	ldr     ip, [sp, #(15 * 0x4)]   /* &ctx->panicked */
-	str	r1, [ip]
-	ldr     ip, [sp, #(16 * 0x4)]   /* &ctx->panic_code */
-	str	r2, [ip]
+	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
+	pop	{r4-r7}
+	str	r1, [r4]
+	str	r2, [r5]
 
 	/* Restore old user sp */
-	pop	{r4,r7}
 	cps	#CPSR_MODE_SYS
-	mov	sp, r4
+	mov	sp, r6
 	cps	#CPSR_MODE_SVC
 
-	pop     {r4-r12,pc}	/* Match the push in thread_enter_user_mode()*/
+	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
+	pop     {r4-r12,pc}
 UNWIND(	.fnend)
 END_FUNC thread_unwind_user_mode
 
diff --git a/core/arch/arm/kernel/thread_a64.S b/core/arch/arm/kernel/thread_a64.S
index e7dbe94..d159e7e 100644
--- a/core/arch/arm/kernel/thread_a64.S
+++ b/core/arch/arm/kernel/thread_a64.S
@@ -63,27 +63,24 @@
 KEEP_PAGER thread_init_vbar
 
 /*
- * uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
- *               unsigned long a2, unsigned long a3, unsigned long user_sp,
- *               unsigned long user_func, unsigned long spsr,
- *               uint32_t *exit_status0, uint32_t *exit_status1)
+ * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
+ *				     uint32_t *exit_status0,
+ *				     uint32_t *exit_status1);
  *
+ * This function depends on being called with exceptions masked.
  */
 FUNC __thread_enter_user_mode , :
-	ldr	x8, [sp]
 	/*
 	 * Create the and fill in the struct thread_user_mode_rec
 	 */
 	sub	sp, sp, #THREAD_USER_MODE_REC_SIZE
-	store_xregs sp, THREAD_USER_MODE_REC_EXIT_STATUS0_PTR, 7, 8
+	store_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 0, 2
 	store_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
 
 	/*
-	 * Switch to SP_EL1
-	 * Disable exceptions
 	 * Save kern sp in x19
+	 * Switch to SP_EL1
 	 */
-	msr	daifset, #DAIFBIT_ALL
 	mov	x19, sp
 	msr	spsel, #1
 
@@ -101,17 +98,22 @@
 	/*
 	 * Initialize SPSR, ELR_EL1, and SP_EL0 to enter user mode
 	 */
-	msr	spsr_el1, x6
-	/* Set user sp */
-	mov	x13, x4		/* Used when running TA in Aarch32 */
-	msr	sp_el0, x4	/* Used when running TA in Aarch64 */
-	/* Set user function */
-	msr	elr_el1, x5
-	/* Set frame pointer (user stack can't be unwound past this point) */
-	mov x29, #0
+	load_xregs x0, THREAD_CTX_REGS_SP, 1, 3
+	msr	sp_el0, x1
+	msr	elr_el1, x2
+	msr	spsr_el1, x3
+
+	/*
+	 * Save the values for x0 and x1 in struct thread_core_local to be
+	 * restored later just before the eret.
+	 */
+	load_xregs x0, THREAD_CTX_REGS_X0, 1, 2
+	store_xregs sp, THREAD_CORE_LOCAL_X0, 1, 2
+
+	/* Load the rest of the general purpose registers */
+	load_xregs x0, THREAD_CTX_REGS_X2, 2, 30
 
 	/* Jump into user mode */
-	store_xregs sp, THREAD_CORE_LOCAL_X0, 0, 1
 	b eret_to_el0
 END_FUNC __thread_enter_user_mode
 KEEP_PAGER __thread_enter_user_mode
@@ -123,9 +125,11 @@
  */
 FUNC thread_unwind_user_mode , :
 	/* Store the exit status */
-	ldp	x3, x4, [sp, #THREAD_USER_MODE_REC_EXIT_STATUS0_PTR]
-	str	w1, [x3]
-	str	w2, [x4]
+	load_xregs sp, THREAD_USER_MODE_REC_CTX_REGS_PTR, 3, 5
+	str	w1, [x4]
+	str	w2, [x5]
+	/* Save x19..x30 */
+	store_xregs x3, THREAD_CTX_REGS_X19, 19, 30
 	/* Restore x19..x30 */
 	load_xregs sp, THREAD_USER_MODE_REC_X19, 19, 30
 	add	sp, sp, #THREAD_USER_MODE_REC_SIZE
diff --git a/core/arch/arm/kernel/thread_private.h b/core/arch/arm/kernel/thread_private.h
index 44e2183..1e5d664 100644
--- a/core/arch/arm/kernel/thread_private.h
+++ b/core/arch/arm/kernel/thread_private.h
@@ -57,8 +57,10 @@
 
 #ifdef ARM64
 struct thread_user_mode_rec {
+	uint64_t ctx_regs_ptr;
 	uint64_t exit_status0_ptr;
 	uint64_t exit_status1_ptr;
+	uint64_t pad; /* keep sizeof() a multiple of 16 */
 	uint64_t x[31 - 19]; /* x19..x30 */
 };
 #endif /*ARM64*/
@@ -168,10 +170,9 @@
  */
 void thread_resume(struct thread_ctx_regs *regs);
 
-uint32_t __thread_enter_user_mode(unsigned long a0, unsigned long a1,
-		unsigned long a2, unsigned long a3, unsigned long user_sp,
-		unsigned long user_func, unsigned long spsr,
-		uint32_t *exit_status0, uint32_t *exit_status1);
+uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
+				  uint32_t *exit_status0,
+				  uint32_t *exit_status1);
 
 /*
  * Private functions made available for thread_asm.S