core: support ASLR and paging
Adds support for enabling CFG_WITH_PAGER=y and CFG_CORE_ASLR=y at the
same time.
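
With CFG_WITH_PAGER=y only the unpaged and init parts of the binary can
be relocated when the core boots. Relocations in the paged part are
instead applied as pages are populated, using the relocation data stored
after __init_end, and the relocation already applied to the preloaded
init part is undone in the backing store so that the page hashes still
match the original binary.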
Acked-by: Jerome Forissier <jerome@forissier.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
diff --git a/core/arch/arm/kernel/generic_boot.c b/core/arch/arm/kernel/generic_boot.c
index f0b4af1..4f6e463 100644
--- a/core/arch/arm/kernel/generic_boot.c
+++ b/core/arch/arm/kernel/generic_boot.c
@@ -314,8 +314,8 @@
static void init_vcore(tee_mm_pool_t *mm_vcore)
{
- const vaddr_t begin = TEE_RAM_VA_START;
- vaddr_t end = TEE_RAM_VA_START + TEE_RAM_VA_SIZE;
+ const vaddr_t begin = VCORE_START_VA;
+ vaddr_t end = begin + TEE_RAM_VA_SIZE;
#ifdef CFG_CORE_SANITIZE_KADDRESS
/* Carve out asan memory, flat mapped after core memory */
@@ -328,6 +328,59 @@
panic("tee_mm_vcore init failed");
}
+/*
+ * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
+ * The init part is paged just like the rest of the normal paged code,
+ * with the difference that it's preloaded during boot. When the
+ * backing store is configured the entire paged binary, including the
+ * init part, is copied into place. Since the init part has been
+ * relocated (references to addresses updated to compensate for the new
+ * load address) this has to be undone for the hashes of those pages to
+ * match the original binary.
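+ *
+ * For example, with a load offset of 0x10000000 a pointer stored as
+ * 0x1e100000 in the relocated init part is restored to 0x0e100000 in
+ * the backing store copy before the hashes are verified (addresses
+ * above are only illustrative).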
+ *
+ * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
+ * unchanged.
+ */
+static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
+{
+#ifdef CFG_CORE_ASLR
+ unsigned long *ptr = NULL;
+ const uint32_t *reloc = NULL;
+ const uint32_t *reloc_end = NULL;
+ unsigned long offs = boot_mmu_config.load_offset;
+ const struct boot_embdata *embdata = (const void *)__init_end;
+ vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
+ vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;
+
+ reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
+ reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
+
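+ /*
+ * The entries are offsets from TEE_RAM_START in the unrelocated
+ * binary and are sorted in ascending order, which is what lets the
+ * loop break early. For each entry inside the init part, subtract
+ * the load offset from the value in the backing store copy.
+ */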
+ for (; reloc < reloc_end; reloc++) {
+ if (*reloc < addr_start)
+ continue;
+ if (*reloc >= addr_end)
+ break;
+ ptr = (void *)(paged_store + *reloc - addr_start);
+ *ptr -= offs;
+ }
+#endif
+}
+
+static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
+ void *store)
+{
+ const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
+#ifdef CFG_CORE_ASLR
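+ /*
+ * Relocation entries are offsets from the start of core memory;
+ * reloc_offs is the distance to the paged area so the entries can
+ * be rebased onto the paged store.
+ */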
+ unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
+ const struct boot_embdata *embdata = (const void *)__init_end;
+ const void *reloc = __init_end + embdata->reloc_offset;
+
+ return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
+ reloc, embdata->reloc_len, store);
+#else
+ return fobj_ro_paged_alloc(num_pages, hashes, store);
+#endif
+}
+
static void init_runtime(unsigned long pageable_part)
{
size_t n;
@@ -391,6 +444,11 @@
core_mmu_get_type_by_pa(pageable_part)),
__pageable_part_end - __pageable_part_start);
asan_memcpy_unchecked(paged_store, __init_start, init_size);
+ /*
+ * Undo any relocation of the init part so the hash
+ * checks can pass.
+ */
+ undo_init_relocation(paged_store);
/* Check that the hashes of what's in the pageable area are OK */
DMSG("Checking hashes of pageable area");
@@ -446,8 +504,7 @@
mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
pageable_size);
assert(mm);
- fobj = fobj_ro_paged_alloc(tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE,
- hashes, paged_store);
+ fobj = ro_paged_alloc(mm, hashes, paged_store);
assert(fobj);
tee_pager_add_core_area(tee_mm_get_smem(mm), PAGER_AREA_TYPE_RO, fobj);
fobj_put(fobj);
diff --git a/core/arch/arm/kernel/generic_entry_a32.S b/core/arch/arm/kernel/generic_entry_a32.S
index 45111e7..9bb2f52 100644
--- a/core/arch/arm/kernel/generic_entry_a32.S
+++ b/core/arch/arm/kernel/generic_entry_a32.S
@@ -599,12 +599,16 @@
LOCAL_FUNC relocate , :
push {r4-r5}
/* r0 holds load offset */
- ldr r4, =__end
- ldr r2, [r4, #BOOT_EMBDATA_RELOC_OFFSET]
- ldr r3, [r4, #BOOT_EMBDATA_RELOC_LEN]
+#ifdef CFG_WITH_PAGER
+ ldr r12, =__init_end
+#else
+ ldr r12, =__end
+#endif
+ ldr r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET]
+ ldr r3, [r12, #BOOT_EMBDATA_RELOC_LEN]
mov_imm r1, TEE_RAM_START
- add r2, r2, r4 /* start of relocations */
+ add r2, r2, r12 /* start of relocations */
add r3, r3, r2 /* end of relocations */
/*
@@ -617,9 +621,22 @@
* 32-bit value pointed out which increased with the load offset.
*/
+#ifdef CFG_WITH_PAGER
+ /*
+ * With the pager enabled we can only relocate the pager and init
+ * parts here; the rest is relocated when a page is populated. Make
+ * r12 the offset of __init_end from TEE_RAM_START, the same base
+ * the relocation entries use.
+ */
+ sub r12, r12, r1
+#endif
+
b 2f
/* Loop over the relocation addresses and process all entries */
1: ldr r4, [r2], #4
+#ifdef CFG_WITH_PAGER
+ /* The entries are sorted; stop at the first address past the init part */
+ cmp r4, r12
+ bge 2f
+#endif
ldr r5, [r4, r1]
add r5, r5, r0
str r5, [r4, r1]
diff --git a/core/arch/arm/kernel/generic_entry_a64.S b/core/arch/arm/kernel/generic_entry_a64.S
index 70c96e1..93e4e2a 100644
--- a/core/arch/arm/kernel/generic_entry_a64.S
+++ b/core/arch/arm/kernel/generic_entry_a64.S
@@ -237,11 +237,15 @@
#ifdef CFG_CORE_ASLR
LOCAL_FUNC relocate , :
/* x0 holds load offset */
- adr x4, __end
- ldp w2, w3, [x4, #BOOT_EMBDATA_RELOC_OFFSET]
+#ifdef CFG_WITH_PAGER
+ adr x6, __init_end
+#else
+ adr x6, __end
+#endif
+ ldp w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET]
mov_imm x1, TEE_RAM_START
- add x2, x2, x4 /* start of relocations */
+ add x2, x2, x6 /* start of relocations */
add x3, x3, x2 /* end of relocations */
/*
@@ -254,9 +258,22 @@
* a 64-bit value pointed out which increased with the load offset.
*/
+#ifdef CFG_WITH_PAGER
+ /*
+ * With the pager enabled we can only relocate the pager and init
+ * parts here; the rest is relocated when a page is populated. Make
+ * x6 the offset of __init_end from TEE_RAM_START, the same base
+ * the relocation entries use.
+ */
+ sub x6, x6, x1
+#endif
+
b 2f
/* Loop over the relocation addresses and process all entries */
1: ldr w4, [x2], #4
+#ifdef CFG_WITH_PAGER
+ /* The entries are sorted; stop at the first address past the init part */
+ cmp x4, x6
+ b.ge 2f
+#endif
add x4, x4, x1
ldr x5, [x4]
add x5, x5, x0
diff --git a/core/arch/arm/mm/core_mmu.c b/core/arch/arm/mm/core_mmu.c
index 51875de..7851fdf 100644
--- a/core/arch/arm/mm/core_mmu.c
+++ b/core/arch/arm/mm/core_mmu.c
@@ -2179,7 +2179,7 @@
{
vaddr_t v = (vaddr_t)va;
- return v >= TEE_TEXT_VA_START && v < get_linear_map_end();
+ return v >= VCORE_START_VA && v < get_linear_map_end();
}
#else
bool is_unpaged(void *va __unused)
diff --git a/core/arch/arm/mm/tee_pager.c b/core/arch/arm/mm/tee_pager.c
index 2d3bb5c..63dacb2 100644
--- a/core/arch/arm/mm/tee_pager.c
+++ b/core/arch/arm/mm/tee_pager.c
@@ -11,6 +11,7 @@
#include <kernel/abort.h>
#include <kernel/asan.h>
#include <kernel/cache_helpers.h>
+#include <kernel/linker.h>
#include <kernel/panic.h>
#include <kernel/spinlock.h>
#include <kernel/tee_misc.h>
@@ -470,7 +471,7 @@
* after end of memory.
*/
for (n = 0; n < num_pager_tables; n++) {
- if (!core_mmu_find_table(NULL, TEE_RAM_VA_START +
+ if (!core_mmu_find_table(NULL, VCORE_START_VA +
n * CORE_MMU_PGDIR_SIZE, UINT_MAX,
&pager_tables[n].tbl_info))
panic("can't find mmu tables");
diff --git a/mk/config.mk b/mk/config.mk
index 8151178..258ea75 100644
--- a/mk/config.mk
+++ b/mk/config.mk
@@ -274,9 +274,6 @@
# Enable paging, requires SRAM, can't be enabled by default
CFG_WITH_PAGER ?= n
-ifeq ($(CFG_WITH_PAGER)-$(CFG_CORE_ASLR),y-y)
-$(error CFG_WITH_PAGER and CFG_CORE_ASLR are currently incompatible)
-endif
# Runtime lock dependency checker: ensures that a proper locking hierarchy is
# used in the TEE core when acquiring and releasing mutexes. Any violation will