core: add fobj_ro_reloc_paged_alloc()

Adds a new type of fobj, struct fobj_ro_reloc_paged, which is created
with fobj_ro_reloc_paged_alloc(). It is like struct fobj_rop, but with
added support for relocation.
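
A caller could use it roughly like below (hypothetical names and
values, for illustration only):

    /* reloc holds sorted 32-bit offsets of relative relocations */
    struct fobj *f = fobj_ro_reloc_paged_alloc(num_pages, hashes,
                                               reloc_offs, reloc,
                                               reloc_len, store);
    if (!f)
        panic();

With reloc_len 0, or when no relocation applies to the covered pages,
this degenerates to a plain fobj_ro_paged_alloc() fobj.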

Acked-by: Jerome Forissier <jerome@forissier.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
diff --git a/core/include/mm/fobj.h b/core/include/mm/fobj.h
index db71e8a..5dcb723 100644
--- a/core/include/mm/fobj.h
+++ b/core/include/mm/fobj.h
@@ -85,6 +85,27 @@
 				 void *store);
 
 /*
+ * fobj_ro_reloc_paged_alloc() - Allocate initialized read-only storage with
+ *				 relocation
+ * @num_pages:	Number of pages covered
+ * @hashes:	Hashes to verify the pages
+ * @reloc_offs:	Offset of the first page from the base address which
+ *		the relocations in @reloc are relative to
+ * @reloc:	Relocation data
+ * @reloc_len:	Length of relocation data
+ * @store:	Clear text data for all pages
+ *
+ * This is like fobj_ro_paged_alloc() above, but in addition the
+ * relocation information is applied to each page as it is populated.
+ * This makes sure that the offset by which the pages are relocated
+ * doesn't leak out to storage.
+ *
+ * Returns a valid pointer on success or NULL on failure.
+ */
+struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
+				       unsigned int reloc_offs,
+				       const void *reloc,
+				       unsigned int reloc_len, void *store);
+
+/*
  * fobj_load_page() - Load a page into memory
  * @fobj:	Fobj pointer
  * @page_index:	Index of page in @fobj
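
To make the @reloc_offs semantics concrete, a sketch with made-up
numbers: if the paged area starts 0x10000 bytes past the base address
the relocations in @reloc are relative to, a relocation entry with the
value 0x10230 applies to page 0 of the fobj, at offset 0x230 into that
page.

    /* Hypothetical values, for illustration only */
    const uint32_t reloc[] = { 0x10230 };
    struct fobj *f = fobj_ro_reloc_paged_alloc(num_pages, hashes,
                                               0x10000, reloc,
                                               sizeof(reloc), store);
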
diff --git a/core/mm/fobj.c b/core/mm/fobj.c
index fea2a4b..38b7608 100644
--- a/core/mm/fobj.c
+++ b/core/mm/fobj.c
@@ -5,6 +5,7 @@
 
 #include <crypto/crypto.h>
 #include <crypto/internal_aes-gcm.h>
+#include <kernel/generic_boot.h>
 #include <kernel/panic.h>
 #include <mm/core_memprot.h>
 #include <mm/core_mmu.h>
@@ -37,7 +38,7 @@
 	struct fobj fobj;
 };
 
-static struct fobj_ops ops_rw_paged;
+static const struct fobj_ops ops_rw_paged;
 
 static struct internal_aes_gcm_key rwp_ae_key;
 
@@ -191,7 +192,7 @@
 }
 KEEP_PAGER(rwp_save_page);
 
-static struct fobj_ops ops_rw_paged __rodata_unpaged = {
+static const struct fobj_ops ops_rw_paged __rodata_unpaged = {
 	.free = rwp_free,
 	.load_page = rwp_load_page,
 	.save_page = rwp_save_page,
@@ -203,7 +204,15 @@
 	struct fobj fobj;
 };
 
-static struct fobj_ops ops_ro_paged;
+static const struct fobj_ops ops_ro_paged;
+
+static void rop_init(struct fobj_rop *rop, const struct fobj_ops *ops,
+		     unsigned int num_pages, void *hashes, void *store)
+{
+	rop->hashes = hashes;
+	rop->store = store;
+	fobj_init(&rop->fobj, ops, num_pages);
+}
 
 struct fobj *fobj_ro_paged_alloc(unsigned int num_pages, void *hashes,
 				 void *store)
@@ -216,9 +225,7 @@
 	if (!rop)
 		return NULL;
 
-	rop->hashes = hashes;
-	rop->store = store;
-	fobj_init(&rop->fobj, &ops_ro_paged, num_pages);
+	rop_init(rop, &ops_ro_paged, num_pages, hashes, store);
 
 	return &rop->fobj;
 }
@@ -230,28 +237,38 @@
 	return container_of(fobj, struct fobj_rop, fobj);
 }
 
+static void rop_uninit(struct fobj_rop *rop)
+{
+	fobj_uninit(&rop->fobj);
+	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
+	free(rop->hashes);
+}
+
 static void rop_free(struct fobj *fobj)
 {
 	struct fobj_rop *rop = to_rop(fobj);
 
-	fobj_uninit(fobj);
-	tee_mm_free(tee_mm_find(&tee_mm_sec_ddr, virt_to_phys(rop->store)));
-	free(rop->hashes);
+	rop_uninit(rop);
 	free(rop);
 }
 
+static TEE_Result rop_load_page_helper(struct fobj_rop *rop,
+				       unsigned int page_idx, void *va)
+{
+	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
+	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;
+
+	assert(refcount_val(&rop->fobj.refc));
+	assert(page_idx < rop->fobj.num_pages);
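+	/* Copy from the backing store, then verify against the hash */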
+	memcpy(va, src, SMALL_PAGE_SIZE);
+
+	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
+}
+
 static TEE_Result rop_load_page(struct fobj *fobj, unsigned int page_idx,
 				void *va)
 {
-	struct fobj_rop *rop = to_rop(fobj);
-	const uint8_t *hash = rop->hashes + page_idx * TEE_SHA256_HASH_SIZE;
-	const uint8_t *src = rop->store + page_idx * SMALL_PAGE_SIZE;
-
-	assert(refcount_val(&fobj->refc));
-	assert(page_idx < fobj->num_pages);
-	memcpy(va, src, SMALL_PAGE_SIZE);
-
-	return hash_sha256_check(hash, va, SMALL_PAGE_SIZE);
+	return rop_load_page_helper(to_rop(fobj), page_idx, va);
 }
 KEEP_PAGER(rop_load_page);
 
@@ -263,13 +280,186 @@
 }
 KEEP_PAGER(rop_save_page);
 
-static struct fobj_ops ops_ro_paged __rodata_unpaged = {
+static const struct fobj_ops ops_ro_paged __rodata_unpaged = {
 	.free = rop_free,
 	.load_page = rop_load_page,
 	.save_page = rop_save_page,
 };
 
-static struct fobj_ops ops_locked_paged;
+#ifdef CFG_CORE_ASLR
+/*
+ * When using relocated pages the relocation information must be applied
+ * before the pages can be used. With read-only paging the content is
+ * only integrity protected, so the relocations cannot be applied to the
+ * pages in the less secure "store" or the load address selected by ASLR
+ * would be given away. This means that each time a page has been loaded
+ * and verified it has to have its relocation information applied before
+ * it can be used.
+ *
+ * Only relative relocations are supported; this allows a rather compact
+ * representation of the needed relocation information in this struct.
+ * r_offset is replaced with the offset into the page that needs to be
+ * updated. This number can never be larger than SMALL_PAGE_SIZE, so a
+ * uint16_t can be used to represent it.
+ *
+ * All relocations are converted and stored in @relocs. @page_reloc_idx
+ * is an array of length @rop.fobj.num_pages with an entry for each
+ * page. If @page_reloc_idx[page_idx] isn't UINT16_MAX it's an index
+ * into @relocs.
+ */
+struct fobj_ro_reloc_paged {
+	uint16_t *page_reloc_idx;
+	uint16_t *relocs;
+	unsigned int num_relocs;
+	struct fobj_rop rop;
+};
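+
+/*
+ * A worked example with made-up numbers: with reloc_offs 0x1000 and
+ * relocations at 0x1010, 0x1020 and 0x2040 covering two pages, the
+ * converted arrays become relocs = { 0x10, 0x20, 0x40 } and
+ * page_reloc_idx = { 0, 2 }, that is, page 0 owns relocs[0..1] and
+ * page 1 owns relocs[2].
+ */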
+
+static const struct fobj_ops ops_ro_reloc_paged;
+
+static unsigned int get_num_rels(unsigned int num_pages,
+				 unsigned int reloc_offs,
+				 const uint32_t *reloc, unsigned int num_relocs)
+{
+	unsigned int nrels = 0;
+	unsigned int n = 0;
+	vaddr_t offs = 0;
+
+	/*
+	 * Count the number of relocations needed for these pages. Also
+	 * check that the data is well formed: only expected relocations,
+	 * sorted in order of the address they apply to.
+	 */
+	for (; n < num_relocs; n++) {
+		assert(ALIGNMENT_IS_OK(reloc[n], unsigned long));
+		assert(offs < reloc[n]);	/* check that it's sorted */
+		offs = reloc[n];
+		if (offs >= reloc_offs &&
+		    offs < reloc_offs + num_pages * SMALL_PAGE_SIZE)
+			nrels++;
+	}
+
+	return nrels;
+}
+
+static void init_rels(struct fobj_ro_reloc_paged *rrp, unsigned int reloc_offs,
+		      const uint32_t *reloc, unsigned int num_relocs)
+{
+	unsigned int npg = rrp->rop.fobj.num_pages;
+	unsigned int pg_idx = 0;
+	unsigned int reln = 0;
+	unsigned int n = 0;
+	uint32_t r = 0;
+
+	for (n = 0; n < npg; n++)
+		rrp->page_reloc_idx[n] = UINT16_MAX;
+
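+	/*
+	 * Convert each relocation to an offset into its page, recording
+	 * the index of the first relocation of each page.
+	 */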
+	for (n = 0; n < num_relocs ; n++) {
+		if (reloc[n] < reloc_offs)
+			continue;
+
+		/* r is the offset from the beginning of this fobj */
+		r = reloc[n] - reloc_offs;
+
+		pg_idx = r / SMALL_PAGE_SIZE;
+		if (pg_idx >= npg)
+			break;
+
+		if (rrp->page_reloc_idx[pg_idx] == UINT16_MAX)
+			rrp->page_reloc_idx[pg_idx] = reln;
+		rrp->relocs[reln] = r - pg_idx * SMALL_PAGE_SIZE;
+		reln++;
+	}
+
+	assert(reln == rrp->num_relocs);
+}
+
+struct fobj *fobj_ro_reloc_paged_alloc(unsigned int num_pages, void *hashes,
+				       unsigned int reloc_offs,
+				       const void *reloc,
+				       unsigned int reloc_len, void *store)
+{
+	struct fobj_ro_reloc_paged *rrp = NULL;
+	const unsigned int num_relocs = reloc_len / sizeof(uint32_t);
+	unsigned int nrels = 0;
+
+	assert(ALIGNMENT_IS_OK(reloc, uint32_t));
+	assert(ALIGNMENT_IS_OK(reloc_len, uint32_t));
+	assert(num_pages && hashes && store);
+	if (!reloc_len) {
+		assert(!reloc);
+		return fobj_ro_paged_alloc(num_pages, hashes, store);
+	}
+	assert(reloc);
+
+	nrels = get_num_rels(num_pages, reloc_offs, reloc, num_relocs);
+	if (!nrels)
+		return fobj_ro_paged_alloc(num_pages, hashes, store);
+
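+	/* The two uint16_t arrays are carved out of the same allocation */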
+	rrp = calloc(1, sizeof(*rrp) + num_pages * sizeof(uint16_t) +
+			nrels * sizeof(uint16_t));
+	if (!rrp)
+		return NULL;
+	rop_init(&rrp->rop, &ops_ro_reloc_paged, num_pages, hashes, store);
+	rrp->page_reloc_idx = (uint16_t *)(rrp + 1);
+	rrp->relocs = rrp->page_reloc_idx + num_pages;
+	rrp->num_relocs = nrels;
+	init_rels(rrp, reloc_offs, reloc, num_relocs);
+
+	return &rrp->rop.fobj;
+}
+
+static struct fobj_ro_reloc_paged *to_rrp(struct fobj *fobj)
+{
+	assert(fobj->ops == &ops_ro_reloc_paged);
+
+	return container_of(fobj, struct fobj_ro_reloc_paged, rop.fobj);
+}
+
+static void rrp_free(struct fobj *fobj)
+{
+	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
+
+	rop_uninit(&rrp->rop);
+	free(rrp);
+}
+
+static TEE_Result rrp_load_page(struct fobj *fobj, unsigned int page_idx,
+				void *va)
+{
+	struct fobj_ro_reloc_paged *rrp = to_rrp(fobj);
+	unsigned int end_rel = rrp->num_relocs;
+	TEE_Result res = TEE_SUCCESS;
+	unsigned long *where = NULL;
+	unsigned int n = 0;
+
+	res = rop_load_page_helper(&rrp->rop, page_idx, va);
+	if (res)
+		return res;
+
+	/* Find the reloc index of the next page to tell when we're done */
+	for (n = page_idx + 1; n < fobj->num_pages; n++) {
+		if (rrp->page_reloc_idx[n] != UINT16_MAX) {
+			end_rel = rrp->page_reloc_idx[n];
+			break;
+		}
+	}
+
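+	/*
+	 * The page is verified, apply its relative relocations by adding
+	 * the load offset selected by ASLR to each recorded location.
+	 */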
+	for (n = rrp->page_reloc_idx[page_idx]; n < end_rel; n++) {
+		where = (void *)((vaddr_t)va + rrp->relocs[n]);
+		*where += boot_mmu_config.load_offset;
+	}
+
+	return TEE_SUCCESS;
+}
+KEEP_PAGER(rrp_load_page);
+
+static const struct fobj_ops ops_ro_reloc_paged __rodata_unpaged = {
+	.free = rrp_free,
+	.load_page = rrp_load_page,
+	.save_page = rop_save_page, /* Direct reuse */
+};
+#endif /*CFG_CORE_ASLR*/
+
+static const struct fobj_ops ops_locked_paged;
 
 struct fobj *fobj_locked_paged_alloc(unsigned int num_pages)
 {
@@ -315,7 +505,7 @@
 }
 KEEP_PAGER(lop_save_page);
 
-static struct fobj_ops ops_locked_paged __rodata_unpaged = {
+static const struct fobj_ops ops_locked_paged __rodata_unpaged = {
 	.free = lop_free,
 	.load_page = lop_load_page,
 	.save_page = lop_save_page,