core: fix tee_pager_release_one_phys() assert

Prior to this patch it was assumed in tee_pager_release_one_phys() that
a locked fobj would not span multiple page directories. This is not
correct since it depends on the base address and size of the locked
fobj: if the base address is close enough to the end of a page
directory, the fobj spills over into the next one. With CFG_CORE_ASLR=y
this is bound to happen sooner or later, even if everything seems to
work with CFG_CORE_ASLR=n. This patch fixes this by instead counting
the number of areas which use the pmem to be released; the count must
be exactly one.
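
For illustration only (not part of the patch): with 4 KiB pages and a
page directory covering 2 MiB (both values assumed here purely for the
example), a two-page locked fobj based at the last page of one page
directory straddles into the next:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE  0x1000u    /* 4 KiB small page (assumed) */
    #define PGDIR_SIZE 0x200000u  /* pgdir coverage (assumed) */

    /* True if [base, base + size) crosses a pgdir boundary */
    static bool spans_pgdirs(uintptr_t base, size_t size)
    {
            return base / PGDIR_SIZE !=
                   (base + size - 1) / PGDIR_SIZE;
    }

    int main(void)
    {
            /* Based at the last page of a pgdir: spans two pgdirs */
            printf("%d\n", spans_pgdirs(PGDIR_SIZE - PAGE_SIZE,
                                        2 * PAGE_SIZE)); /* 1 */
            /* Same size based at the start of a pgdir: fits in one */
            printf("%d\n", spans_pgdirs(0, 2 * PAGE_SIZE)); /* 0 */
            return 0;
    }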

Acked-by: Jerome Forissier <jerome@forissier.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
diff --git a/core/arch/arm/mm/tee_pager.c b/core/arch/arm/mm/tee_pager.c
index f08166d..2d3bb5c 100644
--- a/core/arch/arm/mm/tee_pager.c
+++ b/core/arch/arm/mm/tee_pager.c
@@ -1231,6 +1231,19 @@
 	}
 }
 
+static unsigned int __maybe_unused
+num_areas_with_pmem(struct tee_pager_pmem *pmem)
+{
+	struct tee_pager_area *a = NULL;
+	unsigned int num_matches = 0;
+
+	TAILQ_FOREACH(a, &pmem->fobj->areas, fobj_link)
+		if (pmem_is_covered_by_area(pmem, a))
+			num_matches++;
+
+	return num_matches;
+}
+
 /*
  * Find mapped pmem, hide and move to pageble pmem.
  * Return false if page was not mapped, and true if page was mapped.
@@ -1248,12 +1261,11 @@
 			continue;
 
 		/*
-		 * Locked pages may not be shared, these two asserts checks
-		 * that there's only a signed area recorded with this pmem.
+		 * Locked pages may not be shared. Assert that this pmem
+		 * is used by exactly one area, since we're about to
+		 * unmap it.
 		 */
-		assert(TAILQ_FIRST(&pmem->fobj->areas) == area);
-		assert(TAILQ_LAST(&pmem->fobj->areas,
-				  tee_pager_area_head) == area);
+		assert(num_areas_with_pmem(pmem) == 1);
 
 		tblidx = pmem_get_area_tblidx(pmem, area);
 		area_set_entry(area, tblidx, 0, 0);