core: vm_set_prot() and friends work across VM regions

Updates vm_set_prot() and friends to work on memory ranges which don't
necessarily align with the underlying VM regions. VM regions are split
as needed to perform an operation and, once the operation has
completed, the VM regions in the supplied memory range are merged
again if possible. The only restriction on a supplied memory range is
that the already present mapping must be compatible with the requested
change.
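
For example (a hedged sketch, not part of the patch; uctx and va are
assumed to come from an earlier vm_map() of at least two pages), a
caller may now change the protection of a single page in the middle
of a mapping that was created as one region:

    /* Make the second page of the mapping read-only */
    res = vm_set_prot(uctx, va + SMALL_PAGE_SIZE, SMALL_PAGE_SIZE,
                      TEE_MATTR_UR | TEE_MATTR_PR);

The region is split around that page before the attributes are
updated, and merge_vm_range() afterwards re-merges whatever
neighbouring regions remain compatible.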

Note that this also affects the pager, which splits and merges pager
areas as needed.
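
For reference, the merge criterion used by merge_vm_range() can be
condensed as below; can_merge() is a hypothetical helper written for
illustration, not part of the patch:

    static bool can_merge(const struct vm_region *r,
                          const struct vm_region *rn)
    {
        return r->va + r->size == rn->va &&
               r->mobj == rn->mobj && r->flags == rn->flags &&
               r->attr == rn->attr &&
               r->offset + r->size == rn->offset;
    }

The pager applies the same idea to its areas, additionally requiring
matching fobj, type and pgt as well as contiguous fobj_pgoffs.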

Acked-by: Pipat Methavanitpong <pipat.methavanitpong@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
diff --git a/core/arch/arm/include/mm/tee_pager.h b/core/arch/arm/include/mm/tee_pager.h
index 9805c5f..d3463c3 100644
--- a/core/arch/arm/include/mm/tee_pager.h
+++ b/core/arch/arm/include/mm/tee_pager.h
@@ -152,6 +152,28 @@
 }
 #endif
 
+#ifdef CFG_PAGED_USER_TA
+TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va);
+#else
+static inline TEE_Result
+tee_pager_split_um_region(struct user_mode_ctx *uctx __unused,
+			  vaddr_t va __unused)
+{
+	return TEE_ERROR_NOT_SUPPORTED;
+}
+#endif
+
+#ifdef CFG_PAGED_USER_TA
+void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
+			       size_t len);
+#else
+static inline void
+tee_pager_merge_um_region(struct user_mode_ctx *uctx __unused,
+			  vaddr_t va __unused, size_t len __unused)
+{
+}
+#endif
+
 /*
  * tee_pager_rem_uma_areas() - Remove all user ta areas
  * @uctx:	user mode context
diff --git a/core/arch/arm/mm/tee_mmu.c b/core/arch/arm/mm/tee_mmu.c
index 37e25b7..cdfd6a1 100644
--- a/core/arch/arm/mm/tee_mmu.c
+++ b/core/arch/arm/mm/tee_mmu.c
@@ -307,46 +307,213 @@
 	return res;
 }
 
-static TEE_Result find_exact_vm_region(struct vm_info *vm_info, vaddr_t va,
-				       size_t len, struct vm_region **r_ret)
+static struct vm_region *find_vm_region(struct vm_info *vm_info, vaddr_t va)
 {
 	struct vm_region *r = NULL;
 
-	TAILQ_FOREACH(r, &vm_info->regions, link) {
-		if (core_is_buffer_intersect(r->va, r->size, va, len)) {
-			if (r->va != va || r->size != len)
-				return TEE_ERROR_BAD_PARAMETERS;
+	TAILQ_FOREACH(r, &vm_info->regions, link)
+		if (va >= r->va && va < r->va + r->size)
+			return r;
 
-			*r_ret = r;
-			return TEE_SUCCESS;
-		}
+	return NULL;
+}
+
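+/*
+ * Returns true if the region chain starting at r0 covers
+ * [va, va + len) without holes and, if cmp_regs is given, each
+ * neighbouring pair of regions is accepted by it.
+ */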
+static bool va_range_is_contiguous(struct vm_region *r0, vaddr_t va,
+				   size_t len,
+				   bool (*cmp_regs)(const struct vm_region *r0,
+						    const struct vm_region *r,
+						    const struct vm_region *rn))
+{
+	struct vm_region *r = r0;
+
+	while (true) {
+		struct vm_region *r_next = TAILQ_NEXT(r, link);
+		vaddr_t r_end_va = r->va + r->size;
+
+		if (r_end_va >= va + len)
+			return true;
+		if (!r_next)
+			return false;
+		if (r_end_va != r_next->va)
+			return false;
+		if (cmp_regs && !cmp_regs(r0, r, r_next))
+			return false;
+		r = r_next;
+	}
+}
+
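+/*
+ * Splits r in two at va: r keeps [r->va, va) while a new region,
+ * holding its own reference to the mobj, takes over the remainder.
+ */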
+static TEE_Result split_vm_region(struct user_mode_ctx *uctx,
+				  struct vm_region *r, vaddr_t va)
+{
+	struct vm_region *r2 = NULL;
+	size_t diff = va - r->va;
+
+	assert(diff && diff < r->size);
+
+	r2 = calloc(1, sizeof(*r2));
+	if (!r2)
+		return TEE_ERROR_OUT_OF_MEMORY;
+
+	if (mobj_is_paged(r->mobj)) {
+		TEE_Result res = tee_pager_split_um_region(uctx, va);
+
+		if (res) {
+			free(r2);
+			return res;
+		}
 	}
 
-	return TEE_ERROR_ITEM_NOT_FOUND;
+	r2->mobj = mobj_get(r->mobj);
+	r2->offset = r->offset + diff;
+	r2->va = va;
+	r2->size = r->size - diff;
+	r2->attr = r->attr;
+	r2->flags = r->flags;
+
+	r->size = diff;
+
+	TAILQ_INSERT_AFTER(&uctx->vm_info.regions, r, r2, link);
+
+	return TEE_SUCCESS;
+}
+
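+/*
+ * Makes sure that [va, va + len) begins and ends on region
+ * boundaries, splitting regions as needed, and returns the first
+ * region of the range via r0_ret.
+ */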
+static TEE_Result split_vm_range(struct user_mode_ctx *uctx, vaddr_t va,
+				 size_t len,
+				 bool (*cmp_regs)(const struct vm_region *r0,
+						  const struct vm_region *r,
+						  const struct vm_region *rn),
+				 struct vm_region **r0_ret)
+{
+	TEE_Result res = TEE_SUCCESS;
+	struct vm_region *r = NULL;
+
+	if ((va | len) & SMALL_PAGE_MASK)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	/*
+	 * Find first vm_region in range and check that the entire range is
+	 * contiguous.
+	 */
+	r = find_vm_region(&uctx->vm_info, va);
+	if (!r || !va_range_is_contiguous(r, va, len, cmp_regs))
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	/*
+	 * If needed, split regions so that va and len cover only
+	 * complete regions.
+	 */
+	if (va != r->va) {
+		res = split_vm_region(uctx, r, va);
+		if (res)
+			return res;
+		r = TAILQ_NEXT(r, link);
+	}
+
+	*r0_ret = r;
+	r = find_vm_region(&uctx->vm_info, va + len - 1);
+	if (!r)
+		return TEE_ERROR_BAD_PARAMETERS;
+	if (va + len != r->va + r->size) {
+		res = split_vm_region(uctx, r, va + len);
+		if (res)
+			return res;
+	}
+
+	return TEE_SUCCESS;
+}
+
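+/* Merges adjacent compatible regions in and around [va, va + len). */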
+static void merge_vm_range(struct user_mode_ctx *uctx, vaddr_t va, size_t len)
+{
+	struct vm_region *r_next = NULL;
+	struct vm_region *r = NULL;
+
+	tee_pager_merge_um_region(uctx, va, len);
+
+	for (r = TAILQ_FIRST(&uctx->vm_info.regions);; r = r_next) {
+		r_next = TAILQ_NEXT(r, link);
+		if (!r_next)
+			return;
+
+		/* Try merging with the region just before va */
+		if (r->va + r->size < va)
+			continue;
+
+		/*
+		 * If r->va is well past our range we're done.
+		 * Note that if it's just the page after our range we'll
+		 * try to merge.
+		 */
+		if (r->va > va + len)
+			return;
+
+		if (r->va + r->size != r_next->va)
+			continue;
+		if (r->mobj != r_next->mobj ||
+		    r->flags != r_next->flags ||
+		    r->attr != r_next->attr)
+			continue;
+		if (r->offset + r->size != r_next->offset)
+			continue;
+
+		TAILQ_REMOVE(&uctx->vm_info.regions, r_next, link);
+		r->size += r_next->size;
+		mobj_put(r_next->mobj);
+		free(r_next);
+		r_next = r;
+	}
+}
+
+static bool cmp_region_for_remap(const struct vm_region *r0,
+				 const struct vm_region *r,
+				 const struct vm_region *rn)
+{
+	/*
+	 * All the essentials have to match for a remap to make sense:
+	 * mobj/fobj, attr and flags must be identical and the offsets
+	 * must be contiguous.
+	 *
+	 * Note that vm_remap() depends on mobj/fobj being the same.
+	 */
+	return r0->flags == r->flags && r0->attr == r->attr &&
+	       r0->mobj == r->mobj && rn->offset == r->offset + r->size;
 }
 
 TEE_Result vm_remap(struct user_mode_ctx *uctx, vaddr_t *new_va, vaddr_t old_va,
 		    size_t len, size_t pad_begin, size_t pad_end)
 {
+	struct vm_region_head regs = TAILQ_HEAD_INITIALIZER(regs);
 	TEE_Result res = TEE_SUCCESS;
+	struct vm_region *r0 = NULL;
 	struct vm_region *r = NULL;
+	struct vm_region *r_next = NULL;
+	struct vm_region *r_last = NULL;
+	struct vm_region *r_first = NULL;
 	struct fobj *fobj = NULL;
+	vaddr_t next_va = 0;
 
 	assert(thread_get_tsd()->ctx == &uctx->ctx);
 
-	res = find_exact_vm_region(&uctx->vm_info, old_va, len, &r);
+	if (!len || ((len | old_va) & SMALL_PAGE_MASK))
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	res = split_vm_range(uctx, old_va, len, cmp_region_for_remap, &r0);
 	if (res)
 		return res;
 
-	if (mobj_is_paged(r->mobj)) {
-		fobj = mobj_get_fobj(r->mobj);
+	if (mobj_is_paged(r0->mobj)) {
+		fobj = mobj_get_fobj(r0->mobj);
 		if (!fobj)
-			return TEE_ERROR_GENERIC;
-		tee_pager_rem_um_region(uctx, r->va, r->size);
+			panic();
 	}
-	maybe_free_pgt(uctx, r);
 
-	TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
+	for (r = r0; r; r = r_next) {
+		if (r->va + r->size > old_va + len)
+			break;
+		r_next = TAILQ_NEXT(r, link);
+		if (fobj)
+			tee_pager_rem_um_region(uctx, r->va, r->size);
+		maybe_free_pgt(uctx, r);
+		TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
+		TAILQ_INSERT_TAIL(&regs, r, link);
+	}
 
 	/*
 	 * Synchronize change to translation tables. Even though the pager
@@ -354,99 +521,163 @@
 	 */
 	tee_mmu_set_ctx(&uctx->ctx);
 
-	r->va = *new_va;
-	res = umap_add_region(&uctx->vm_info, r, pad_begin, pad_end);
-	if (res)
-		goto err_restore_map;
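+	/*
+	 * Reinsert the detached regions at the new location, keeping
+	 * them virtually contiguous and in their original order.
+	 */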
+	r_first = TAILQ_FIRST(&regs);
+	while (!TAILQ_EMPTY(&regs)) {
+		r = TAILQ_FIRST(&regs);
+		TAILQ_REMOVE(&regs, r, link);
+		if (r_last) {
+			r->va = r_last->va + r_last->size;
+			res = umap_add_region(&uctx->vm_info, r, 0, 0);
+		} else {
+			r->va = *new_va;
+			res = umap_add_region(&uctx->vm_info, r, pad_begin,
+					      pad_end + len - r->size);
+		}
+		if (!res)
+			r_last = r;
+		if (!res)
+			res = alloc_pgt(uctx);
+		if (fobj && !res)
+			res = tee_pager_add_um_area(uctx, r->va, fobj, r->attr);
 
-	res = alloc_pgt(uctx);
-	if (res)
-		goto err_restore_map_rem_reg;
+		if (res) {
+			/*
+			 * Something went wrong; move all the recently
+			 * added regions back to regs for later
+			 * reinsertion at the original spot.
+			 */
+			struct vm_region *r_tmp = NULL;
 
-	if (fobj) {
-		res = tee_pager_add_um_area(uctx, r->va, fobj, r->attr);
-		if (res)
-			goto err_restore_map_rem_reg;
-		fobj_put(fobj);
+			if (r != r_last) {
+				/*
+				 * umap_add_region() failed, move r back to
+				 * regs before all the rest are moved back.
+				 */
+				TAILQ_INSERT_HEAD(&regs, r, link);
+			}
+			for (r = r_first; r_last && r != r_last; r = r_next) {
+				r_next = TAILQ_NEXT(r, link);
+				TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
+				if (r_tmp)
+					TAILQ_INSERT_AFTER(&regs, r_tmp, r,
+							   link);
+				else
+					TAILQ_INSERT_HEAD(&regs, r, link);
+				r_tmp = r;
+			}
+
+			goto err_restore_map;
+		}
 	}
 
+	fobj_put(fobj);
+
 	tee_mmu_set_ctx(&uctx->ctx);
-	*new_va = r->va;
+	*new_va = r_first->va;
 
 	return TEE_SUCCESS;
 
-err_restore_map_rem_reg:
-	TAILQ_REMOVE(&uctx->vm_info.regions, r, link);
 err_restore_map:
-	r->va = old_va;
-	if (umap_add_region(&uctx->vm_info, r, 0, 0))
-		panic("Cannot restore mapping");
-	if (alloc_pgt(uctx))
-		panic("Cannot restore mapping");
-	if (fobj) {
-		if (tee_pager_add_um_area(uctx, r->va, fobj, r->attr))
+	next_va = old_va;
+	while (!TAILQ_EMPTY(&regs)) {
+		r = TAILQ_FIRST(&regs);
+		TAILQ_REMOVE(&regs, r, link);
+		r->va = next_va;
+		next_va += r->size;
+		if (umap_add_region(&uctx->vm_info, r, 0, 0))
 			panic("Cannot restore mapping");
-		fobj_put(fobj);
+		if (alloc_pgt(uctx))
+			panic("Cannot restore mapping");
+		if (fobj && tee_pager_add_um_area(uctx, r->va, fobj, r->attr))
+			panic("Cannot restore mapping");
 	}
-
+	fobj_put(fobj);
 	tee_mmu_set_ctx(&uctx->ctx);
 
 	return res;
 }
 
+static bool cmp_region_for_get_flags(const struct vm_region *r0,
+				     const struct vm_region *r,
+				     const struct vm_region *rn __unused)
+{
+	return r0->flags == r->flags;
+}
+
 TEE_Result vm_get_flags(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
 			uint32_t *flags)
 {
-	TEE_Result res = TEE_SUCCESS;
 	struct vm_region *r = NULL;
 
-	res = find_exact_vm_region(&uctx->vm_info, va, len, &r);
-	if (!res)
-		*flags = r->flags;
+	if (!len || ((len | va) & SMALL_PAGE_MASK))
+		return TEE_ERROR_BAD_PARAMETERS;
 
-	return res;
+	r = find_vm_region(&uctx->vm_info, va);
+	if (!r)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	if (!va_range_is_contiguous(r, va, len, cmp_region_for_get_flags))
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	*flags = r->flags;
+
+	return TEE_SUCCESS;
 }
 
 TEE_Result vm_set_prot(struct user_mode_ctx *uctx, vaddr_t va, size_t len,
 		       uint32_t prot)
 {
 	TEE_Result res = TEE_SUCCESS;
+	struct vm_region *r0 = NULL;
 	struct vm_region *r = NULL;
 	bool was_writeable = false;
+	bool need_sync = false;
 
 	assert(thread_get_tsd()->ctx == &uctx->ctx);
 
-	if (prot & ~TEE_MATTR_PROT_MASK)
+	if (prot & ~TEE_MATTR_PROT_MASK || !len)
 		return TEE_ERROR_BAD_PARAMETERS;
 
-	/*
-	 * To keep thing simple: specified va and len have to match exactly
-	 * with an already registered region.
-	 */
-	res = find_exact_vm_region(&uctx->vm_info, va, len, &r);
+	res = split_vm_range(uctx, va, len, NULL, &r0);
 	if (res)
 		return res;
 
-	if ((r->attr & TEE_MATTR_PROT_MASK) == prot)
-		return TEE_SUCCESS;
+	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
+		if (r->va + r->size > va + len)
+			break;
+		if (r->attr & (TEE_MATTR_UW | TEE_MATTR_PW))
+			was_writeable = true;
 
-	was_writeable = r->attr & (TEE_MATTR_UW | TEE_MATTR_PW);
+		if (!mobj_is_paged(r->mobj))
+			need_sync = true;
 
-	r->attr &= ~TEE_MATTR_PROT_MASK;
-	r->attr |= prot;
+		r->attr &= ~TEE_MATTR_PROT_MASK;
+		r->attr |= prot;
+	}
 
-	if (mobj_is_paged(r->mobj)) {
-		if (!tee_pager_set_um_area_attr(uctx, va, len, prot))
-			return TEE_ERROR_GENERIC;
-	} else if ((prot & TEE_MATTR_UX) && was_writeable) {
+	if (need_sync) {
 		/* Synchronize changes to translation tables */
 		tee_mmu_set_ctx(&uctx->ctx);
-
-		cache_op_inner(DCACHE_AREA_CLEAN,
-			       (void *)va, len);
-		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
 	}
 
+	for (r = r0; r; r = TAILQ_NEXT(r, link)) {
+		if (r->va + r->size > va + len)
+			break;
+		if (mobj_is_paged(r->mobj)) {
+			if (!tee_pager_set_um_area_attr(uctx, r->va, r->size,
+							prot))
+				panic();
+		} else if (was_writeable) {
+			cache_op_inner(DCACHE_AREA_CLEAN, (void *)r->va,
+				       r->size);
+		}
+	}
+
+	if (need_sync && was_writeable)
+		cache_op_inner(ICACHE_INVALIDATE, NULL, 0);
+
+	merge_vm_range(uctx, va, len);
+
 	return TEE_SUCCESS;
 }
 
@@ -461,22 +692,27 @@
 {
 	TEE_Result res = TEE_SUCCESS;
 	struct vm_region *r = NULL;
+	struct vm_region *r_next = NULL;
+	vaddr_t r_end_va = 0;
 
 	assert(thread_get_tsd()->ctx == &uctx->ctx);
 
-	/*
-	 * To keep thing simple: specified va and len has to match exactly
-	 * with an already registered region.
-	 */
-	res = find_exact_vm_region(&uctx->vm_info, va,
-				   ROUNDUP(len, SMALL_PAGE_SIZE), &r);
+	if (!len || ((len | va) & SMALL_PAGE_MASK))
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	res = split_vm_range(uctx, va, len, NULL, &r);
 	if (res)
 		return res;
 
-	if (mobj_is_paged(r->mobj))
-		tee_pager_rem_um_region(uctx, r->va, r->size);
-	maybe_free_pgt(uctx, r);
-	umap_remove_region(&uctx->vm_info, r);
+	while (true) {
+		r_next = TAILQ_NEXT(r, link);
+		/* Save the end va before umap_remove_region() frees r */
+		r_end_va = r->va + r->size;
+		if (mobj_is_paged(r->mobj))
+			tee_pager_rem_um_region(uctx, r->va, r->size);
+		maybe_free_pgt(uctx, r);
+		umap_remove_region(&uctx->vm_info, r);
+		if (!r_next || r_end_va == va + len)
+			break;
+		r = r_next;
+	}
 
 	/*
 	 * Synchronize change to translation tables. Even though the pager
diff --git a/core/arch/arm/mm/tee_pager.c b/core/arch/arm/mm/tee_pager.c
index 7b0c48d..ee76270 100644
--- a/core/arch/arm/mm/tee_pager.c
+++ b/core/arch/arm/mm/tee_pager.c
@@ -500,16 +500,21 @@
 	return (void *)core_mmu_idx2va(ti, idx);
 }
 
-static void area_insert_tail(struct tee_pager_area *area)
+static void area_insert(struct tee_pager_area_head *head,
+			struct tee_pager_area *area,
+			struct tee_pager_area *a_prev)
 {
 	uint32_t exceptions = pager_lock_check_stack(8);
 
-	TAILQ_INSERT_TAIL(&tee_pager_area_head, area, link);
+	if (a_prev)
+		TAILQ_INSERT_AFTER(head, a_prev, area, link);
+	else
+		TAILQ_INSERT_HEAD(head, area, link);
 	TAILQ_INSERT_TAIL(&area->fobj->areas, area, fobj_link);
 
 	pager_unlock(exceptions);
 }
-KEEP_PAGER(area_insert_tail);
+KEEP_PAGER(area_insert);
 
 void tee_pager_add_core_area(vaddr_t base, enum tee_pager_area_type type,
 			     struct fobj *fobj)
@@ -556,7 +561,7 @@
 		area->base = b;
 		area->size = s2;
 		area->flags = flags;
-		area_insert_tail(area);
+		area_insert(&tee_pager_area_head, area, NULL);
 
 		b += s2;
 		s -= s2;
@@ -696,7 +701,8 @@
 static TEE_Result pager_add_um_area(struct user_mode_ctx *uctx, vaddr_t base,
 				    struct fobj *fobj, uint32_t prot)
 {
-	struct tee_pager_area *area;
+	struct tee_pager_area *a_prev = NULL;
+	struct tee_pager_area *area = NULL;
 	vaddr_t b = base;
 	size_t fobj_pgoffs = 0;
 	size_t s = fobj->num_pages * SMALL_PAGE_SIZE;
@@ -708,12 +714,20 @@
 		TAILQ_INIT(uctx->areas);
 	}
 
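+	/*
+	 * Find the insertion point while rejecting overlaps with
+	 * existing areas; the list is kept sorted by base address.
+	 */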
+	area = TAILQ_FIRST(uctx->areas);
+	while (area) {
+		if (core_is_buffer_intersect(b, s, area->base,
+					     area->size))
+			return TEE_ERROR_BAD_PARAMETERS;
+		if (b < area->base)
+			break;
+		a_prev = area;
+		area = TAILQ_NEXT(area, link);
+	}
+
 	while (s) {
 		size_t s2;
 
-		if (find_area(uctx->areas, b))
-			return TEE_ERROR_BAD_PARAMETERS;
-
 		s2 = MIN(CORE_MMU_PGDIR_SIZE - (b & CORE_MMU_PGDIR_MASK), s);
 		area = calloc(1, sizeof(*area));
 		if (!area)
@@ -727,8 +741,9 @@
 		area->size = s2;
 		area->flags = prot;
 
-		TAILQ_INSERT_TAIL(uctx->areas, area, link);
-		TAILQ_INSERT_TAIL(&fobj->areas, area, fobj_link);
+		area_insert(uctx->areas, area, a_prev);
+
+		a_prev = area;
 		b += s2;
 		s -= s2;
 		fobj_pgoffs += s2 / SMALL_PAGE_SIZE;
@@ -809,6 +824,107 @@
 	return TEE_SUCCESS;
 }
 
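+/*
+ * Splits area in two at va with a2, allocated by the caller, taking
+ * over the upper part; the lists are updated with the pager lock held.
+ */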
+static void split_area(struct tee_pager_area_head *area_head,
+		       struct tee_pager_area *area, struct tee_pager_area *a2,
+		       vaddr_t va)
+{
+	uint32_t exceptions = pager_lock_check_stack(64);
+	size_t diff = va - area->base;
+
+	a2->fobj = fobj_get(area->fobj);
+	a2->fobj_pgoffs = area->fobj_pgoffs + diff / SMALL_PAGE_SIZE;
+	a2->type = area->type;
+	a2->flags = area->flags;
+	a2->base = va;
+	a2->size = area->size - diff;
+	a2->pgt = area->pgt;
+	area->size = diff;
+
+	TAILQ_INSERT_AFTER(area_head, area, a2, link);
+	TAILQ_INSERT_AFTER(&area->fobj->areas, area, a2, fobj_link);
+
+	pager_unlock(exceptions);
+}
+KEEP_PAGER(split_area);
+
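+/* Splits the area containing va, a no-op if va is on an area boundary. */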
+TEE_Result tee_pager_split_um_region(struct user_mode_ctx *uctx, vaddr_t va)
+{
+	struct tee_pager_area *area = NULL;
+	struct tee_pager_area *a2 = NULL;
+
+	if (va & SMALL_PAGE_MASK)
+		return TEE_ERROR_BAD_PARAMETERS;
+
+	TAILQ_FOREACH(area, uctx->areas, link) {
+		if (va == area->base || va == area->base + area->size)
+			return TEE_SUCCESS;
+		if (va > area->base && va < area->base + area->size) {
+			a2 = calloc(1, sizeof(*a2));
+			if (!a2)
+				return TEE_ERROR_OUT_OF_MEMORY;
+			split_area(uctx->areas, area, a2, va);
+			return TEE_SUCCESS;
+		}
+	}
+
+	return TEE_SUCCESS;
+}
+
+static void merge_area_with_next(struct tee_pager_area_head *area_head,
+				 struct tee_pager_area *a,
+				 struct tee_pager_area *a_next)
+{
+	uint32_t exceptions = pager_lock_check_stack(64);
+
+	TAILQ_REMOVE(area_head, a_next, link);
+	TAILQ_REMOVE(&a_next->fobj->areas, a_next, fobj_link);
+	a->size += a_next->size;
+
+	pager_unlock(exceptions);
+}
+KEEP_PAGER(merge_area_with_next);
+
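+/* Merges adjacent compatible areas in and around [va, va + len). */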
+void tee_pager_merge_um_region(struct user_mode_ctx *uctx, vaddr_t va,
+			       size_t len)
+{
+	struct tee_pager_area *a_next = NULL;
+	struct tee_pager_area *a = NULL;
+
+	if ((va | len) & SMALL_PAGE_MASK)
+		return;
+
+	for (a = TAILQ_FIRST(uctx->areas);; a = a_next) {
+		a_next = TAILQ_NEXT(a, link);
+		if (!a_next)
+			return;
+
+		/* Try merging with the area just before va */
+		if (a->base + a->size < va)
+			continue;
+
+		/*
+		 * If a->base is well past our range we're done.
+		 * Note that if it's just the page after our range we'll
+		 * try to merge.
+		 */
+		if (a->base > va + len)
+			return;
+
+		if (a->base + a->size != a_next->base)
+			continue;
+		if (a->fobj != a_next->fobj || a->type != a_next->type ||
+		    a->flags != a_next->flags || a->pgt != a_next->pgt)
+			continue;
+		if (a->fobj_pgoffs + a->size / SMALL_PAGE_SIZE !=
+		    a_next->fobj_pgoffs)
+			continue;
+
+		merge_area_with_next(uctx->areas, a, a_next);
+		free_area(a_next);
+		a_next = a;
+	}
+}
+
 static void rem_area(struct tee_pager_area_head *area_head,
 		     struct tee_pager_area *area)
 {