ion: map cma heap to user with vm_insert_page, not remap_pfn_range

remap_pfn_range accepts any memory, even memory that is not backed by
struct page. Nowadays all ion heaps are backed by struct page, which
is why ion_heap_map_user makes the call like this (starting with a
struct page and converting it to a pfn):

remap_pfn_range(vma, addr, page_to_pfn(page), len,
                vma->vm_page_prot);

remap_pfn_range sets:
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
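
For context, here is a condensed sketch of that path (map_user_sketch
is a made-up name; the real ion_heap_map_user in ion_heap.c also
honours vma->vm_pgoff and partial segment lengths): it walks the
buffer's scatterlist and hands each physically contiguous chunk to
remap_pfn_range:

static int map_user_sketch(struct ion_buffer *buffer,
			   struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long len = min_t(unsigned long, sg->length,
					  vma->vm_end - addr);

		/* remap_pfn_range() marks the vma VM_IO | VM_PFNMAP */
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;

		addr += len;
		if (addr >= vma->vm_end)
			break;
	}

	return 0;
}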

Because of VM_IO and VM_PFNMAP, the mapped buffer can't be used by
the gasket driver. In userspace, mmap is called and the resulting
pointer is passed all the way to gasket's userspace part, which
ioctls into

drivers/staging/gasket/gasket_page_table.c gasket_perform_mapping

to map the user pointer for DMA. Gasket needs the backing physical
pages and uses get_user_pages_fast to get them. This fails because of
the check in

mm/gup.c check_vma_flags

if (vm_flags & (VM_IO | VM_PFNMAP))
	return -EFAULT;
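
To make the failure concrete, here is a minimal sketch of the kind of
pinning gasket_perform_mapping relies on (pin_user_buffer and its
error handling are illustrative, not gasket's actual code, and the
get_user_pages_fast call uses the older signature that takes a write
flag rather than gup_flags). On a vma set up by remap_pfn_range it
returns -EFAULT:

static int pin_user_buffer(unsigned long host_addr, unsigned long size,
			   struct page **pages)
{
	int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int got;

	/* Rejected by check_vma_flags() for VM_IO | VM_PFNMAP vmas */
	got = get_user_pages_fast(host_addr, nr_pages, 1 /* write */, pages);
	if (got < 0)
		return got;

	if (got != nr_pages) {
		/* Partial pin: drop the references we did get */
		while (got--)
			put_page(pages[got]);
		return -EFAULT;
	}

	return 0;
}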

This makes ION buffers allocated from the CMA heap unusable by
gasket without copying them to a user virtual address mapped with
different vm_flags.

So we need a user mapping with different vm_flags, which can be
achieved with vm_insert_page. It requires the caller to have a struct
page to work with, which we do. For CMA this mapping is
straightforward, as cma_alloc by definition allocates entire
physically contiguous pages. The scatter-gather table thus has only
one entry per ION buffer from the CMA heap; see ion_cma_allocate.
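
As an illustration of that last point (ion_cma_first_page is a
made-up helper, not part of this patch), the backing page of a
CMA-heap buffer can equally be read back from that single scatterlist
entry:

static struct page *ion_cma_first_page(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;

	/* The CMA heap describes the whole buffer with one sg entry */
	if (WARN_ON(table->nents != 1))
		return NULL;

	return sg_page(table->sgl);
}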

Further details:
https://lkml.org/lkml/2006/3/16/170

BUG: 142164990

Change-Id: I434cbc4e89d86275850cada705253453c1e3f071
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index fa3e4b7e..e1b83e6 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -102,10 +102,42 @@
 	kfree(buffer->sg_table);
 }
 
+int ion_cma_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+		      struct vm_area_struct *vma)
+{
+	struct page *pages = buffer->priv_virt;
+	unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+	unsigned long start = page_to_pfn(pages);
+	unsigned long end = start + nr_pages;
+	unsigned long pfn = start + vma->vm_pgoff;
+	unsigned long addr = vma->vm_start;
+	int ret;
+
+	/* VM_DONTEXPAND: disable vma merging and expanding with mremap()
+	 * VM_DONTDUMP: omit this vma from core dumps
+	 */
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+
+	while (pfn < end) {
+		struct page *page = pfn_to_page(pfn);
+		ret = vm_insert_page(vma, addr, page);
+		if (ret)
+			return ret;
+
+		pfn++;
+		addr += PAGE_SIZE;
+		if (addr >= vma->vm_end) {
+			break;
+		}
+	}
+
+	return 0;
+}
+
 static struct ion_heap_ops ion_cma_ops = {
 	.allocate = ion_cma_allocate,
 	.free = ion_cma_free,
-	.map_user = ion_heap_map_user,
+	.map_user = ion_cma_map_user,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
 };