CHROMIUM: mali: GPUCORE-17007 : Add reference counting for GPU VA regions
This commit adds reference counting for GPU VA regions.
This prevents a use-after-free issue when a GPU memory region
is freed while still being mapped on the CPU side.
BUG=b:122846190
TEST=boot daisy, minnie and kevin, login and run webgl aquarium
Change-Id: I4ff93087cb837d957edaac7ce95b818d59e8c3b7
Signed-off-by: Dominik Behr <dbehr@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/1409755
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
(cherry picked from commit 58f5700c44ca841822c3f3ac78b422f8f06f5117)
Reviewed-on: https://chromium-review.googlesource.com/1539802
Commit-Ready: Douglas Anderson <dianders@chromium.org>
Tested-by: Douglas Anderson <dianders@chromium.org>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
diff --git a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
index 8d71926..118511a 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
@@ -65,7 +65,7 @@
kbase_gpu_vm_lock(katom->kctx);
region = kbase_region_tracker_find_region_enclosing_address(katom->kctx,
katom->jc);
- if (!region || (region->flags & KBASE_REG_FREE))
+ if (kbase_is_region_invalid_or_free(region))
goto out_unlock;
page_array = kbase_get_cpu_phy_pages(region);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gwt.c b/drivers/gpu/arm/midgard/mali_kbase_gwt.c
index 0481f80..2d1263d 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gwt.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_gwt.c
@@ -35,7 +35,7 @@
int err = 0;
reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- if (reg->nr_pages && !(reg->flags & KBASE_REG_FREE) &&
+ if (reg->nr_pages && !kbase_is_region_invalid_or_free(reg) &&
(reg->flags & KBASE_REG_GPU_WR)) {
err = kbase_mmu_update_pages(kctx, reg->start_pfn,
kbase_get_gpu_phy_pages(reg),
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd.c b/drivers/gpu/arm/midgard/mali_kbase_jd.c
index 7a862bcc..4f6a346 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_jd.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_jd.c
@@ -282,7 +282,7 @@
katom->kctx,
res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
/* did we find a matching region object? */
- if (NULL == reg || (reg->flags & KBASE_REG_FREE)) {
+ if (kbase_is_region_invalid_or_free(reg)) {
/* roll back */
goto failed_loop;
}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.c b/drivers/gpu/arm/midgard/mali_kbase_mem.c
index 3eff83a..1c1f0d1 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mem.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem.c
@@ -362,7 +362,7 @@
merged_back = 1;
if (merged_front) {
/* We already merged with prev, free it */
- kbase_free_alloced_region(reg);
+ kfree(reg);
}
}
}
@@ -422,7 +422,7 @@
if (at_reg->start_pfn == start_pfn && at_reg->nr_pages == nr_pages) {
rb_replace_node(&(at_reg->rblink), &(new_reg->rblink),
reg_rbtree);
- kbase_free_alloced_region(at_reg);
+ kfree(at_reg);
}
/* New region replaces the start of the old one, so insert before. */
else if (at_reg->start_pfn == start_pfn) {
@@ -557,12 +557,11 @@
tmp = find_region_enclosing_range_rbtree(rbtree, gpu_pfn,
nr_pages);
- if (!tmp) {
- dev_warn(dev, "Enclosing region not found: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages);
+ if (kbase_is_region_invalid(tmp)) {
+ dev_warn(dev, "Enclosing region not found or invalid: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages);
err = -ENOMEM;
goto exit;
- }
- if (!(tmp->flags & KBASE_REG_FREE)) {
+ } else if (!kbase_is_region_free(tmp)) {
dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n",
tmp->start_pfn, tmp->flags,
tmp->nr_pages, gpu_pfn, nr_pages);
@@ -635,6 +634,16 @@
if (rbnode) {
rb_erase(rbnode, rbtree);
reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ WARN_ON(reg->va_refcnt != 1);
+ /* Reset the start_pfn - as the rbtree is being
+ * destroyed and we've already erased this region, there
+ * is no further need to attempt to remove it.
+ * This won't affect the cleanup if the region was
+ * being used as a sticky resource as the cleanup
+ * related to sticky resources anyway needs to be
+ * performed before the term of region tracker.
+ */
+ reg->start_pfn = 0;
kbase_free_alloced_region(reg);
}
} while (rbnode);
@@ -906,6 +915,7 @@
if (!new_reg)
return NULL;
+ new_reg->va_refcnt = 1;
new_reg->cpu_alloc = NULL; /* no alloc bound yet */
new_reg->gpu_alloc = NULL; /* no alloc bound yet */
new_reg->rbtree = rbtree;
@@ -964,6 +974,8 @@
if (WARN_ON(!kctx))
return;
+ if (WARN_ON(kbase_is_region_invalid(reg)))
+ return;
mutex_lock(&kctx->jit_evict_lock);
@@ -1012,10 +1024,12 @@
kbase_mem_phy_alloc_put(reg->cpu_alloc);
kbase_mem_phy_alloc_put(reg->gpu_alloc);
- /* To detect use-after-free in debug builds */
- KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE);
+
+ reg->flags |= KBASE_REG_VA_FREED;
+ kbase_va_region_alloc_put(kctx, reg);
+ } else {
+ kfree(reg);
}
- kfree(reg);
}
KBASE_EXPORT_TEST_API(kbase_free_alloced_region);
@@ -1159,7 +1173,6 @@
if (err)
return err;
- err = kbase_remove_va_region(reg);
return err;
}
@@ -1330,8 +1343,8 @@
/* find the region where the virtual address is contained */
reg = kbase_region_tracker_find_region_enclosing_address(kctx,
sset->mem_handle.basep.handle);
- if (!reg) {
- dev_warn(kctx->kbdev->dev, "Can't find region at VA 0x%016llX",
+ if (kbase_is_region_invalid_or_free(reg)) {
+ dev_warn(kctx->kbdev->dev, "Can't find a valid region at VA 0x%016llX",
sset->mem_handle.basep.handle);
err = -EINVAL;
goto out_unlock;
@@ -1525,7 +1538,7 @@
/* A real GPU va */
/* Validate the region */
reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
- if (!reg || (reg->flags & KBASE_REG_FREE)) {
+ if (kbase_is_region_invalid_or_free(reg)) {
dev_warn(kctx->kbdev->dev, "kbase_mem_free called with nonexistent gpu_addr 0x%llX",
gpu_addr);
err = -EINVAL;
@@ -3598,7 +3611,7 @@
/* Find the region */
reg = kbase_region_tracker_find_region_enclosing_address(
kctx, gpu_addr);
- if (NULL == reg || (reg->flags & KBASE_REG_FREE))
+ if (kbase_is_region_invalid_or_free(reg))
goto failed;
/* Allocate the metadata object */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.h b/drivers/gpu/arm/midgard/mali_kbase_mem.h
index 901f1cf..6e38aa0 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mem.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem.h
@@ -308,6 +308,23 @@
/* Memory has permanent kernel side mapping */
#define KBASE_REG_PERMANENT_KERNEL_MAPPING (1ul << 25)
+/* GPU VA region has been freed by the userspace, but still remains allocated
+ * due to the reference held by CPU mappings created on the GPU VA region.
+ *
+ * A region with this flag set has had kbase_gpu_munmap() called on it, but can
+ * still be looked-up in the region tracker as a non-free region. Hence must
+ * not create or update any more GPU mappings on such regions because they will
+ * not be unmapped when the region is finally destroyed.
+ *
+ * Since such regions are still present in the region tracker, new allocations
+ * attempted with BASE_MEM_SAME_VA might fail if their address intersects with
+ * a region with this flag set.
+ *
+ * In addition, this flag indicates the gpu_alloc member might no longer be valid
+ * e.g. in infinite cache simulation.
+ */
+#define KBASE_REG_VA_FREED (1ul << 26)
+
#define KBASE_REG_ZONE_SAME_VA KBASE_REG_ZONE(0)
/* only used with 32-bit clients */
@@ -338,8 +355,69 @@
u16 jit_usage_id;
/* The JIT bin this allocation came from */
u8 jit_bin_id;
+
+ int va_refcnt; /* number of users of this va */
};
+static inline bool kbase_is_region_free(struct kbase_va_region *reg)
+{
+ return (!reg || reg->flags & KBASE_REG_FREE);
+}
+
+static inline bool kbase_is_region_invalid(struct kbase_va_region *reg)
+{
+ return (!reg || reg->flags & KBASE_REG_VA_FREED);
+}
+
+static inline bool kbase_is_region_invalid_or_free(struct kbase_va_region *reg)
+{
+ /* Possibly not all functions that find regions would be using this
+ * helper, so they need to be checked when maintaining this function.
+ */
+ return (kbase_is_region_invalid(reg) || kbase_is_region_free(reg));
+}
+
+int kbase_remove_va_region(struct kbase_va_region *reg);
+static inline void kbase_region_refcnt_free(struct kbase_va_region *reg)
+{
+ /* If region was mapped then remove va region */
+ if (reg->start_pfn)
+ kbase_remove_va_region(reg);
+
+ /* To detect use-after-free in debug builds */
+ KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE);
+ kfree(reg);
+}
+
+static inline struct kbase_va_region *kbase_va_region_alloc_get(
+ struct kbase_context *kctx, struct kbase_va_region *region)
+{
+ lockdep_assert_held(&kctx->reg_lock);
+
+ WARN_ON(!region->va_refcnt);
+
+ /* non-atomic as kctx->reg_lock is held */
+ region->va_refcnt++;
+
+ return region;
+}
+
+static inline struct kbase_va_region *kbase_va_region_alloc_put(
+ struct kbase_context *kctx, struct kbase_va_region *region)
+{
+ lockdep_assert_held(&kctx->reg_lock);
+
+ WARN_ON(region->va_refcnt <= 0);
+ WARN_ON(region->flags & KBASE_REG_FREE);
+
+ /* non-atomic as kctx->reg_lock is held */
+ region->va_refcnt--;
+ if (!region->va_refcnt)
+ kbase_region_refcnt_free(region);
+
+ return NULL;
+}
+
/* Common functions */
static inline struct tagged_addr *kbase_get_cpu_phy_pages(
struct kbase_va_region *reg)
@@ -825,7 +903,6 @@
int kbase_add_va_region_rbtree(struct kbase_device *kbdev,
struct kbase_va_region *reg, u64 addr, size_t nr_pages,
size_t align);
-int kbase_remove_va_region(struct kbase_va_region *reg);
bool kbase_check_alloc_flags(unsigned long flags);
bool kbase_check_import_flags(unsigned long flags);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
index bc95a0f..f08b0c0 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
@@ -188,7 +188,7 @@
kctx, gpu_addr);
}
- if (reg == NULL || (reg->flags & KBASE_REG_FREE) != 0)
+ if (kbase_is_region_invalid_or_free(reg))
goto out_unlock;
kern_mapping = reg->cpu_alloc->permanent_map;
@@ -425,7 +425,7 @@
/* Validate the region */
reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
- if (!reg || (reg->flags & KBASE_REG_FREE))
+ if (kbase_is_region_invalid_or_free(reg))
goto out_unlock;
switch (query) {
@@ -791,7 +791,7 @@
/* Validate the region */
reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
- if (!reg || (reg->flags & KBASE_REG_FREE))
+ if (kbase_is_region_invalid_or_free(reg))
goto out_unlock;
/* Is the region being transitioning between not needed and needed? */
@@ -1266,10 +1266,8 @@
(ai[i].handle.basep.handle >> PAGE_SHIFT) << PAGE_SHIFT);
/* validate found region */
- if (!aliasing_reg)
- goto bad_handle; /* Not found */
- if (aliasing_reg->flags & KBASE_REG_FREE)
- goto bad_handle; /* Free region */
+ if (kbase_is_region_invalid_or_free(aliasing_reg))
+ goto bad_handle; /* Not found/already free */
if (aliasing_reg->flags & KBASE_REG_DONT_NEED)
goto bad_handle; /* Ephemeral region */
if (!(aliasing_reg->flags & KBASE_REG_GPU_CACHED))
@@ -1561,7 +1559,7 @@
/* Validate the region */
reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
- if (!reg || (reg->flags & KBASE_REG_FREE))
+ if (kbase_is_region_invalid_or_free(reg))
goto out_unlock;
KBASE_DEBUG_ASSERT(reg->cpu_alloc);
@@ -1699,6 +1697,7 @@
list_del(&map->mappings_list);
+ kbase_va_region_alloc_put(map->kctx, map->region);
kbase_gpu_vm_unlock(map->kctx);
kbase_mem_phy_alloc_put(map->alloc);
@@ -1853,7 +1852,7 @@
goto out;
}
- map->region = reg;
+ map->region = kbase_va_region_alloc_get(kctx, reg);
map->free_on_close = free_on_close;
map->kctx = kctx;
map->alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
@@ -2084,7 +2083,7 @@
reg = kbase_region_tracker_find_region_enclosing_address(kctx,
(u64)vma->vm_pgoff << PAGE_SHIFT);
- if (reg && !(reg->flags & KBASE_REG_FREE)) {
+ if (!kbase_is_region_invalid_or_free(reg)) {
/* will this mapping overflow the size of the region? */
if (nr_pages > (reg->nr_pages -
(vma->vm_pgoff - reg->start_pfn))) {
@@ -2273,7 +2272,7 @@
reg = kbase_region_tracker_find_region_enclosing_address(kctx,
gpu_addr);
- if (!reg || (reg->flags & KBASE_REG_FREE))
+ if (kbase_is_region_invalid_or_free(reg))
goto out_unlock;
/* check access permissions can be satisfied
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu.c b/drivers/gpu/arm/midgard/mali_kbase_mmu.c
index 3ba861d..e2d2914 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mmu.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu.c
@@ -227,7 +227,7 @@
/* Find region and check if it should be writable. */
region = kbase_region_tracker_find_region_enclosing_address(kctx,
faulting_as->fault_addr);
- if (!region || region->flags & KBASE_REG_FREE) {
+ if (kbase_is_region_invalid_or_free(region)) {
kbase_gpu_vm_unlock(kctx);
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
"Memory is not mapped on the GPU");
@@ -631,7 +631,7 @@
region = kbase_region_tracker_find_region_enclosing_address(kctx,
faulting_as->fault_addr);
- if (!region || region->flags & KBASE_REG_FREE) {
+ if (kbase_is_region_invalid_or_free(region)) {
kbase_gpu_vm_unlock(kctx);
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
"Memory is not mapped on the GPU");
diff --git a/drivers/gpu/arm/midgard/mali_kbase_softjobs.c b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
index b774c3b..0f66ac6 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
@@ -641,8 +641,8 @@
katom->kctx, user_extres.ext_resource &
~BASE_EXT_RES_ACCESS_EXCLUSIVE);
- if (NULL == reg || NULL == reg->gpu_alloc ||
- (reg->flags & KBASE_REG_FREE)) {
+ if (kbase_is_region_invalid_or_free(reg) ||
+ reg->gpu_alloc == NULL) {
ret = -EINVAL;
goto out_unlock;
}
diff --git a/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c b/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c
index 3aab51a..9cb0465 100644
--- a/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c
+++ b/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c
@@ -303,12 +303,15 @@
if ((PFN_DOWN(BASE_MEM_COOKIE_BASE) <= pgoff) &&
(PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) > pgoff)) {
int cookie = pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
- struct kbase_va_region *reg =
- kctx->pending_regions[cookie];
+ struct kbase_va_region *reg;
- if (!reg)
+ /* Need to hold gpu vm lock when using reg */
+ kbase_gpu_vm_lock(kctx);
+ reg = kctx->pending_regions[cookie];
+ if (!reg) {
+ kbase_gpu_vm_unlock(kctx);
return -EINVAL;
-
+ }
if (!(reg->flags & KBASE_REG_GPU_NX)) {
if (cpu_va_bits > gpu_pc_bits) {
align_offset = 1ULL << gpu_pc_bits;
@@ -331,6 +334,7 @@
} else if (reg->flags & KBASE_REG_GPU_VA_SAME_4GB_PAGE) {
is_same_4gb_page = true;
}
+ kbase_gpu_vm_unlock(kctx);
#ifndef CONFIG_64BIT
} else {
return current->mm->get_unmapped_area(filp, addr, len, pgoff,