ARM: make_coherent: avoid recalculating the pfn for the modified page

make_coherent() already knows the pfn of the page being modified, so pass
it down to adjust_pte() and do_adjust_pte() rather than recalculating it
from the PTE with pte_pfn().
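
For reference, a rough sketch of the resulting call chain (not the full
fault-armv.c code: the page table walk, locking and the
L_PTE_MT_MASK/shared_pte_mask check are elided, see the diff below):

  static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
  	unsigned long pfn, pte_t *ptep)
  {
  	/* the caller-supplied pfn now feeds flush_cache_page() and
  	 * outer_flush_range() directly; no pte_pfn(entry) lookup needed */
  }

  static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
  	unsigned long pfn)
  {
  	/* ... walk to the PTE and take the lock, then ... */
  	return do_adjust_pte(vma, address, pfn, pte);
  }

make_coherent() simply appends the pfn it already holds to its existing
adjust_pte() calls.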

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 8e9bc51..ae88f2c 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -37,7 +37,7 @@
  * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
 static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
-	pte_t *ptep)
+	unsigned long pfn, pte_t *ptep)
 {
 	pte_t entry = *ptep;
 	int ret;
@@ -52,7 +52,6 @@
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
 	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
-		unsigned long pfn = pte_pfn(entry);
 		flush_cache_page(vma, address, pfn);
 		outer_flush_range((pfn << PAGE_SHIFT),
 				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
@@ -65,7 +64,8 @@
 	return ret;
 }
 
-static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
+static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
+	unsigned long pfn)
 {
 	spinlock_t *ptl;
 	pgd_t *pgd;
@@ -90,7 +90,7 @@
 	pte = pte_offset_map_nested(pmd, address);
 	spin_lock(ptl);
 
-	ret = do_adjust_pte(vma, address, pte);
+	ret = do_adjust_pte(vma, address, pfn, pte);
 
 	spin_unlock(ptl);
 	pte_unmap_nested(pte);
@@ -127,11 +127,11 @@
 		if (!(mpnt->vm_flags & VM_MAYSHARE))
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
+		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
 	}
 	flush_dcache_mmap_unlock(mapping);
 	if (aliases)
-		adjust_pte(vma, addr);
+		adjust_pte(vma, addr, pfn);
 	else
 		flush_cache_page(vma, addr, pfn);
 }