[Cosmic][PATCH 3/4] mm, hugetlbfs: rename address to haddr in hugetlb_cow()

Joseph Salisbury joseph.salisbury at canonical.com
Mon Sep 10 17:19:08 UTC 2018


From: Huang Ying <ying.huang at intel.com>

BugLink: https://bugs.launchpad.net/bugs/1730836

To take better advantage of the general huge page copying optimization, the
target subpage address will be passed to hugetlb_cow(), and then on to
copy_user_huge_page().  So we will use both the target subpage address and
the huge page size aligned address in hugetlb_cow().  To distinguish between
them, "haddr" is used for the huge page size aligned address, consistent
with the Transparent Huge Page naming convention.

Currently only the huge page size aligned address is used in hugetlb_cow(),
so this patch renames "address" to "haddr" in hugetlb_cow().  The next patch
will make hugetlb_cow() use the target subpage address as well.

This patch is just code cleanup, with no functional changes.
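For readers unfamiliar with the two addresses, here is a minimal user-space
sketch (not part of the patch) of how the huge page size aligned address
relates to the target subpage address.  It assumes 2 MB huge pages;
HPAGE_SIZE and HPAGE_MASK are illustrative stand-ins for the kernel's
huge_page_size(h) and huge_page_mask(h), and the sample address is arbitrary:

/* Illustrative only: mirrors "haddr = address & huge_page_mask(h)". */
#include <stdio.h>

#define HPAGE_SIZE	(2UL << 20)		/* assumed 2 MB huge page */
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))	/* analogue of huge_page_mask(h) */

int main(void)
{
	unsigned long address = 0x7f43a1234000UL;	/* target subpage address */
	unsigned long haddr = address & HPAGE_MASK;	/* huge page aligned address */

	printf("address: %#lx\n", address);
	printf("haddr:   %#lx\n", haddr);
	return 0;
}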

Link: http://lkml.kernel.org/r/20180524005851.4079-4-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang at intel.com>
Suggested-by: Mike Kravetz <mike.kravetz at oracle.com>
Suggested-by: Michal Hocko <mhocko at suse.com>
Reviewed-by: Mike Kravetz <mike.kravetz at oracle.com>
Cc: David Rientjes <rientjes at google.com>
Cc: Andrea Arcangeli <aarcange at redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov at linux.intel.com>
Cc: Andi Kleen <andi.kleen at intel.com>
Cc: Jan Kara <jack at suse.cz>
Cc: Matthew Wilcox <willy at infradead.org>
Cc: Hugh Dickins <hughd at google.com>
Cc: Minchan Kim <minchan at kernel.org>
Cc: Shaohua Li <shli at fb.com>
Cc: Christopher Lameter <cl at linux.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar at linux.vnet.ibm.com>
Cc: Punit Agrawal <punit.agrawal at arm.com>
Cc: Anshuman Khandual <khandual at linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
(cherry picked from commit 5b7a1d406062449a4d51aea1df37a73285ced1dc)
Signed-off-by: Joseph Salisbury <joseph.salisbury at canonical.com>
---
 mm/hugetlb.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4c1a2c0..7affa9d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3508,7 +3508,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  * Keep the pte_same checks anyway to make transition from the mutex easier.
  */
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
-		       unsigned long address, pte_t *ptep,
+		       unsigned long haddr, pte_t *ptep,
 		       struct page *pagecache_page, spinlock_t *ptl)
 {
 	pte_t pte;
@@ -3526,7 +3526,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * and just make the page writable */
 	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
 		page_move_anon_rmap(old_page, vma);
-		set_huge_ptep_writable(vma, address, ptep);
+		set_huge_ptep_writable(vma, haddr, ptep);
 		return 0;
 	}
 
@@ -3550,7 +3550,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * be acquired again before returning to the caller, as expected.
 	 */
 	spin_unlock(ptl);
-	new_page = alloc_huge_page(vma, address, outside_reserve);
+	new_page = alloc_huge_page(vma, haddr, outside_reserve);
 
 	if (IS_ERR(new_page)) {
 		/*
@@ -3563,11 +3563,10 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (outside_reserve) {
 			put_page(old_page);
 			BUG_ON(huge_pte_none(pte));
-			unmap_ref_private(mm, vma, old_page, address);
+			unmap_ref_private(mm, vma, old_page, haddr);
 			BUG_ON(huge_pte_none(pte));
 			spin_lock(ptl);
-			ptep = huge_pte_offset(mm, address & huge_page_mask(h),
-					       huge_page_size(h));
+			ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 			if (likely(ptep &&
 				   pte_same(huge_ptep_get(ptep), pte)))
 				goto retry_avoidcopy;
@@ -3592,12 +3591,12 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_release_all;
 	}
 
-	copy_user_huge_page(new_page, old_page, address, vma,
+	copy_user_huge_page(new_page, old_page, haddr, vma,
 			    pages_per_huge_page(h));
 	__SetPageUptodate(new_page);
 	set_page_huge_active(new_page);
 
-	mmun_start = address & huge_page_mask(h);
+	mmun_start = haddr;
 	mmun_end = mmun_start + huge_page_size(h);
 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 
@@ -3606,25 +3605,24 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * before the page tables are altered
 	 */
 	spin_lock(ptl);
-	ptep = huge_pte_offset(mm, address & huge_page_mask(h),
-			       huge_page_size(h));
+	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
 		ClearPagePrivate(new_page);
 
 		/* Break COW */
-		huge_ptep_clear_flush(vma, address, ptep);
+		huge_ptep_clear_flush(vma, haddr, ptep);
 		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
-		set_huge_pte_at(mm, address, ptep,
+		set_huge_pte_at(mm, haddr, ptep,
 				make_huge_pte(vma, new_page, 1));
 		page_remove_rmap(old_page, true);
-		hugepage_add_new_anon_rmap(new_page, vma, address);
+		hugepage_add_new_anon_rmap(new_page, vma, haddr);
 		/* Make the old page be freed below */
 		new_page = old_page;
 	}
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 out_release_all:
-	restore_reserve_on_error(h, vma, address, new_page);
+	restore_reserve_on_error(h, vma, haddr, new_page);
 	put_page(new_page);
 out_release_old:
 	put_page(old_page);
-- 
2.7.4