[PATCH 1/4] powerpc/mm/hugetlb: Update huge_ptep_set_access_flags to call __ptep_set_access_flags directly
Khalid Elmously
khalid.elmously at canonical.com
Thu Aug 30 19:17:53 UTC 2018
From: "Aneesh Kumar K.V" <aneesh.kumar at linux.ibm.com>
BugLink: https://bugs.launchpad.net/bugs/1789772
In a later patch, we want to update __ptep_set_access_flags to take a page
size argument. This makes ptep_set_access_flags only work with
mmu_virtual_psize. To simplify the code, make huge_ptep_set_access_flags
call __ptep_set_access_flags directly, so that we can compute the hugetlb
page size in the hugetlb function.
Now that ptep_set_access_flags won't be called for hugetlb, remove the
is_vm_hugetlb_page() check and assert the pte lock unconditionally.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar at linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe at ellerman.id.au>
(cherry picked from commit f069ff396d657ac7bdb5de866c3ec28b8d08d953)
Signed-off-by: Khalid Elmously <khalid.elmously at canonical.com>
---
arch/powerpc/include/asm/hugetlb.h | 19 +++--------------
arch/powerpc/mm/pgtable.c | 33 ++++++++++++++++++++++++++++--
2 files changed, 34 insertions(+), 18 deletions(-)
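[Not part of the patch: as a rough illustration of where the series is heading,
here is a hedged sketch of what the non-HUGETLB_NEED_PRELOAD path of
huge_ptep_set_access_flags() could look like once __ptep_set_access_flags()
grows a page-size argument. The extra psize parameter and the use of
hstate_vma()/huge_page_size() to derive it are assumptions for illustration
only, not the final upstream signature.]

  /*
   * Sketch only, not part of this patch. Assumes a later patch appends a
   * page-size argument to __ptep_set_access_flags(); the exact final
   * signature may differ.
   */
  int huge_ptep_set_access_flags(struct vm_area_struct *vma,
  				 unsigned long addr, pte_t *ptep,
  				 pte_t pte, int dirty)
  {
  	int changed;
  	/* The huge page size is known here, via the VMA's hstate. */
  	unsigned long psize = huge_page_size(hstate_vma(vma));

  	pte = set_access_flags_filter(pte, vma, dirty);
  	changed = !pte_same(*(ptep), pte);
  	if (changed) {
  		/* Page size flows down to the low-level flags update
  		 * (assumed extra argument). */
  		__ptep_set_access_flags(vma->vm_mm, ptep, pte, addr, psize);
  		flush_hugetlb_page(vma, addr);
  	}
  	return changed;
  }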
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 14c9d44f355b..1dbb290e20f8 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -173,22 +173,9 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
return pte_wrprotect(pte);
}
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
-{
-#ifdef HUGETLB_NEED_PRELOAD
- /*
- * The "return 1" forces a call of update_mmu_cache, which will write a
- * TLB entry. Without this, platforms that don't do a write of the TLB
- * entry in the TLB miss handler asm will fault ad infinitum.
- */
- ptep_set_access_flags(vma, addr, ptep, pte, dirty);
- return 1;
-#else
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-#endif
-}
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty);
static inline pte_t huge_ptep_get(pte_t *ptep)
{
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index a03ff3d99e0c..387b7ab24207 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -220,14 +220,43 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
entry = set_access_flags_filter(entry, vma, dirty);
changed = !pte_same(*(ptep), entry);
if (changed) {
- if (!is_vm_hugetlb_page(vma))
- assert_pte_locked(vma->vm_mm, address);
+ assert_pte_locked(vma->vm_mm, address);
__ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
flush_tlb_page(vma, address);
}
return changed;
}
+#ifdef CONFIG_HUGETLB_PAGE
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+#ifdef HUGETLB_NEED_PRELOAD
+ /*
+ * The "return 1" forces a call of update_mmu_cache, which will write a
+ * TLB entry. Without this, platforms that don't do a write of the TLB
+ * entry in the TLB miss handler asm will fault ad infinitum.
+ */
+ ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+ return 1;
+#else
+ int changed;
+
+ pte = set_access_flags_filter(pte, vma, dirty);
+ changed = !pte_same(*(ptep), pte);
+ if (changed) {
+#ifdef CONFIG_DEBUG_VM
+ assert_spin_locked(&vma->vm_mm->page_table_lock);
+#endif
+ __ptep_set_access_flags(vma->vm_mm, ptep, pte, addr);
+ flush_hugetlb_page(vma, addr);
+ }
+ return changed;
+#endif
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
#ifdef CONFIG_DEBUG_VM
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
--
2.17.1