[PATCH 1/2] Revert "UBUNTU:SAUCE: mm: remove gup_flags FOLL_WRITE games from __get_user_pages()"

Luis Henriques luis.henriques at canonical.com
Mon Oct 24 10:12:55 UTC 2016


This reverts commit 8c1303a50e0c278a27effe10c6a7b70b72aecddb.
It is being replaced with the upstream fix from stable kernel 3.2.83, which
includes a backport of 19be0eaffa3ac7d8eb6784ad9bdbc7d67ed8e619.

Signed-off-by: Luis Henriques <luis.henriques at canonical.com>
---
 include/linux/mm.h |  1 -
 mm/memory.c        | 14 ++------------
 2 files changed, 2 insertions(+), 13 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 50905fac3cb8..43bd547c7ff6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1547,7 +1547,6 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_MLOCK	0x40	/* mark page as mlocked */
 #define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
-#define FOLL_COW	0x4000	/* internal GUP flag */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
diff --git a/mm/memory.c b/mm/memory.c
index 7e3c7c5a0a40..41c12b30ce96 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1427,16 +1427,6 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
-/*
- * FOLL_FORCE can write to even unwritable pte's, but only
- * after we've gone through a COW cycle and they are dirty.
- */
-static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
-{
-        return pte_write(pte) ||
-                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
-}
-
 /**
  * follow_page - look up a page descriptor from a user-virtual address
  * @vma: vm_area_struct mapping @address
@@ -1519,7 +1509,7 @@ split_fallthrough:
 	pte = *ptep;
 	if (!pte_present(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
+	if ((flags & FOLL_WRITE) && !pte_write(pte))
 		goto unlock;
 
 	page = vm_normal_page(vma, address, pte);
@@ -1809,7 +1799,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				 */
 				if ((ret & VM_FAULT_WRITE) &&
 				    !(vma->vm_flags & VM_WRITE))
-					foll_flags |= FOLL_COW;
+					foll_flags &= ~FOLL_WRITE;
 
 				cond_resched();
 			}




More information about the kernel-team mailing list