[Xenial PATCH 3/3] UBUNTU: SAUCE: s390/mm: fix race on mm->context.flush_mm

Stefan Bader stefan.bader at canonical.com
Tue Sep 12 09:45:40 UTC 2017


From: Martin Schwidefsky <schwidefsky at de.ibm.com>

BugLink: http://bugs.launchpad.net/bugs/1708399

The order in __tlb_flush_mm_lazy is to flush the TLB first and then clear
the mm->context.flush_mm bit. This can lead to missed flushes, as the bit
can be set again at any time between the flush and the clear; the order
needs to be the other way around.

But this leads to a different race: __tlb_flush_mm_lazy may be called
on two CPUs concurrently. If mm->context.flush_mm is cleared first,
another CPU can bypass the flush in __tlb_flush_mm_lazy although the
first CPU has not done the flush yet. In a virtualized environment the
time until the flush finally completes can be arbitrarily long.

Add a spinlock to serialize __tlb_flush_mm_lazy and use the function
in finish_arch_post_lock_switch as well.
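
For illustration (again not part of the patch), the remaining window with
the clear-before-flush ordering but without the new lock looks roughly
like this:

	CPU 0					CPU 1
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
						if (mm->context.flush_mm)
						/* already clear, CPU 1
						   skips the flush */
		__tlb_flush_mm(mm);
		/* under virtualization this flush
		   may complete arbitrarily late */
	}

Serializing __tlb_flush_mm_lazy with mm->context.lock closes this window.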

Cc: <stable at vger.kernel.org>
Reviewed-by: Heiko Carstens <heiko.carstens at de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky at de.ibm.com>
(backported from 60f07c8ec5fae06c23e9fd7bab67dabce92b3414 linux-next)
[context adaption]
Signed-off-by: Stefan Bader <stefan.bader at canonical.com>
---
 arch/s390/include/asm/mmu.h         | 2 ++
 arch/s390/include/asm/mmu_context.h | 4 ++--
 arch/s390/include/asm/tlbflush.h    | 4 +++-
 3 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 081b2ad..d8675e6 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -5,6 +5,7 @@
 #include <linux/errno.h>
 
 typedef struct {
+	spinlock_t lock;
 	cpumask_t cpu_attach_mask;
 	atomic_t attach_count;
 	unsigned int flush_mm;
@@ -23,6 +24,7 @@ typedef struct {
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)						      \
+	.context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),	      \
 	.context.list_lock    = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
 	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),    \
 	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 22877c9..27e47ee 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	spin_lock_init(&mm->context.lock);
 	spin_lock_init(&mm->context.list_lock);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
 	INIT_LIST_HEAD(&mm->context.gmap_list);
@@ -114,8 +115,7 @@ static inline void finish_arch_post_lock_switch(void)
 			cpu_relax();
 
 		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-		if (mm->context.flush_mm)
-			__tlb_flush_mm(mm);
+		__tlb_flush_mm_lazy(mm);
 		preempt_enable();
 	}
 	set_fs(current->thread.mm_segment);
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index d54cc83..f3b3618 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -104,10 +104,12 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
+	spin_lock(&mm->context.lock);
 	if (mm->context.flush_mm) {
-		__tlb_flush_mm(mm);
 		mm->context.flush_mm = 0;
+		__tlb_flush_mm(mm);
 	}
+	spin_unlock(&mm->context.lock);
 }
 
 /*
-- 
2.7.4
