[PATCH 1/4] stop_machine, sched: Fix migrate_swap() vs. active_balance() deadlock
Mauricio Faria de Oliveira
mfo at canonical.com
Thu Mar 21 23:44:09 UTC 2019
From: Peter Zijlstra <peterz at infradead.org>
BugLink: https://bugs.launchpad.net/bugs/1821259
Matt reported the following deadlock:
	CPU0				CPU1

	schedule(.prev=migrate/0)	<fault>
	  pick_next_task()		  ...
	    idle_balance()		    migrate_swap()
	      active_balance()		      stop_two_cpus()
						spin_lock(stopper0->lock)
						spin_lock(stopper1->lock)
						ttwu(migrate/0)
						  smp_cond_load_acquire() -- waits for schedule()
	        stop_one_cpu(1)
	          spin_lock(stopper1->lock) -- waits for stopper lock
Fix this deadlock by taking the wakeups out from under stopper->lock.
This allows the active_balance() to queue the stop work and finish the
context switch, which in turn allows the wakeup from migrate_swap() to
observe the context and complete the wakeup.
Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
Reported-by: Matt Fleming <matt at codeblueprint.co.uk>
Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
Acked-by: Matt Fleming <matt at codeblueprint.co.uk>
Cc: Linus Torvalds <torvalds at linux-foundation.org>
Cc: Michal Hocko <mhocko at suse.com>
Cc: Mike Galbraith <umgwanakikbuti at gmail.com>
Cc: Peter Zijlstra <peterz at infradead.org>
Cc: Thomas Gleixner <tglx at linutronix.de>
Link: http://lkml.kernel.org/r/20180420095005.GH4064@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo at kernel.org>
(backported from commit 0b26351b910fb8fe6a056f8a1bbccabe50c0e19f)
[mfo: backport:
- hunk 1:
- refresh context lines
- include 'linux/sched.h' instead of 'linux/sched/wake_q.h' which is
code moved by upstream commit eb61baf69871 ("sched/headers: Move the
wake-queue types and interfaces from sched.h into <linux/sched/wake_q.h>")
- hunk 2:
- refresh context lines
- s/bool/void/ return type of cpu_stop_queue_work() and other 2 changes
which are not relevant to this fix, due to lack of upstream commits:
- 'enabled' variable:
commit 1b034bd989aa ("stop_machine: Make cpu_stop_queue_work() and
stop_one_cpu_nowait() return bool")
- else 'if (work->done)' condition
commit dd2e3121e3cb ("stop_machine: Shift the 'done != NULL' check
from cpu_stop_signal_done() to callers")
- s/DEFINE_WAKE_Q/WAKE_Q/ due to lack of upstream commit 194a6b5b9cb6
("sched/wake_q: Rename WAKE_Q to DEFINE_WAKE_Q")
- hunk 3:
- refresh context lines
- s/DEFINE_WAKE_Q/WAKE_Q/ due to lack of upstream commit 194a6b5b9cb6
("sched/wake_q: Rename WAKE_Q to DEFINE_WAKE_Q")
- hunk 4:
- refresh context lines
- hunk 5:
- merge with hunk 4
- refresh context lines]
Signed-off-by: Mauricio Faria de Oliveira <mfo at canonical.com>
---
kernel/stop_machine.c | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index a3bbaee77c58..71435be8bd25 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -21,6 +21,7 @@
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/lglock.h>
+#include <linux/sched.h>
/*
* Structure to determine completion condition and record errors. May
@@ -74,24 +75,28 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
}
static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
- struct cpu_stop_work *work)
+ struct cpu_stop_work *work,
+ struct wake_q_head *wakeq)
{
list_add_tail(&work->list, &stopper->works);
- wake_up_process(stopper->thread);
+ wake_q_add(wakeq, stopper->thread);
}
/* queue @work to @stopper. if offline, @work is completed immediately */
static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+ WAKE_Q(wakeq);
unsigned long flags;
spin_lock_irqsave(&stopper->lock, flags);
if (stopper->enabled)
- __cpu_stop_queue_work(stopper, work);
+ __cpu_stop_queue_work(stopper, work, &wakeq);
else
cpu_stop_signal_done(work->done, false);
spin_unlock_irqrestore(&stopper->lock, flags);
+
+ wake_up_q(&wakeq);
}
/**
@@ -221,6 +226,7 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
{
struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
+ WAKE_Q(wakeq);
int err;
lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
@@ -232,13 +238,15 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
goto unlock;
err = 0;
- __cpu_stop_queue_work(stopper1, work1);
- __cpu_stop_queue_work(stopper2, work2);
+ __cpu_stop_queue_work(stopper1, work1, &wakeq);
+ __cpu_stop_queue_work(stopper2, work2, &wakeq);
unlock:
spin_unlock(&stopper2->lock);
spin_unlock_irq(&stopper1->lock);
lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
+ wake_up_q(&wakeq);
+
return err;
}
/**
--
2.17.1
More information about the kernel-team
mailing list