[SRU][F][PATCH v2 2/5] x86/kvm: Teardown PV features on boot CPU as well
Andrea Righi
andrea.righi at canonical.com
Wed May 26 05:15:44 UTC 2021
From: Vitaly Kuznetsov <vkuznets at redhat.com>
BugLink: https://bugs.launchpad.net/bugs/1920944
Various PV features (Async PF, PV EOI, steal time) work through memory
shared with the hypervisor. When we restore from hibernation we must
properly tear down all these features to make sure the hypervisor doesn't
write to stale locations after we jump to the previously hibernated kernel
(which can try to place anything there). For secondary CPUs the job is
already done by kvm_cpu_down_prepare(); register syscore ops to do
the same for the boot CPU.
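
The syscore_ops mechanism used here runs its callbacks exactly once per
suspend/hibernate transition, on the boot CPU, with interrupts disabled,
which is why it complements the per-CPU hotplug callbacks that already
cover secondary CPUs. A minimal sketch of the registration pattern follows,
using hypothetical example_* names rather than the kvm_* ones added below:

    #include <linux/init.h>
    #include <linux/syscore_ops.h>

    /* Runs late in suspend/hibernate, on the boot CPU, IRQs disabled. */
    static int example_syscore_suspend(void)
    {
            /* tear down state shared with the hypervisor here */
            return 0;
    }

    /* Runs early in resume, before secondary CPUs are brought back up. */
    static void example_syscore_resume(void)
    {
            /* re-establish the boot CPU's shared state here */
    }

    static struct syscore_ops example_syscore_ops = {
            .suspend = example_syscore_suspend,
            .resume  = example_syscore_resume,
    };

    static int __init example_init(void)
    {
            register_syscore_ops(&example_syscore_ops);
            return 0;
    }
    early_initcall(example_init);

The patch applies this same pattern with kvm_syscore_ops, reusing
kvm_guest_cpu_offline() and kvm_cpu_online() as the suspend/resume bodies.
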
Signed-off-by: Vitaly Kuznetsov <vkuznets at redhat.com>
Message-Id: <20210414123544.1060604-3-vkuznets at redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
(backported from commit 8b79feffeca28c5459458fe78676b081e87c93a4)
Signed-off-by: Andrea Righi <andrea.righi at canonical.com>
---
arch/x86/kernel/kvm.c | 57 +++++++++++++++++++++++++++++++------------
1 file changed, 41 insertions(+), 16 deletions(-)
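
The secondary-CPU half mentioned in the commit message is not touched by
this diff; in mainline arch/x86/kernel/kvm.c the hotplug callbacks are
installed from kvm_guest_init() roughly as sketched below (based on the
upstream code, so the exact hotplug state, name string, and the
example_install_cpuhp_callbacks() wrapper are illustrative and may differ
in this tree):

    #include <linux/cpuhotplug.h>
    #include <linux/printk.h>

    /* Sketch: how the per-CPU online/offline callbacks are wired up. */
    static void example_install_cpuhp_callbacks(void)
    {
            /*
             * kvm_cpu_online()/kvm_cpu_down_prepare() run on each secondary
             * CPU as it comes up or goes down; the boot CPU is covered by
             * the syscore ops this patch adds.
             */
            if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
                                          kvm_cpu_online, kvm_cpu_down_prepare) < 0)
                    pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
    }
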
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index ee595588b98a..29082944956b 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -24,6 +24,7 @@
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
+#include <linux/syscore_ops.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
@@ -426,6 +427,25 @@ static void __init sev_map_percpu_data(void)
}
}
+static void kvm_guest_cpu_offline(void)
+{
+ kvm_disable_steal_time();
+ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+ wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+ kvm_pv_disable_apf();
+ apf_task_wake_all();
+}
+
+static int kvm_cpu_online(unsigned int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ kvm_guest_cpu_init();
+ local_irq_restore(flags);
+ return 0;
+}
+
#ifdef CONFIG_SMP
#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
@@ -545,31 +565,34 @@ static void __init kvm_smp_prepare_boot_cpu(void)
kvm_spinlock_init();
}
-static void kvm_guest_cpu_offline(void)
+static int kvm_cpu_down_prepare(unsigned int cpu)
{
- kvm_disable_steal_time();
- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
- kvm_pv_disable_apf();
- apf_task_wake_all();
-}
+ unsigned long flags;
-static int kvm_cpu_online(unsigned int cpu)
-{
- local_irq_disable();
- kvm_guest_cpu_init();
- local_irq_enable();
+ local_irq_save(flags);
+ kvm_guest_cpu_offline();
+ local_irq_restore(flags);
return 0;
}
-static int kvm_cpu_down_prepare(unsigned int cpu)
+#endif
+
+static int kvm_suspend(void)
{
- local_irq_disable();
kvm_guest_cpu_offline();
- local_irq_enable();
+
return 0;
}
-#endif
+
+static void kvm_resume(void)
+{
+ kvm_cpu_online(raw_smp_processor_id());
+}
+
+static struct syscore_ops kvm_syscore_ops = {
+ .suspend = kvm_suspend,
+ .resume = kvm_resume,
+};
static void __init kvm_apf_trap_init(void)
{
@@ -647,6 +670,8 @@ static void __init kvm_guest_init(void)
kvm_guest_cpu_init();
#endif
+ register_syscore_ops(&kvm_syscore_ops);
+
/*
* Hard lockup detection is enabled by default. Disable it, as guests
* can get false positives too easily, for example if the host is
--
2.31.1