[ZESTY] [PATCH 03/33] Revert "KVM: PPC: Book 3S: XICS: Don't lock twice when checking for resend"
Breno Leitao
leitao at debian.org
Tue Mar 28 16:54:15 UTC 2017
From: Breno Leitao <breno.leitao at gmail.com>
BugLink: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1675806
This reverts commit 21acd0e4df04f02176e773468658c3cebff096bb.
Reverting this commit now in order to apply other commits, and then
add this commit back on top of the new commits.
Signed-off-by: Breno Leitao <breno.leitao at gmail.com>
---
arch/powerpc/kvm/book3s_hv_rm_xics.c | 40 ++++++++++++------------
arch/powerpc/kvm/book3s_xics.c | 59 +++++++++++++++++++-----------------
2 files changed, 51 insertions(+), 48 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 44cfdd281fa1..30f82c79de5d 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -35,7 +35,7 @@ int kvm_irq_bypass = 1;
EXPORT_SYMBOL(kvm_irq_bypass);
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
- u32 new_irq, bool check_resend);
+ u32 new_irq);
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);
/* -- ICS routines -- */
@@ -44,12 +44,22 @@ static void ics_rm_check_resend(struct kvmppc_xics *xics,
{
int i;
+ arch_spin_lock(&ics->lock);
+
for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
struct ics_irq_state *state = &ics->irq_state[i];
- if (state->resend)
- icp_rm_deliver_irq(xics, icp, state->number, true);
+
+ if (!state->resend)
+ continue;
+
+ state->resend = 0;
+
+ arch_spin_unlock(&ics->lock);
+ icp_rm_deliver_irq(xics, icp, state->number);
+ arch_spin_lock(&ics->lock);
}
+ arch_spin_unlock(&ics->lock);
}
/* -- ICP routines -- */
@@ -282,7 +292,7 @@ static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
}
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
- u32 new_irq, bool check_resend)
+ u32 new_irq)
{
struct ics_irq_state *state;
struct kvmppc_ics *ics;
@@ -327,10 +337,6 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
}
}
- if (check_resend)
- if (!state->resend)
- goto out;
-
/* Clear the resend bit of that interrupt */
state->resend = 0;
@@ -378,7 +384,6 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
arch_spin_unlock(&ics->lock);
icp->n_reject++;
new_irq = reject;
- check_resend = 0;
goto again;
}
} else {
@@ -386,14 +391,8 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* We failed to deliver the interrupt we need to set the
* resend map bit and mark the ICS state as needing a resend
*/
- state->resend = 1;
-
- /*
- * Make sure when checking resend, we don't miss the resend
- * if resend_map bit is seen and cleared.
- */
- smp_wmb();
set_bit(ics->icsid, icp->resend_map);
+ state->resend = 1;
/*
* If the need_resend flag got cleared in the ICP some time
@@ -405,7 +404,6 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
if (!icp->state.need_resend) {
state->resend = 0;
arch_spin_unlock(&ics->lock);
- check_resend = 0;
goto again;
}
}
@@ -600,7 +598,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
/* Handle reject in real mode */
if (reject && reject != XICS_IPI) {
this_icp->n_reject++;
- icp_rm_deliver_irq(xics, icp, reject, false);
+ icp_rm_deliver_irq(xics, icp, reject);
}
/* Handle resends in real mode */
@@ -668,7 +666,7 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
*/
if (reject && reject != XICS_IPI) {
icp->n_reject++;
- icp_rm_deliver_irq(xics, icp, reject, false);
+ icp_rm_deliver_irq(xics, icp, reject);
}
bail:
return check_too_hard(xics, icp);
@@ -706,7 +704,7 @@ static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
if (pq_new & PQ_PRESENTED)
- icp_rm_deliver_irq(xics, NULL, irq, false);
+ icp_rm_deliver_irq(xics, NULL, irq);
if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
icp->rm_action |= XICS_RM_NOTIFY_EOI;
@@ -876,7 +874,7 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
/* Test P=1, Q=0, this is the only case where we present */
if (pq_new == PQ_PRESENTED)
- icp_rm_deliver_irq(xics, icp, irq, false);
+ icp_rm_deliver_irq(xics, icp, irq);
/* EOI the interrupt */
icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr,
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index e48803e2918d..c7620622c846 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -63,7 +63,7 @@
/* -- ICS routines -- */
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
- u32 new_irq, bool check_resend);
+ u32 new_irq);
/*
* Return value ideally indicates how the interrupt was handled, but no
@@ -117,7 +117,7 @@ static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
/* Test P=1, Q=0, this is the only case where we present */
if (pq_new == PQ_PRESENTED)
- icp_deliver_irq(xics, NULL, irq, false);
+ icp_deliver_irq(xics, NULL, irq);
/* Record which CPU this arrived on for passed-through interrupts */
if (state->host_irq)
@@ -131,14 +131,31 @@ static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
{
int i;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ arch_spin_lock(&ics->lock);
+
for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
struct ics_irq_state *state = &ics->irq_state[i];
- if (state->resend) {
- XICS_DBG("resend %#x prio %#x\n", state->number,
- state->priority);
- icp_deliver_irq(xics, icp, state->number, true);
- }
+
+ if (!state->resend)
+ continue;
+
+ state->resend = 0;
+
+ XICS_DBG("resend %#x prio %#x\n", state->number,
+ state->priority);
+
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
+ icp_deliver_irq(xics, icp, state->number);
+ local_irq_save(flags);
+ arch_spin_lock(&ics->lock);
}
+
+ arch_spin_unlock(&ics->lock);
+ local_irq_restore(flags);
}
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
@@ -192,7 +209,7 @@ int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
state->masked_pending, state->resend);
if (write_xive(xics, ics, state, server, priority, priority))
- icp_deliver_irq(xics, icp, irq, false);
+ icp_deliver_irq(xics, icp, irq);
return 0;
}
@@ -245,7 +262,7 @@ int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
if (write_xive(xics, ics, state, state->server, state->saved_priority,
state->saved_priority))
- icp_deliver_irq(xics, icp, irq, false);
+ icp_deliver_irq(xics, icp, irq);
return 0;
}
@@ -379,7 +396,7 @@ static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
}
static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
- u32 new_irq, bool check_resend)
+ u32 new_irq)
{
struct ics_irq_state *state;
struct kvmppc_ics *ics;
@@ -425,10 +442,6 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
}
}
- if (check_resend)
- if (!state->resend)
- goto out;
-
/* Clear the resend bit of that interrupt */
state->resend = 0;
@@ -477,7 +490,6 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
arch_spin_unlock(&ics->lock);
local_irq_restore(flags);
new_irq = reject;
- check_resend = 0;
goto again;
}
} else {
@@ -485,14 +497,8 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* We failed to deliver the interrupt we need to set the
* resend map bit and mark the ICS state as needing a resend
*/
- state->resend = 1;
-
- /*
- * Make sure when checking resend, we don't miss the resend
- * if resend_map bit is seen and cleared.
- */
- smp_wmb();
set_bit(ics->icsid, icp->resend_map);
+ state->resend = 1;
/*
* If the need_resend flag got cleared in the ICP some time
@@ -505,7 +511,6 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
state->resend = 0;
arch_spin_unlock(&ics->lock);
local_irq_restore(flags);
- check_resend = 0;
goto again;
}
}
@@ -697,7 +702,7 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
/* Handle reject */
if (reject && reject != XICS_IPI)
- icp_deliver_irq(xics, icp, reject, false);
+ icp_deliver_irq(xics, icp, reject);
/* Handle resend */
if (resend)
@@ -777,7 +782,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
* attempt (see comments in icp_deliver_irq).
*/
if (reject && reject != XICS_IPI)
- icp_deliver_irq(xics, icp, reject, false);
+ icp_deliver_irq(xics, icp, reject);
}
static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
@@ -813,7 +818,7 @@ static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
if (pq_new & PQ_PRESENTED)
- icp_deliver_irq(xics, icp, irq, false);
+ icp_deliver_irq(xics, icp, irq);
kvm_notify_acked_irq(vcpu->kvm, 0, irq);
@@ -1302,7 +1307,7 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
local_irq_restore(flags);
if (val & KVM_XICS_PENDING)
- icp_deliver_irq(xics, NULL, irqp->number, false);
+ icp_deliver_irq(xics, NULL, irqp->number);
return 0;
}
--
2.11.0
More information about the kernel-team
mailing list