[SRU][Xenial/gcp][PATCH 8/11] virtio_net: Stripe queue affinities across cores.
Khalid Elmously khalid.elmously at canonical.com
Thu Sep 19 09:23:50 UTC 2019
From: Caleb Raitto <caraitto at google.com>
BugLink: https://bugs.launchpad.net/bugs/1810457
Always set the affinity hint, even if #cpu != #vq.

Handle both the case where #cpu > #vq (including when #cpu % #vq != 0)
and the case where #vq > #cpu (including when #vq % #cpu != 0).
Signed-off-by: Caleb Raitto <caraitto at google.com>
Signed-off-by: Willem de Bruijn <willemb at google.com>
Acked-by: Jon Olson <jonolson at google.com>
Signed-off-by: David S. Miller <davem at davemloft.net>
(backported from commit 2ca653d607ce59f2729173a7ea56dbfa6330ec88)
[marcelo.cerri at canonical.com: fixed context in the included headers]
Signed-off-by: Marcelo Henrique Cerri <marcelo.cerri at canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously at canonical.com>
---
drivers/net/virtio_net.c | 42 ++++++++++++++++++++++++++--------------
1 file changed, 27 insertions(+), 15 deletions(-)
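[ Reviewer's note, not part of the patch: below is a minimal userspace
sketch of the striping arithmetic that virtnet_set_affinity() uses after
this change. stripe(), num_cpu and num_queues are hypothetical stand-ins,
and the modular increment models cpumask_next_wrap() over the online-CPU
mask. ]

#include <stdio.h>

/* Model of the new assignment: queue pair i gets a contiguous group of
 * "stride" CPUs, the first (num_cpu % num_queues) queue pairs take one
 * extra "straggler" CPU, and CPU indices wrap around when #vq > #cpu.
 */
static void stripe(int num_cpu, int num_queues)
{
	int stride = num_cpu / num_queues;
	int stragglers = num_cpu >= num_queues ? num_cpu % num_queues : 0;
	int cpu = 0;
	int i, j;

	if (stride < 1)
		stride = 1;	/* like max_t(int, ..., 1) in the patch */

	for (i = 0; i < num_queues; i++) {
		int group_size = stride + (i < stragglers ? 1 : 0);

		printf("queue pair %d:", i);
		for (j = 0; j < group_size; j++) {
			printf(" cpu%d", cpu);
			cpu = (cpu + 1) % num_cpu; /* wraps like cpumask_next_wrap() */
		}
		printf("\n");
	}
}

int main(void)
{
	stripe(8, 3);	/* #cpu > #vq, 8 % 3 != 0 */
	stripe(2, 4);	/* #vq > #cpu */
	return 0;
}

[ With 8 CPUs and 3 queue pairs this prints cpu0-2, cpu3-5 and cpu6-7 for
queue pairs 0, 1 and 2; with 2 CPUs and 4 queue pairs the assignment
wraps, so queue pairs 0/2 share cpu0 and queue pairs 1/3 share cpu1. ]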
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c40ebde9cee9..e1b753a334d0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -30,6 +30,7 @@
 #include <linux/cpu.h>
 #include <linux/average.h>
 #include <linux/filter.h>
+#include <linux/kernel.h>
 #include <net/route.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
@@ -1724,30 +1725,41 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 
 static void virtnet_set_affinity(struct virtnet_info *vi)
 {
-	int i;
-	int cpu;
+	cpumask_var_t mask;
+	int stragglers;
+	int group_size;
+	int i, j, cpu;
+	int num_cpu;
+	int stride;
 
-	/* In multiqueue mode, when the number of cpu is equal to the number of
-	 * queue pairs, we let the queue pairs to be private to one cpu by
-	 * setting the affinity hint to eliminate the contention.
-	 */
-	if (vi->curr_queue_pairs == 1 ||
-	    vi->max_queue_pairs != num_online_cpus()) {
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 		virtnet_clean_affinity(vi, -1);
 		return;
 	}
 
-	i = 0;
-	for_each_online_cpu(cpu) {
-		const unsigned long *mask = cpumask_bits(cpumask_of(cpu));
+	num_cpu = num_online_cpus();
+	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
+	stragglers = num_cpu >= vi->curr_queue_pairs ?
+			num_cpu % vi->curr_queue_pairs :
+			0;
+	cpu = cpumask_next(-1, cpu_online_mask);
 
-		virtqueue_set_affinity(vi->rq[i].vq, cpumask_of(cpu));
-		virtqueue_set_affinity(vi->sq[i].vq, cpumask_of(cpu));
-		__netif_set_xps_queue(vi->dev, mask, i, false);
-		i++;
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
+		group_size = stride + (i < stragglers ? 1 : 0);
+
+		for (j = 0; j < group_size; j++) {
+			cpumask_set_cpu(cpu, mask);
+			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
+						nr_cpu_ids, false);
+		}
+		virtqueue_set_affinity(vi->rq[i].vq, mask);
+		virtqueue_set_affinity(vi->sq[i].vq, mask);
+		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false);
+		cpumask_clear(mask);
 	}
 
 	vi->affinity_hint_set = true;
+	free_cpumask_var(mask);
 }
 
 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
--
2.17.1