[SRU][AWS][O][PATCH 1/1] UBUNTU SAUCE: (no-up) linux/ena: Add NUMA aware interrupt allocation
Philip Cox <philip.cox at canonical.com>
Tue Oct 22 11:29:35 UTC 2024
BugLink: https://bugs.launchpad.net/bugs/2085159
This patch implements an improved allocation strategy for IO interrupts,
taking the physical device's NUMA node into account.
It also replaces the use of a deprecated API (irq_set_affinity_hint),
which enforces the affinity, with a newer API (irq_update_affinity_hint),
which only hints at the preferred CPU mask.
Signed-off-by: Osama Abboud <osamaabb at amazon.com>
(content derived from https://github.com/amzn/amzn-drivers/commit/ed7754cdf6ca0d4fb3d76906abc17718cc3ffa23)
Signed-off-by: Philip Cox <philip.cox at canonical.com>
---
drivers/net/ethernet/amazon/ena/ena_netdev.c | 21 +++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
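
Note for reviewers (not part of the patch to be applied): the first hunk
below replaces the round-robin 'i % num_online_cpus()' CPU choice with a
NUMA-aware one. A minimal, self-contained sketch of that selection pattern
follows; the helper name pick_io_irq_cpu() is hypothetical and only
illustrates how dev_to_node(), cpumask_of_node() and cpumask_local_spread()
combine in ena_setup_io_intr().

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/numa.h>
#include <linux/topology.h>

/* Hypothetical helper: pick a CPU and affinity mask for IO queue 'i'. */
static void pick_io_irq_cpu(struct device *dmadev, int i,
			    int *cpu, const struct cpumask **affinity)
{
	int node = dev_to_node(dmadev);	/* NUMA node of the PCI device */

	/* Devices with no NUMA node fall back to all online CPUs. */
	*affinity = (node != NUMA_NO_NODE) ? cpumask_of_node(node)
					   : cpu_online_mask;

	/*
	 * Spread queue index 'i' across CPUs, preferring CPUs local to
	 * 'node' before spilling over to remote nodes.
	 */
	*cpu = cpumask_local_spread(i, node);
}
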
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 184b6e6cbed4..aa8b20e40d4d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1626,16 +1626,20 @@ static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
static void ena_setup_io_intr(struct ena_adapter *adapter)
{
+ const struct cpumask *affinity = cpu_online_mask;
+ int irq_idx, i, cpu, io_queue_count, node;
struct net_device *netdev;
- int irq_idx, i, cpu;
- int io_queue_count;
netdev = adapter->netdev;
io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
+ node = dev_to_node(adapter->ena_dev->dmadev);
+
+ if (node != NUMA_NO_NODE)
+ affinity = cpumask_of_node(node);
for (i = 0; i < io_queue_count; i++) {
irq_idx = ENA_IO_IRQ_IDX(i);
- cpu = i % num_online_cpus();
+ cpu = cpumask_local_spread(i, node);
snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
"%s-Tx-Rx-%d", netdev->name, i);
@@ -1645,8 +1649,7 @@ static void ena_setup_io_intr(struct ena_adapter *adapter)
pci_irq_vector(adapter->pdev, irq_idx);
adapter->irq_tbl[irq_idx].cpu = cpu;
- cpumask_set_cpu(cpu,
- &adapter->irq_tbl[irq_idx].affinity_hint_mask);
+ cpumask_copy(&adapter->irq_tbl[irq_idx].affinity_hint_mask, affinity);
}
}
@@ -1669,7 +1672,7 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
"Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
irq->affinity_hint_mask.bits[0], irq->vector);
- irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
+ irq_update_affinity_hint(irq->vector, &irq->affinity_hint_mask);
return rc;
}
@@ -1702,7 +1705,7 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
"Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
i, irq->affinity_hint_mask.bits[0], irq->vector);
- irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
+ irq_update_affinity_hint(irq->vector, &irq->affinity_hint_mask);
}
return rc;
@@ -1722,7 +1725,7 @@ static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
synchronize_irq(irq->vector);
- irq_set_affinity_hint(irq->vector, NULL);
+ irq_update_affinity_hint(irq->vector, NULL);
free_irq(irq->vector, irq->data);
}
@@ -1741,7 +1744,7 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
irq = &adapter->irq_tbl[i];
- irq_set_affinity_hint(irq->vector, NULL);
+ irq_update_affinity_hint(irq->vector, NULL);
free_irq(irq->vector, irq->data);
}
}
--
2.34.1
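
Note (not part of the patch): both APIs record the mask exposed in
/proc/irq/<n>/affinity_hint, but the deprecated irq_set_affinity_hint()
also applies it as the interrupt's affinity, whereas
irq_update_affinity_hint() only publishes the hint and leaves placement
to the scheduler and irqbalance. A hedged, self-contained sketch of the
usage pattern the patch switches to (the function name is made up for
illustration):

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Hypothetical example mirroring the request/free paths above. */
static void example_affinity_hint(unsigned int vector,
				  const struct cpumask *mask)
{
	/* On request: only publish the preferred mask as a hint. */
	irq_update_affinity_hint(vector, mask);

	/* On free: clear the hint, as ena_free_*_irq() now do. */
	irq_update_affinity_hint(vector, NULL);
}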