[SRU][Bionic][PATCH 1/2] Revert "blk-mq: simplify queue mapping & schedule with each possisble CPU"

Seth Forshee seth.forshee at canonical.com
Sat Apr 21 21:08:25 UTC 2018


BugLink: http://bugs.launchpad.net/bugs/1765232

This reverts commit 9403a13fd07ef6a5fff7a69860f419d16641bb60.
That commit was applied as part of the fix for LP #1759723, but it is
causing IO hangs because some drivers end up selecting reply queues
which do not have any online CPU mapped.

Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
---
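For reviewers, a minimal illustrative sketch of the failure mode described
above (not part of the patch; the helper name pick_reply_cpu() is
hypothetical): when a hw queue's cpumask is built from all possible CPUs,
an online-mask lookup over it can come back empty, which is how a driver
ends up on a queue that no online CPU will ever service.

/*
 * Illustrative only. With hctx->cpumask spanning possible-but-offline
 * CPUs, filtering against cpu_online_mask may find no CPU at all.
 */
static int pick_reply_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)		/* no online CPU mapped to this hw queue */
		return -ENODEV;		/* IO routed here would hang */

	return cpu;
}
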
 block/blk-mq.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 50ef7dc4c41e..1a80d8c4f3ec 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -443,7 +443,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 		blk_queue_exit(q);
 		return ERR_PTR(-EXDEV);
 	}
-	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
+	cpu = cpumask_first(alloc_data.hctx->cpumask);
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
 	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
@@ -1263,10 +1263,9 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 	if (--hctx->next_cpu_batch <= 0) {
 		int next_cpu;
 
-		next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
-				cpu_online_mask);
+		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
 		if (next_cpu >= nr_cpu_ids)
-			next_cpu = cpumask_first_and(hctx->cpumask,cpu_online_mask);
+			next_cpu = cpumask_first(hctx->cpumask);
 
 		hctx->next_cpu = next_cpu;
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
@@ -2138,11 +2137,16 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
 
+		/* If the cpu isn't present, the cpu is mapped to first hctx */
+		if (!cpu_present(i))
+			continue;
+
+		hctx = blk_mq_map_queue(q, i);
+
 		/*
 		 * Set local node, IFF we have more than one hw queue. If
 		 * not, we remain on the home node of the device
 		 */
-		hctx = blk_mq_map_queue(q, i);
 		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
 			hctx->numa_node = local_memory_node(cpu_to_node(i));
 	}
@@ -2199,7 +2203,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 *
 	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
-	for_each_possible_cpu(i) {
+	for_each_present_cpu(i) {
 		hctx_idx = q->mq_map[i];
 		/* unmapped hw queue can be remapped after CPU topo changed */
 		if (!set->tags[hctx_idx] &&
@@ -2253,8 +2257,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		/*
 		 * Initialize batch roundrobin counts
 		 */
-		hctx->next_cpu = cpumask_first_and(hctx->cpumask,
-				cpu_online_mask);
+		hctx->next_cpu = cpumask_first(hctx->cpumask);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
 }
-- 
2.17.0




