Message-Id: <20240913-refactor-blk-affinity-helpers-v1-3-8e058f77af12@suse.de>
Date: Fri, 13 Sep 2024 09:42:01 +0200
From: Daniel Wagner <wagi@...nel.org>
To: Jens Axboe <axboe@...nel.dk>, Bjorn Helgaas <bhelgaas@...gle.com>,
"Michael S. Tsirkin" <mst@...hat.com>, Jason Wang <jasowang@...hat.com>,
"Martin K. Petersen" <martin.petersen@...cle.com>,
Keith Busch <kbusch@...nel.org>, Christoph Hellwig <hch@....de>,
Sagi Grimberg <sagi@...mberg.me>
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-pci@...r.kernel.org, virtualization@...ts.linux.dev,
linux-scsi@...r.kernel.org, megaraidlinux.pdl@...adcom.com,
mpi3mr-linuxdrv.pdl@...adcom.com, MPT-FusionLinux.pdl@...adcom.com,
storagedev@...rochip.com, linux-nvme@...ts.infradead.org,
Daniel Wagner <dwagner@...e.de>,
20240912-do-not-overwrite-pci-mapping-v1-1-85724b6cec49@...e.de
Subject: [PATCH 3/6] scsi: hisi_sas: replace blk_mq_pci_map_queues with
blk_mq_hctx_map_queues
From: Daniel Wagner <dwagner@...e.de>

Replace all users of blk_mq_pci_map_queues with the more generic
blk_mq_hctx_map_queues. This is in preparation for retiring
blk_mq_pci_map_queues.

For hisi_sas_v2_hw.c we have to provide a driver-specific callback for
retrieving the affinity mask, because pci_get_blk_mq_affinity uses
pci_irq_get_affinity rather than irq_get_affinity_mask. But at least
the open-coded loop can be replaced with blk_mq_hctx_map_queues.

Signed-off-by: Daniel Wagner <dwagner@...e.de>
---
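As a note for reviewers without patch 1 of the series at hand: the
helper used below is expected to behave roughly like the sketch that
follows. The name blk_mq_hctx_map_queues and the callback signature
are taken from the call sites in this patch, and the loop body simply
mirrors the open-coded mapping removed from hisi_sas_v2_hw.c, so treat
it as an illustration rather than the implementation from the series
(the typedef and function names here are made up for the sketch):

/* Illustrative sketch only -- not the implementation from patch 1. */
typedef const struct cpumask *(*get_queue_affinity_fn)(void *dev_data,
						       int offset, int queue);

static void sketch_blk_mq_hctx_map_queues(struct blk_mq_queue_map *qmap,
					  void *dev_data, int offset,
					  get_queue_affinity_fn get_affinity)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		/* Ask the driver for the affinity mask of this queue's vector. */
		mask = get_affinity(dev_data, offset, queue);
		if (!mask)
			continue;

		/* Map every CPU in the mask to this hardware queue. */
		for_each_cpu(cpu, mask)
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
}
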
 drivers/scsi/hisi_sas/hisi_sas.h       |  1 -
 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 20 ++++++++++----------
 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c |  5 +++--
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index d223f482488f..010479a354ee 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -9,7 +9,6 @@
 
 #include <linux/acpi.h>
 #include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
 #include <linux/clk.h>
 #include <linux/debugfs.h>
 #include <linux/dmapool.h>
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 342d75f12051..31be34f23164 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3549,21 +3549,21 @@ static const struct attribute_group *sdev_groups_v2_hw[] = {
 	NULL
 };
 
+static const struct cpumask *hisi_hba_get_queue_affinity(void *dev_data,
+							  int offset, int queue)
+{
+	struct hisi_hba *hba = dev_data;
+
+	return irq_get_affinity_mask(hba->irq_map[offset + queue]);
+}
+
 static void map_queues_v2_hw(struct Scsi_Host *shost)
 {
 	struct hisi_hba *hisi_hba = shost_priv(shost);
 	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
-	const struct cpumask *mask;
-	unsigned int queue, cpu;
 
-	for (queue = 0; queue < qmap->nr_queues; queue++) {
-		mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]);
-		if (!mask)
-			continue;
-
-		for_each_cpu(cpu, mask)
-			qmap->mq_map[cpu] = qmap->queue_offset + queue;
-	}
+	blk_mq_hctx_map_queues(qmap, hisi_hba, CQ0_IRQ_INDEX,
+			       hisi_hba_get_queue_affinity);
 }
 
 static const struct scsi_host_template sht_v2_hw = {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index feda9b54b443..1576eee943ba 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3322,8 +3322,9 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)
 		if (i == HCTX_TYPE_POLL)
 			blk_mq_map_queues(qmap);
 		else
-			blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
-					      BASE_VECTORS_V3_HW);
+			blk_mq_hctx_map_queues(qmap, hisi_hba->pci_dev,
+					       BASE_VECTORS_V3_HW,
+					       pci_get_blk_mq_affinity);
 		qoff += qmap->nr_queues;
 	}
 }
--
2.46.0