Message-ID: <c3e74996-6188-12c6-b0c5-58d2188c0609@huawei.com>
Date: Sat, 26 Jul 2025 14:25:00 +0800
From: liulongfang <liulongfang@...wei.com>
To: <alex.williamson@...hat.com>, <jgg@...dia.com>,
<herbert@...dor.apana.org.au>, <shameerali.kolothum.thodi@...wei.com>,
<jonathan.cameron@...wei.com>
CC: <linux-crypto@...r.kernel.org>, <kvm@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <linuxarm@...neuler.org>
Subject: Re: [PATCH v6 3/3] migration: adapt to new migration configuration
On 2025/7/17 9:15, Longfang Liu wrote:
> On platforms newer than QM_HW_V3, the migration region has been
> relocated from the VF to the PF. The driver must be updated
> accordingly to support the new hardware.
>
> On the new hardware platform, use the PF's I/O base directly; no
> mmap operation is required. On older platforms, the driver keeps
> using the existing solution.
>
> Signed-off-by: Longfang Liu <liulongfang@...wei.com>
> ---
> .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 164 ++++++++++++------
> .../vfio/pci/hisilicon/hisi_acc_vfio_pci.h | 7 +
> 2 files changed, 118 insertions(+), 53 deletions(-)
>
Hi Alex,
Could you please take a look at this patch set?
Thank you.
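
For reference, the key change is that on platforms newer than
QM_HW_V3 the VF's migration registers are reached through the PF's
BAR2 mapping instead of a separate ioremap() of the VF BAR2. A
minimal sketch of the address calculation, using the constants this
patch adds (the helper name below is only for illustration and is
not part of the patch):

	/*
	 * Per-VF migration window inside the PF BAR2 mapping. For
	 * example, vf_id 2 resolves to:
	 * pf_io_base + 0x180000 + 2 * 0x2000 = pf_io_base + 0x184000
	 */
	static void __iomem *vf_mig_io_base(void __iomem *pf_io_base, u32 vf_id)
	{
		return pf_io_base + QM_MIG_REGION_OFFSET +
		       vf_id * QM_MIG_REGION_SIZE;
	}
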
> diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> index 515ff87f9ed9..bf4a7468bca0 100644
> --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
> @@ -125,6 +125,72 @@ static int qm_get_cqc(struct hisi_qm *qm, u64 *addr)
> return 0;
> }
>
> +static int qm_get_xqc_regs(struct hisi_acc_vf_core_device *hisi_acc_vdev,
> + struct acc_vf_data *vf_data)
> +{
> + struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
> + struct device *dev = &qm->pdev->dev;
> + u32 eqc_addr, aeqc_addr;
> + int ret;
> +
> + if (qm->ver == QM_HW_V3) {
> + eqc_addr = QM_EQC_DW0;
> + aeqc_addr = QM_AEQC_DW0;
> + } else {
> + eqc_addr = QM_EQC_PF_DW0;
> + aeqc_addr = QM_AEQC_PF_DW0;
> + }
> +
> + /* QM_EQC_DW has 7 regs */
> + ret = qm_read_regs(qm, eqc_addr, vf_data->qm_eqc_dw, 7);
> + if (ret) {
> + dev_err(dev, "failed to read QM_EQC_DW\n");
> + return ret;
> + }
> +
> + /* QM_AEQC_DW has 7 regs */
> + ret = qm_read_regs(qm, aeqc_addr, vf_data->qm_aeqc_dw, 7);
> + if (ret) {
> + dev_err(dev, "failed to read QM_AEQC_DW\n");
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> +static int qm_set_xqc_regs(struct hisi_acc_vf_core_device *hisi_acc_vdev,
> + struct acc_vf_data *vf_data)
> +{
> + struct hisi_qm *qm = &hisi_acc_vdev->vf_qm;
> + struct device *dev = &qm->pdev->dev;
> + u32 eqc_addr, aeqc_addr;
> + int ret;
> +
> + if (qm->ver == QM_HW_V3) {
> + eqc_addr = QM_EQC_DW0;
> + aeqc_addr = QM_AEQC_DW0;
> + } else {
> + eqc_addr = QM_EQC_PF_DW0;
> + aeqc_addr = QM_AEQC_PF_DW0;
> + }
> +
> + /* QM_EQC_DW has 7 regs */
> + ret = qm_write_regs(qm, eqc_addr, vf_data->qm_eqc_dw, 7);
> + if (ret) {
> + dev_err(dev, "failed to write QM_EQC_DW\n");
> + return ret;
> + }
> +
> + /* QM_AEQC_DW has 7 regs */
> + ret = qm_write_regs(qm, aeqc_addr, vf_data->qm_aeqc_dw, 7);
> + if (ret) {
> + dev_err(dev, "failed to write QM_AEQC_DW\n");
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
> {
> struct device *dev = &qm->pdev->dev;
> @@ -167,20 +233,6 @@ static int qm_get_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
> return ret;
> }
>
> - /* QM_EQC_DW has 7 regs */
> - ret = qm_read_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
> - if (ret) {
> - dev_err(dev, "failed to read QM_EQC_DW\n");
> - return ret;
> - }
> -
> - /* QM_AEQC_DW has 7 regs */
> - ret = qm_read_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
> - if (ret) {
> - dev_err(dev, "failed to read QM_AEQC_DW\n");
> - return ret;
> - }
> -
> return 0;
> }
>
> @@ -239,20 +291,6 @@ static int qm_set_regs(struct hisi_qm *qm, struct acc_vf_data *vf_data)
> return ret;
> }
>
> - /* QM_EQC_DW has 7 regs */
> - ret = qm_write_regs(qm, QM_EQC_DW0, vf_data->qm_eqc_dw, 7);
> - if (ret) {
> - dev_err(dev, "failed to write QM_EQC_DW\n");
> - return ret;
> - }
> -
> - /* QM_AEQC_DW has 7 regs */
> - ret = qm_write_regs(qm, QM_AEQC_DW0, vf_data->qm_aeqc_dw, 7);
> - if (ret) {
> - dev_err(dev, "failed to write QM_AEQC_DW\n");
> - return ret;
> - }
> -
> return 0;
> }
>
> @@ -522,6 +560,10 @@ static int vf_qm_load_data(struct hisi_acc_vf_core_device *hisi_acc_vdev,
> return ret;
> }
>
> + ret = qm_set_xqc_regs(hisi_acc_vdev, vf_data);
> + if (ret)
> + return ret;
> +
> ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
> if (ret) {
> dev_err(dev, "set sqc failed\n");
> @@ -589,6 +631,10 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
> vf_data->vf_qm_state = QM_READY;
> hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;
>
> + ret = qm_get_xqc_regs(hisi_acc_vdev, vf_data);
> + if (ret)
> + return ret;
> +
> ret = vf_qm_read_data(vf_qm, vf_data);
> if (ret)
> return ret;
> @@ -1186,34 +1232,45 @@ static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)
> {
> struct vfio_pci_core_device *vdev = &hisi_acc_vdev->core_device;
> struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;
> + struct hisi_qm *pf_qm = hisi_acc_vdev->pf_qm;
> struct pci_dev *vf_dev = vdev->pdev;
>
> - /*
> - * ACC VF dev BAR2 region consists of both functional register space
> - * and migration control register space. For migration to work, we
> - * need access to both. Hence, we map the entire BAR2 region here.
> - * But unnecessarily exposing the migration BAR region to the Guest
> - * has the potential to prevent/corrupt the Guest migration. Hence,
> - * we restrict access to the migration control space from
> - * Guest(Please see mmap/ioctl/read/write override functions).
> - *
> - * Please note that it is OK to expose the entire VF BAR if migration
> - * is not supported or required as this cannot affect the ACC PF
> - * configurations.
> - *
> - * Also the HiSilicon ACC VF devices supported by this driver on
> - * HiSilicon hardware platforms are integrated end point devices
> - * and the platform lacks the capability to perform any PCIe P2P
> - * between these devices.
> - */
> -
> - vf_qm->io_base =
> - ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
> - pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
> - if (!vf_qm->io_base)
> - return -EIO;
> + if (pf_qm->ver == QM_HW_V3) {
> + /*
> + * ACC VF dev BAR2 region consists of both functional register space
> + * and migration control register space. For migration to work, we
> + * need access to both. Hence, we map the entire BAR2 region here.
> + * But unnecessarily exposing the migration BAR region to the Guest
> + * has the potential to prevent/corrupt the Guest migration. Hence,
> + * we restrict access to the migration control space from
> + * Guest(Please see mmap/ioctl/read/write override functions).
> + *
> + * Please note that it is OK to expose the entire VF BAR if migration
> + * is not supported or required as this cannot affect the ACC PF
> + * configurations.
> + *
> + * Also the HiSilicon ACC VF devices supported by this driver on
> + * HiSilicon hardware platforms are integrated end point devices
> + * and the platform lacks the capability to perform any PCIe P2P
> + * between these devices.
> + */
>
> + vf_qm->io_base =
> + ioremap(pci_resource_start(vf_dev, VFIO_PCI_BAR2_REGION_INDEX),
> + pci_resource_len(vf_dev, VFIO_PCI_BAR2_REGION_INDEX));
> + if (!vf_qm->io_base)
> + return -EIO;
> + } else {
> + /*
> + * On hardware platforms newer than QM_HW_V3, the migration
> + * registers are located in the PF's BAR2 configuration region,
> + * and each VF device occupies an 8KB window within it.
> + */
> + vf_qm->io_base = pf_qm->io_base + QM_MIG_REGION_OFFSET +
> + hisi_acc_vdev->vf_id * QM_MIG_REGION_SIZE;
> + }
> vf_qm->fun_type = QM_HW_VF;
> + vf_qm->ver = pf_qm->ver;
> vf_qm->pdev = vf_dev;
> mutex_init(&vf_qm->mailbox_lock);
>
> @@ -1539,7 +1596,8 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
> hisi_acc_vf_disable_fds(hisi_acc_vdev);
> mutex_lock(&hisi_acc_vdev->open_mutex);
> hisi_acc_vdev->dev_opened = false;
> - iounmap(vf_qm->io_base);
> + if (vf_qm->ver == QM_HW_V3)
> + iounmap(vf_qm->io_base);
> mutex_unlock(&hisi_acc_vdev->open_mutex);
> vfio_pci_core_close_device(core_vdev);
> }
> diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
> index 91002ceeebc1..348f8bb5b42c 100644
> --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
> +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
> @@ -59,6 +59,13 @@
> #define ACC_DEV_MAGIC_V1 0XCDCDCDCDFEEDAACC
> #define ACC_DEV_MAGIC_V2 0xAACCFEEDDECADEDE
>
> +#define QM_MIG_REGION_OFFSET 0x180000
> +#define QM_MIG_REGION_SIZE 0x2000
> +
> +#define QM_SUB_VERSION_ID 0x100210
> +#define QM_EQC_PF_DW0 0x1c00
> +#define QM_AEQC_PF_DW0 0x1c20
> +
> struct acc_vf_data {
> #define QM_MATCH_SIZE offsetofend(struct acc_vf_data, qm_rsv_state)
> /* QM match information */
>
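
A possible follow-up cleanup, since the version-dependent offset
selection is duplicated in qm_get_xqc_regs() and qm_set_xqc_regs():
it could be factored into a small helper. Just a sketch, assuming
the existing QM_HW_V3/QM_EQC_DW0/QM_AEQC_DW0 definitions (the helper
name is only for illustration):

	/* Pick the EQC/AEQC dword base offsets for this hardware version. */
	static void qm_xqc_addrs(struct hisi_qm *qm, u32 *eqc_addr, u32 *aeqc_addr)
	{
		if (qm->ver == QM_HW_V3) {
			*eqc_addr = QM_EQC_DW0;
			*aeqc_addr = QM_AEQC_DW0;
		} else {
			*eqc_addr = QM_EQC_PF_DW0;
			*aeqc_addr = QM_AEQC_PF_DW0;
		}
	}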