[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <73fbb369-1bd7-db91-9868-a8ac42f5dd68@huawei.com>
Date: Fri, 27 Dec 2024 15:24:17 +0800
From: liulongfang <liulongfang@...wei.com>
To: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@...wei.com>,
"alex.williamson@...hat.com" <alex.williamson@...hat.com>, "jgg@...dia.com"
<jgg@...dia.com>, Jonathan Cameron <jonathan.cameron@...wei.com>
CC: "kvm@...r.kernel.org" <kvm@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linuxarm@...neuler.org" <linuxarm@...neuler.org>
Subject: Re: [PATCH v2 1/5] hisi_acc_vfio_pci: fix XQE dma address error
On 2024/12/19 18:01, Shameerali Kolothum Thodi wrote:
>
>
>> -----Original Message-----
>> From: liulongfang <liulongfang@...wei.com>
>> Sent: Thursday, December 19, 2024 9:18 AM
>> To: alex.williamson@...hat.com; jgg@...dia.com; Shameerali Kolothum
>> Thodi <shameerali.kolothum.thodi@...wei.com>; Jonathan Cameron
>> <jonathan.cameron@...wei.com>
>> Cc: kvm@...r.kernel.org; linux-kernel@...r.kernel.org;
>> linuxarm@...neuler.org; liulongfang <liulongfang@...wei.com>
>> Subject: [PATCH v2 1/5] hisi_acc_vfio_pci: fix XQE dma address error
>>
>> The dma addresses of EQE and AEQE are wrong after migration and
>> result in guest kernel-mode encryption service failures.
>> Comparing the definition of hardware registers, we found that
>> there was an error when the data read from the register was
>> combined into an address. Therefore, the address combination
>> sequence needs to be corrected.
>>
>> Even after fixing the above problem, we still have an issue
>> where a Guest from an old kernel can get migrated to a
>> new kernel and may result in wrong data.
>>
>> In order to ensure that the address is correct after migration,
>> if an old magic number is detected, the dma address needs to be
>> updated.
>>
>> Fixes: b0eed085903e ("hisi_acc_vfio_pci: Add support for VFIO live
>> migration")
>> Signed-off-by: Longfang Liu <liulongfang@...wei.com>
>> ---
>> .../vfio/pci/hisilicon/hisi_acc_vfio_pci.c | 34 +++++++++++++++----
>> .../vfio/pci/hisilicon/hisi_acc_vfio_pci.h | 9 ++++-
>> 2 files changed, 36 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
>> b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
>> index 451c639299eb..8518efea3a52 100644
>> --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
>> +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
>> @@ -350,6 +350,27 @@ static int vf_qm_func_stop(struct hisi_qm *qm)
>> return hisi_qm_mb(qm, QM_MB_CMD_PAUSE_QM, 0, 0, 0);
>> }
>>
>> +static int vf_qm_magic_check(struct acc_vf_data *vf_data)
>> +{
>> + switch (vf_data->acc_magic) {
>> + case ACC_DEV_MAGIC_V2:
>> + break;
>> + case ACC_DEV_MAGIC_V1:
>> + /* Correct dma address */
>> + vf_data->eqe_dma = vf_data-
>>> qm_eqc_dw[QM_XQC_ADDR_HIGH];
>> + vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
>> + vf_data->eqe_dma |= vf_data-
>>> qm_eqc_dw[QM_XQC_ADDR_LOW];
>> + vf_data->aeqe_dma = vf_data-
>>> qm_aeqc_dw[QM_XQC_ADDR_HIGH];
>> + vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
>> + vf_data->aeqe_dma |= vf_data-
>>> qm_aeqc_dw[QM_XQC_ADDR_LOW];
>> + break;
>> + default:
>> + return -EINVAL;
>> + }
>> +
>> + return 0;
>> +}
>> +
>> static int vf_qm_check_match(struct hisi_acc_vf_core_device
>> *hisi_acc_vdev,
>> struct hisi_acc_vf_migration_file *migf)
>> {
>> @@ -363,7 +384,8 @@ static int vf_qm_check_match(struct
>> hisi_acc_vf_core_device *hisi_acc_vdev,
>> if (migf->total_length < QM_MATCH_SIZE || hisi_acc_vdev-
>>> match_done)
>> return 0;
>>
>> - if (vf_data->acc_magic != ACC_DEV_MAGIC) {
>> + ret = vf_qm_magic_check(vf_data);
>> + if (ret) {
>> dev_err(dev, "failed to match ACC_DEV_MAGIC\n");
>> return -EINVAL;
>> }
>> @@ -418,7 +440,7 @@ static int vf_qm_get_match_data(struct
>> hisi_acc_vf_core_device *hisi_acc_vdev,
>> int vf_id = hisi_acc_vdev->vf_id;
>> int ret;
>>
>> - vf_data->acc_magic = ACC_DEV_MAGIC;
>> + vf_data->acc_magic = ACC_DEV_MAGIC_V2;
>> /* Save device id */
>> vf_data->dev_id = hisi_acc_vdev->vf_dev->device;
>>
>> @@ -496,12 +518,12 @@ static int vf_qm_read_data(struct hisi_qm
>> *vf_qm, struct acc_vf_data *vf_data)
>> return -EINVAL;
>>
>> /* Every reg is 32 bit, the dma address is 64 bit. */
>> - vf_data->eqe_dma = vf_data->qm_eqc_dw[1];
>> + vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
>> vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
>> - vf_data->eqe_dma |= vf_data->qm_eqc_dw[0];
>> - vf_data->aeqe_dma = vf_data->qm_aeqc_dw[1];
>> + vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
>> + vf_data->aeqe_dma = vf_data-
>>> qm_aeqc_dw[QM_XQC_ADDR_HIGH];
>> vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
>> - vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[0];
>> + vf_data->aeqe_dma |= vf_data-
>>> qm_aeqc_dw[QM_XQC_ADDR_LOW];
>>
>> /* Through SQC_BT/CQC_BT to get sqc and cqc address */
>> ret = qm_get_sqc(vf_qm, &vf_data->sqc_dma);
>> diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
>> b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
>> index 245d7537b2bc..2afce68f5a34 100644
>> --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
>> +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h
>> @@ -39,6 +39,9 @@
>> #define QM_REG_ADDR_OFFSET 0x0004
>>
>> #define QM_XQC_ADDR_OFFSET 32U
>> +#define QM_XQC_ADDR_LOW 0x1
>> +#define QM_XQC_ADDR_HIGH 0x2
>> +
>> #define QM_VF_AEQ_INT_MASK 0x0004
>> #define QM_VF_EQ_INT_MASK 0x000c
>> #define QM_IFC_INT_SOURCE_V 0x0020
>> @@ -50,10 +53,14 @@
>> #define QM_EQC_DW0 0X8000
>> #define QM_AEQC_DW0 0X8020
>>
>> +enum acc_magic_num {
>> + ACC_DEV_MAGIC_V1 = 0XCDCDCDCDFEEDAACC,
>> + ACC_DEV_MAGIC_V2 = 0xAACCFEEDDECADEDE,
>
>
> I think we have discussed this before that having some kind of
> version info embed into magic_num will be beneficial going
> forward. ie, may be use the last 4 bytes for denoting version.
>
> ACC_DEV_MAGIC_V2 = 0xAACCFEEDDECA0002
>
OK, so subsequent magic numbers no longer need to be random values,
but should instead use predictable, well-defined values.
Thanks,
Longfang.
> The reason being, otherwise we have to come up with a random
> magic each time when a fix like this is required in future.
>
> Thanks,
> Shameer
>
> .
>
Powered by blists - more mailing lists