lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <DM6PR04MB65750EE12401C4F69B352E5CFC909@DM6PR04MB6575.namprd04.prod.outlook.com>
Date:   Fri, 22 Jul 2022 07:31:06 +0000
From:   Avri Altman <Avri.Altman@....com>
To:     Can Guo <quic_cang@...cinc.com>,
        "bvanassche@....org" <bvanassche@....org>,
        "stanley.chu@...iatek.com" <stanley.chu@...iatek.com>,
        "adrian.hunter@...el.com" <adrian.hunter@...el.com>,
        "alim.akhtar@...sung.com" <alim.akhtar@...sung.com>,
        "beanhuo@...ron.com" <beanhuo@...ron.com>,
        "quic_asutoshd@...cinc.com" <quic_asutoshd@...cinc.com>,
        "quic_nguyenb@...cinc.com" <quic_nguyenb@...cinc.com>,
        "quic_ziqichen@...cinc.com" <quic_ziqichen@...cinc.com>,
        "linux-scsi@...r.kernel.org" <linux-scsi@...r.kernel.org>,
        "kernel-team@...roid.com" <kernel-team@...roid.com>
CC:     "James E.J. Bottomley" <jejb@...ux.ibm.com>,
        "Martin K. Petersen" <martin.petersen@...cle.com>,
        Daejun Park <daejun7.park@...sung.com>,
        Jinyoung Choi <j-young.choi@...sung.com>,
        Kiwoong Kim <kwmad.kim@...sung.com>,
        open list <linux-kernel@...r.kernel.org>
Subject: RE: [PATCH 1/2] scsi: ufs: Add Multi-Circular Queue support

> +static int ufshcd_mcq_config_resource(struct ufs_hba *hba)
> +{
> +       struct platform_device *pdev = to_platform_device(hba->dev);
> +       struct ufshcd_res_info_t *res;
> +       struct resource *res_mem, *res_mcq;
> +       int i, ret = 0;
> +
> +       memcpy(hba->res, ufshcd_res_info, sizeof(ufshcd_res_info));
> +
> +       for (i = 0; i < RES_MAX; i++) {
> +               res = &hba->res[i];
> +
> +               res->resource = platform_get_resource_byname(pdev,
> +                                                            IORESOURCE_MEM,
> +                                                            res->name);
> +               if (!res->resource) {
> +                       dev_info(hba->dev, "Resource %s not provided\n", res-
> >name);
> +                       if (i == RES_MEM)
> +                               return -ENOMEM;
> +                       continue;
> +               } else if (i == RES_MEM) {
> +                       res_mem = res->resource;
> +                       res->base = hba->mmio_base;
> +                       continue;
> +               }
> +
> +               res->base = devm_ioremap_resource(hba->dev, res->resource);
> +               if (IS_ERR(res->base)) {
> +                       dev_err(hba->dev, "Failed to map res %s, err = %d\n",
> +                                        res->name, (int)PTR_ERR(res->base));
> +                       res->base = NULL;
> +                       ret = PTR_ERR(res->base);
> +                       goto out_err;
> +               }
> +       }
> +
> +       res = &hba->res[RES_MCQ];
> +       /* MCQ resource provided */
> +       if (res->base)
> +               goto out;
> +
> +       /* Manually allocate MCQ resource */
Did you consider making the MCQ configuration mandatory (i.e., requiring it to be provided)?

> +       res_mcq = res->resource;
> +       res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
> +       if (!res_mcq) {
> +               dev_err(hba->dev, "Failed to alloate MCQ resource\n");
> +               goto out_err;
> +       }
> +       res->is_alloc = true;
> +
> +       res_mcq->start = res_mem->start +
> +                        mcq_sqattr_offset(hba->mcq_capabilities);
> +       res_mcq->end = res_mcq->start + 32 * MCQ_QCFG_SIZE - 1;
Shouldn't there be up to MCQCap.MAXQ queues, rather than a hard-coded 32?


> +int ufshcd_mcq_init(struct ufs_hba *hba)
> +{
> +       struct Scsi_Host *host = hba->host;
> +       struct ufs_hw_queue *hwq;
> +       int i, ret = 0;
> +
> +       if (!is_mcq_supported(hba))
> +               return 0;
> +
> +       ret = ufshcd_mcq_config_resource(hba);
> +       if (ret) {
> +               dev_err(hba->dev, "Failed to config MCQ resource\n");
> +               return ret;
> +       }
> +
> +       ret = ufshcd_vops_config_mcq_rop(hba);
> +       if (ret) {
> +               dev_err(hba->dev, "MCQ Runtime Operation Pointers not
> configured\n");
> +               goto out_err;
> +       }
> +
> +       hba->nr_queues[HCTX_TYPE_DEFAULT] = num_possible_cpus();
> +       hba->nr_queues[HCTX_TYPE_READ] = 0;
> +       hba->nr_queues[HCTX_TYPE_POLL] = 1;
> +
> +       for (i = 0; i < HCTX_MAX_TYPES; i++)
> +               host->nr_hw_queues += hba->nr_queues[i];
> +
> +       host->can_queue = hba->nutrs;
> +       host->cmd_per_lun = hba->nutrs;
> +
> +       /* One more reserved for dev_cmd_queue */
> +       hba->nr_hw_queues = host->nr_hw_queues + 1;
Is it possible, since the MCQ memory space is *added* to the UTR & UTMR lists,
that we'll keep using the legacy doorbell for query commands?
Wouldn't that simplify the hw_queue bookkeeping?


> -#define ufshcd_hex_dump(prefix_str, buf, len) do {                       \
> -       size_t __len = (len);                                            \
> -       print_hex_dump(KERN_ERR, prefix_str,                             \
> -                      __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
> -                      16, 4, buf, __len, false);                        \
> +#define ufshcd_hex_dump(prefix_str, buf, len) do {                     \
> +       size_t __len = (len);                                           \
> +                                                                       \
> +       print_hex_dump(KERN_ERR, prefix_str,                            \
> +                      __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, \
> +                      16, 4, buf, __len, false);                       \
> +                                                                       \
>  } while (0)
Should this be part of this patch?

> +#define UFSHCD_MCQ_IO_QUEUE_OFFSET     1
Maybe add a comment above, e.g. "queue 0 is reserved for query commands" or similar —
that is, if the query commands don't use the legacy doorbell.

> +static inline bool ufshcd_is_hwq_full(struct ufs_hw_queue *q)
> +{
> +       return (q->sq_hp_slot == ((q->sq_tp_slot + 1) %
> +                                     q->max_entries));
> +}
Isn't sq_tp_slot already taken modulo q->max_entries?


Thanks,
Avri

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ