Message-ID: <20221129152809.GE4931@workstation>
Date: Tue, 29 Nov 2022 20:58:09 +0530
From: Manivannan Sadhasivam <mani@...nel.org>
To: Asutosh Das <quic_asutoshd@...cinc.com>
Cc: quic_cang@...cinc.com, martin.petersen@...cle.com,
linux-scsi@...r.kernel.org, quic_nguyenb@...cinc.com,
quic_xiaosenh@...cinc.com, stanley.chu@...iatek.com,
eddie.huang@...iatek.com, daejun7.park@...sung.com,
bvanassche@....org, avri.altman@....com, beanhuo@...ron.com,
linux-arm-msm@...r.kernel.org,
Alim Akhtar <alim.akhtar@...sung.com>,
"James E.J. Bottomley" <jejb@...ux.ibm.com>,
Jinyoung Choi <j-young.choi@...sung.com>,
Arthur Simchaev <Arthur.Simchaev@....com>,
open list <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v6 05/16] ufs: core: mcq: Add support to allocate
multiple queues

On Mon, Nov 28, 2022 at 05:20:46PM -0800, Asutosh Das wrote:
> Multi-circular queue (MCQ) has been added in the UFSHC v4.0
> standard in addition to the Single Doorbell mode.
> The MCQ mode supports multiple submission and completion queues.
> Add support to allocate and configure the queues.
> Also add module parameters to configure the queue counts.
>
> Co-developed-by: Can Guo <quic_cang@...cinc.com>
> Signed-off-by: Can Guo <quic_cang@...cinc.com>
> Signed-off-by: Asutosh Das <quic_asutoshd@...cinc.com>
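
One small illustration for the archive, since the commit message
mentions the new module parameters: on the kernel command line these
would be set as something like

    ufshcd_core.rw_queues=4 ufshcd_core.read_queues=2 ufshcd_core.poll_queues=1

to ask for 4 interrupt driven rw queues, 2 read queues and 1 poll queue
on top of the single device command queue the driver always reserves.
(The "ufshcd_core." prefix here is an assumption based on the default
MODULE_PARAM_PREFIX for the ufshcd-core object; adjust to whatever
prefix the build actually produces.)
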
Reviewed-by: Manivannan Sadhasivam <mani@...nel.org>

Thanks,
Mani

> Reviewed-by: Bart Van Assche <bvanassche@....org>
> ---
> drivers/ufs/core/Makefile | 2 +-
> drivers/ufs/core/ufs-mcq.c | 125 +++++++++++++++++++++++++++++++++++++++++
> drivers/ufs/core/ufshcd-priv.h | 1 +
> drivers/ufs/core/ufshcd.c | 12 ++++
> include/ufs/ufshcd.h | 4 ++
> 5 files changed, 143 insertions(+), 1 deletion(-)
> create mode 100644 drivers/ufs/core/ufs-mcq.c
>
> diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
> index 62f38c5..4d02e0f 100644
> --- a/drivers/ufs/core/Makefile
> +++ b/drivers/ufs/core/Makefile
> @@ -1,7 +1,7 @@
> # SPDX-License-Identifier: GPL-2.0
>
> obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
> -ufshcd-core-y += ufshcd.o ufs-sysfs.o
> +ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
> ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
> ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
> ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
> diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
> new file mode 100644
> index 0000000..bf08ec5
> --- /dev/null
> +++ b/drivers/ufs/core/ufs-mcq.c
> @@ -0,0 +1,125 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
> + *
> + * Authors:
> + * Asutosh Das <quic_asutoshd@...cinc.com>
> + * Can Guo <quic_cang@...cinc.com>
> + */
> +
> +#include <asm/unaligned.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/module.h>
> +#include <linux/platform_device.h>
> +#include "ufshcd-priv.h"
> +
> +#define MAX_QUEUE_SUP GENMASK(7, 0)
> +#define UFS_MCQ_MIN_RW_QUEUES 2
> +#define UFS_MCQ_MIN_READ_QUEUES 0
> +#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
> +#define UFS_MCQ_MIN_POLL_QUEUES 0
> +
> +static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
> +{
> + return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
> + num_possible_cpus());
> +}
> +
> +static const struct kernel_param_ops rw_queue_count_ops = {
> + .set = rw_queue_count_set,
> + .get = param_get_uint,
> +};
> +
> +static unsigned int rw_queues;
> +module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
> +MODULE_PARM_DESC(rw_queues,
> + "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");
> +
> +static int read_queue_count_set(const char *val, const struct kernel_param *kp)
> +{
> + return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
> + num_possible_cpus());
> +}
> +
> +static const struct kernel_param_ops read_queue_count_ops = {
> + .set = read_queue_count_set,
> + .get = param_get_uint,
> +};
> +
> +static unsigned int read_queues;
> +module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
> +MODULE_PARM_DESC(read_queues,
> + "Number of interrupt driven read queues used for read. Default value is 0");
> +
> +static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
> +{
> + return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
> + num_possible_cpus());
> +}
> +
> +static const struct kernel_param_ops poll_queue_count_ops = {
> + .set = poll_queue_count_set,
> + .get = param_get_uint,
> +};
> +
> +static unsigned int poll_queues = 1;
> +module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
> +MODULE_PARM_DESC(poll_queues,
> + "Number of poll queues used for r/w. Default value is 1");
> +
> +static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
> +{
> + int i;
> + u32 hba_maxq, rem, tot_queues;
> + struct Scsi_Host *host = hba->host;
> +
> + hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities);
> +
> + tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
> + rw_queues;
> +
> + if (hba_maxq < tot_queues) {
> + dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
> + tot_queues, hba_maxq);
> + return -EOPNOTSUPP;
> + }
> +
> + rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;
> +
> + if (rw_queues) {
> + hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
> + rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
> + } else {
> + rw_queues = num_possible_cpus();
> + }
> +
> + if (poll_queues) {
> + hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
> + rem -= hba->nr_queues[HCTX_TYPE_POLL];
> + }
> +
> + if (read_queues) {
> + hba->nr_queues[HCTX_TYPE_READ] = read_queues;
> + rem -= hba->nr_queues[HCTX_TYPE_READ];
> + }
> +
> + if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
> + hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
> + num_possible_cpus());
> +
> + for (i = 0; i < HCTX_MAX_TYPES; i++)
> + host->nr_hw_queues += hba->nr_queues[i];
> +
> + hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
> + return 0;
> +}
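
To make the math concrete, a worked example assuming an 8-CPU system,
hba_maxq = 32 and the default parameters (rw_queues unset, read_queues
= 0, poll_queues = 1):

    tot_queues = 1 + 0 + 1 + 0 = 2          (fits within 32)
    rem = 32 - 1 = 31
    rw_queues defaults to num_possible_cpus() = 8
    nr_queues[HCTX_TYPE_POLL] = 1, rem = 30
    nr_queues[HCTX_TYPE_READ] stays 0
    nr_queues[HCTX_TYPE_DEFAULT] = min3(30, 8, 8) = 8
    host->nr_hw_queues = 8 + 0 + 1 = 9
    hba->nr_hw_queues = 9 + 1 = 10          (incl. the dev cmd queue)
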
> +
> +int ufshcd_mcq_init(struct ufs_hba *hba)
> +{
> + int ret;
> +
> + ret = ufshcd_mcq_config_nr_queues(hba);
> +
> + return ret;
> +}
> +
> diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
> index a9e8e1f..9368ba2 100644
> --- a/drivers/ufs/core/ufshcd-priv.h
> +++ b/drivers/ufs/core/ufshcd-priv.h
> @@ -61,6 +61,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
> int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
> enum flag_idn idn, u8 index, bool *flag_res);
> void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
> +int ufshcd_mcq_init(struct ufs_hba *hba);
>
> #define SD_ASCII_STD true
> #define SD_RAW false
> diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
> index 3c2220c..9b78814 100644
> --- a/drivers/ufs/core/ufshcd.c
> +++ b/drivers/ufs/core/ufshcd.c
> @@ -8220,6 +8220,11 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
> return ret;
> }
>
> +static int ufshcd_alloc_mcq(struct ufs_hba *hba)
> +{
> + return ufshcd_mcq_init(hba);
> +}
> +
> /**
> * ufshcd_probe_hba - probe hba to detect device and initialize it
> * @hba: per-adapter instance
> @@ -8269,6 +8274,13 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
> goto out;
>
> if (is_mcq_supported(hba)) {
> + ret = ufshcd_alloc_mcq(hba);
> + if (ret) {
> + /* Continue with SDB mode */
> + use_mcq_mode = false;
> + dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
> + ret);
> + }
> ret = scsi_add_host(host, hba->dev);
> if (ret) {
> dev_err(hba->dev, "scsi_add_host failed\n");
> diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
> index 70c0f9f..146b613 100644
> --- a/include/ufs/ufshcd.h
> +++ b/include/ufs/ufshcd.h
> @@ -829,6 +829,8 @@ struct ufs_hba_monitor {
> * ee_ctrl_mask
> * @luns_avail: number of regular and well known LUNs supported by the UFS
> * device
> + * @nr_hw_queues: number of hardware queues configured
> + * @nr_queues: number of Queues of different queue types
> * @complete_put: whether or not to call ufshcd_rpm_put() from inside
> * ufshcd_resume_complete()
> * @ext_iid_sup: is EXT_IID is supported by UFSHC
> @@ -981,6 +983,8 @@ struct ufs_hba {
> u32 debugfs_ee_rate_limit_ms;
> #endif
> u32 luns_avail;
> + unsigned int nr_hw_queues;
> + unsigned int nr_queues[HCTX_MAX_TYPES];
> bool complete_put;
> bool ext_iid_sup;
> bool mcq_sup;
> --
> 2.7.4
>