Message-ID: <581352225de29859544b88f95ae5de89@manjaro.org>
Date: Tue, 30 Apr 2024 03:17:49 +0200
From: Dragan Simic <dsimic@...jaro.org>
To: Ulf Hansson <ulf.hansson@...aro.org>
Cc: linux-mmc@...r.kernel.org, Adrian Hunter <adrian.hunter@...el.com>,
 Avri Altman <avri.altman@....com>, Felix Qin <xiaokeqinhealth@....com>,
 linux-kernel@...r.kernel.org
Subject: Re: [PATCH] mmc: core: Convert to use __mmc_poll_for_busy() SD_APP_OP_COND too
Hello Ulf,
Please see my comment below.
On 2024-04-25 15:30, Ulf Hansson wrote:
> Similar to what has already been changed for eMMC and the MMC_SEND_OP_COND
> (CMD1), let's convert the SD_APP_OP_COND (ACMD41) for SD cards to use the
> common __mmc_poll_for_busy() too.
>
> This change means the initial delay period, that starts as 10ms, will now
> increase for every loop when being busy. The total accepted timeout for
> being busy is 1s, which is according to the SD spec.
>
> Signed-off-by: Ulf Hansson <ulf.hansson@...aro.org>
> ---
> drivers/mmc/core/sd_ops.c | 77 +++++++++++++++++++++++++--------------
> 1 file changed, 50 insertions(+), 27 deletions(-)
>
> diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
> index a59cd592f06e..3ce1ff336826 100644
> --- a/drivers/mmc/core/sd_ops.c
> +++ b/drivers/mmc/core/sd_ops.c
> @@ -19,6 +19,15 @@
> #include "sd_ops.h"
> #include "mmc_ops.h"
>
> +#define SD_APP_OP_COND_PERIOD_US (10 * 1000) /* 10ms */
> +#define SD_APP_OP_COND_TIMEOUT_MS 1000 /* 1s */
> +
> +struct sd_app_op_cond_busy_data {
> + struct mmc_host *host;
> + u32 ocr;
> + struct mmc_command *cmd;
> +};
> +
> int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)
> {
> int err;
> @@ -115,10 +124,44 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
> return mmc_wait_for_app_cmd(card->host, card, &cmd);
> }
>
> +static int sd_app_op_cond_cb(void *cb_data, bool *busy)
> +{
> + struct sd_app_op_cond_busy_data *data = cb_data;
> + struct mmc_host *host = data->host;
> + struct mmc_command *cmd = data->cmd;
> + u32 ocr = data->ocr;
> + int err;
Minor nitpick... An empty line should be added here, to
separate the variable definitions from the subsequent code.
Otherwise, the patch is looking good to me, so please include my
Reviewed-by: Dragan Simic <dsimic@...jaro.org>
> + *busy = false;
> +
> + err = mmc_wait_for_app_cmd(host, NULL, cmd);
> + if (err)
> + return err;
> +
> + /* If we're just probing, do a single pass. */
> + if (ocr == 0)
> + return 0;
> +
> + /* Wait until reset completes. */
> + if (mmc_host_is_spi(host)) {
> + if (!(cmd->resp[0] & R1_SPI_IDLE))
> + return 0;
> + } else if (cmd->resp[0] & MMC_CARD_BUSY) {
> + return 0;
> + }
> +
> + *busy = true;
> + return 0;
> +}
> +
> int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
> {
> struct mmc_command cmd = {};
> - int i, err = 0;
> + struct sd_app_op_cond_busy_data cb_data = {
> + .host = host,
> + .ocr = ocr,
> + .cmd = &cmd
> + };
> + int err;
>
> cmd.opcode = SD_APP_OP_COND;
> if (mmc_host_is_spi(host))
> @@ -127,36 +170,16 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
> cmd.arg = ocr;
> cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;
>
> - for (i = 100; i; i--) {
> - err = mmc_wait_for_app_cmd(host, NULL, &cmd);
> - if (err)
> - break;
> -
> - /* if we're just probing, do a single pass */
> - if (ocr == 0)
> - break;
> -
> - /* otherwise wait until reset completes */
> - if (mmc_host_is_spi(host)) {
> - if (!(cmd.resp[0] & R1_SPI_IDLE))
> - break;
> - } else {
> - if (cmd.resp[0] & MMC_CARD_BUSY)
> - break;
> - }
> -
> - err = -ETIMEDOUT;
> -
> - mmc_delay(10);
> - }
> -
> - if (!i)
> - pr_err("%s: card never left busy state\n", mmc_hostname(host));
> + err = __mmc_poll_for_busy(host, SD_APP_OP_COND_PERIOD_US,
> + SD_APP_OP_COND_TIMEOUT_MS, &sd_app_op_cond_cb,
> + &cb_data);
> + if (err)
> + return err;
>
> if (rocr && !mmc_host_is_spi(host))
> *rocr = cmd.resp[0];
>
> - return err;
> + return 0;
> }
>
> static int __mmc_send_if_cond(struct mmc_host *host, u32 ocr, u8 pcie_bits,
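
By the way, to illustrate the timing change described in the commit message
above, here is a rough, userspace-only model of polling with an increasing
period: it starts with a 10 ms delay, backs off on every retry while the
"card" reports busy, and gives up after a total budget of 1 s. The function
names, the doubling backoff and the fake callback below are my own
illustrative assumptions, not the actual __mmc_poll_for_busy()
implementation:

/*
 * Rough userspace model (not the kernel code) of busy polling with an
 * increasing period: 10 ms initial delay, assumed doubling backoff on
 * every retry, 1 s total timeout.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define OP_COND_PERIOD_US	(10 * 1000)	/* initial 10 ms period */
#define OP_COND_TIMEOUT_MS	1000		/* 1 s total budget */

/* Same shape as the busy callback in the patch: return 0 and set *busy. */
typedef int (*busy_cb_t)(void *cb_data, bool *busy);

static int poll_for_busy(unsigned int period_us, unsigned int timeout_ms,
			 busy_cb_t cb, void *cb_data)
{
	struct timespec start, now;
	long elapsed_ms;
	bool busy;
	int err;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		err = cb(cb_data, &busy);
		if (err)
			return err;
		if (!busy)
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
			     (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms > (long)timeout_ms)
			return -1;	/* would be -ETIMEDOUT in the kernel */

		usleep(period_us);
		period_us *= 2;		/* assumed backoff, details may differ */
	}
}

/* Fake "card" that stays busy for the first few polls. */
static int fake_card_cb(void *cb_data, bool *busy)
{
	int *polls_left = cb_data;

	*busy = (*polls_left)-- > 0;
	return 0;
}

int main(void)
{
	int polls_left = 3;
	int err = poll_for_busy(OP_COND_PERIOD_US, OP_COND_TIMEOUT_MS,
				fake_card_cb, &polls_left);

	printf("poll_for_busy() returned %d\n", err);
	return 0;
}

The increasing period is what cuts down the number of ACMD41 retries
compared with the old fixed 10 ms loop, while the 1 s cap keeps the worst
case aligned with the SD specification.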