[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <d583dc7e-b9c7-7288-ef7d-23ce7e9a7fc9@intel.com>
Date: Tue, 20 Apr 2021 17:02:36 +0300
From: Adrian Hunter <adrian.hunter@...el.com>
To: Avri Altman <avri.altman@....com>,
Ulf Hansson <ulf.hansson@...aro.org>, linux-mmc@...r.kernel.org
Cc: linux-kernel@...r.kernel.org, Brendan Peter <bpeter@...x.com>
Subject: Re: [PATCH v4 1/2] mmc: block: Issue flush only if allowed
On 20/04/21 4:46 pm, Avri Altman wrote:
> The cache may be flushed to the nonvolatile storage by writing to
> FLUSH_CACHE byte (EXT_CSD byte [32]). When in command queueing mode, the
> cache may be flushed by issuing a CMDQ_TASK_DEV_MGMT (CMD48) with a
> FLUSH_CACHE op-code. Either way, verify that the cache function is
> turned ON before doing so.
>
> Fixes: 1e8e55b67030 ("mmc: block: Add CQE support")
>
> Reported-by: Brendan Peter <bpeter@...x.com>
> Tested-by: Brendan Peter <bpeter@...x.com>
> Signed-off-by: Avri Altman <avri.altman@....com>
Acked-by: Adrian Hunter <adrian.hunter@...el.com>
> ---
> drivers/mmc/core/block.c | 9 +++++++++
> drivers/mmc/core/mmc.c | 2 +-
> drivers/mmc/core/mmc_ops.h | 5 +++++
> 3 files changed, 15 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
> index 8bfd4d95b386..24e1ecbdd510 100644
> --- a/drivers/mmc/core/block.c
> +++ b/drivers/mmc/core/block.c
> @@ -2186,6 +2186,11 @@ static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
> return mmc_blk_rw_wait(mq, NULL);
> }
>
> +static bool mmc_blk_cache_disabled(struct mmc_card *card)
> +{
> + return mmc_card_mmc(card) && !mmc_flush_allowed(card);
> +}
> +
> enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
> {
> struct mmc_blk_data *md = mq->blkdata;
> @@ -2225,6 +2230,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
> case MMC_ISSUE_ASYNC:
> switch (req_op(req)) {
> case REQ_OP_FLUSH:
> + if (mmc_blk_cache_disabled(mq->card)) {
> + blk_mq_end_request(req, BLK_STS_OK);
> + return MMC_REQ_FINISHED;
> + }
> ret = mmc_blk_cqe_issue_flush(mq, req);
> break;
> case REQ_OP_READ:
> diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
> index 9ad4aa537867..e3da62ffcb5e 100644
> --- a/drivers/mmc/core/mmc.c
> +++ b/drivers/mmc/core/mmc.c
> @@ -2037,7 +2037,7 @@ static int _mmc_flush_cache(struct mmc_card *card)
> {
> int err = 0;
>
> - if (card->ext_csd.cache_size > 0 && card->ext_csd.cache_ctrl & 1) {
> + if (mmc_flush_allowed(card)) {
> err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
> EXT_CSD_FLUSH_CACHE, 1,
> CACHE_FLUSH_TIMEOUT_MS);
> diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
> index 5782fdf4e8e9..2682bf66708a 100644
> --- a/drivers/mmc/core/mmc_ops.h
> +++ b/drivers/mmc/core/mmc_ops.h
> @@ -19,6 +19,11 @@ enum mmc_busy_cmd {
> struct mmc_host;
> struct mmc_card;
>
> +static inline bool mmc_flush_allowed(struct mmc_card *card)
> +{
> + return card->ext_csd.cache_size > 0 && card->ext_csd.cache_ctrl & 1;
> +}
> +
> int mmc_select_card(struct mmc_card *card);
> int mmc_deselect_cards(struct mmc_host *host);
> int mmc_set_dsr(struct mmc_host *host);
>
Powered by blists - more mailing lists