Message-Id: <20210510102005.975415818@linuxfoundation.org>
Date: Mon, 10 May 2021 12:17:11 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Brendan Peter <bpeter@...x.com>,
Avri Altman <avri.altman@....com>,
Adrian Hunter <adrian.hunter@...el.com>,
Ulf Hansson <ulf.hansson@...aro.org>
Subject: [PATCH 5.10 034/299] mmc: block: Issue a cache flush only when it's enabled

From: Avri Altman <avri.altman@....com>

commit 97fce126e279690105ee15be652b465fd96f9997 upstream.

In command queueing mode, the cache isn't flushed via the mmc_flush_cache()
function, but instead by issuing a CMDQ_TASK_MGMT (CMD48) with a
FLUSH_CACHE opcode. In this path, we need to check whether the cache has been
enabled before deciding to flush it, along the lines of what's being done in
mmc_flush_cache().
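
Roughly, the gate that mmc_flush_cache() applies today, and that the CQE path
is missing, looks like this (a sketch only; the helper name
cache_flush_wanted() is made up for illustration, the fields are the EXT_CSD
cache attributes used further down in this patch):

    /* The card advertises a cache (cache_size > 0) and the cache has
     * actually been switched on (cache_ctrl bit 0). */
    static bool cache_flush_wanted(struct mmc_card *card)
    {
        return mmc_card_mmc(card) &&
               card->ext_csd.cache_size > 0 &&
               (card->ext_csd.cache_ctrl & 1);
    }
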
To fix this problem, let's add a new bus ops callback ->cache_enabled() and
implement it for the mmc bus type. In this way, the mmc block device driver
can call it to know whether cache flushing should be done.
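
Condensed from the hunks below, the new plumbing ends up looking roughly like
this; bus types that do not implement ->cache_enabled() (e.g. SD here) simply
fall back to reporting false:

    /* core.h: dispatch to the attached bus type; no callback means no cache */
    static inline bool mmc_cache_enabled(struct mmc_host *host)
    {
        if (host->bus_ops->cache_enabled)
            return host->bus_ops->cache_enabled(host);

        return false;
    }

    /* mmc.c: the eMMC implementation reuses the same EXT_CSD cache attributes */
    static bool _mmc_cache_enabled(struct mmc_host *host)
    {
        return host->card->ext_csd.cache_size > 0 &&
               host->card->ext_csd.cache_ctrl & 1;
    }
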
Fixes: 1e8e55b67030 ("mmc: block: Add CQE support")
Cc: stable@...r.kernel.org
Reported-by: Brendan Peter <bpeter@...x.com>
Signed-off-by: Avri Altman <avri.altman@....com>
Tested-by: Brendan Peter <bpeter@...x.com>
Acked-by: Adrian Hunter <adrian.hunter@...el.com>
Link: https://lore.kernel.org/r/20210425060207.2591-2-avri.altman@wdc.com
Link: https://lore.kernel.org/r/20210425060207.2591-3-avri.altman@wdc.com
[Ulf: Squashed the two patches and made some minor updates]
Signed-off-by: Ulf Hansson <ulf.hansson@...aro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/mmc/core/block.c | 4 ++++
drivers/mmc/core/core.h | 9 +++++++++
drivers/mmc/core/mmc.c | 7 +++++++
drivers/mmc/core/mmc_ops.c | 4 +---
4 files changed, 21 insertions(+), 3 deletions(-)

--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2233,6 +2233,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(stru
case MMC_ISSUE_ASYNC:
switch (req_op(req)) {
case REQ_OP_FLUSH:
+ if (!mmc_cache_enabled(host)) {
+ blk_mq_end_request(req, BLK_STS_OK);
+ return MMC_REQ_FINISHED;
+ }
ret = mmc_blk_cqe_issue_flush(mq, req);
break;
case REQ_OP_READ:
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -29,6 +29,7 @@ struct mmc_bus_ops {
int (*shutdown)(struct mmc_host *);
int (*hw_reset)(struct mmc_host *);
int (*sw_reset)(struct mmc_host *);
+ bool (*cache_enabled)(struct mmc_host *);
};

void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -171,4 +172,12 @@ static inline void mmc_post_req(struct m
host->ops->post_req(host, mrq, err);
}

+static inline bool mmc_cache_enabled(struct mmc_host *host)
+{
+ if (host->bus_ops->cache_enabled)
+ return host->bus_ops->cache_enabled(host);
+
+ return false;
+}
+
#endif
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -2033,6 +2033,12 @@ static void mmc_detect(struct mmc_host *
}
}

+static bool _mmc_cache_enabled(struct mmc_host *host)
+{
+ return host->card->ext_csd.cache_size > 0 &&
+ host->card->ext_csd.cache_ctrl & 1;
+}
+
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
int err = 0;
@@ -2212,6 +2218,7 @@ static const struct mmc_bus_ops mmc_ops
.alive = mmc_alive,
.shutdown = mmc_shutdown,
.hw_reset = _mmc_hw_reset,
+ .cache_enabled = _mmc_cache_enabled,
};

/*
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -988,9 +988,7 @@ int mmc_flush_cache(struct mmc_card *car
{
int err = 0;

- if (mmc_card_mmc(card) &&
- (card->ext_csd.cache_size > 0) &&
- (card->ext_csd.cache_ctrl & 1)) {
+ if (mmc_cache_enabled(card->host)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_FLUSH_CACHE, 1,
MMC_CACHE_FLUSH_TIMEOUT_MS);