Message-ID: <20230905023921.10766-3-wenchao.chen@unisoc.com>
Date: Tue, 5 Sep 2023 10:39:21 +0800
From: Wenchao Chen <wenchao.chen@...soc.com>
To: <ulf.hansson@...aro.org>
CC: <linux-mmc@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
<wenchao.chen666@...il.com>, <zhenxiong.lai@...soc.com>,
<yuelin.tang@...soc.com>, Wenchao Chen <wenchao.chen@...soc.com>
Subject: [PATCH V3 2/2] mmc: hsq: dynamic adjustment of hsq->depth

Increasing hsq_depth improves random write performance, so raise it to
HSQ_PERFORMANCE_DEPTH when consecutive 4K write requests are queued and
fall back to HSQ_NORMAL_DEPTH otherwise.

Signed-off-by: Wenchao Chen <wenchao.chen@...soc.com>
---
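Note for reviewers: below is a minimal userspace sketch, not part of this
patch, of the I/O pattern the change targets. The device node, write count
and O_DIRECT usage are illustrative assumptions; the point is that each
request is a single 4096-byte write, which is what
mmc_hsq_modify_threshold() counts before raising hsq_depth. The same
pattern can also be produced with fio (rw=randwrite, bs=4k, direct=1).

/*
 * Illustrative only -- NOT part of this patch. Generates the 4K random
 * write pattern that mmc_hsq_modify_threshold() looks for. The device
 * node and write count below are placeholders; writing to a real device
 * is destructive.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#define BLK_SIZE	4096
#define NR_WRITES	1024
#define DEV_PATH	"/dev/mmcblk0"	/* placeholder device node */

int main(void)
{
	void *buf;
	off_t dev_size, off;
	int fd, i;

	fd = open(DEV_PATH, O_WRONLY | O_DIRECT | O_SYNC);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* O_DIRECT requires an aligned buffer. */
	if (posix_memalign(&buf, BLK_SIZE, BLK_SIZE)) {
		close(fd);
		return 1;
	}
	memset(buf, 0x5a, BLK_SIZE);

	dev_size = lseek(fd, 0, SEEK_END);
	if (dev_size < BLK_SIZE) {
		free(buf);
		close(fd);
		return 1;
	}

	srand((unsigned int)time(NULL));

	for (i = 0; i < NR_WRITES; i++) {
		/* Random 4K-aligned offset; each write is one 4096-byte request. */
		off = ((off_t)rand() % (dev_size / BLK_SIZE)) * BLK_SIZE;
		if (pwrite(fd, buf, BLK_SIZE, off) != BLK_SIZE) {
			perror("pwrite");
			break;
		}
	}

	free(buf);
	close(fd);
	return 0;
}

With more than one such 4K write sitting in the hsq slots, need_change
exceeds 1 and the depth is raised to HSQ_PERFORMANCE_DEPTH; otherwise it
stays at HSQ_NORMAL_DEPTH.
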
drivers/mmc/host/mmc_hsq.c | 27 +++++++++++++++++++++++++++
drivers/mmc/host/mmc_hsq.h | 5 +++++
2 files changed, 32 insertions(+)

diff --git a/drivers/mmc/host/mmc_hsq.c b/drivers/mmc/host/mmc_hsq.c
index 8556cacb21a1..0984c39108ba 100644
--- a/drivers/mmc/host/mmc_hsq.c
+++ b/drivers/mmc/host/mmc_hsq.c
@@ -21,6 +21,31 @@ static void mmc_hsq_retry_handler(struct work_struct *work)
 	mmc->ops->request(mmc, hsq->mrq);
 }
 
+static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
+{
+	struct mmc_host *mmc = hsq->mmc;
+	struct mmc_request *mrq;
+	struct hsq_slot *slot;
+	int need_change = 0;
+	int tag;
+
+	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
+		slot = &hsq->slot[tag];
+		mrq = slot->mrq;
+		if (mrq && mrq->data &&
+		    (mrq->data->blksz * mrq->data->blocks == 4096) &&
+		    (mrq->data->flags & MMC_DATA_WRITE))
+			need_change++;
+		else
+			break;
+	}
+
+	if (need_change > 1)
+		mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
+	else
+		mmc->hsq_depth = HSQ_NORMAL_DEPTH;
+}
+
 static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
 {
 	struct mmc_host *mmc = hsq->mmc;
@@ -42,6 +67,8 @@ static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
 		return;
 	}
 
+	mmc_hsq_modify_threshold(hsq);
+
 	slot = &hsq->slot[hsq->next_tag];
 	hsq->mrq = slot->mrq;
 	hsq->qcnt--;
diff --git a/drivers/mmc/host/mmc_hsq.h b/drivers/mmc/host/mmc_hsq.h
index aa5c4543b55f..dd352a6ac32a 100644
--- a/drivers/mmc/host/mmc_hsq.h
+++ b/drivers/mmc/host/mmc_hsq.h
@@ -10,6 +10,11 @@
  * flight to avoid a long latency.
  */
 #define HSQ_NORMAL_DEPTH	2
+/*
+ * For 4k random writes, we allow hsq_depth to increase to 5
+ * for better performance.
+ */
+#define HSQ_PERFORMANCE_DEPTH	5
 
 struct hsq_slot {
 	struct mmc_request *mrq;
--
2.17.1