Message-Id: <20240522110909.10060-19-victorshihgli@gmail.com>
Date: Wed, 22 May 2024 19:09:04 +0800
From: Victor Shih <victorshihgli@...il.com>
To: ulf.hansson@...aro.org,
adrian.hunter@...el.com
Cc: linux-mmc@...r.kernel.org,
linux-kernel@...r.kernel.org,
benchuanggli@...il.com,
HL.Liu@...esyslogic.com.tw,
Greg.tu@...esyslogic.com.tw,
takahiro.akashi@...aro.org,
dlunev@...omium.org,
Victor Shih <victorshihgli@...il.com>,
Ben Chuang <ben.chuang@...esyslogic.com.tw>,
Victor Shih <victor.shih@...esyslogic.com.tw>
Subject: [PATCH V16 18/23] mmc: sdhci-uhs2: add irq() and others
From: Victor Shih <victor.shih@...esyslogic.com.tw>
This is a UHS-II version of sdhci's irq() operation and the related
request-completion handling. It handles UHS-II related command
interrupts and errors.
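For reference, the exported sdhci_uhs2_irq() is intended to be called from
a controller's hard-IRQ path before the legacy SD handling runs. Below is a
minimal sketch of that wiring, assuming a glue driver routes it through the
existing ->irq callback of struct sdhci_ops (the example_* name is
hypothetical and not part of this patch):

/*
 * Sketch only: hook the UHS-II IRQ filter via the existing ->irq callback.
 * sdhci_irq() consults host->ops->irq() before its own processing, so the
 * UHS-II command/error bits are consumed here and only the remaining bits
 * reach the legacy handling.
 */
static const struct sdhci_ops example_uhs2_sdhci_ops = {
	.irq		= sdhci_uhs2_irq,
	/* ... the driver's other callbacks ... */
};

A glue driver would then reference this ops struct from its platform/PCI
glue code as usual.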
Signed-off-by: Ben Chuang <ben.chuang@...esyslogic.com.tw>
Signed-off-by: AKASHI Takahiro <takahiro.akashi@...aro.org>
Signed-off-by: Victor Shih <victor.shih@...esyslogic.com.tw>
---
Updates in V14:
- Use mmc_card_uhs2() instead of sdhci_uhs2_mode() in
  sdhci_uhs2_complete_work(), sdhci_uhs2_irq() and
  sdhci_uhs2_thread_irq().
Updates in V13:
- Re-order functions to avoid forward declarations.
- Remove unnecessary definitions.
Updates in V9:
- Drop the export of the sdhci_set_mrq_done() function.
Updates in V8:
- Forward declare struct mmc_request in sdhci_uhs2.h.
- Remove forward declaration of sdhci_send_command().
- Use mmc_dev() to simplify code in sdhci_request_done_dma().
Updates in V7:
- Remove unnecessary functions.
- Use sdhci_uhs2_mode() to simplify code in sdhci_uhs2_irq().
- Modify descriptions in sdhci_uhs2_irq().
- Drop the export of some functions.
Updates in V6:
- Remove unnecessary functions.
- Add sdhci_uhs2_mode() in sdhci_uhs2_complete_work().
- Add sdhci_uhs2_mode() in sdhci_uhs2_thread_irq().
---
drivers/mmc/host/sdhci-uhs2.c | 215 ++++++++++++++++++++++++++++++++++
drivers/mmc/host/sdhci-uhs2.h | 2 +
drivers/mmc/host/sdhci.c | 99 +++++++++-------
drivers/mmc/host/sdhci.h | 4 +
4 files changed, 275 insertions(+), 45 deletions(-)
diff --git a/drivers/mmc/host/sdhci-uhs2.c b/drivers/mmc/host/sdhci-uhs2.c
index 008fa38ac907..28a25cd09753 100644
--- a/drivers/mmc/host/sdhci-uhs2.c
+++ b/drivers/mmc/host/sdhci-uhs2.c
@@ -921,6 +921,221 @@ static void sdhci_uhs2_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
}
+/*****************************************************************************\
+ * *
+ * Request done *
+ * *
+\*****************************************************************************/
+
+static bool sdhci_uhs2_request_done(struct sdhci_host *host)
+{
+ unsigned long flags;
+ struct mmc_request *mrq;
+ int i;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ for (i = 0; i < SDHCI_MAX_MRQS; i++) {
+ mrq = host->mrqs_done[i];
+ if (mrq)
+ break;
+ }
+
+ if (!mrq) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return true;
+ }
+
+ /*
+ * Always unmap the data buffers if they were mapped by
+ * sdhci_prepare_data() whenever we finish with a request.
+ * This avoids leaking DMA mappings on error.
+ */
+ if (host->flags & SDHCI_REQ_USE_DMA)
+ sdhci_request_done_dma(host, mrq);
+
+ /*
+ * The controller needs a reset of internal state machines
+ * upon error conditions.
+ */
+ if (sdhci_needs_reset(host, mrq)) {
+ /*
+ * Do not finish until command and data lines are available for
+ * reset. Note there can only be one other mrq, so it cannot
+ * also be in mrqs_done, otherwise host->cmd and host->data_cmd
+ * would both be null.
+ */
+ if (host->cmd || host->data_cmd) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return true;
+ }
+
+ sdhci_uhs2_reset(host, SDHCI_UHS2_SW_RESET_SD);
+ host->pending_reset = false;
+ }
+
+ host->mrqs_done[i] = NULL;
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (host->ops->request_done)
+ host->ops->request_done(host, mrq);
+ else
+ mmc_request_done(host->mmc, mrq);
+
+ return false;
+}
+
+static void sdhci_uhs2_complete_work(struct work_struct *work)
+{
+ struct sdhci_host *host = container_of(work, struct sdhci_host,
+ complete_work);
+
+ if (!mmc_card_uhs2(host->mmc)) {
+ sdhci_complete_work(work);
+ return;
+ }
+
+ while (!sdhci_uhs2_request_done(host))
+ ;
+}
+
+/*****************************************************************************\
+ * *
+ * Interrupt handling *
+ * *
+\*****************************************************************************/
+
+static void __sdhci_uhs2_irq(struct sdhci_host *host, u32 uhs2mask)
+{
+ struct mmc_command *cmd = host->cmd;
+
+ DBG("*** %s got UHS2 error interrupt: 0x%08x\n",
+ mmc_hostname(host->mmc), uhs2mask);
+
+ if (uhs2mask & SDHCI_UHS2_INT_CMD_ERR_MASK) {
+ if (!host->cmd) {
+ pr_err("%s: Got cmd interrupt 0x%08x but no cmd.\n",
+ mmc_hostname(host->mmc),
+ (unsigned int)uhs2mask);
+ sdhci_dumpregs(host);
+ return;
+ }
+ host->cmd->error = -EILSEQ;
+ if (uhs2mask & SDHCI_UHS2_INT_CMD_TIMEOUT)
+ host->cmd->error = -ETIMEDOUT;
+ }
+
+ if (uhs2mask & SDHCI_UHS2_INT_DATA_ERR_MASK) {
+ if (!host->data) {
+ pr_err("%s: Got data interrupt 0x%08x but no data.\n",
+ mmc_hostname(host->mmc),
+ (unsigned int)uhs2mask);
+ sdhci_dumpregs(host);
+ return;
+ }
+
+ if (uhs2mask & SDHCI_UHS2_INT_DEADLOCK_TIMEOUT) {
+ pr_err("%s: Got deadlock timeout interrupt 0x%08x\n",
+ mmc_hostname(host->mmc),
+ (unsigned int)uhs2mask);
+ host->data->error = -ETIMEDOUT;
+ } else if (uhs2mask & SDHCI_UHS2_INT_ADMA_ERROR) {
+ pr_err("%s: ADMA error = 0x %x\n",
+ mmc_hostname(host->mmc),
+ sdhci_readb(host, SDHCI_ADMA_ERROR));
+ host->data->error = -EIO;
+ } else {
+ host->data->error = -EILSEQ;
+ }
+ }
+
+ if (host->data && host->data->error)
+ sdhci_uhs2_finish_data(host);
+ else
+ sdhci_finish_mrq(host, cmd->mrq);
+}
+
+u32 sdhci_uhs2_irq(struct sdhci_host *host, u32 intmask)
+{
+ u32 mask = intmask, uhs2mask;
+
+ if (!mmc_card_uhs2(host->mmc))
+ goto out;
+
+ if (intmask & SDHCI_INT_ERROR) {
+ uhs2mask = sdhci_readl(host, SDHCI_UHS2_INT_STATUS);
+ if (!(uhs2mask & SDHCI_UHS2_INT_ERROR_MASK))
+ goto cmd_irq;
+
+ /* Clear error interrupts */
+ sdhci_writel(host, uhs2mask & SDHCI_UHS2_INT_ERROR_MASK,
+ SDHCI_UHS2_INT_STATUS);
+
+ /* Handle error interrupts */
+ __sdhci_uhs2_irq(host, uhs2mask);
+
+ /* Caller, sdhci_irq(), doesn't have to care about UHS-2 errors */
+ intmask &= ~SDHCI_INT_ERROR;
+ mask &= SDHCI_INT_ERROR;
+ }
+
+cmd_irq:
+ if (intmask & SDHCI_INT_CMD_MASK) {
+ /* Clear command interrupt */
+ sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, SDHCI_INT_STATUS);
+
+ /* Handle command interrupt */
+ if (intmask & SDHCI_INT_RESPONSE)
+ sdhci_uhs2_finish_command(host);
+
+ /* Caller, sdhci_irq(), doesn't have to care about UHS-2 commands */
+ intmask &= ~SDHCI_INT_CMD_MASK;
+ mask &= SDHCI_INT_CMD_MASK;
+ }
+
+ /* Clear already-handled interrupts. */
+ sdhci_writel(host, mask, SDHCI_INT_STATUS);
+
+out:
+ return intmask;
+}
+EXPORT_SYMBOL_GPL(sdhci_uhs2_irq);
+
+static irqreturn_t sdhci_uhs2_thread_irq(int irq, void *dev_id)
+{
+ struct sdhci_host *host = dev_id;
+ struct mmc_command *cmd;
+ unsigned long flags;
+ u32 isr;
+
+ if (!mmc_card_uhs2(host->mmc))
+ return sdhci_thread_irq(irq, dev_id);
+
+ while (!sdhci_uhs2_request_done(host))
+ ;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ isr = host->thread_isr;
+ host->thread_isr = 0;
+
+ cmd = host->deferred_cmd;
+ if (cmd && !sdhci_uhs2_send_command_retry(host, cmd, flags))
+ sdhci_finish_mrq(host, cmd->mrq);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc->ops->card_event(mmc);
+ mmc_detect_change(mmc, msecs_to_jiffies(200));
+ }
+
+ return IRQ_HANDLED;
+}
+
/*****************************************************************************\
* *
* Driver init/exit *
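The handlers added above are not installed by this patch itself; that is
left to the UHS-II add-host path later in the series. As a rough sketch of
what that wiring is expected to look like, assuming the usual sdhci pattern
of a completion work item plus a threaded IRQ is kept and that sdhci_irq()
is reachable from this file (the example_* name is hypothetical):

/*
 * Sketch only, not part of this patch: installing the UHS-II-aware
 * completion work and threaded IRQ handler. Assumes sdhci_irq() stays
 * the hard handler and is made available to sdhci-uhs2.c elsewhere in
 * the series.
 */
static int example_uhs2_setup_irq(struct sdhci_host *host)
{
	/*
	 * sdhci_uhs2_complete_work() falls back to sdhci_complete_work()
	 * when the card is not running in UHS-II mode.
	 */
	INIT_WORK(&host->complete_work, sdhci_uhs2_complete_work);

	/*
	 * sdhci_uhs2_thread_irq() likewise falls back to
	 * sdhci_thread_irq() for non-UHS-II cards.
	 */
	return request_threaded_irq(host->irq, sdhci_irq,
				    sdhci_uhs2_thread_irq, IRQF_SHARED,
				    mmc_hostname(host->mmc), host);
}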
diff --git a/drivers/mmc/host/sdhci-uhs2.h b/drivers/mmc/host/sdhci-uhs2.h
index e08aa60bf06b..beef08db0a16 100644
--- a/drivers/mmc/host/sdhci-uhs2.h
+++ b/drivers/mmc/host/sdhci-uhs2.h
@@ -176,10 +176,12 @@
struct sdhci_host;
struct mmc_command;
+struct mmc_request;
void sdhci_uhs2_dump_regs(struct sdhci_host *host);
void sdhci_uhs2_reset(struct sdhci_host *host, u16 mask);
void sdhci_uhs2_set_timeout(struct sdhci_host *host, struct mmc_command *cmd);
void sdhci_uhs2_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set);
+u32 sdhci_uhs2_irq(struct sdhci_host *host, u32 intmask);
#endif /* __SDHCI_UHS2_H */
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 10ec09af5028..5f2157a26020 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1498,7 +1498,7 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
-static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
+bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
return (!(host->flags & SDHCI_DEVICE_DEAD) &&
((mrq->cmd && mrq->cmd->error) ||
@@ -1506,6 +1506,7 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
(mrq->data && mrq->data->stop && mrq->data->stop->error) ||
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
+EXPORT_SYMBOL_GPL(sdhci_needs_reset);
static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
@@ -3112,6 +3113,53 @@ static const struct mmc_host_ops sdhci_ops = {
* *
\*****************************************************************************/
+void sdhci_request_done_dma(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (data && data->host_cookie == COOKIE_MAPPED) {
+ if (host->bounce_buffer) {
+ /*
+ * On reads, copy the bounced data into the
+ * sglist
+ */
+ if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
+ unsigned int length = data->bytes_xfered;
+
+ if (length > host->bounce_buffer_size) {
+ pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
+ mmc_hostname(host->mmc),
+ host->bounce_buffer_size,
+ data->bytes_xfered);
+ /* Cap it down and continue */
+ length = host->bounce_buffer_size;
+ }
+ dma_sync_single_for_cpu(mmc_dev(host->mmc),
+ host->bounce_addr,
+ host->bounce_buffer_size,
+ DMA_FROM_DEVICE);
+ sg_copy_from_buffer(data->sg,
+ data->sg_len,
+ host->bounce_buffer,
+ length);
+ } else {
+ /* No copying, just switch ownership */
+ dma_sync_single_for_cpu(mmc_dev(host->mmc),
+ host->bounce_addr,
+ host->bounce_buffer_size,
+ mmc_get_dma_dir(data));
+ }
+ } else {
+ /* Unmap the raw data */
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ mmc_get_dma_dir(data));
+ }
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
+}
+EXPORT_SYMBOL_GPL(sdhci_request_done_dma);
+
static bool sdhci_request_done(struct sdhci_host *host)
{
unsigned long flags;
@@ -3176,48 +3224,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
sdhci_set_mrq_done(host, mrq);
}
- if (data && data->host_cookie == COOKIE_MAPPED) {
- if (host->bounce_buffer) {
- /*
- * On reads, copy the bounced data into the
- * sglist
- */
- if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
- unsigned int length = data->bytes_xfered;
-
- if (length > host->bounce_buffer_size) {
- pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
- mmc_hostname(host->mmc),
- host->bounce_buffer_size,
- data->bytes_xfered);
- /* Cap it down and continue */
- length = host->bounce_buffer_size;
- }
- dma_sync_single_for_cpu(
- mmc_dev(host->mmc),
- host->bounce_addr,
- host->bounce_buffer_size,
- DMA_FROM_DEVICE);
- sg_copy_from_buffer(data->sg,
- data->sg_len,
- host->bounce_buffer,
- length);
- } else {
- /* No copying, just switch ownership */
- dma_sync_single_for_cpu(
- mmc_dev(host->mmc),
- host->bounce_addr,
- host->bounce_buffer_size,
- mmc_get_dma_dir(data));
- }
- } else {
- /* Unmap the raw data */
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
- }
- data->host_cookie = COOKIE_UNMAPPED;
- }
+ sdhci_request_done_dma(host, mrq);
}
host->mrqs_done[i] = NULL;
@@ -3232,7 +3239,7 @@ static bool sdhci_request_done(struct sdhci_host *host)
return false;
}
-static void sdhci_complete_work(struct work_struct *work)
+void sdhci_complete_work(struct work_struct *work)
{
struct sdhci_host *host = container_of(work, struct sdhci_host,
complete_work);
@@ -3240,6 +3247,7 @@ static void sdhci_complete_work(struct work_struct *work)
while (!sdhci_request_done(host))
;
}
+EXPORT_SYMBOL_GPL(sdhci_complete_work);
static void sdhci_timeout_timer(struct timer_list *t)
{
@@ -3701,7 +3709,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
return result;
}
-static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
+irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
struct sdhci_host *host = dev_id;
struct mmc_command *cmd;
@@ -3731,6 +3739,7 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(sdhci_thread_irq);
/*****************************************************************************\
* *
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a0d6710895f1..db412c7b132c 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -833,6 +833,7 @@ bool sdhci_data_line_cmd(struct mmc_command *cmd);
void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, unsigned long timeout);
void sdhci_initialize_data(struct sdhci_host *host, struct mmc_data *data);
void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data);
+bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq);
void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq);
void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq);
void __sdhci_finish_data_common(struct sdhci_host *host);
@@ -864,6 +865,9 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios);
void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
+void sdhci_request_done_dma(struct sdhci_host *host, struct mmc_request *mrq);
+void sdhci_complete_work(struct work_struct *work);
+irqreturn_t sdhci_thread_irq(int irq, void *dev_id);
void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
dma_addr_t addr, int len, unsigned int cmd);
--
2.25.1