Message-ID: <20230227172108.8206-3-kyarlagadda@nvidia.com>
Date: Mon, 27 Feb 2023 22:51:07 +0530
From: Krishna Yarlagadda <kyarlagadda@...dia.com>
To: <robh+dt@...nel.org>, <broonie@...nel.org>, <peterhuewe@....de>,
<jgg@...pe.ca>, <jarkko@...nel.org>,
<krzysztof.kozlowski+dt@...aro.org>, <linux-spi@...r.kernel.org>,
<linux-tegra@...r.kernel.org>, <linux-integrity@...r.kernel.org>,
<linux-kernel@...r.kernel.org>
CC: <thierry.reding@...il.com>, <jonathanh@...dia.com>,
<skomatineni@...dia.com>, <ldewangan@...dia.com>,
Krishna Yarlagadda <kyarlagadda@...dia.com>
Subject: [Patch V6 2/3] tpm_tis-spi: Support hardware wait polling

TPM devices raise a wait signal on the last address cycle. A software
driver can detect it by reading the MISO line on the same clock, which
requires full duplex support. On half duplex controllers, the wait
state detection has to be implemented in hardware.

Support hardware wait state detection by sending the entire message
and letting the controller handle the flow control.

The QSPI controllers in Tegra234 and Tegra241 implement TPM wait
polling.

Signed-off-by: Krishna Yarlagadda <kyarlagadda@...dia.com>
---
drivers/char/tpm/tpm_tis_spi_main.c | 92 ++++++++++++++++++++++++++++-
1 file changed, 90 insertions(+), 2 deletions(-)
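
Not part of the patch itself, but a rough, hypothetical sketch of how the
other side of this contract could look in a half duplex controller driver:
it consumes the SPI_TPM_HW_FLOW mode bit that tpm_tis_spi_transfer() sets
and the fixed cmd/addr/data transfer split built by
tpm_tis_spi_hw_flow_transfer(). All names prefixed with example_ are
placeholders, not APIs from this series; the real controller-side handling
belongs to the QSPI driver changes elsewhere in the series.

#include <linux/spi/spi.h>

/* Hypothetical helper: program controller registers for cmd (0), addr (1)
 * or data (2) phase so the hardware knows where to insert wait polling.
 */
static void example_qspi_program_phase(struct spi_controller *ctlr,
				       unsigned int phase,
				       struct spi_transfer *xfer)
{
	/* illustrative stub only */
}

static int example_qspi_transfer_one_message(struct spi_controller *ctlr,
					     struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool tpm_wait_poll = msg->spi->mode & SPI_TPM_HW_FLOW;
	unsigned int phase = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * With SPI_TPM_HW_FLOW set, the TPM core sends a fixed
		 * layout: transfer 0 is the 1-byte command, transfer 1 the
		 * 3-byte address, transfer 2 the data. Knowing the phase,
		 * the controller can poll MISO for the TPM wait state at
		 * the end of the address phase in hardware.
		 */
		if (tpm_wait_poll)
			example_qspi_program_phase(ctlr, phase, xfer);

		/* ...hand xfer to the hardware and wait for completion... */
		phase++;
	}

	msg->status = 0;
	spi_finalize_current_message(ctlr);

	return 0;
}
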
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index a0963a3e92bd..5f66448ee09e 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -71,8 +71,74 @@ static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
return 0;
}
-int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *in, const u8 *out)
+/*
+ * Half duplex controllers with support for TPM wait state detection, like
+ * Tegra241, need the cmd, addr and data sent in a single message to manage
+ * the HW flow control. Each phase is sent in a different transfer so that
+ * the controller can identify the phase.
+ */
+int tpm_tis_spi_hw_flow_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct spi_transfer spi_xfer[3];
+ struct spi_message m;
+ u8 transfer_len;
+	int ret = 0;
+
+ while (len) {
+ transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
+
+ spi_message_init(&m);
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
+
+ memset(&spi_xfer, 0, sizeof(spi_xfer));
+
+ spi_xfer[0].tx_buf = phy->iobuf;
+ spi_xfer[0].len = 1;
+ spi_message_add_tail(&spi_xfer[0], &m);
+
+ spi_xfer[1].tx_buf = phy->iobuf + 1;
+ spi_xfer[1].len = 3;
+ spi_message_add_tail(&spi_xfer[1], &m);
+
+ if (out) {
+ spi_xfer[2].tx_buf = &phy->iobuf[4];
+ spi_xfer[2].rx_buf = NULL;
+ memcpy(&phy->iobuf[4], out, transfer_len);
+ out += transfer_len;
+ }
+
+ if (in) {
+ spi_xfer[2].tx_buf = NULL;
+ spi_xfer[2].rx_buf = &phy->iobuf[4];
+ }
+
+ spi_xfer[2].len = transfer_len;
+ spi_message_add_tail(&spi_xfer[2], &m);
+
+ reinit_completion(&phy->ready);
+
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+
+ if (in) {
+ memcpy(in, &phy->iobuf[4], transfer_len);
+ in += transfer_len;
+ }
+
+ len -= transfer_len;
+ }
+
+ return ret;
+}
+
+int tpm_tis_spi_sw_flow_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
@@ -140,6 +206,28 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
return ret;
}
+int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct spi_controller *ctlr = phy->spi_device->controller;
+
+ /*
+	 * TPM flow control over SPI requires full duplex support.
+	 * Send the entire message to a half duplex controller so that it
+	 * can handle the wait polling in hardware, and set the TPM HW
+	 * flow control flag on the device.
+ */
+ if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ phy->spi_device->mode |= SPI_TPM_HW_FLOW;
+ return tpm_tis_spi_hw_flow_transfer(data, addr, len, in,
+ out);
+ } else {
+ return tpm_tis_spi_sw_flow_transfer(data, addr, len, in,
+ out);
+ }
+}
+
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
{
--
2.17.1