Message-Id: <20190829224110.91103-4-swboyd@chromium.org>
Date: Thu, 29 Aug 2019 15:41:09 -0700
From: Stephen Boyd <swboyd@...omium.org>
To: Peter Huewe <peterhuewe@....de>,
Jarkko Sakkinen <jarkko.sakkinen@...ux.intel.com>
Cc: linux-kernel@...r.kernel.org, linux-integrity@...r.kernel.org,
Andrey Pronin <apronin@...omium.org>,
Duncan Laurie <dlaurie@...omium.org>,
Jason Gunthorpe <jgg@...pe.ca>, Arnd Bergmann <arnd@...db.de>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Guenter Roeck <groeck@...omium.org>,
Alexander Steffen <Alexander.Steffen@...ineon.com>,
Heiko Stuebner <heiko@...ech.de>
Subject: [PATCH v6 3/4] tpm: tpm_tis_spi: Introduce a flow control callback

Cr50 firmware has a different flow control protocol than the one used by
this TPM PTP SPI driver. Introduce a flow control callback so we can
override the standard sequence with the custom one that Cr50 uses.
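
For illustration only (this is not part of the patch), a chip-specific
driver could install its own callback from its probe path. The sketch
below is hypothetical: the name cr50_spi_flow_control() and the
CR50_FLOW_CONTROL_MSECS budget are placeholders invented for this
example, not code from this series. It performs the same wait-state
check and single-byte polling as the default callback, but bounds the
wait with a wall-clock deadline instead of a fixed retry count:

/*
 * Hypothetical example: poll one byte at a time until the TPM reports
 * the end of the wait state (bit 0 of the received byte set), giving
 * up after a wall-clock deadline. Builds on the struct tpm_tis_spi_phy
 * from this patch; needs <linux/jiffies.h> and <linux/spi/spi.h>.
 */
#define CR50_FLOW_CONTROL_MSECS	20	/* made-up timeout for the example */

static int cr50_spi_flow_control(struct tpm_tis_spi_phy *phy,
				 struct spi_transfer *spi_xfer)
{
	unsigned long deadline =
		jiffies + msecs_to_jiffies(CR50_FLOW_CONTROL_MSECS);
	struct spi_message m;
	int ret;

	/* Same wait-state check as the default callback. */
	if (phy->iobuf[3] & 0x01)
		return 0;	/* no wait state signalled */

	phy->iobuf[0] = 0;
	spi_xfer->len = 1;

	do {
		spi_message_init(&m);
		spi_message_add_tail(spi_xfer, &m);
		ret = spi_sync_locked(phy->spi_device, &m);
		if (ret < 0)
			return ret;
		if (time_after(jiffies, deadline))
			return -EBUSY;
	} while (!(phy->iobuf[0] & 0x01));

	return 0;
}

Such a driver would then install its callback in place of the default
set below in tpm_tis_spi_probe(), e.g.:

	phy->flow_control = cr50_spi_flow_control;
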
Cc: Andrey Pronin <apronin@...omium.org>
Cc: Duncan Laurie <dlaurie@...omium.org>
Cc: Jason Gunthorpe <jgg@...pe.ca>
Cc: Arnd Bergmann <arnd@...db.de>
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Cc: Guenter Roeck <groeck@...omium.org>
Cc: Alexander Steffen <Alexander.Steffen@...ineon.com>
Cc: Heiko Stuebner <heiko@...ech.de>
Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@...ux.intel.com>
Signed-off-by: Stephen Boyd <swboyd@...omium.org>
---
drivers/char/tpm/tpm_tis_spi.c | 62 ++++++++++++++++++++++------------
1 file changed, 41 insertions(+), 21 deletions(-)

diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index 19513e622053..b3ed85671dd8 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -42,6 +42,8 @@
struct tpm_tis_spi_phy {
struct tpm_tis_data priv;
struct spi_device *spi_device;
+ int (*flow_control)(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *xfer);
u8 *iobuf;
};
@@ -50,12 +52,46 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
return container_of(data, struct tpm_tis_spi_phy, priv);
}
+/*
+ * TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
+ * keep trying to read from the device until MISO goes high indicating the
+ * wait state has ended.
+ *
+ * [1] https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+ */
+static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
+ struct spi_transfer *spi_xfer)
+{
+ struct spi_message m;
+ int ret, i;
+
+ if ((phy->iobuf[3] & 0x01) == 0) {
+ // handle SPI wait states
+ phy->iobuf[0] = 0;
+
+ for (i = 0; i < TPM_RETRY; i++) {
+ spi_xfer->len = 1;
+ spi_message_init(&m);
+ spi_message_add_tail(spi_xfer, &m);
+ ret = spi_sync_locked(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+ if (phy->iobuf[0] & 0x01)
+ break;
+ }
+
+ if (i == TPM_RETRY)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
- int i;
struct spi_message m;
struct spi_transfer spi_xfer;
u8 transfer_len;
@@ -82,26 +118,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
- if ((phy->iobuf[3] & 0x01) == 0) {
- // handle SPI wait states
- phy->iobuf[0] = 0;
-
- for (i = 0; i < TPM_RETRY; i++) {
- spi_xfer.len = 1;
- spi_message_init(&m);
- spi_message_add_tail(&spi_xfer, &m);
- ret = spi_sync_locked(phy->spi_device, &m);
- if (ret < 0)
- goto exit;
- if (phy->iobuf[0] & 0x01)
- break;
- }
-
- if (i == TPM_RETRY) {
- ret = -ETIMEDOUT;
- goto exit;
- }
- }
+ ret = phy->flow_control(phy, &spi_xfer);
+ if (ret < 0)
+ goto exit;
spi_xfer.cs_change = 0;
spi_xfer.len = transfer_len;
@@ -207,6 +226,7 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
if (!phy->iobuf)
return -ENOMEM;
+ phy->flow_control = tpm_tis_spi_flow_control;
/* If the SPI device has an IRQ then use that */
if (dev->irq > 0)
--
Sent by a computer through tubes