Message-Id: <20200210162838.173903-5-amirmizi6@gmail.com>
Date: Mon, 10 Feb 2020 18:28:35 +0200
From: amirmizi6@...il.com
To: Eyal.Cohen@...oton.com, jarkko.sakkinen@...ux.intel.com,
oshrialkoby85@...il.com, alexander.steffen@...ineon.com,
robh+dt@...nel.org, mark.rutland@....com, peterhuewe@....de,
jgg@...pe.ca, arnd@...db.de, gregkh@...uxfoundation.org
Cc: devicetree@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-integrity@...r.kernel.org, oshri.alkoby@...oton.com,
tmaimon77@...il.com, gcwilson@...ibm.com, kgoldman@...ibm.com,
Dan.Morav@...oton.com, oren.tanami@...oton.com,
shmulik.hager@...oton.com, amir.mizinski@...oton.com,
Amir Mizinski <amirmizi6@...il.com>
Subject: [PATCH v3 4/7] char: tpm: Fix expected bit handling and send all bytes in one shot without special-casing the last byte
From: Amir Mizinski <amirmizi6@...il.com>
The current implementation of the send path is incorrect: it loops only on
the TPM_STS.stsValid bit and afterwards checks the TPM_STS.expect bit value
a single time. Instead, the TPM_STS.expect bit should be checked at the
same time as TPM_STS.stsValid, and the check should be repeated until
timeout_A expires.
To achieve this, rename "wait_for_tpm_stat" to "wait_for_tpm_stat_result"
and modify it to regularly read the status register and compare the bits
selected by "mask" against the value given in "mask_result" (this way, a
bit in "mask" can be awaited to reach either 1 or 0).
Accordingly, and as defined in
TCG_DesignPrinciples_TPM2p0Driver_vp24_pubrev.pdf, all bytes should be
sent in one shot instead of sending the last byte separately.
This improvement was suggested by Benoit Houyere.
Signed-off-by: Amir Mizinski <amirmizi6@...il.com>
---
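Note for reviewers: a minimal sketch of the new mask/mask_result polling
semantics, not the driver code itself. The TPM_STS_* values below follow
the TIS specification, and status_bits_match() is a hypothetical helper
used only for illustration:

#include <stdbool.h>
#include <stdint.h>

#define TPM_STS_VALID        0x80  /* TPM_STS.stsValid */
#define TPM_STS_DATA_EXPECT  0x08  /* TPM_STS.expect   */

/*
 * The polled condition: the bits selected by "mask" must read back
 * exactly as "mask_result", so a bit can be awaited as either 1 or 0.
 */
static bool status_bits_match(uint8_t status, uint8_t mask,
			      uint8_t mask_result)
{
	return (status & mask) == mask_result;
}

/*
 * Example: after writing the whole command in one shot, wait until
 * stsValid == 1 and expect == 0 with a single polled condition:
 *
 *   status_bits_match(status, TPM_STS_VALID | TPM_STS_DATA_EXPECT,
 *                     TPM_STS_VALID)
 */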
drivers/char/tpm/tpm_tis_core.c | 72 ++++++++++++++++-------------------------
1 file changed, 28 insertions(+), 44 deletions(-)
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 18b9dc4..c8f4cf8 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -44,9 +44,10 @@ static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
return false;
}
-static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
- unsigned long timeout, wait_queue_head_t *queue,
- bool check_cancel)
+static int wait_for_tpm_stat_result(struct tpm_chip *chip, u8 mask,
+ u8 mask_result, unsigned long timeout,
+ wait_queue_head_t *queue,
+ bool check_cancel)
{
unsigned long stop;
long rc;
@@ -55,7 +56,7 @@ static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
/* check current status */
status = chip->ops->status(chip);
- if ((status & mask) == mask)
+ if ((status & mask) == mask_result)
return 0;
stop = jiffies + timeout;
@@ -83,7 +84,7 @@ static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
usleep_range(TPM_TIMEOUT_USECS_MIN,
TPM_TIMEOUT_USECS_MAX);
status = chip->ops->status(chip);
- if ((status & mask) == mask)
+ if ((status & mask) == mask_result)
return 0;
} while (time_before(jiffies, stop));
}
@@ -290,10 +291,11 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
int size = 0, burstcnt, rc;
while (size < count) {
- rc = wait_for_tpm_stat(chip,
- TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- chip->timeout_c,
- &priv->read_queue, true);
+ rc = wait_for_tpm_stat_result(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->timeout_c,
+ &priv->read_queue, true);
if (rc < 0)
return rc;
burstcnt = get_burstcount(chip);
@@ -348,8 +350,9 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
- if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false) < 0) {
+ if (wait_for_tpm_stat_result(chip, TPM_STS_VALID,
+ TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
size = -ETIME;
goto out;
}
@@ -385,61 +388,40 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc, status, burstcnt;
size_t count = 0;
- bool itpm = priv->flags & TPM_TIS_ITPM_WORKAROUND;
status = tpm_tis_status(chip);
if ((status & TPM_STS_COMMAND_READY) == 0) {
tpm_tis_ready(chip);
- if (wait_for_tpm_stat
- (chip, TPM_STS_COMMAND_READY, chip->timeout_b,
- &priv->int_queue, false) < 0) {
+ if (wait_for_tpm_stat_result(chip, TPM_STS_COMMAND_READY,
+ TPM_STS_COMMAND_READY,
+ chip->timeout_b,
+ &priv->int_queue, false) < 0) {
rc = -ETIME;
goto out_err;
}
}
- while (count < len - 1) {
+ while (count < len) {
burstcnt = get_burstcount(chip);
if (burstcnt < 0) {
dev_err(&chip->dev, "Unable to read burstcount\n");
rc = burstcnt;
goto out_err;
}
- burstcnt = min_t(int, burstcnt, len - count - 1);
+ burstcnt = min_t(int, burstcnt, len - count);
rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality),
burstcnt, buf + count);
if (rc < 0)
goto out_err;
count += burstcnt;
-
- if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false) < 0) {
- rc = -ETIME;
- goto out_err;
- }
- status = tpm_tis_status(chip);
- if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
- rc = -EIO;
- goto out_err;
- }
}
-
- /* write last byte */
- rc = tpm_tis_write8(priv, TPM_DATA_FIFO(priv->locality), buf[count]);
- if (rc < 0)
- goto out_err;
-
- if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
- &priv->int_queue, false) < 0) {
+ if (wait_for_tpm_stat_result(chip, TPM_STS_VALID | TPM_STS_DATA_EXPECT,
+ TPM_STS_VALID, chip->timeout_c,
+ &priv->int_queue, false) < 0) {
rc = -ETIME;
goto out_err;
}
- status = tpm_tis_status(chip);
- if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
- rc = -EIO;
- goto out_err;
- }
return 0;
@@ -496,9 +478,11 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
dur = tpm_calc_ordinal_duration(chip, ordinal);
- if (wait_for_tpm_stat
- (chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID, dur,
- &priv->read_queue, false) < 0) {
+ if (wait_for_tpm_stat_result(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ dur,
+ &priv->read_queue, false) < 0) {
rc = -ETIME;
goto out_err;
}
--
2.7.4