Message-Id: <20200109123651.18520-4-jens.wiklander@linaro.org>
Date: Thu, 9 Jan 2020 13:36:49 +0100
From: Jens Wiklander <jens.wiklander@...aro.org>
To: tee-dev@...ts.linaro.org, linux-kernel@...r.kernel.org
Cc: Jerome Forissier <jerome@...issier.org>,
Sumit Garg <sumit.garg@...aro.org>,
Etienne Carriere <etienne.carriere@...aro.org>,
Rijo Thomas <Rijo-john.Thomas@....com>,
Devaraj Rangasamy <Devaraj.Rangasamy@....com>,
Jens Wiklander <jens.wiklander@...aro.org>
Subject: [PATCH 3/5] tee: don't assign shm id for private shms
Private shared memory objects must not be referenced from user space. To
guarantee that, don't assign an id to shared memory objects which are
driver private.
Signed-off-by: Jens Wiklander <jens.wiklander@...aro.org>
---
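Not part of the patch, just an illustration: assuming the existing
tee_shm_alloc() flags, the distinction the change relies on looks roughly
like this from a caller's point of view (alloc_private_buf() and
alloc_user_buf() are hypothetical helpers):

#include <linux/tee_drv.h>

/*
 * Hedged sketch: a driver-private buffer is allocated without
 * TEE_SHM_DMA_BUF and, with this change, is never entered into
 * teedev->idr, while a buffer meant to be exported to user space
 * still gets an id assigned.
 */
static struct tee_shm *alloc_private_buf(struct tee_context *ctx, size_t size)
{
	/* kernel-internal only: no id is assigned after this change */
	return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
}

static struct tee_shm *alloc_user_buf(struct tee_context *ctx, size_t size)
{
	/* exported via dma-buf: still registered in teedev->idr */
	return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
}
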
drivers/tee/tee_private.h | 3 ++-
drivers/tee/tee_shm.c | 31 ++++++++++++++++++-------------
2 files changed, 20 insertions(+), 14 deletions(-)
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index f797171f0434..e55204df31ce 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -37,7 +37,8 @@ struct tee_shm_pool {
* @num_users: number of active users of this device
* @c_no_user: completion used when unregistering the device
* @mutex: mutex protecting @num_users and @idr
- * @idr: register of shared memory object allocated on this device
+ * @idr: register of user space shared memory objects allocated or
+ * registered on this device
* @pool: shared memory pool
*/
struct tee_device {
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 8afe08b23242..e636cf82acdb 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -15,9 +15,11 @@ static void tee_shm_release(struct tee_shm *shm)
{
struct tee_device *teedev = shm->teedev;
- mutex_lock(&teedev->mutex);
- idr_remove(&teedev->idr, shm->id);
- mutex_unlock(&teedev->mutex);
+ if (shm->flags & TEE_SHM_DMA_BUF) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ mutex_unlock(&teedev->mutex);
+ }
if (shm->flags & TEE_SHM_POOL) {
struct tee_shm_pool_mgr *poolm;
@@ -143,17 +145,18 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
goto err_kfree;
}
- mutex_lock(&teedev->mutex);
- shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
- mutex_unlock(&teedev->mutex);
- if (shm->id < 0) {
- ret = ERR_PTR(shm->id);
- goto err_pool_free;
- }
if (flags & TEE_SHM_DMA_BUF) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ mutex_lock(&teedev->mutex);
+ shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+ mutex_unlock(&teedev->mutex);
+ if (shm->id < 0) {
+ ret = ERR_PTR(shm->id);
+ goto err_pool_free;
+ }
+
exp_info.ops = &tee_shm_dma_buf_ops;
exp_info.size = shm->size;
exp_info.flags = O_RDWR;
@@ -171,9 +174,11 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
return shm;
err_rem:
- mutex_lock(&teedev->mutex);
- idr_remove(&teedev->idr, shm->id);
- mutex_unlock(&teedev->mutex);
+ if (flags & TEE_SHM_DMA_BUF) {
+ mutex_lock(&teedev->mutex);
+ idr_remove(&teedev->idr, shm->id);
+ mutex_unlock(&teedev->mutex);
+ }
err_pool_free:
poolm->ops->free(poolm, shm);
err_kfree:
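
Again only a sketch, not part of the patch: since private shms are never
entered into teedev->idr, a lookup through the existing
tee_shm_get_from_id() helper can only ever resolve to a dma-buf backed
object (lookup_user_shm() below is a hypothetical wrapper):

#include <linux/bug.h>
#include <linux/err.h>
#include <linux/tee_drv.h>

/*
 * Hedged sketch: ids supplied by user space are resolved through
 * teedev->idr, so with this change they can only name TEE_SHM_DMA_BUF
 * objects; driver-private buffers are unreachable by id.
 */
static struct tee_shm *lookup_user_shm(struct tee_context *ctx, int id)
{
	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);

	if (IS_ERR(shm))
		return shm;

	/* private shms were never registered, so this should always hold */
	WARN_ON(!(shm->flags & TEE_SHM_DMA_BUF));
	return shm;
}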
--
2.17.1