Message-ID: <CAHUa44GRL-vkXH6CKoL-JamJpk+XV8mWyO+VaE8VVdmqSVa6EQ@mail.gmail.com>
Date: Fri, 17 Nov 2023 09:29:51 +0100
From: Jens Wiklander <jens.wiklander@...aro.org>
To: Sumit Garg <sumit.garg@...aro.org>
Cc: linux-kernel@...r.kernel.org, op-tee@...ts.trustedfirmware.org,
Jerome Forissier <jerome.forissier@...aro.org>,
Shyam Saini <shyamsaini@...ux.microsoft.com>
Subject: Re: [PATCH v3 1/2] optee: add page list to kernel private shared memory
On Thu, Nov 16, 2023 at 1:28 PM Sumit Garg <sumit.garg@...aro.org> wrote:
>
> On Tue, 14 Nov 2023 at 15:22, Jens Wiklander <jens.wiklander@...aro.org> wrote:
> >
> > Until now, kernel private shared memory allocated as dynamic shared
> > memory (not from the static shared memory pool) has been returned
> > without a list of physical pages on allocations via RPC. To support
> > allocations larger than one page, add a list of physical pages.
>
> Although this patch looks like a good cleanup, I can't find an ABI
> change here. Wasn't a list of pages returned earlier too?
No, because handle_rpc_func_cmd_shm_alloc() didn't take the path where
optee_allocate_pages_list() is called.
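
To make that concrete, here is a rough sketch of the flow after this
patch (condensed and paraphrased from the diff below, not a verbatim
quote; error handling trimmed):

    /* optee_pool_op_alloc_helper(): the pages array is now always
     * built and kept in shm->pages/shm->num_pages, instead of being
     * a temporary that only existed around the shm_register() call.
     */

    /* handle_rpc_func_cmd_shm_alloc(): key off the page list */
    pages = tee_shm_get_pages(shm, &page_count);
    if (pages) {
            /* Dynamic shm: hand OP-TEE a non-contiguous page list */
            pages_list = optee_allocate_pages_list(page_count);
            optee_fill_pages_list(pages_list, pages, page_count,
                                  tee_shm_get_page_offset(shm));
            arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
                                  OPTEE_MSG_ATTR_NONCONTIG;
    } else {
            /* Reserved-pool shm: a single physical address suffices */
            arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
    }

Before the change, shm->pages was never set for these kernel private
allocations, so tee_shm_get_pages() had nothing to return and the
optee_allocate_pages_list() branch was never reached.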
Cheers,
Jens
>
> -Sumit
>
> >
> > Signed-off-by: Jens Wiklander <jens.wiklander@...aro.org>
> > ---
> > drivers/tee/optee/core.c | 28 +++++++++++++----------
> > drivers/tee/optee/smc_abi.c | 45 +++++++++++++++++--------------------
> > 2 files changed, 37 insertions(+), 36 deletions(-)
> >
> > diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
> > index 2a258bd3b6b5..38ea2fecfc2e 100644
> > --- a/drivers/tee/optee/core.c
> > +++ b/drivers/tee/optee/core.c
> > @@ -27,7 +27,10 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
> > unsigned long start))
> > {
> > unsigned int order = get_order(size);
> > + unsigned int nr_pages = 1 << order;
> > + struct page **pages;
> > struct page *page;
> > + unsigned int i;
> > int rc = 0;
> >
> > /*
> > @@ -42,30 +45,29 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
> > shm->paddr = page_to_phys(page);
> > shm->size = PAGE_SIZE << order;
> >
> > - if (shm_register) {
> > - unsigned int nr_pages = 1 << order, i;
> > - struct page **pages;
> > + pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
> > + if (!pages) {
> > + rc = -ENOMEM;
> > + goto err;
> > + }
> >
> > - pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
> > - if (!pages) {
> > - rc = -ENOMEM;
> > - goto err;
> > - }
> > + for (i = 0; i < nr_pages; i++)
> > + pages[i] = page + i;
> >
> > - for (i = 0; i < nr_pages; i++)
> > - pages[i] = page + i;
> > + shm->pages = pages;
> > + shm->num_pages = nr_pages;
> >
> > + if (shm_register) {
> > rc = shm_register(shm->ctx, shm, pages, nr_pages,
> > (unsigned long)shm->kaddr);
> > - kfree(pages);
> > if (rc)
> > goto err;
> > }
> >
> > return 0;
> > -
> > err:
> > free_pages((unsigned long)shm->kaddr, order);
> > + shm->kaddr = NULL;
> > return rc;
> > }
> >
> > @@ -77,6 +79,8 @@ void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
> > shm_unregister(shm->ctx, shm);
> > free_pages((unsigned long)shm->kaddr, get_order(shm->size));
> > shm->kaddr = NULL;
> > + kfree(shm->pages);
> > + shm->pages = NULL;
> > }
> >
> > static void optee_bus_scan(struct work_struct *work)
> > diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
> > index d5b28fd35d66..b69410c7cc0a 100644
> > --- a/drivers/tee/optee/smc_abi.c
> > +++ b/drivers/tee/optee/smc_abi.c
> > @@ -678,10 +678,11 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
> > struct optee_msg_arg *arg,
> > struct optee_call_ctx *call_ctx)
> > {
> > - phys_addr_t pa;
> > struct tee_shm *shm;
> > size_t sz;
> > size_t n;
> > + struct page **pages;
> > + size_t page_count;
> >
> > arg->ret_origin = TEEC_ORIGIN_COMMS;
> >
> > @@ -716,32 +717,23 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
> > return;
> > }
> >
> > - if (tee_shm_get_pa(shm, 0, &pa)) {
> > - arg->ret = TEEC_ERROR_BAD_PARAMETERS;
> > - goto bad;
> > - }
> > -
> > - sz = tee_shm_get_size(shm);
> > -
> > - if (tee_shm_is_dynamic(shm)) {
> > - struct page **pages;
> > + /*
> > + * If there are pages it's dynamically allocated shared memory (not
> > + * from the reserved shared memory pool) and needs to be
> > + * registered.
> > + */
> > + pages = tee_shm_get_pages(shm, &page_count);
> > + if (pages) {
> > u64 *pages_list;
> > - size_t page_num;
> > -
> > - pages = tee_shm_get_pages(shm, &page_num);
> > - if (!pages || !page_num) {
> > - arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
> > - goto bad;
> > - }
> >
> > - pages_list = optee_allocate_pages_list(page_num);
> > + pages_list = optee_allocate_pages_list(page_count);
> > if (!pages_list) {
> > arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
> > goto bad;
> > }
> >
> > call_ctx->pages_list = pages_list;
> > - call_ctx->num_entries = page_num;
> > + call_ctx->num_entries = page_count;
> >
> > arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
> > OPTEE_MSG_ATTR_NONCONTIG;
> > @@ -752,17 +744,22 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
> > arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
> > (tee_shm_get_page_offset(shm) &
> > (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
> > - arg->params[0].u.tmem.size = tee_shm_get_size(shm);
> > - arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
> >
> > - optee_fill_pages_list(pages_list, pages, page_num,
> > + optee_fill_pages_list(pages_list, pages, page_count,
> > tee_shm_get_page_offset(shm));
> > } else {
> > + phys_addr_t pa;
> > +
> > + if (tee_shm_get_pa(shm, 0, &pa)) {
> > + arg->ret = TEEC_ERROR_BAD_PARAMETERS;
> > + goto bad;
> > + }
> > +
> > arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
> > arg->params[0].u.tmem.buf_ptr = pa;
> > - arg->params[0].u.tmem.size = sz;
> > - arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
> > }
> > + arg->params[0].u.tmem.size = tee_shm_get_size(shm);
> > + arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
> >
> > arg->ret = TEEC_SUCCESS;
> > return;
> > --
> > 2.34.1
> >