Message-ID: <CAFCwf102yW=5e=3t+ho5Hxboa2LkqrjtZTZ6KAP7v+TT=82KZw@mail.gmail.com>
Date: Sat, 16 Nov 2019 12:21:34 +0200
From: Oded Gabbay <oded.gabbay@...il.com>
To: Omer Shpigelman <oshpigelman@...ana.ai>
Cc: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH 8/8] habanalabs: remove unnecessary checks
On Thu, Nov 14, 2019 at 8:24 PM Omer Shpigelman <oshpigelman@...ana.ai> wrote:
>
> Now that the VA block free list is no longer updated on context close,
> in order to optimize that flow, there is no need for the sanity checks
> on the list contents, as they are guaranteed to fail.
> In addition, remove the "context closing with VA in use" print during
> hard reset, as this situation is a side effect of the failure that
> caused the hard reset.
>
> Signed-off-by: Omer Shpigelman <oshpigelman@...ana.ai>
> ---
> drivers/misc/habanalabs/memory.c | 40 +++++++-------------------------
> 1 file changed, 9 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
> index fa9462ee9d6f..b009ac4c62c0 100644
> --- a/drivers/misc/habanalabs/memory.c
> +++ b/drivers/misc/habanalabs/memory.c
> @@ -544,7 +544,6 @@ static u64 get_va_block(struct hl_device *hdev,
> /* calc the first possible aligned addr */
> valid_start = va_block->start;
>
> -
> if (valid_start & (page_size - 1)) {
> valid_start &= page_mask;
> valid_start += page_size;
> @@ -1589,43 +1588,16 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
> * @hdev : pointer to the habanalabs structure
> * va_range : pointer to virtual addresses range
> *
> - * This function initializes the following:
> - * - Checks that the given range contains the whole initial range
> + * This function does the following:
> * - Frees the virtual addresses block list and its lock
> */
> static void hl_va_range_fini(struct hl_device *hdev,
> struct hl_va_range *va_range)
> {
> - struct hl_vm_va_block *va_block;
> -
> - if (list_empty(&va_range->list)) {
> - dev_warn(hdev->dev,
> - "va list should not be empty on cleanup!\n");
> - goto out;
> - }
> -
> - if (!list_is_singular(&va_range->list)) {
> - dev_warn(hdev->dev,
> - "va list should not contain multiple blocks on cleanup!\n");
> - goto free_va_list;
> - }
> -
> - va_block = list_first_entry(&va_range->list, typeof(*va_block), node);
> -
> - if (va_block->start != va_range->start_addr ||
> - va_block->end != va_range->end_addr) {
> - dev_warn(hdev->dev,
> - "wrong va block on cleanup, from 0x%llx to 0x%llx\n",
> - va_block->start, va_block->end);
> - goto free_va_list;
> - }
> -
> -free_va_list:
> mutex_lock(&va_range->lock);
> clear_va_list_locked(hdev, &va_range->list);
> mutex_unlock(&va_range->lock);
>
> -out:
> mutex_destroy(&va_range->lock);
> }
>
> @@ -1660,8 +1632,14 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
>
> hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
>
> - if (!hash_empty(ctx->mem_hash))
> - dev_notice(hdev->dev, "ctx is freed while it has va in use\n");
> +	/*
> +	 * Something already went wrong on hard reset, so there is no point
> +	 * in printing another error that is merely a side effect of it
> +	 */
> + if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
> + dev_notice(hdev->dev,
> + "ctx %d is freed while it has va in use\n",
> + ctx->asid);
>
> hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
> dev_dbg(hdev->dev,
> --
> 2.17.1
>
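For anyone skimming the thread, the last hunk boils down to the guard
pattern below. This is a minimal, compilable user-space sketch of that
logic, not driver code; struct ctx_state, va_in_use and
warn_on_leaked_va() are hypothetical stand-ins for the real hl_ctx,
mem_hash and dev_notice() pieces:

#include <stdbool.h>
#include <stdio.h>

struct ctx_state {
	int asid;	/* context address-space ID */
	bool va_in_use;	/* stand-in for !hash_empty(ctx->mem_hash) */
};

/*
 * Print the notice only when the leaked VA is not a side effect of a
 * hard reset that is already in progress.
 */
static void warn_on_leaked_va(const struct ctx_state *ctx,
			      bool hard_reset_pending)
{
	if (!hard_reset_pending && ctx->va_in_use)
		printf("ctx %d is freed while it has va in use\n",
		       ctx->asid);
}

int main(void)
{
	struct ctx_state ctx = { .asid = 1, .va_in_use = true };

	warn_on_leaked_va(&ctx, false);	/* prints the notice */
	warn_on_leaked_va(&ctx, true);	/* suppressed during hard reset */
	return 0;
}
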
This patch-set is:
Reviewed-by: Oded Gabbay <oded.gabbay@...il.com>