Message-ID: <c460d3d0-792a-4daf-8f95-9005e24261a1@collabora.com>
Date: Wed, 8 Nov 2023 16:26:51 +0100
From: Andrzej Pietrasiewicz <andrzej.p@...labora.com>
To: Benjamin Gaignard <benjamin.gaignard@...labora.com>,
mchehab@...nel.org, tfiga@...omium.org, m.szyprowski@...sung.com,
ming.qian@....com, ezequiel@...guardiasur.com.ar,
p.zabel@...gutronix.de, gregkh@...uxfoundation.org,
hverkuil-cisco@...all.nl, nicolas.dufresne@...labora.com
Cc: linux-media@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org,
linux-mediatek@...ts.infradead.org, linux-arm-msm@...r.kernel.org,
linux-rockchip@...ts.infradead.org, linux-staging@...ts.linux.dev,
kernel@...labora.com
Subject: Re: [PATCH v14 03/56] media: videobuf2: Stop spamming kernel log with
all queue counter
On 31.10.2023 17:30, Benjamin Gaignard wrote:
> Only report unbalanced queue counters to avoid spamming the kernel log
> with useless information.
>
> Signed-off-by: Benjamin Gaignard <benjamin.gaignard@...labora.com>
Reviewed-by: Andrzej Pietrasiewicz <andrzej.p@...labora.com>
> ---
> .../media/common/videobuf2/videobuf2-core.c | 79 +++++++++++--------
> 1 file changed, 44 insertions(+), 35 deletions(-)
>
> diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
> index 09be8e026044..47dba2a20d73 100644
> --- a/drivers/media/common/videobuf2/videobuf2-core.c
> +++ b/drivers/media/common/videobuf2/videobuf2-core.c
> @@ -533,25 +533,26 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
>
> #ifdef CONFIG_VIDEO_ADV_DEBUG
> /*
> - * Check that all the calls were balances during the life-time of this
> - * queue. If not (or if the debug level is 1 or up), then dump the
> - * counters to the kernel log.
> + * Check that all the calls were balanced during the life-time of this
> + * queue. If not then dump the counters to the kernel log.
> */
> if (q->num_buffers) {
> bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
> q->cnt_prepare_streaming != q->cnt_unprepare_streaming ||
> q->cnt_wait_prepare != q->cnt_wait_finish;
>
> - if (unbalanced || debug) {
> - pr_info("counters for queue %p:%s\n", q,
> - unbalanced ? " UNBALANCED!" : "");
> - pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n",
> - q->cnt_queue_setup, q->cnt_start_streaming,
> - q->cnt_stop_streaming);
> - pr_info(" prepare_streaming: %u unprepare_streaming: %u\n",
> - q->cnt_prepare_streaming, q->cnt_unprepare_streaming);
> - pr_info(" wait_prepare: %u wait_finish: %u\n",
> - q->cnt_wait_prepare, q->cnt_wait_finish);
> + if (unbalanced) {
> + pr_info("unbalanced counters for queue %p:\n", q);
> + if (q->cnt_start_streaming != q->cnt_stop_streaming)
> + pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n",
> + q->cnt_queue_setup, q->cnt_start_streaming,
> + q->cnt_stop_streaming);
> + if (q->cnt_prepare_streaming != q->cnt_unprepare_streaming)
> + pr_info(" prepare_streaming: %u unprepare_streaming: %u\n",
> + q->cnt_prepare_streaming, q->cnt_unprepare_streaming);
> + if (q->cnt_wait_prepare != q->cnt_wait_finish)
> + pr_info(" wait_prepare: %u wait_finish: %u\n",
> + q->cnt_wait_prepare, q->cnt_wait_finish);
> }
> q->cnt_queue_setup = 0;
> q->cnt_wait_prepare = 0;
> @@ -572,29 +573,37 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
> vb->cnt_buf_prepare != vb->cnt_buf_finish ||
> vb->cnt_buf_init != vb->cnt_buf_cleanup;
>
> - if (unbalanced || debug) {
> - pr_info(" counters for queue %p, buffer %d:%s\n",
> - q, buffer, unbalanced ? " UNBALANCED!" : "");
> - pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
> - vb->cnt_buf_init, vb->cnt_buf_cleanup,
> - vb->cnt_buf_prepare, vb->cnt_buf_finish);
> - pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
> - vb->cnt_buf_out_validate, vb->cnt_buf_queue,
> - vb->cnt_buf_done, vb->cnt_buf_request_complete);
> - pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
> - vb->cnt_mem_alloc, vb->cnt_mem_put,
> - vb->cnt_mem_prepare, vb->cnt_mem_finish,
> - vb->cnt_mem_mmap);
> - pr_info(" get_userptr: %u put_userptr: %u\n",
> - vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
> - pr_info(" attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
> - vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
> - vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
> - pr_info(" get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
> + if (unbalanced) {
> + pr_info("unbalanced counters for queue %p, buffer %d:\n",
> + q, buffer);
> + if (vb->cnt_buf_init != vb->cnt_buf_cleanup)
> + pr_info(" buf_init: %u buf_cleanup: %u\n",
> + vb->cnt_buf_init, vb->cnt_buf_cleanup);
> + if (vb->cnt_buf_prepare != vb->cnt_buf_finish)
> + pr_info(" buf_prepare: %u buf_finish: %u\n",
> + vb->cnt_buf_prepare, vb->cnt_buf_finish);
> + if (vb->cnt_buf_queue != vb->cnt_buf_done)
> + pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
> + vb->cnt_buf_out_validate, vb->cnt_buf_queue,
> + vb->cnt_buf_done, vb->cnt_buf_request_complete);
> + if (vb->cnt_mem_alloc != vb->cnt_mem_put)
> + pr_info(" alloc: %u put: %u\n",
> + vb->cnt_mem_alloc, vb->cnt_mem_put);
> + if (vb->cnt_mem_prepare != vb->cnt_mem_finish)
> + pr_info(" prepare: %u finish: %u\n",
> + vb->cnt_mem_prepare, vb->cnt_mem_finish);
> + if (vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr)
> + pr_info(" get_userptr: %u put_userptr: %u\n",
> + vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
> + if (vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf)
> + pr_info(" attach_dmabuf: %u detach_dmabuf: %u\n",
> + vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf);
> + if (vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf)
> + pr_info(" map_dmabuf: %u unmap_dmabuf: %u\n",
> + vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
> + pr_info(" get_dmabuf: %u num_users: %u\n",
> vb->cnt_mem_get_dmabuf,
> - vb->cnt_mem_num_users,
> - vb->cnt_mem_vaddr,
> - vb->cnt_mem_cookie);
> + vb->cnt_mem_num_users);
> }
> }
> #endif
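
For anyone skimming the thread, the net effect of the change is that a
cleanly torn-down queue now logs nothing at all, and only the mismatched
counter pairs are printed otherwise. Below is a minimal standalone
userspace sketch of that reporting pattern; the struct and counter names
are illustrative placeholders, not the real vb2_queue fields:

#include <stdio.h>

/*
 * Standalone sketch of the reporting pattern the patch switches to:
 * each counter pair is printed only when it is unbalanced, so a clean
 * teardown produces no log output at all. The struct and names are
 * placeholders for illustration, not the actual vb2 types.
 */
struct dummy_queue {
	unsigned int cnt_start_streaming;
	unsigned int cnt_stop_streaming;
	unsigned int cnt_prepare_streaming;
	unsigned int cnt_unprepare_streaming;
};

static void report_unbalanced(const char *name, const struct dummy_queue *q)
{
	int unbalanced =
		q->cnt_start_streaming != q->cnt_stop_streaming ||
		q->cnt_prepare_streaming != q->cnt_unprepare_streaming;

	if (!unbalanced)
		return;	/* balanced queues stay silent */

	printf("unbalanced counters for queue %s:\n", name);
	if (q->cnt_start_streaming != q->cnt_stop_streaming)
		printf("     start_streaming: %u stop_streaming: %u\n",
		       q->cnt_start_streaming, q->cnt_stop_streaming);
	if (q->cnt_prepare_streaming != q->cnt_unprepare_streaming)
		printf("     prepare_streaming: %u unprepare_streaming: %u\n",
		       q->cnt_prepare_streaming, q->cnt_unprepare_streaming);
}

int main(void)
{
	struct dummy_queue balanced = { 2, 2, 2, 2 };
	struct dummy_queue broken   = { 2, 1, 2, 2 };

	report_unbalanced("balanced", &balanced);	/* prints nothing */
	report_unbalanced("broken", &broken);		/* prints only the start/stop pair */
	return 0;
}

Built with a plain C compiler, the first call produces no output and the
second prints only the mismatched pair, which is exactly the behaviour
the patch gives __vb2_queue_free().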