Message-Id: <20191229172720.679452422@linuxfoundation.org>
Date: Sun, 29 Dec 2019 18:25:35 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Pi-Hsun Shih <pihsun@...omium.org>,
Hans Verkuil <hverkuil-cisco@...all.nl>,
Mauro Carvalho Chehab <mchehab@...nel.org>,
Sasha Levin <sashal@...nel.org>
Subject: [PATCH 5.4 282/434] media: v4l2-ctrl: Lock main_hdl on operations of requests_queued.
From: Pi-Hsun Shih <pihsun@...omium.org>

[ Upstream commit df4a3e7f88e3b0d7ae46d70b9ff8e3c0ea730785 ]

There's a race condition between the list_del_init in
v4l2_ctrl_request_complete and the list_add_tail in
v4l2_ctrl_request_queue, since they can be called from different
threads and the requests_queued list is not protected by a lock. This
can leave the v4l2_ctrl_handler on the requests_queued list while
request_is_queued is already set to false, which would cause a
use-after-free if the v4l2_ctrl_handler is later released.

Fix this by taking the ->lock of main_hdl (the owner of the
requests_queued list) around all list operations on the
->requests_queued list.
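
As a rough illustration (not part of the patch), here is a minimal
userspace C sketch of the pattern the fix applies: every add or remove
on the shared list, together with the queued flag, is done under the
list owner's mutex, so a completing thread can never see the flag
cleared while the node is still linked. All names here (queue_owner,
queue_node, node_queue, node_complete) are hypothetical stand-ins for
the kernel structures, and pthreads stands in for the kernel mutex API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue_node {
	struct queue_node *prev, *next;
	bool queued;                  /* mirrors hdl->request_is_queued */
};

struct queue_owner {
	pthread_mutex_t lock;         /* mirrors main_hdl->lock */
	struct queue_node head;       /* mirrors main_hdl->requests_queued */
};

static void list_init(struct queue_node *n)
{
	n->prev = n->next = n;
}

/* like v4l2_ctrl_request_queue(): link at the tail under the lock */
static void node_queue(struct queue_owner *o, struct queue_node *n)
{
	pthread_mutex_lock(&o->lock);
	n->prev = o->head.prev;
	n->next = &o->head;
	o->head.prev->next = n;
	o->head.prev = n;
	n->queued = true;
	pthread_mutex_unlock(&o->lock);
}

/* like v4l2_ctrl_request_unbind(): unlink and clear the flag under
 * the same lock, so flag and list state always agree */
static void node_complete(struct queue_owner *o, struct queue_node *n)
{
	pthread_mutex_lock(&o->lock);
	if (n->queued) {
		n->prev->next = n->next;
		n->next->prev = n->prev;
		list_init(n);
		n->queued = false;
	}
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	struct queue_owner owner = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct queue_node node;

	list_init(&owner.head);
	list_init(&node);
	node.queued = false;

	node_queue(&owner, &node);    /* would run in the queueing thread */
	node_complete(&owner, &node); /* would run in the completing thread */

	printf("still linked: %s\n",
	       owner.head.next == &owner.head ? "no" : "yes");
	return 0;
}

Without the lock/unlock pairs, the two list operations could
interleave exactly as described above.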
Signed-off-by: Pi-Hsun Shih <pihsun@...omium.org>
Signed-off-by: Hans Verkuil <hverkuil-cisco@...all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@...nel.org>
Signed-off-by: Sasha Levin <sashal@...nel.org>
---
 drivers/media/v4l2-core/v4l2-ctrls.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 1d8f38824631..cd84dbbf6a89 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -3144,6 +3144,7 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
 	struct v4l2_ctrl_handler *prev_hdl = NULL;
 	struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
 
+	mutex_lock(main_hdl->lock);
 	if (list_empty(&main_hdl->requests_queued))
 		goto queue;
 
@@ -3175,18 +3176,22 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
 queue:
 	list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
 	hdl->request_is_queued = true;
+	mutex_unlock(main_hdl->lock);
 }
 
 static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
 {
 	struct v4l2_ctrl_handler *hdl =
 		container_of(obj, struct v4l2_ctrl_handler, req_obj);
+	struct v4l2_ctrl_handler *main_hdl = obj->priv;
 
 	list_del_init(&hdl->requests);
+	mutex_lock(main_hdl->lock);
 	if (hdl->request_is_queued) {
 		list_del_init(&hdl->requests_queued);
 		hdl->request_is_queued = false;
 	}
+	mutex_unlock(main_hdl->lock);
 }
 
 static void v4l2_ctrl_request_release(struct media_request_object *obj)
@@ -4128,9 +4133,11 @@ void v4l2_ctrl_request_complete(struct media_request *req,
 		v4l2_ctrl_unlock(ctrl);
 	}
 
+	mutex_lock(main_hdl->lock);
 	WARN_ON(!hdl->request_is_queued);
 	list_del_init(&hdl->requests_queued);
 	hdl->request_is_queued = false;
+	mutex_unlock(main_hdl->lock);
 	media_request_object_complete(obj);
 	media_request_object_put(obj);
 }
--
2.20.1