Message-Id: <20260105-reset-core-refactor-v1-11-ac443103498d@oss.qualcomm.com>
Date: Mon, 05 Jan 2026 15:15:30 +0100
From: Bartosz Golaszewski <bartosz.golaszewski@....qualcomm.com>
To: Krzysztof Kozlowski <krzk@...nel.org>,
Philipp Zabel <p.zabel@...gutronix.de>
Cc: linux-kernel@...r.kernel.org,
Bartosz Golaszewski <bartosz.golaszewski@....qualcomm.com>
Subject: [PATCH 11/15] reset: protect struct reset_control with its own
mutex
Currently we use a single, global mutex - misleadingly named
reset_list_mutex - to protect the global list of reset controller
devices, each controller's list of reset control handles and also the
internal fields of struct reset_control. Locking can be made a lot more
fine-grained if we use a separate mutex for serializing operations on
each list and another one for accessing each reset control handle.
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@....qualcomm.com>
---
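A minimal sketch of the resulting locking split, using simplified
stand-in types (ctrl/handle and their members are illustrative, not the
driver's actual structures): each handle's fields are protected by its
own mutex, while the controller's mutex only serializes its handle list.

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct ctrl {
	struct mutex lock;		/* serializes 'handles' only */
	struct list_head handles;
};

struct handle {
	struct mutex lock;		/* protects this handle's fields */
	struct list_head list;
	bool acquired;
};

static int handle_acquire(struct handle *h)
{
	/* Per-handle lock instead of one global mutex. */
	guard(mutex)(&h->lock);

	if (h->acquired)
		return 0;

	h->acquired = true;
	return 0;
}

static void ctrl_add_handle(struct ctrl *c, struct handle *h)
{
	mutex_init(&h->lock);

	/* The controller lock now guards only list membership. */
	scoped_guard(mutex, &c->lock)
		list_add(&h->list, &c->handles);
}

The upside is that contention on unrelated handles no longer funnels
through one global lock, and lock ordering stays simple: the
controller's lock is only taken for list manipulation and is never
nested inside a handle's lock.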
drivers/reset/core.c | 38 ++++++++++++++------------------------
1 file changed, 14 insertions(+), 24 deletions(-)
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index a53d445fbbfc4601c062e7e5f17278417bd37836..d712cf4315477ada98ef81975fd9b535db477e9a 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -49,6 +49,7 @@ static DEFINE_IDA(reset_gpio_ida);
* @triggered_count: Number of times this reset line has been reset. Currently
* only used for shared resets, which means that the value
* will be either 0 or 1.
+ * @lock: Serializes access to other fields of this structure
*/
struct reset_control {
struct reset_controller_dev __rcu *rcdev;
@@ -61,6 +62,7 @@ struct reset_control {
bool array;
atomic_t deassert_count;
atomic_t triggered_count;
+ struct mutex lock;
};
/**
@@ -707,7 +709,7 @@ int reset_control_acquire(struct reset_control *rstc)
if (reset_control_is_array(rstc))
return reset_control_array_acquire(rstc_to_array(rstc));
- guard(mutex)(&reset_list_mutex);
+ guard(mutex)(&rstc->lock);
if (rstc->acquired)
return 0;
@@ -859,6 +861,7 @@ __reset_control_get_internal(struct reset_controller_dev *rcdev,
list_add(&rstc->list, &rcdev->reset_control_head);
rstc->id = index;
kref_init(&rstc->refcnt);
+ mutex_init(&rstc->lock);
rstc->acquired = acquired;
rstc->shared = shared;
get_device(rcdev->dev);
@@ -872,8 +875,6 @@ static void __reset_control_release(struct kref *kref)
refcnt);
struct reset_controller_dev *rcdev;
- lockdep_assert_held(&reset_list_mutex);
-
scoped_guard(srcu, &rstc->srcu) {
rcdev = rcu_replace_pointer(rstc->rcdev, NULL, true);
if (rcdev) {
@@ -882,15 +883,14 @@ static void __reset_control_release(struct kref *kref)
}
}
+ mutex_destroy(&rstc->lock);
synchronize_srcu(&rstc->srcu);
cleanup_srcu_struct(&rstc->srcu);
kfree(rstc);
}
-static void __reset_control_put_internal(struct reset_control *rstc)
+static void reset_control_put_internal(struct reset_control *rstc)
{
- lockdep_assert_held(&reset_list_mutex);
-
if (IS_ERR_OR_NULL(rstc))
return;
@@ -1103,7 +1103,7 @@ __of_reset_control_get(struct device_node *node, const char *id, int index,
{
bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
bool gpio_fallback = false;
- struct reset_control *rstc;
+ struct reset_control *rstc = ERR_PTR(-EINVAL);
struct reset_controller_dev *rcdev;
struct of_phandle_args args;
int rstc_id;
@@ -1168,8 +1168,8 @@ __of_reset_control_get(struct device_node *node, const char *id, int index,
flags &= ~RESET_CONTROL_FLAGS_BIT_OPTIONAL;
- /* reset_list_mutex also protects the rcdev's reset_control list */
- rstc = __reset_control_get_internal(rcdev, rstc_id, flags);
+ scoped_guard(mutex, &rcdev->lock)
+ rstc = __reset_control_get_internal(rcdev, rstc_id, flags);
out_put:
of_node_put(args.np);
@@ -1212,10 +1212,8 @@ int __reset_control_bulk_get(struct device *dev, int num_rstcs,
return 0;
err:
- guard(mutex)(&reset_list_mutex);
-
while (i--)
- __reset_control_put_internal(rstcs[i].rstc);
+ reset_control_put_internal(rstcs[i].rstc);
return ret;
}
@@ -1225,10 +1223,8 @@ static void reset_control_array_put(struct reset_control_array *resets)
{
int i;
- guard(mutex)(&reset_list_mutex);
-
for (i = 0; i < resets->num_rstcs; i++)
- __reset_control_put_internal(resets->rstc[i]);
+ reset_control_put_internal(resets->rstc[i]);
kfree(resets);
}
@@ -1246,9 +1242,7 @@ void reset_control_put(struct reset_control *rstc)
return;
}
- guard(mutex)(&reset_list_mutex);
-
- __reset_control_put_internal(rstc);
+ reset_control_put_internal(rstc);
}
EXPORT_SYMBOL_GPL(reset_control_put);
@@ -1259,10 +1253,8 @@ EXPORT_SYMBOL_GPL(reset_control_put);
*/
void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
{
- guard(mutex)(&reset_list_mutex);
-
while (num_rstcs--)
- __reset_control_put_internal(rstcs[num_rstcs].rstc);
+ reset_control_put_internal(rstcs[num_rstcs].rstc);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_put);
@@ -1481,10 +1473,8 @@ of_reset_control_array_get(struct device_node *np, enum reset_control_flags flag
return &resets->base;
err_rst:
- guard(mutex)(&reset_list_mutex);
-
while (--i >= 0)
- __reset_control_put_internal(resets->rstc[i]);
+ reset_control_put_internal(resets->rstc[i]);
kfree(resets);
--
2.47.3