Message-ID: <20240802211850.1211737-4-tj@kernel.org>
Date: Fri, 2 Aug 2024 11:18:19 -1000
From: Tejun Heo <tj@...nel.org>
To: jiangshanlai@...il.com
Cc: linux-kernel@...r.kernel.org,
kernel-team@...a.com,
Tejun Heo <tj@...nel.org>
Subject: [PATCH 3/4] workqueue: s/unbound_wq_update_pwq/update_unbound_pwqs/

unbound_wq_update_pwq() reads oddly and is unwieldy. Rename it to
update_unbound_pwqs().

Signed-off-by: Tejun Heo <tj@...nel.org>
---
 kernel/workqueue.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index bb96414f68b8..0f593f2884e0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -433,8 +433,8 @@ static struct kmem_cache *pwq_cache;
 static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
 static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
 
-/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
-static struct workqueue_attrs *unbound_wq_update_pwq_attrs_buf;
+/* buf for update_unbound_pwqs(), protected by CPU hotplug exclusion */
+static struct workqueue_attrs *update_unbound_pwqs_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
@@ -5351,7 +5351,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 }
 
 /**
- * unbound_wq_update_pwq - update a pwq slot for CPU hot[un]plug
+ * update_unbound_pwqs - update a pwq slot for CPU hot[un]plug
  * @wq: the target workqueue
  * @cpu: the CPU to update the pwq slot for
 *
@@ -5369,7 +5369,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
  * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
  * responsibility to flush the work item from CPU_DOWN_PREPARE.
  */
-static void unbound_wq_update_pwq(struct workqueue_struct *wq, int cpu)
+static void update_unbound_pwqs(struct workqueue_struct *wq, int cpu)
 {
 	struct pool_workqueue *old_pwq = NULL, *pwq;
 	struct workqueue_attrs *target_attrs;
@@ -5384,7 +5384,7 @@ static void unbound_wq_update_pwq(struct workqueue_struct *wq, int cpu)
 	 * Let's use a preallocated one. The following buf is protected by
 	 * CPU hotplug exclusion.
 	 */
-	target_attrs = unbound_wq_update_pwq_attrs_buf;
+	target_attrs = update_unbound_pwqs_attrs_buf;
 
 	copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
 	wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
@@ -6589,7 +6589,7 @@ int workqueue_online_cpu(unsigned int cpu)
 			int tcpu;
 
 			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
-				unbound_wq_update_pwq(wq, tcpu);
+				update_unbound_pwqs(wq, tcpu);
 
 			mutex_lock(&wq->mutex);
 			wq_update_node_max_active(wq);
@@ -6624,7 +6624,7 @@ int workqueue_offline_cpu(unsigned int cpu)
 			int tcpu;
 
 			for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
-				unbound_wq_update_pwq(wq, tcpu);
+				update_unbound_pwqs(wq, tcpu);
 
 			mutex_lock(&wq->mutex);
 			wq_update_node_max_active(wq);
@@ -6910,7 +6910,7 @@ static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
 
 	list_for_each_entry(wq, &workqueues, list) {
 		for_each_cpu(cpu, wq_online_cpumask)
-			unbound_wq_update_pwq(wq, cpu);
+			update_unbound_pwqs(wq, cpu);
 	}
 
 	mutex_unlock(&wq_pool_mutex);
@@ -7656,8 +7656,8 @@ void __init workqueue_init_early(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	unbound_wq_update_pwq_attrs_buf = alloc_workqueue_attrs();
-	BUG_ON(!unbound_wq_update_pwq_attrs_buf);
+	update_unbound_pwqs_attrs_buf = alloc_workqueue_attrs();
+	BUG_ON(!update_unbound_pwqs_attrs_buf);
 
 	/*
 	 * If nohz_full is enabled, set power efficient workqueue as unbound.
@@ -7922,12 +7922,12 @@ void __init workqueue_init_topology(void)
 
 	/*
 	 * Workqueues allocated earlier would have all CPUs sharing the default
-	 * worker pool. Explicitly call unbound_wq_update_pwq() on all workqueue
+	 * worker pool. Explicitly call update_unbound_pwqs() on all workqueue
 	 * and CPU combinations to apply per-pod sharing.
 	 */
 	list_for_each_entry(wq, &workqueues, list) {
 		for_each_online_cpu(cpu)
-			unbound_wq_update_pwq(wq, cpu);
+			update_unbound_pwqs(wq, cpu);
 		if (wq->flags & WQ_UNBOUND) {
 			mutex_lock(&wq->mutex);
 			wq_update_node_max_active(wq);
--
2.45.2