Message-Id: <20221019083708.27138-5-nstange@suse.de>
Date:   Wed, 19 Oct 2022 10:37:07 +0200
From:   Nicolai Stange <nstange@...e.de>
To:     Steffen Klassert <steffen.klassert@...unet.com>,
        Daniel Jordan <daniel.m.jordan@...cle.com>
Cc:     Herbert Xu <herbert@...dor.apana.org.au>,
        Martin Doucha <mdoucha@...e.cz>, linux-crypto@...r.kernel.org,
        linux-kernel@...r.kernel.org, Nicolai Stange <nstange@...e.de>
Subject: [PATCH 4/5] padata: split out dequeue operation from padata_find_next()

Currently, padata_find_next() takes a 'remove_object' argument for
specifying whether the caller wants the returned padata_priv, if any, to
get removed from the percpu reorder list it's been found on.

There are only two callsites, both from padata_reorder():
- one that dequeues the padata_priv instances to be processed in a
  loop, i.e. with 'remove_object' set to true, and
- another one near the end of padata_reorder(), with 'remove_object' set
  to false, for checking whether the reorder work needs to get
  rescheduled.

In order to deal with lifetime issues, a future commit will need to move
this latter reorder work scheduling operation to under the reorder->lock,
where pd->ps is guaranteed to exist as long as there are any padata_privs
to process. However, this lock is currently taken within
padata_find_next().

In order to be able to extend the reorder->lock to beyond the call to
padata_find_next() from padata_reorder(), provide a variant for which
the caller, rather than the callee, is expected to grab that lock.
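
For illustration, the rescheduling check at the tail of padata_reorder()
can then be structured along these lines (a sketch only;
reorder_work_needed() is a hypothetical name and the actual change is in
the diff below):

/* Sketch: the lookup now happens with reorder->lock held by the
 * caller, so the lock could later be extended across the scheduling
 * decision itself.
 */
static bool reorder_work_needed(struct parallel_data *pd,
				struct padata_list *reorder)
{
	bool need;

	spin_lock(&reorder->lock);
	need = __padata_find_next(pd, reorder) != NULL;
	spin_unlock(&reorder->lock);

	return need;
}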

Split padata_find_next() into two parts:
- __padata_find_next(), which expects the caller to hold the reorder->lock
  and only returns the found padata_priv, if any, without removing it
  from the queue.
- padata_dequeue_next(), with functionality equivalent to the former
  padata_find_next(pd, remove_object=true) and implemented by means of
  the factored-out __padata_find_next().

Adapt the two callsites in padata_reorder() as appropriate.

There is no change in functionality.
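
As a self-contained illustration of the resulting division of labor,
consider the following user-space toy model (hypothetical names, a
pthread mutex instead of a spinlock, and not kernel code): the locked
helper only peeks at the queue and expects the caller to hold the lock,
while the dequeue wrapper takes the lock itself, mirroring the
__padata_find_next()/padata_dequeue_next() split.

/* Toy model of the find/dequeue split. Build with:
 *   cc -pthread toy.c -o toy
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct toy_item {
	struct toy_item *next;
	unsigned int seq;
};

struct toy_queue {
	pthread_mutex_t lock;
	struct toy_item *head;
	unsigned int processed;	/* sequence number expected next */
};

/* Caller must hold q->lock; returns the head item without removing
 * it, or NULL if the next-in-order item has not been queued yet. */
static struct toy_item *toy_find_locked(struct toy_queue *q)
{
	if (!q->head || q->head->seq != q->processed)
		return NULL;
	return q->head;
}

/* Takes the lock itself and dequeues via the locked helper. */
static struct toy_item *toy_dequeue(struct toy_queue *q)
{
	struct toy_item *item;

	pthread_mutex_lock(&q->lock);
	item = toy_find_locked(q);
	if (item) {
		q->head = item->next;
		q->processed++;
	}
	pthread_mutex_unlock(&q->lock);
	return item;
}

int main(void)
{
	struct toy_item i0 = { NULL, 0 };
	struct toy_queue q = { PTHREAD_MUTEX_INITIALIZER, &i0, 0 };
	struct toy_item *item;

	/* Peek-only check, as at the end of padata_reorder(). */
	pthread_mutex_lock(&q.lock);
	printf("work pending: %s\n", toy_find_locked(&q) ? "yes" : "no");
	pthread_mutex_unlock(&q.lock);

	item = toy_dequeue(&q);	/* as in padata_reorder()'s main loop */
	if (item)
		printf("dequeued seq=%u\n", item->seq);
	return 0;
}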

Signed-off-by: Nicolai Stange <nstange@...e.de>
---
 kernel/padata.c | 57 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 37 insertions(+), 20 deletions(-)

diff --git a/kernel/padata.c b/kernel/padata.c
index b79226727ef7..e9eab3e94cfc 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -230,29 +230,24 @@ int padata_do_parallel(struct padata_shell *ps,
 EXPORT_SYMBOL(padata_do_parallel);
 
 /*
- * padata_find_next - Find the next object that needs serialization.
+ * __padata_find_next - Find the next object that needs serialization.
  *
  * Return:
  * * A pointer to the control struct of the next object that needs
- *   serialization, if present in one of the percpu reorder queues.
+ *   serialization, if already present on the given percpu reorder queue.
  * * NULL, if the next object that needs serialization will
  *   be parallel processed by another cpu and is not yet present in
- *   the cpu's reorder queue.
+ *   the reorder queue.
  */
-static struct padata_priv *padata_find_next(struct parallel_data *pd,
-					    bool remove_object)
+static struct padata_priv *__padata_find_next(struct parallel_data *pd,
+					      struct padata_list *reorder)
 {
 	struct padata_priv *padata;
-	struct padata_list *reorder;
-	int cpu = pd->cpu;
 
-	reorder = per_cpu_ptr(pd->reorder_list, cpu);
+	lockdep_assert_held(&reorder->lock);
 
-	spin_lock(&reorder->lock);
-	if (list_empty(&reorder->list)) {
-		spin_unlock(&reorder->lock);
+	if (list_empty(&reorder->list))
 		return NULL;
-	}
 
 	padata = list_entry(reorder->list.next, struct padata_priv, list);
 
@@ -260,16 +255,30 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
 	 * Checks the rare case where two or more parallel jobs have hashed to
 	 * the same CPU and one of the later ones finishes first.
 	 */
-	if (padata->seq_nr != pd->processed) {
+	if (padata->seq_nr != pd->processed)
+		return NULL;
+
+	return padata;
+}
+
+static struct padata_priv *padata_dequeue_next(struct parallel_data *pd)
+{
+	struct padata_priv *padata;
+	struct padata_list *reorder;
+	int cpu = pd->cpu;
+
+	reorder = per_cpu_ptr(pd->reorder_list, cpu);
+	spin_lock(&reorder->lock);
+
+	padata = __padata_find_next(pd, reorder);
+	if (!padata) {
 		spin_unlock(&reorder->lock);
 		return NULL;
 	}
 
-	if (remove_object) {
-		list_del_init(&padata->list);
-		++pd->processed;
-		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
-	}
+	list_del_init(&padata->list);
+	++pd->processed;
+	pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
 
 	spin_unlock(&reorder->lock);
 	return padata;
@@ -297,7 +306,7 @@ static bool padata_reorder(struct parallel_data *pd)
 		return false;
 
 	while (1) {
-		padata = padata_find_next(pd, true);
+		padata = padata_dequeue_next(pd);
 
 		/*
 		 * If the next object that needs serialization is parallel
@@ -330,8 +339,16 @@ static bool padata_reorder(struct parallel_data *pd)
 	smp_mb();
 
 	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
-	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
+	if (!list_empty(&reorder->list)) {
+		spin_lock(&reorder->lock);
+		if (!__padata_find_next(pd, reorder)) {
+			spin_unlock(&reorder->lock);
+			return false;
+		}
+		spin_unlock(&reorder->lock);
+
 		return queue_work(pinst->serial_wq, &pd->reorder_work);
+	}
 
 	return false;
 }
-- 
2.37.3
