Message-Id: <20180131230413.27653-11-daniel.m.jordan@oracle.com>
Date:   Wed, 31 Jan 2018 18:04:10 -0500
From:   daniel.m.jordan@...cle.com
To:     linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc:     aaron.lu@...el.com, ak@...ux.intel.com, akpm@...ux-foundation.org,
        Dave.Dice@...cle.com, dave@...olabs.net,
        khandual@...ux.vnet.ibm.com, ldufour@...ux.vnet.ibm.com,
        mgorman@...e.de, mhocko@...nel.org, pasha.tatashin@...cle.com,
        steven.sistare@...cle.com, yossi.lev@...cle.com
Subject: [RFC PATCH v1 10/13] mm: add LRU batch lock APIs

Add the LRU batch locking APIs themselves.  This adds the final piece
of infrastructure necessary for locking batches on an LRU list.

The APIs lock a specific page on the LRU list: for a non-sentinel page,
only the appropriate LRU batch lock is taken; for a sentinel page, the
node's/memcg's lru_lock is taken in addition.

These interfaces are designed for performance: when used in a loop, they
minimize the number of times the same lock(s) are needlessly dropped and
then reacquired.  They're difficult to use correctly, but will do for a
prototype.
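
For illustration only (this sketch is not part of the patch), a caller
walking a private list of pages might carry the lock state across
iterations like this; the list "pages" and the loop body are
placeholders:

	spinlock_t *locked_lru_batch = NULL;
	struct pglist_data *locked_pgdat = NULL;
	unsigned long flags;
	struct page *page;

	list_for_each_entry(page, pages, lru) {
		/*
		 * Drop only the locks the next page does not need, then
		 * take whatever it does need; in the common case of a
		 * non-sentinel page in the same batch, the batch lock is
		 * simply retained across iterations.
		 */
		if (locked_lru_batch)
			lru_batch_unlock(page, &locked_lru_batch,
					 &locked_pgdat, &flags);
		lru_batch_lock(page, &locked_lru_batch, &locked_pgdat,
			       &flags);

		/* ... operate on page under the appropriate lock(s) ... */
	}
	if (locked_lru_batch)
		lru_batch_unlock(NULL, &locked_lru_batch, &locked_pgdat,
				 &flags);

The final lru_batch_unlock(NULL, ...) call drops whatever locks are
still held once the loop ends.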

Signed-off-by: Daniel Jordan <daniel.m.jordan@...cle.com>
---
 include/linux/mm_inline.h | 73 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 73 insertions(+)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 1f1657c75b1b..11d9fcf93f2b 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -210,6 +210,79 @@ static __always_inline void lru_unlock_all(struct pglist_data *pgdat,
 		local_irq_enable();
 }
 
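+/* Return the lock that guards the LRU batch containing @page. */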
+static __always_inline spinlock_t *page_lru_batch_lock(struct page *page)
+{
+	return &page_pgdat(page)->lru_batch_locks[page->lru_batch].lock;
+}
+
+/**
+ * lru_batch_lock - lock the LRU list batch containing @page
+ * @page: page whose batch should be locked
+ * @locked_lru_batch: batch lock the caller already holds, or NULL
+ * @locked_pgdat: node whose lru_lock the caller already holds, or NULL
+ * @flags: irq flags saved when the held batch lock was taken
+ *
+ * Takes the batch lock covering @page and, for a sentinel page, the node's
+ * lru_lock as well.  Locks the caller already holds are not retaken.
+ */
+static __always_inline void lru_batch_lock(struct page *page,
+					   spinlock_t **locked_lru_batch,
+					   struct pglist_data **locked_pgdat,
+					   unsigned long *flags)
+{
+	spinlock_t *lru_batch = page_lru_batch_lock(page);
+	struct pglist_data *pgdat = page_pgdat(page);
+
+	VM_BUG_ON(*locked_pgdat && !page->lru_sentinel);
+
+	if (lru_batch != *locked_lru_batch) {
+		VM_BUG_ON(*locked_pgdat);
+		VM_BUG_ON(*locked_lru_batch);
+		spin_lock_irqsave(lru_batch, *flags);
+		*locked_lru_batch = lru_batch;
+		if (page->lru_sentinel) {
+			spin_lock(&pgdat->lru_lock);
+			*locked_pgdat = pgdat;
+		}
+	} else if (!*locked_pgdat && page->lru_sentinel) {
+		spin_lock(&pgdat->lru_lock);
+		*locked_pgdat = pgdat;
+	}
+}
+
+/**
+ * lru_batch_unlock - unlock an LRU list batch
+ * @page: next page the caller intends to lock, or NULL to drop all locks
+ * @locked_lru_batch: batch lock the caller holds
+ * @locked_pgdat: node whose lru_lock the caller holds, or NULL
+ * @flags: irq flags saved when the held batch lock was taken
+ *
+ * Drops only the locks that @page does not also require, keeping the rest
+ * held for a subsequent lru_batch_lock() call.
+ */
+static __always_inline void lru_batch_unlock(struct page *page,
+					     spinlock_t **locked_lru_batch,
+					     struct pglist_data **locked_pgdat,
+					     unsigned long *flags)
+{
+	spinlock_t *lru_batch = (page) ? page_lru_batch_lock(page) : NULL;
+
+	VM_BUG_ON(!*locked_lru_batch);
+
+	if (lru_batch != *locked_lru_batch) {
+		if (*locked_pgdat) {
+			spin_unlock(&(*locked_pgdat)->lru_lock);
+			*locked_pgdat = NULL;
+		}
+		spin_unlock_irqrestore(*locked_lru_batch, *flags);
+		*locked_lru_batch = NULL;
+	} else if (*locked_pgdat && !page->lru_sentinel) {
+		spin_unlock(&(*locked_pgdat)->lru_lock);
+		*locked_pgdat = NULL;
+	}
+}
+
 /**
  * page_lru_base_type - which LRU list type should a page be on?
  * @page: the page to test
-- 
2.16.1
