Message-Id: <1531413965-5401-3-git-send-email-longman@redhat.com>
Date:   Thu, 12 Jul 2018 12:46:01 -0400
From:   Waiman Long <longman@...hat.com>
To:     Alexander Viro <viro@...iv.linux.org.uk>,
        Jonathan Corbet <corbet@....net>,
        "Luis R. Rodriguez" <mcgrof@...nel.org>,
        Kees Cook <keescook@...omium.org>
Cc:     linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
        linux-mm@...ck.org, linux-doc@...r.kernel.org,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Jan Kara <jack@...e.cz>,
        "Paul E. McKenney" <paulmck@...ux.vnet.ibm.com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Ingo Molnar <mingo@...nel.org>,
        Miklos Szeredi <mszeredi@...hat.com>,
        Matthew Wilcox <willy@...radead.org>,
        Larry Woodman <lwoodman@...hat.com>,
        James Bottomley <James.Bottomley@...senPartnership.com>,
        "Wangkai (Kevin C)" <wangkai86@...wei.com>,
        Michal Hocko <mhocko@...nel.org>,
        Waiman Long <longman@...hat.com>
Subject: [PATCH v7 2/6] fs/dcache: Add negative dentries to LRU head initially

For negative dentries that are accessed once and never reused, there is
not much value in putting them at the tail of the LRU list and keeping
them there for a long time.

A new DCACHE_NEW_NEGATIVE flag is now set on a negative dentry when it
is initially created. When such a dentry is added to the LRU, it is
placed at the head of the list so that, if it is never accessed again,
it will be the first to go when a shrinker runs. The flag is cleared
after the LRU addition so that the dentry is added to the tail like any
other if it is later pulled off the LRU and put back on again.
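
To make the intended behaviour concrete, here is a minimal user-space
toy model of the policy (a sketch only, not kernel code; the structure
and helper names are made up to mirror DCACHE_NEW_NEGATIVE,
list_lru_add() and list_lru_add_head() from the patch below):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a dentry on an LRU list. */
struct entry {
	const char *name;
	bool negative;
	bool new_negative;		/* models DCACHE_NEW_NEGATIVE */
	struct entry *prev, *next;
};

/* Circular doubly-linked LRU: head.next is the first reclaim victim. */
struct lru {
	struct entry head;
};

static void lru_init(struct lru *lru)
{
	lru->head.next = lru->head.prev = &lru->head;
}

static void link_after(struct entry *e, struct entry *pos)
{
	e->prev = pos;
	e->next = pos->next;
	pos->next->prev = e;
	pos->next = e;
}

/* Models list_lru_add(): recently used entries go to the tail. */
static void lru_add_tail(struct lru *lru, struct entry *e)
{
	link_after(e, lru->head.prev);
}

/* Models list_lru_add_head(): entries at the head are reclaimed first. */
static void lru_add_head(struct lru *lru, struct entry *e)
{
	link_after(e, &lru->head);
}

/* Models d_lru_add() after this patch. */
static void lru_add(struct lru *lru, struct entry *e)
{
	if (e->negative && e->new_negative) {
		/* Head placement happens only on the first insertion. */
		lru_add_head(lru, e);
		e->new_negative = false;
		return;
	}
	lru_add_tail(lru, e);
}

int main(void)
{
	struct lru lru;
	struct entry pos = { .name = "positive dentry" };
	struct entry neg = { .name = "new negative dentry",
			     .negative = true, .new_negative = true };

	lru_init(&lru);
	lru_add(&lru, &pos);	/* goes to the tail */
	lru_add(&lru, &neg);	/* goes to the head */

	/* Walk in reclaim order: the new negative dentry comes out first. */
	for (struct entry *e = lru.head.next; e != &lru.head; e = e->next)
		printf("%s\n", e->name);

	return 0;
}

A shrinker walking the toy list from the head reclaims the never-reused
negative dentry first, which is the ordering this patch aims for; if the
entry is later pulled off the list and re-added, it goes to the tail
like any other.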

Suggested-by: Larry Woodman <lwoodman@...hat.com>
Signed-off-by: Waiman Long <longman@...hat.com>
---
 fs/dcache.c              | 25 +++++++++++++++++--------
 include/linux/dcache.h   |  1 +
 include/linux/list_lru.h | 17 +++++++++++++++++
 mm/list_lru.c            | 19 +++++++++++++++++--
 4 files changed, 52 insertions(+), 10 deletions(-)

diff --git a/fs/dcache.c b/fs/dcache.c
index dbab6c2..0be5972 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -242,12 +242,6 @@ static inline void __neg_dentry_inc(struct dentry *dentry)
 	this_cpu_inc(nr_dentry_neg);
 }
 
-static inline void neg_dentry_inc(struct dentry *dentry)
-{
-	if (unlikely(d_is_negative(dentry)))
-		__neg_dentry_inc(dentry);
-}
-
 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
 {
 	/*
@@ -352,7 +346,7 @@ static inline void __d_set_inode_and_type(struct dentry *dentry,
 
 	dentry->d_inode = inode;
 	flags = READ_ONCE(dentry->d_flags);
-	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU | DCACHE_NEW_NEGATIVE);
 	flags |= type_flags;
 	WRITE_ONCE(dentry->d_flags, flags);
 }
@@ -432,8 +426,20 @@ static void d_lru_add(struct dentry *dentry)
 	D_FLAG_VERIFY(dentry, 0);
 	dentry->d_flags |= DCACHE_LRU_LIST;
 	this_cpu_inc(nr_dentry_unused);
+	if (d_is_negative(dentry)) {
+		__neg_dentry_inc(dentry);
+		if (dentry->d_flags & DCACHE_NEW_NEGATIVE) {
+			/*
+		 * Add the negative dentry to the head once; it
+			 * will be added to the tail next time.
+			 */
+			WARN_ON_ONCE(!list_lru_add_head(
+				&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+			dentry->d_flags &= ~DCACHE_NEW_NEGATIVE;
+			return;
+		}
+	}
 	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
-	neg_dentry_inc(dentry);
 }
 
 static void d_lru_del(struct dentry *dentry)
@@ -2647,6 +2653,9 @@ static inline void __d_add(struct dentry *dentry, struct inode *inode)
 		__d_set_inode_and_type(dentry, inode, add_flags);
 		raw_write_seqcount_end(&dentry->d_seq);
 		fsnotify_update_flags(dentry);
+	} else {
+		/* It is a negative dentry; add it to the LRU head initially. */
+		dentry->d_flags |= DCACHE_NEW_NEGATIVE;
 	}
 	__d_rehash(dentry);
 	if (dir)
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 6e06d91..89cf348 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -215,6 +215,7 @@ struct dentry_operations {
 #define DCACHE_FALLTHRU			0x01000000 /* Fall through to lower layer */
 #define DCACHE_ENCRYPTED_WITH_KEY	0x02000000 /* dir is encrypted with a valid key */
 #define DCACHE_OP_REAL			0x04000000
+#define DCACHE_NEW_NEGATIVE		0x08000000 /* New negative dentry */
 
 #define DCACHE_PAR_LOOKUP		0x10000000 /* being looked up (with parent locked shared) */
 #define DCACHE_DENTRY_CURSOR		0x20000000
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 96def9d..1cdbfb0 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -86,6 +86,23 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
 bool list_lru_add(struct list_lru *lru, struct list_head *item);
 
 /**
+ * list_lru_add_head: add an element to the lru list's head
+ * @list_lru: the lru pointer
+ * @item: the item to be added.
+ *
+ * This is similar to list_lru_add(). The only difference is where the
+ * new item is placed. list_lru_add() adds the new item to the tail,
+ * as it is the most recently used one, whereas list_lru_add_head()
+ * adds the new item to the head so that it will be the first to go
+ * when a shrinker is running. This function should therefore only be
+ * used for less important items that may be reclaimed first when the
+ * system is under memory pressure.
+ *
+ * Return value: true if the list was updated, false otherwise
+ */
+bool list_lru_add_head(struct list_lru *lru, struct list_head *item);
+
+/**
  * list_lru_del: delete an element to the lru list
  * @list_lru: the lru pointer
  * @item: the item to be deleted.
diff --git a/mm/list_lru.c b/mm/list_lru.c
index fcfb6c8..f079fe2 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -107,7 +107,8 @@ static inline bool list_lru_memcg_aware(struct list_lru *lru)
 }
 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
-bool list_lru_add(struct list_lru *lru, struct list_head *item)
+static inline bool __list_lru_add(struct list_lru *lru, struct list_head *item,
+				  const bool add_tail)
 {
 	int nid = page_to_nid(virt_to_page(item));
 	struct list_lru_node *nlru = &lru->node[nid];
@@ -116,7 +117,10 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 	spin_lock(&nlru->lock);
 	if (list_empty(item)) {
 		l = list_lru_from_kmem(nlru, item);
-		list_add_tail(item, &l->list);
+		if (add_tail)
+			list_add_tail(item, &l->list);
+		else
+			list_add(item, &l->list);
 		l->nr_items++;
 		nlru->nr_items++;
 		spin_unlock(&nlru->lock);
@@ -125,8 +129,19 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 	spin_unlock(&nlru->lock);
 	return false;
 }
+
+bool list_lru_add(struct list_lru *lru, struct list_head *item)
+{
+	return __list_lru_add(lru, item, true);
+}
 EXPORT_SYMBOL_GPL(list_lru_add);
 
+bool list_lru_add_head(struct list_lru *lru, struct list_head *item)
+{
+	return __list_lru_add(lru, item, false);
+}
+EXPORT_SYMBOL_GPL(list_lru_add_head);
+
 bool list_lru_del(struct list_lru *lru, struct list_head *item)
 {
 	int nid = page_to_nid(virt_to_page(item));
-- 
1.8.3.1
