Message-Id: <20220310140911.50924-5-chao.p.peng@linux.intel.com>
Date: Thu, 10 Mar 2022 22:09:02 +0800
From: Chao Peng <chao.p.peng@...ux.intel.com>
To: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-fsdevel@...r.kernel.org,
linux-api@...r.kernel.org, qemu-devel@...gnu.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Jonathan Corbet <corbet@....net>,
Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
x86@...nel.org, "H . Peter Anvin" <hpa@...or.com>,
Hugh Dickins <hughd@...gle.com>,
Jeff Layton <jlayton@...nel.org>,
"J . Bruce Fields" <bfields@...ldses.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Mike Rapoport <rppt@...nel.org>,
Steven Price <steven.price@....com>,
"Maciej S . Szmigiero" <mail@...iej.szmigiero.name>,
Vlastimil Babka <vbabka@...e.cz>,
Vishal Annapurve <vannapurve@...gle.com>,
Yu Zhang <yu.c.zhang@...ux.intel.com>,
Chao Peng <chao.p.peng@...ux.intel.com>,
"Kirill A . Shutemov" <kirill.shutemov@...ux.intel.com>,
luto@...nel.org, jun.nakajima@...el.com, dave.hansen@...el.com,
ak@...ux.intel.com, david@...hat.com
Subject: [PATCH v5 04/13] mm/shmem: Restrict MFD_INACCESSIBLE memory against RLIMIT_MEMLOCK

Since page migration / swapping is not supported yet, MFD_INACCESSIBLE
memory behaves like longterm pinned pages and thus should be accounted
to mm->pinned_vm and restricted by RLIMIT_MEMLOCK.

Signed-off-by: Chao Peng <chao.p.peng@...ux.intel.com>
---
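[Note, not part of the patch: a minimal userspace sketch of the
behavior this change is intended to produce. It assumes the
MFD_INACCESSIBLE flag value (0x0008U) proposed earlier in this series
and a finite RLIMIT_MEMLOCK; on a kernel with the series applied, the
fallocate() below should fail with ENOMEM for a task without
CAP_IPC_LOCK, since the request overflows the memlock limit.]

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/resource.h>

#ifndef MFD_INACCESSIBLE
#define MFD_INACCESSIBLE 0x0008U /* assumed value from this series */
#endif

int main(void)
{
	struct rlimit rl;
	int fd;

	if (getrlimit(RLIMIT_MEMLOCK, &rl))
		return 1;

	/* Fails with EINVAL on kernels without MFD_INACCESSIBLE. */
	fd = memfd_create("inaccessible", MFD_INACCESSIBLE);
	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}

	/* Ask for one page more than RLIMIT_MEMLOCK allows. */
	if (fallocate(fd, 0, 0, (off_t)rl.rlim_cur + getpagesize()))
		printf("fallocate: %s (ENOMEM expected)\n", strerror(errno));

	close(fd);
	return 0;
}
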
 mm/shmem.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 7b43e274c9a2..ae46fb96494b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -915,14 +915,17 @@ static void notify_fallocate(struct inode *inode, pgoff_t start, pgoff_t end)
 static void notify_invalidate_page(struct inode *inode, struct folio *folio,
 				   pgoff_t start, pgoff_t end)
 {
-#ifdef CONFIG_MEMFILE_NOTIFIER
 	struct shmem_inode_info *info = SHMEM_I(inode);
 
+#ifdef CONFIG_MEMFILE_NOTIFIER
 	start = max(start, folio->index);
 	end = min(end, folio->index + folio_nr_pages(folio));
 
 	memfile_notifier_invalidate(&info->memfile_notifiers, start, end);
 #endif
+
+	if (info->xflags & SHM_F_INACCESSIBLE)
+		atomic64_sub(end - start, &current->mm->pinned_vm);
 }
 
 /*
@@ -2680,6 +2683,20 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
 	return offset;
 }
 
+static bool memlock_limited(unsigned long npages)
+{
+	unsigned long lock_limit;
+	unsigned long pinned;
+
+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	pinned = atomic64_add_return(npages, &current->mm->pinned_vm);
+	if (pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+		atomic64_sub(npages, &current->mm->pinned_vm);
+		return true;
+	}
+	return false;
+}
+
 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 							 loff_t len)
 {
@@ -2753,6 +2770,12 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 		goto out;
 	}
 
+	if ((info->xflags & SHM_F_INACCESSIBLE) &&
+	     memlock_limited(end - start)) {
+		error = -ENOMEM;
+		goto out;
+	}
+
 	shmem_falloc.waitq = NULL;
 	shmem_falloc.start = start;
 	shmem_falloc.next  = start;
--
2.17.1