Message-Id: <20190510072125.18059-3-oleksandr@redhat.com>
Date: Fri, 10 May 2019 09:21:23 +0200
From: Oleksandr Natalenko <oleksandr@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: Kirill Tkhai <ktkhai@...tuozzo.com>,
Vlastimil Babka <vbabka@...e.cz>,
Michal Hocko <mhocko@...e.com>,
Matthew Wilcox <willy@...radead.org>,
Pavel Tatashin <pasha.tatashin@...cle.com>,
Timofey Titovets <nefelim4ag@...il.com>,
Aaron Tomlin <atomlin@...hat.com>, linux-mm@...ck.org
Subject: [PATCH RFC 2/4] mm/ksm: introduce VM_UNMERGEABLE
Add a separate vma flag so that applications can opt out of automatic
VMA merging due to (possible) security concerns.

Since vma flags are short on free bits, the new flag is available on
64-bit architectures only. Consequently, KSM "always" mode will also be
available on 64-bit architectures only.
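
For illustration only (not part of the patch), a minimal userspace
sketch of how an application could opt a mapping out once this change
is applied. It relies on the existing MADV_UNMERGEABLE advice, which
with this patch additionally sets VM_UNMERGEABLE on the VMA, so the
mapping is skipped by ksm_enter() and shows "ug" in /proc/<pid>/smaps:

	/* Hypothetical example: opt an anonymous mapping out of KSM. */
	#define _GNU_SOURCE		/* for MADV_* constants */
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2 * 1024 * 1024;
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return EXIT_FAILURE;
		}

		/*
		 * Opt this mapping out of merging; with this patch the
		 * kernel also sets VM_UNMERGEABLE on the VMA, so KSM
		 * "always" mode will leave it alone as well.
		 */
		if (madvise(p, len, MADV_UNMERGEABLE)) {
			perror("madvise");
			return EXIT_FAILURE;
		}

		printf("mapping at %p opted out of merging\n", p);

		munmap(p, len);
		return 0;
	}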
Signed-off-by: Oleksandr Natalenko <oleksandr@...hat.com>
---
fs/proc/task_mmu.c | 3 +++
include/linux/mm.h | 6 ++++++
include/trace/events/mmflags.h | 7 +++++++
mm/ksm.c | 13 +++++++++++++
4 files changed, 29 insertions(+)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 95ca1fe7283c..19cc246000e8 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -648,6 +648,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
[ilog2(VM_MIXEDMAP)] = "mm",
[ilog2(VM_HUGEPAGE)] = "hg",
[ilog2(VM_NOHUGEPAGE)] = "nh",
+#ifdef VM_UNMERGEABLE
+ [ilog2(VM_UNMERGEABLE)] = "ug",
+#endif
[ilog2(VM_MERGEABLE)] = "mg",
[ilog2(VM_UFFD_MISSING)]= "um",
[ilog2(VM_UFFD_WP)] = "uw",
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6b10c21630f5..114cdb882cdd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -252,11 +252,13 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
+#define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#ifdef CONFIG_ARCH_HAS_PKEYS
@@ -272,6 +274,10 @@ extern unsigned int kobjsize(const void *objp);
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
+#ifdef VM_HIGH_ARCH_5
+#define VM_UNMERGEABLE VM_HIGH_ARCH_5 /* Opt-out for KSM "always" mode */
+#endif /* VM_HIGH_ARCH_5 */
+
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index a1675d43777e..717e0fd9d2ef 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -130,6 +130,12 @@ IF_HAVE_PG_IDLE(PG_idle, "idle" )
#define IF_HAVE_VM_SOFTDIRTY(flag,name)
#endif
+#ifdef VM_UNMERGEABLE
+#define IF_HAVE_VM_UNMERGEABLE(flag,name) {flag, name },
+#else
+#define IF_HAVE_VM_UNMERGEABLE(flag,name)
+#endif
+
#define __def_vmaflag_names \
{VM_READ, "read" }, \
{VM_WRITE, "write" }, \
@@ -161,6 +167,7 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \
{VM_MIXEDMAP, "mixedmap" }, \
{VM_HUGEPAGE, "hugepage" }, \
{VM_NOHUGEPAGE, "nohugepage" }, \
+IF_HAVE_VM_UNMERGEABLE(VM_UNMERGEABLE, "unmergeable" ) \
{VM_MERGEABLE, "mergeable" } \
#define show_vma_flags(flags) \
diff --git a/mm/ksm.c b/mm/ksm.c
index a6b0788a3a22..0fb5f850087a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2450,12 +2450,18 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
switch (advice) {
case MADV_MERGEABLE:
+#ifdef VM_UNMERGEABLE
+ *vm_flags &= ~VM_UNMERGEABLE;
+#endif
err = ksm_enter(mm, vma, vm_flags);
if (err)
return err;
break;
case MADV_UNMERGEABLE:
+#ifdef VM_UNMERGEABLE
+ *vm_flags |= VM_UNMERGEABLE;
+#endif
if (!(*vm_flags & VM_MERGEABLE))
return 0; /* just ignore the advice */
@@ -2496,6 +2502,10 @@ int ksm_enter(struct mm_struct *mm, struct vm_area_struct *vma,
if (*vm_flags & VM_SPARC_ADI)
return 0;
#endif
+#ifdef VM_UNMERGEABLE
+ if (*vm_flags & VM_UNMERGEABLE)
+ return 0;
+#endif
if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
err = __ksm_enter(mm);
@@ -3173,6 +3183,9 @@ static ssize_t full_scans_show(struct kobject *kobj,
KSM_ATTR_RO(full_scans);
static struct attribute *ksm_attrs[] = {
+#ifdef VM_UNMERGEABLE
+ &mode_attr.attr,
+#endif
&sleep_millisecs_attr.attr,
&pages_to_scan_attr.attr,
&run_attr.attr,
--
2.21.0