Message-ID: <20210406102346.3890ceb2@alex-virtual-machine>
Date:   Tue, 6 Apr 2021 10:23:46 +0800
From:   Aili Yao <yaoaili@...gsoft.com>
To:     David Hildenbrand <david@...hat.com>,
        "HORIGUCHI NAOYA堀口 直也)" 
        <naoya.horiguchi@....com>, "Matthew Wilcox" <willy@...radead.org>,
        "akpm@...ux-foundation.org" <akpm@...ux-foundation.org>
CC:     "linux-mm@...ck.org" <linux-mm@...ck.org>,
        "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
        "yangfeng1@...gsoft.com" <yangfeng1@...gsoft.com>,
        "sunhao2@...gsoft.com" <sunhao2@...gsoft.com>,
        Oscar Salvador <osalvador@...e.de>,
        Mike Kravetz <mike.kravetz@...cle.com>, <yaoaili@...gsoft.com>
Subject: [PATCH v6] mm/gup: check page hwpoison status for memory recovery
 failures.

When we call get_user_pages() to pin user pages in memory, some of those
pages may be hwpoisoned. Currently we only handle the normal case, where
the memory recovery job has finished correctly: the hwpoison page is then
not returned to callers. But in other cases, such as when memory recovery
fails and the related user PTE has not been correctly invalidated, we
will still return the hwpoison page, and the caller may touch it and
panic.

In gup.c, for a normal page, follow_page_mask() returns the related page
pointer; in the other hwpoison case, where the PTE has already been
invalidated, it returns NULL, which is handled by the if (!page) branch.
This patch filters out hwpoison pages in follow_page_mask() and returns
an error code for the recovery-failure cases.

We check the page's hwpoison status as early as possible, so that the
rest of the normal lookup procedure is skipped and no reference is taken
on the poisoned page.
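
As an illustration only (a hypothetical caller, not part of this patch),
the caller-visible behaviour is: a slow-path gup that passes
FOLL_HWPOISON and hits a page whose recovery failed now gets -EHWPOISON
back instead of the poisoned page; without FOLL_HWPOISON it gets -EFAULT.
The addr below is an assumed, page-aligned user address:

	struct page *page;
	long ret;

	/* slow-path pin of a single user page */
	ret = get_user_pages_unlocked(addr, 1, &page, FOLL_HWPOISON);
	if (ret == -EHWPOISON)
		return ret;	/* recovery failed, never touch the page */
	if (ret < 1)
		return ret ? ret : -EFAULT;
	/* the pinned page is known not to be hwpoisoned at this point */
	put_page(page);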

Signed-off-by: Aili Yao <yaoaili@...gsoft.com>
Cc: David Hildenbrand <david@...hat.com>
Cc: Matthew Wilcox <willy@...radead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@....com>
Cc: Oscar Salvador <osalvador@...e.de>
Cc: Mike Kravetz <mike.kravetz@...cle.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: stable@...r.kernel.org
---
 mm/gup.c         | 29 +++++++++++++++++++++++------
 mm/huge_memory.c |  9 +++++++--
 mm/hugetlb.c     |  8 +++++++-
 mm/internal.h    | 13 +++++++++++++
 4 files changed, 50 insertions(+), 9 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index e40579624f10..88a93b89c03e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -433,6 +433,9 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 			page = ERR_PTR(ret);
 			goto out;
 		}
+	} else if (PageHWPoison(page)) {
+		page = ERR_PTR(-EHWPOISON);
+		goto out;
 	}
 
 	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
@@ -540,8 +543,13 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 		page = follow_huge_pd(vma, address,
 				      __hugepd(pmd_val(pmdval)), flags,
 				      PMD_SHIFT);
-		if (page)
-			return page;
+		if (page) {
+			struct page *p = check_page_hwpoison(page);
+
+			if (p == ERR_PTR(-EHWPOISON) && flags & FOLL_GET)
+				put_page(page);
+			return p;
+		}
 		return no_page_table(vma, flags);
 	}
 retry:
@@ -643,7 +651,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
 		page = follow_huge_pud(mm, address, pud, flags);
 		if (page)
-			return page;
+			return check_page_hwpoison(page);
 		return no_page_table(vma, flags);
 	}
 	if (is_hugepd(__hugepd(pud_val(*pud)))) {
@@ -652,6 +660,11 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 				      PUD_SHIFT);
-		if (page)
-			return page;
+		if (page) {
+			struct page *p = check_page_hwpoison(page);
+
+			if (p == ERR_PTR(-EHWPOISON) && flags & FOLL_GET)
+				put_page(page);
+			return p;
+		}
 		return no_page_table(vma, flags);
 	}
 	if (pud_devmap(*pud)) {
@@ -1087,10 +1100,14 @@ static long __get_user_pages(struct mm_struct *mm,
 			 * struct page.
 			 */
 			goto next_page;
-		} else if (IS_ERR(page)) {
+		} else if (PTR_ERR(page) == -EHWPOISON) {
+			ret = (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
+			goto out;
+		} else if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			goto out;
 		}
+
 		if (pages) {
 			pages[i] = page;
 			flush_anon_page(vma, page, start);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ae907a9c2050..3973d988e485 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1349,6 +1349,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page = NULL;
+	struct page *tail = NULL;
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
 
@@ -1366,6 +1367,11 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	page = pmd_page(*pmd);
 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
 
+	tail = page + ((addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT);
+
+	if (PageHWPoison(tail))
+		return ERR_PTR(-EHWPOISON);
+
 	if (!try_grab_page(page, flags))
 		return ERR_PTR(-ENOMEM);
 
@@ -1405,11 +1411,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		unlock_page(page);
 	}
 skip_mlock:
-	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
 	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
 
 out:
-	return page;
+	return tail;
 }
 
 /* NUMA hinting page fault entry point for trans huge pmds */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a86a58ef132d..8b50f7eaa159 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4958,7 +4958,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 					     likely(pages) ? pages + i : NULL,
 					     vmas ? vmas + i : NULL);
 
-		if (pages) {
+		/* As we will filter out the hwpoison page, don't try to grab it */
+		if (pages && !PageHWPoison(page)) {
 			/*
 			 * try_grab_compound_head() should always succeed here,
 			 * because: a) we hold the ptl lock, and b) we've just
@@ -5581,6 +5582,11 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	pte = huge_ptep_get((pte_t *)pmd);
 	if (pte_present(pte)) {
 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+		/* If the page is hwpoisoned, don't grab it */
+		if (PageHWPoison(compound_head(page))) {
+			page = ERR_PTR(-EHWPOISON);
+			goto out;
+		}
 		/*
 		 * try_grab_page() should always succeed here, because: a) we
 		 * hold the pmd (ptl) lock, and b) we've just checked that the
diff --git a/mm/internal.h b/mm/internal.h
index 1432feec62df..049b310bc79a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -97,6 +97,19 @@ static inline void set_page_refcounted(struct page *page)
 	set_page_count(page, 1);
 }
 
+/*
+ * Check the hwpoison status of any page type; return an ERR ptr if poisoned.
+ */
+static inline struct page *check_page_hwpoison(struct page *page)
+{
+	if (PageHWPoison(page))
+		return ERR_PTR(-EHWPOISON);
+	else if (PageHuge(page) && PageHWPoison(compound_head(page)))
+		return ERR_PTR(-EHWPOISON);
+
+	return page;
+}
+
 extern unsigned long highest_memmap_pfn;
 
 /*
-- 
2.29.3
