Message-ID: <20210106035027.GA1160@open-light-1.localdomain>
Date:   Tue, 5 Jan 2021 22:50:31 -0500
From:   Liang Li <liliang324@...il.com>
To:     Alexander Duyck <alexander.h.duyck@...ux.intel.com>,
        Mel Gorman <mgorman@...hsingularity.net>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Andrea Arcangeli <aarcange@...hat.com>,
        Dan Williams <dan.j.williams@...el.com>,
        "Michael S. Tsirkin" <mst@...hat.com>,
        David Hildenbrand <david@...hat.com>,
        Jason Wang <jasowang@...hat.com>,
        Dave Hansen <dave.hansen@...el.com>,
        Michal Hocko <mhocko@...e.com>,
        Liang Li <liliangleo@...iglobal.com>,
        Liang Li <liliang324@...il.com>,
        Mike Kravetz <mike.kravetz@...cle.com>
Cc:     linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        virtualization@...ts.linux-foundation.org
Subject: [PATCH 4/6] hugetlb: avoid allocation failure when page reporting is
 in progress

Page reporting temporarily isolates free pages while reporting them
to the host. This reduces the number of pages that are actually free
and may cause application allocations to fail with a transient
out-of-memory error. To address this, when the free list is about to
run empty and a page reporting pass is in progress, wait until the
pass completes before retrying the allocation.

Cc: Alexander Duyck <alexander.h.duyck@...ux.intel.com>
Cc: Mel Gorman <mgorman@...hsingularity.net>
Cc: Andrea Arcangeli <aarcange@...hat.com>
Cc: Dan Williams <dan.j.williams@...el.com>
Cc: Dave Hansen <dave.hansen@...el.com>
Cc: David Hildenbrand <david@...hat.com>
Cc: Michal Hocko <mhocko@...nel.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>
Cc: Alex Williamson <alex.williamson@...hat.com>
Cc: Michael S. Tsirkin <mst@...hat.com>
Cc: Liang Li <liliang324@...il.com>
Signed-off-by: Liang Li <liliangleo@...iglobal.com>
---
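For reference, a minimal user-space sketch of the waiting scheme this
patch uses: the reporter holds a mutex for the whole reporting pass
(as hugepage_reporting_process_hstate() holds h->mtx_prezero below),
and an allocator that finds nothing free waits for the pass simply by
acquiring and immediately releasing the same mutex. The pthread code
and the free_items/reporting names are illustrative stand-ins, not
kernel APIs or kernel fields.

/* sketch.c: build with gcc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t report_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int free_items;  /* stand-in for h->free_huge_pages */
static atomic_int reporting;   /* stand-in for h->isolated_huge_pages */

/* Reporter: isolates the free pages, reports them, then puts them
 * back; the mutex is held for the entire pass. */
static void *reporter(void *arg)
{
	pthread_mutex_lock(&report_mutex);
	sleep(1);                     /* pretend to report pages to the host */
	atomic_store(&free_items, 8); /* isolated pages returned to the free list */
	atomic_store(&reporting, 0);
	pthread_mutex_unlock(&report_mutex);
	return NULL;
}

/* Allocator: instead of failing when nothing is free, wait for the
 * in-flight pass by taking and dropping the reporter's mutex, then
 * re-check -- the same loop shape as in alloc_huge_page() below. */
static void *allocator(void *arg)
{
	while (atomic_load(&free_items) == 0 && atomic_load(&reporting)) {
		pthread_mutex_lock(&report_mutex);  /* blocks until the pass ends */
		pthread_mutex_unlock(&report_mutex);
	}
	printf("allocation succeeded, %d items free\n",
	       atomic_load(&free_items));
	return NULL;
}

int main(void)
{
	pthread_t r, a;

	atomic_store(&free_items, 0); /* all pages currently isolated... */
	atomic_store(&reporting, 1);  /* ...by a reporting pass in flight */
	pthread_create(&r, NULL, reporter, NULL);
	pthread_create(&a, NULL, allocator, NULL);
	pthread_join(r, NULL);
	pthread_join(a, NULL);
	return 0;
}

Note that the allocator never holds the mutex across the allocation
itself; it is used purely as a completion barrier, so every waiter
wakes once the reporting pass finishes and then re-checks the free
count.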
 include/linux/hugetlb.h | 2 ++
 mm/hugetlb.c            | 9 +++++++++
 mm/page_reporting.c     | 6 +++++-
 3 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d55e6a00b3dc..73b2934ba91c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -490,6 +490,7 @@ struct hstate {
 	unsigned long resv_huge_pages;
 	unsigned long surplus_huge_pages;
 	unsigned long nr_overcommit_huge_pages;
+	unsigned long isolated_huge_pages;
 	struct list_head hugepage_activelist;
 	struct list_head hugepage_freelists[MAX_NUMNODES];
 	unsigned int nr_huge_pages_node[MAX_NUMNODES];
@@ -500,6 +501,7 @@ struct hstate {
 	struct cftype cgroup_files_dfl[7];
 	struct cftype cgroup_files_legacy[9];
 #endif
+	struct mutex mtx_prezero;
 	char name[HSTATE_NAME_LEN];
 };
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eb533995cb49..0fccd5f96954 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2320,6 +2320,12 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		goto out_uncharge_cgroup_reservation;
 
 	spin_lock(&hugetlb_lock);
+	while (h->free_huge_pages <= 1 && h->isolated_huge_pages) {
+		spin_unlock(&hugetlb_lock);
+		mutex_lock(&h->mtx_prezero);
+		mutex_unlock(&h->mtx_prezero);
+		spin_lock(&hugetlb_lock);
+	}
 	/*
 	 * glb_chg is passed to indicate whether or not a page must be taken
 	 * from the global free pool (global change).  gbl_chg == 0 indicates
@@ -3208,6 +3214,7 @@ void __init hugetlb_add_hstate(unsigned int order)
 	INIT_LIST_HEAD(&h->hugepage_activelist);
 	h->next_nid_to_alloc = first_memory_node;
 	h->next_nid_to_free = first_memory_node;
+	mutex_init(&h->mtx_prezero);
 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
 					huge_page_size(h)/1024);
 
@@ -5541,6 +5548,7 @@ void isolate_free_huge_page(struct page *page, struct hstate *h, int nid)
 
 	list_move(&page->lru, &h->hugepage_activelist);
 	set_page_refcounted(page);
+	h->isolated_huge_pages++;
 }
 
 void putback_isolate_huge_page(struct hstate *h, struct page *page)
@@ -5548,6 +5556,7 @@ void putback_isolate_huge_page(struct hstate *h, struct page *page)
 	int nid = page_to_nid(page);
 
 	list_move(&page->lru, &h->hugepage_freelists[nid]);
+	h->isolated_huge_pages--;
 }
 
 bool isolate_huge_page(struct page *page, struct list_head *list)
diff --git a/mm/page_reporting.c b/mm/page_reporting.c
index cc31696225bb..99e1e688d7c1 100644
--- a/mm/page_reporting.c
+++ b/mm/page_reporting.c
@@ -272,12 +272,15 @@ hugepage_reporting_process_hstate(struct page_reporting_dev_info *prdev,
 	int ret = 0, nid;
 
 	offset = max_items;
+	mutex_lock(&h->mtx_prezero);
 	for (nid = 0; nid < MAX_NUMNODES; nid++) {
 		ret = hugepage_reporting_cycle(prdev, h, nid, sgl, &offset,
 					       max_items);
 
-		if (ret < 0)
+		if (ret < 0) {
+			mutex_unlock(&h->mtx_prezero);
 			return ret;
+		}
 	}
 
 	/* report the leftover pages before going idle */
@@ -291,6 +294,7 @@ hugepage_reporting_process_hstate(struct page_reporting_dev_info *prdev,
 		hugepage_reporting_drain(prdev, h, sgl, leftover, !ret);
 		spin_unlock(&hugetlb_lock);
 	}
+	mutex_unlock(&h->mtx_prezero);
 
 	return ret;
 }
-- 
2.18.2
