Message-Id: <20200107130950.2983-10-Tianyu.Lan@microsoft.com>
Date:   Tue,  7 Jan 2020 21:09:49 +0800
From:   lantianyu1986@...il.com
To:     kys@...rosoft.com, haiyangz@...rosoft.com, sthemmin@...rosoft.com,
        sashal@...nel.org, michael.h.kelley@...rosoft.com, david@...hat.com
Cc:     Tianyu Lan <Tianyu.Lan@...rosoft.com>,
        linux-hyperv@...r.kernel.org, linux-kernel@...r.kernel.org,
        vkuznets@...hat.com, eric.devolder@...cle.com
Subject: [RFC PATCH V2 9/10] x86/Hyper-V/Balloon: Hot add mem in the gaps of hot add region

From: Tianyu Lan <Tianyu.Lan@...rosoft.com>

A memory hot-remove operation may remove memory that lies inside a
hot-add (ha) region, which creates gaps in the ha region list. A
subsequent hot-add message may contain memory ranges that fall into
these gaps. Handle such requests, and adjust the gap ranges after
the memory has been added.
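
For example (hypothetical pfn values): if a ha region covers pfns
0x100000-0x140000 and pfns 0x110000-0x120000 are hot removed, the gap
[0x110000, 0x120000) is recorded in the region's gap list. A later
hot-add message for pfns 0x110000-0x118000 lands inside that gap, so
after the memory is added the gap must shrink to [0x118000, 0x120000).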

Signed-off-by: Tianyu Lan <Tianyu.Lan@...rosoft.com>
---
 drivers/hv/hv_balloon.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 105 insertions(+), 3 deletions(-)
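
The gap bookkeeping in handle_hot_add_in_gap() below reduces to
trimming or splitting an interval. As a standalone illustration (not
part of the patch), here is a minimal userspace sketch of the same
four cases; the struct and pfn values are simplified stand-ins for
struct hv_hotadd_gap:

#include <stdio.h>

/* Simplified stand-in for struct hv_hotadd_gap. */
struct gap {
	unsigned long start_pfn;	/* inclusive */
	unsigned long end_pfn;		/* exclusive */
};

/*
 * Adjust *g after the range [start_pfn, end_pfn) inside it has been
 * hot added.  Returns 1 if the gap is fully consumed (caller frees
 * it), 0 if it shrank in place, or 2 if it was split and *split
 * holds the new right-hand gap.
 */
static int adjust_gap(struct gap *g, unsigned long start_pfn,
		      unsigned long end_pfn, struct gap *split)
{
	if (g->start_pfn == start_pfn && g->end_pfn == end_pfn)
		return 1;			/* case 1: fully covered */
	if (g->start_pfn < start_pfn && g->end_pfn == end_pfn) {
		g->end_pfn = start_pfn;		/* case 2: trim the tail */
		return 0;
	}
	if (g->start_pfn == start_pfn && g->end_pfn > end_pfn) {
		g->start_pfn = end_pfn;		/* case 3: trim the head */
		return 0;
	}
	/* case 4: range in the middle, split into two gaps */
	split->start_pfn = end_pfn;
	split->end_pfn = g->end_pfn;		/* save original end first */
	g->end_pfn = start_pfn;
	return 2;
}

int main(void)
{
	struct gap g = { 0x110000, 0x120000 }, right;

	/* Hot add [0x114000, 0x118000) in the middle of the gap. */
	if (adjust_gap(&g, 0x114000, 0x118000, &right) == 2)
		printf("left [%#lx, %#lx), right [%#lx, %#lx)\n",
		       g.start_pfn, g.end_pfn,
		       right.start_pfn, right.end_pfn);
	return 0;
}

Built with any C compiler, this prints:
left [0x110000, 0x114000), right [0x118000, 0x120000)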

diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index f76c9bd7fe2f..5aaae62955bf 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -907,10 +907,11 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 			continue;
 
 		/*
-		 * If the current start pfn is not where the covered_end
-		 * is, create a gap and update covered_end_pfn.
+		 * If the current start pfn is greater than covered_end_pfn,
+		 * create a gap and update covered_end_pfn. The start pfn
+		 * may fall in a gap range created during memory hot remove.
 		 */
-		if (has->covered_end_pfn != start_pfn) {
+		if (has->covered_end_pfn < start_pfn) {
 			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
 			if (!gap) {
 				ret = -ENOMEM;
@@ -949,6 +950,91 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
 	return ret;
 }
 
+static int handle_hot_add_in_gap(unsigned long start, unsigned long pg_cnt,
+				 struct hv_hotadd_state *has)
+{
+	struct hv_hotadd_gap *gap, *new_gap, *tmp_gap;
+	unsigned long pfn_cnt = pg_cnt;
+	unsigned long start_pfn = start;
+	unsigned long end_pfn;
+	unsigned long pages;
+	unsigned long pgs_ol;
+	unsigned long block_pages = HA_CHUNK;
+	unsigned long pfn;
+	int nid;
+	int ret;
+
+	list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
+
+		if (start_pfn < gap->start_pfn ||
+		    start_pfn >= gap->end_pfn)
+			continue;
+
+		end_pfn = min(gap->end_pfn, start_pfn + pfn_cnt);
+		pgs_ol = end_pfn - start_pfn;
+
+		/*
+		 * hv_bring_pgs_online() decides whether a pfn should be
+		 * brought online by checking whether it falls in the hot-add
+		 * covered range or in a gap range (see has_pfn_is_backed()
+		 * for details). So adjust the gap before bringing pages
+		 * online or adding memory.
+		 */
+		if (gap->end_pfn - gap->start_pfn == pgs_ol) {
+			list_del(&gap->list);
+			kfree(gap);
+		} else if (gap->start_pfn < start_pfn && gap->end_pfn == end_pfn) {
+			gap->end_pfn = start_pfn;
+		} else if (gap->start_pfn == start_pfn &&
+			   gap->end_pfn > end_pfn) {
+			gap->start_pfn = end_pfn;
+		} else {
+			new_gap = kzalloc(sizeof(struct hv_hotadd_gap),
+					GFP_ATOMIC);
+			if (!new_gap) {
+				do_hot_add = false;
+				return -ENOMEM;
+			}
+
+			INIT_LIST_HEAD(&new_gap->list);
+			new_gap->start_pfn = end_pfn;
+			/* Save the original end before trimming the gap. */
+			new_gap->end_pfn = gap->end_pfn;
+			gap->end_pfn = start_pfn;
+			list_add_tail(&new_gap->list, &has->gap_list);
+		}
+
+		/* Bring pages online or add memory in the gaps. */
+		for (pfn = start_pfn; pfn < end_pfn;
+		     pfn = round_up(pfn + 1, block_pages)) {
+			pages = min(round_up(pfn + 1, block_pages),
+				    end_pfn) - pfn;
+
+			if (online_section_nr(pfn_to_section_nr(pfn))) {
+				hv_bring_pgs_online(has, pfn, pages);
+			} else {
+				nid = memory_add_physaddr_to_nid(PFN_PHYS(pfn));
+				ret = add_memory(nid, PFN_PHYS(pfn),
+						 round_up(pages, block_pages)
+						 << PAGE_SHIFT);
+				if (ret) {
+					pr_err("Failed to add memory in gap (error=%d).\n",
+					       ret);
+					do_hot_add = false;
+					return ret;
+				}
+			}
+		}
+
+		start_pfn += pgs_ol;
+		pfn_cnt -= pgs_ol;
+		if (!pfn_cnt)
+			break;
+	}
+
+	return pg_cnt - pfn_cnt;
+}
+
 static unsigned long handle_pg_range(unsigned long pg_start,
 					unsigned long pg_count)
 {
@@ -975,6 +1061,22 @@ static unsigned long handle_pg_range(unsigned long pg_start,
 
 		old_covered_state = has->covered_end_pfn;
 
+		/*
+		 * If start_pfn is less than covered_end_pfn, the hot-add
+		 * request starts inside a gap range.
+		 */
+		if (start_pfn < has->covered_end_pfn) {
+			pgs_ol = handle_hot_add_in_gap(start_pfn, pfn_cnt, has);
+
+			pfn_cnt -= pgs_ol;
+			if (!pfn_cnt) {
+				res = pgs_ol;
+				break;
+			}
+
+			start_pfn += pgs_ol;
+		}
+
 		if (start_pfn < has->ha_end_pfn) {
 			/*
 			 * This is the case where we are backing pages
-- 
2.14.5
