Message-ID: <20210305221143.GA220893@agluck-desk2.amr.corp.intel.com>
Date: Fri, 5 Mar 2021 14:11:43 -0800
From: "Luck, Tony" <tony.luck@...el.com>
To: Aili Yao <yaoaili@...gsoft.com>
Cc: HORIGUCHI NAOYA(堀口 直也)
<naoya.horiguchi@....com>, Oscar Salvador <osalvador@...e.de>,
"david@...hat.com" <david@...hat.com>,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
"bp@...en8.de" <bp@...en8.de>,
"tglx@...utronix.de" <tglx@...utronix.de>,
"mingo@...hat.com" <mingo@...hat.com>,
"hpa@...or.com" <hpa@...or.com>, "x86@...nel.org" <x86@...nel.org>,
"linux-edac@...r.kernel.org" <linux-edac@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"yangfeng1@...gsoft.com" <yangfeng1@...gsoft.com>
Subject: Re: [PATCH] mm,hwpoison: return -EBUSY when page already poisoned

This whole page table walking patch is trying to work around the
races caused by multiple calls to memory_failure() for the same
page.
Maybe better to just avoid the races. The comment right above
memory_failure says:
* Must run in process context (e.g. a work queue) with interrupts
* enabled and no spinlocks hold.
So it should be safe to grab and hold a mutex. See patch below.
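
(Aside, not part of the patch: a minimal userspace sketch of the same
serialization pattern, in case it helps to see the idea in isolation.
A pthread mutex stands in for the kernel mutex and an atomic flag
stands in for the per-page TestSetPageHWPoison() bit; every name below
is made up for illustration.)

/*
 * Hedged illustration only -- NOT kernel code and not part of the patch.
 * A pthread mutex plays the role of mf_mutex and an atomic flag plays
 * the role of the per-page HWPoison bit.  Build with: gcc -pthread demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t mf_mutex = PTHREAD_MUTEX_INITIALIZER;	/* cf. DEFINE_MUTEX(mf_mutex) */
static atomic_flag page_poisoned = ATOMIC_FLAG_INIT;		/* cf. the HWPoison page bit */

/* Stand-in for memory_failure(): take the mutex for the whole handler
 * and drop it on every exit path, as the patch below does.
 */
static int fake_memory_failure(unsigned long pfn)
{
	pthread_mutex_lock(&mf_mutex);

	if (atomic_flag_test_and_set(&page_poisoned)) {
		/* Later reports only get here once the first one is done. */
		printf("pfn %#lx: already hardware poisoned\n", pfn);
		pthread_mutex_unlock(&mf_mutex);
		return 0;
	}

	/* ... the real recovery work would run here, race free ... */
	printf("pfn %#lx: handling poison\n", pfn);

	pthread_mutex_unlock(&mf_mutex);
	return 0;
}

static void *report(void *arg)
{
	fake_memory_failure(*(unsigned long *)arg);
	return NULL;
}

int main(void)
{
	unsigned long pfn = 0x12345;
	pthread_t t1, t2;

	/* Two "machine checks" racing to report the same page. */
	pthread_create(&t1, NULL, report, &pfn);
	pthread_create(&t2, NULL, report, &pfn);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}

With the mutex held across the whole handler, a second report for the
same page only sees "already hardware poisoned" after the first report
has finished -- which is exactly the window the page table walking
patch was trying to cope with.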
-Tony
commit 8dd0dbe7d595e02647e9c2c76c03341a9f6bd7b9
Author: Tony Luck <tony.luck@...el.com>
Date:   Fri Mar 5 10:40:48 2021 -0800

    Use a mutex to avoid memory_failure() races

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 24210c9bd843..c1509f4b565e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1381,6 +1381,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
 	return rc;
 }
 
+static DEFINE_MUTEX(mf_mutex);
+
 /**
  * memory_failure - Handle memory failure of a page.
  * @pfn: Page Number of the corrupted page
@@ -1424,12 +1426,18 @@ int memory_failure(unsigned long pfn, int flags)
 		return -ENXIO;
 	}
 
+	mutex_lock(&mf_mutex);
+
 try_again:
-	if (PageHuge(p))
-		return memory_failure_hugetlb(pfn, flags);
+	if (PageHuge(p)) {
+		res = memory_failure_hugetlb(pfn, flags);
+		goto out2;
+	}
+
 	if (TestSetPageHWPoison(p)) {
 		pr_err("Memory failure: %#lx: already hardware poisoned\n",
 			pfn);
+		mutex_unlock(&mf_mutex);
 		return 0;
 	}
 
@@ -1463,9 +1471,11 @@ int memory_failure(unsigned long pfn, int flags)
 				res = MF_FAILED;
 			}
 			action_result(pfn, MF_MSG_BUDDY, res);
+			mutex_unlock(&mf_mutex);
 			return res == MF_RECOVERED ? 0 : -EBUSY;
 		} else {
 			action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
+			mutex_unlock(&mf_mutex);
 			return -EBUSY;
 		}
 	}
@@ -1473,6 +1483,7 @@ int memory_failure(unsigned long pfn, int flags)
 	if (PageTransHuge(hpage)) {
 		if (try_to_split_thp_page(p, "Memory Failure") < 0) {
 			action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
+			mutex_unlock(&mf_mutex);
 			return -EBUSY;
 		}
 		VM_BUG_ON_PAGE(!page_count(p), p);
@@ -1517,6 +1528,7 @@ int memory_failure(unsigned long pfn, int flags)
 		num_poisoned_pages_dec();
 		unlock_page(p);
 		put_page(p);
+		mutex_unlock(&mf_mutex);
 		return 0;
 	}
 	if (hwpoison_filter(p)) {
@@ -1524,6 +1536,7 @@ int memory_failure(unsigned long pfn, int flags)
 		num_poisoned_pages_dec();
 		unlock_page(p);
 		put_page(p);
+		mutex_unlock(&mf_mutex);
 		return 0;
 	}
 
@@ -1559,6 +1572,8 @@ int memory_failure(unsigned long pfn, int flags)
 	res = identify_page_state(pfn, p, page_flags);
 out:
 	unlock_page(p);
+out2:
+	mutex_unlock(&mf_mutex);
 	return res;
 }
 EXPORT_SYMBOL_GPL(memory_failure);