Message-ID: <20210308225504.GA233893@agluck-desk2.amr.corp.intel.com>
Date: Mon, 8 Mar 2021 14:55:04 -0800
From: "Luck, Tony" <tony.luck@...el.com>
To: HORIGUCHI NAOYA(堀口 直也)
<naoya.horiguchi@....com>
Cc: Aili Yao <yaoaili@...gsoft.com>,
Oscar Salvador <osalvador@...e.de>,
"david@...hat.com" <david@...hat.com>,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
"bp@...en8.de" <bp@...en8.de>,
"tglx@...utronix.de" <tglx@...utronix.de>,
"mingo@...hat.com" <mingo@...hat.com>,
"hpa@...or.com" <hpa@...or.com>, "x86@...nel.org" <x86@...nel.org>,
"linux-edac@...r.kernel.org" <linux-edac@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"yangfeng1@...gsoft.com" <yangfeng1@...gsoft.com>
Subject: [PATCH] mm/memory-failure: Use a mutex to avoid memory_failure()
races

There can be races when multiple CPUs consume poison from the same
page. The first into memory_failure() atomically sets the HWPoison
page flag and begins hunting for tasks that map this page. Eventually
it invalidates those mappings and may send a SIGBUS to the affected
tasks.

But while all that work is going on, other CPUs see a "success"
return code from memory_failure() and so they believe the error
has been handled and continue executing.

Fix by wrapping most of the internal parts of memory_failure() in
a mutex.
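
[Illustration, not part of the patch: a minimal userspace sketch of
the race and of what the mutex buys us. pthreads stand in for the
kernel mutex, atomic_flag for TestSetPageHWPoison(), and every name
below is invented for the example. Build with "cc -pthread".]

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t mf_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_flag hwpoison = ATOMIC_FLAG_INIT;

/* Analogue of memory_failure(): returns 0 once the error is handled. */
static int handle_failure(unsigned long pfn)
{
        pthread_mutex_lock(&mf_mutex);
        if (atomic_flag_test_and_set(&hwpoison)) {
                /*
                 * Second consumer.  Because of the mutex it can only
                 * get here after the first consumer finished recovery,
                 * so reporting "handled" is now actually true.  Without
                 * the mutex it would race past mid-recovery.
                 */
                printf("%#lx: already hardware poisoned\n", pfn);
        } else {
                /* First consumer: slow recovery work, under the lock. */
                printf("%#lx: invalidating mappings, sending SIGBUS\n", pfn);
                usleep(10000);
        }
        pthread_mutex_unlock(&mf_mutex);
        return 0;
}

static void *consume_poison(void *arg)
{
        handle_failure((unsigned long)arg);
        return NULL;
}

int main(void)
{
        pthread_t t[2];
        int i;

        /* Two CPUs consuming poison from the same page. */
        for (i = 0; i < 2; i++)
                pthread_create(&t[i], NULL, consume_poison, (void *)0x1234UL);
        for (i = 0; i < 2; i++)
                pthread_join(t[i], NULL);
        return 0;
}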

Signed-off-by: Tony Luck <tony.luck@...el.com>
---
 mm/memory-failure.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 24210c9bd843..c1509f4b565e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1381,6 +1381,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
         return rc;
 }
 
+static DEFINE_MUTEX(mf_mutex);
+
 /**
  * memory_failure - Handle memory failure of a page.
  * @pfn: Page Number of the corrupted page
@@ -1424,12 +1426,18 @@ int memory_failure(unsigned long pfn, int flags)
                 return -ENXIO;
         }
 
+        mutex_lock(&mf_mutex);
+
 try_again:
-        if (PageHuge(p))
-                return memory_failure_hugetlb(pfn, flags);
+        if (PageHuge(p)) {
+                res = memory_failure_hugetlb(pfn, flags);
+                goto out2;
+        }
+
         if (TestSetPageHWPoison(p)) {
                 pr_err("Memory failure: %#lx: already hardware poisoned\n",
                         pfn);
+                mutex_unlock(&mf_mutex);
                 return 0;
         }
 
@@ -1463,9 +1471,11 @@ int memory_failure(unsigned long pfn, int flags)
                                 res = MF_FAILED;
                         }
                         action_result(pfn, MF_MSG_BUDDY, res);
+                        mutex_unlock(&mf_mutex);
                         return res == MF_RECOVERED ? 0 : -EBUSY;
                 } else {
                         action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
+                        mutex_unlock(&mf_mutex);
                         return -EBUSY;
                 }
         }
@@ -1473,6 +1483,7 @@ int memory_failure(unsigned long pfn, int flags)
         if (PageTransHuge(hpage)) {
                 if (try_to_split_thp_page(p, "Memory Failure") < 0) {
                         action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
+                        mutex_unlock(&mf_mutex);
                         return -EBUSY;
                 }
                 VM_BUG_ON_PAGE(!page_count(p), p);
@@ -1517,6 +1528,7 @@ int memory_failure(unsigned long pfn, int flags)
                 num_poisoned_pages_dec();
                 unlock_page(p);
                 put_page(p);
+                mutex_unlock(&mf_mutex);
                 return 0;
         }
         if (hwpoison_filter(p)) {
@@ -1524,6 +1536,7 @@ int memory_failure(unsigned long pfn, int flags)
                 num_poisoned_pages_dec();
                 unlock_page(p);
                 put_page(p);
+                mutex_unlock(&mf_mutex);
                 return 0;
         }
 
@@ -1559,6 +1572,8 @@ int memory_failure(unsigned long pfn, int flags)
         res = identify_page_state(pfn, p, page_flags);
 out:
         unlock_page(p);
+out2:
+        mutex_unlock(&mf_mutex);
         return res;
 }
 EXPORT_SYMBOL_GPL(memory_failure);
--
2.29.2
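
[Reviewer's aid, not kernel source: the locking structure above,
condensed into a compile-able userspace skeleton. The stubs replace
the real page checks; only mf_mutex and the out2 label correspond to
names in the patch, everything else is invented.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mf_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stubs standing in for the real predicates. */
static int page_huge(void) { return 0; }
static int test_set_hwpoison(void) { return 0; }

static int memory_failure_skeleton(void)
{
        int res = 0;

        pthread_mutex_lock(&mf_mutex);

        if (page_huge()) {
                res = -1;               /* memory_failure_hugetlb() */
                goto out2;              /* shared unlock point below */
        }
        if (test_set_hwpoison()) {      /* TestSetPageHWPoison() */
                /* Early return paths each unlock before leaving. */
                pthread_mutex_unlock(&mf_mutex);
                return 0;
        }
        /* ... recovery work; the remaining early returns do likewise ... */
out2:
        pthread_mutex_unlock(&mf_mutex);
        return res;
}

int main(void)
{
        printf("res = %d\n", memory_failure_skeleton());
        return 0;
}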