[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20220111113314.27173-7-kirill.shutemov@linux.intel.com>
Date: Tue, 11 Jan 2022 14:33:13 +0300
From: "Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
To: Borislav Petkov <bp@...en8.de>, Andy Lutomirski <luto@...nel.org>,
Sean Christopherson <seanjc@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Joerg Roedel <jroedel@...e.de>,
Ard Biesheuvel <ardb@...nel.org>
Cc: Andi Kleen <ak@...ux.intel.com>,
Kuppuswamy Sathyanarayanan
<sathyanarayanan.kuppuswamy@...ux.intel.com>,
David Rientjes <rientjes@...gle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Tom Lendacky <thomas.lendacky@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>,
Paolo Bonzini <pbonzini@...hat.com>,
Ingo Molnar <mingo@...hat.com>,
Varad Gautam <varad.gautam@...e.com>,
Dario Faggioli <dfaggioli@...e.com>, x86@...nel.org,
linux-mm@...ck.org, linux-coco@...ts.linux.dev,
linux-efi@...r.kernel.org, linux-kernel@...r.kernel.org,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>
Subject: [PATCHv2 6/7] x86/mm: Provide helpers for unaccepted memory
Core-mm requires a few helpers to support unaccepted memory:
- accept_memory() checks the range of addresses against the bitmap and
accepts memory if needed;
- maybe_set_page_offline() checks the bitmap and marks a page with
PageOffline() if memory acceptance is required on the first
allocation of the page;
- accept_and_clear_page_offline() accepts memory for the page and clears
PageOffline().
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@...ux.intel.com>
---
arch/x86/boot/compressed/unaccepted_memory.c | 3 +-
arch/x86/include/asm/page.h | 5 ++
arch/x86/include/asm/unaccepted_memory.h | 3 +
arch/x86/mm/Makefile | 2 +
arch/x86/mm/unaccepted_memory.c | 90 ++++++++++++++++++++
5 files changed, 101 insertions(+), 2 deletions(-)
create mode 100644 arch/x86/mm/unaccepted_memory.c
diff --git a/arch/x86/boot/compressed/unaccepted_memory.c b/arch/x86/boot/compressed/unaccepted_memory.c
index 91db800d5f5e..b6caca4d3d22 100644
--- a/arch/x86/boot/compressed/unaccepted_memory.c
+++ b/arch/x86/boot/compressed/unaccepted_memory.c
@@ -20,8 +20,7 @@ void mark_unaccepted(struct boot_params *params, u64 start, u64 end)
/* Immediately accept whole range if it is within a PMD_SIZE block: */
if ((start & PMD_MASK) == (end & PMD_MASK)) {
- npages = (end - start) / PAGE_SIZE;
- __accept_memory(start, start + npages * PAGE_SIZE);
+ __accept_memory(start, end);
return;
}
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 4d5810c8fab7..1e56d76ca474 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -19,6 +19,11 @@
struct page;
#include <linux/range.h>
+
+#ifdef CONFIG_UNACCEPTED_MEMORY
+#include <asm/unaccepted_memory.h>
+#endif
+
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;
diff --git a/arch/x86/include/asm/unaccepted_memory.h b/arch/x86/include/asm/unaccepted_memory.h
index f1f835d3cd78..8a06ac8fc9e9 100644
--- a/arch/x86/include/asm/unaccepted_memory.h
+++ b/arch/x86/include/asm/unaccepted_memory.h
@@ -6,9 +6,12 @@
#include <linux/types.h>
struct boot_params;
+struct page;
void mark_unaccepted(struct boot_params *params, u64 start, u64 num);
void accept_memory(phys_addr_t start, phys_addr_t end);
+void maybe_set_page_offline(struct page *page, unsigned int order);
+void accept_and_clear_page_offline(struct page *page, unsigned int order);
#endif
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index fe3d3061fc11..e327f83e6bbf 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -60,3 +60,5 @@ obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o
obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o
+
+obj-$(CONFIG_UNACCEPTED_MEMORY) += unaccepted_memory.o
diff --git a/arch/x86/mm/unaccepted_memory.c b/arch/x86/mm/unaccepted_memory.c
new file mode 100644
index 000000000000..984eaead0b11
--- /dev/null
+++ b/arch/x86/mm/unaccepted_memory.c
@@ -0,0 +1,90 @@
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/pfn.h>
+#include <linux/spinlock.h>
+
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/unaccepted_memory.h>
+
+static DEFINE_SPINLOCK(unaccepted_memory_lock);
+
+#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
+
+static void __accept_memory(phys_addr_t start, phys_addr_t end)
+{
+ unsigned long *unaccepted_memory;
+ unsigned int rs, re;
+
+ unaccepted_memory = __va(boot_params.unaccepted_memory);
+ bitmap_for_each_set_region(unaccepted_memory, rs, re,
+ start / PMD_SIZE,
+ DIV_ROUND_UP(end, PMD_SIZE)) {
+ /* Platform-specific memory-acceptance call goes here */
+ panic("Cannot accept memory");
+ bitmap_clear(unaccepted_memory, rs, re - rs);
+ }
+}
+
+void accept_memory(phys_addr_t start, phys_addr_t end)
+{
+ unsigned long flags;
+ if (!boot_params.unaccepted_memory)
+ return;
+
+ spin_lock_irqsave(&unaccepted_memory_lock, flags);
+ __accept_memory(start, end);
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+}
+
+void __init maybe_set_page_offline(struct page *page, unsigned int order)
+{
+ unsigned long *unaccepted_memory;
+ phys_addr_t addr = page_to_phys(page);
+ unsigned long flags;
+ bool unaccepted = false;
+ unsigned int i;
+
+ if (!boot_params.unaccepted_memory)
+ return;
+
+ unaccepted_memory = __va(boot_params.unaccepted_memory);
+ spin_lock_irqsave(&unaccepted_memory_lock, flags);
+ if (order < PMD_ORDER) {
+ BUG_ON(test_bit(addr / PMD_SIZE, unaccepted_memory));
+ goto out;
+ }
+
+ for (i = 0; i < (1 << (order - PMD_ORDER)); i++) {
+ if (test_bit(addr / PMD_SIZE + i, unaccepted_memory)) {
+ unaccepted = true;
+ break;
+ }
+ }
+
+ /* At least part of page is unaccepted */
+ if (unaccepted)
+ __SetPageOffline(page);
+out:
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+}
+
+void accept_and_clear_page_offline(struct page *page, unsigned int order)
+{
+ phys_addr_t addr = round_down(page_to_phys(page), PMD_SIZE);
+ int i;
+
+ /* PageOffline() page on a free list, but no unaccepted memory? Hm. */
+ WARN_ON_ONCE(!boot_params.unaccepted_memory);
+
+ page = pfn_to_page(addr >> PAGE_SHIFT);
+ if (order < PMD_ORDER)
+ order = PMD_ORDER;
+
+ accept_memory(addr, addr + (PAGE_SIZE << order));
+
+ for (i = 0; i < (1 << order); i++) {
+ if (PageOffline(page + i))
+ __ClearPageOffline(page + i);
+ }
+}
--
2.34.1
Powered by blists - more mailing lists