Message-Id: <20140610002427.269562206@linuxfoundation.org>
Date: Mon, 9 Jun 2014 17:25:34 -0700
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, David Vrabel <david.vrabel@...rix.com>,
Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
Daniel Kiper <daniel.kiper@...cle.com>
Subject: [PATCH 3.4 84/88] xen/setup: update VA mapping when releasing memory during setup
3.4-stable review patch. If anyone has any objections, please let me know.

------------------

From: David Vrabel <dvrabel@...tab.net>

commit 83d51ab473dddde7df858015070ed22b84ebe9a9 upstream.
In xen_memory_setup(), if a page that is being released has a VA
mapping, this mapping must also be updated. Otherwise, the page will
not be released completely -- it will still be referenced in Xen and
won't be freed until the mapping is removed, and this prevents it from
being reallocated at a different PFN.

This was already being done for the ISA memory region in
xen_ident_map_ISA(), but this omitted a few pages on many systems,
which mark a few pages just below the ISA memory region as reserved
in the e820 map.

This fixes errors such as:
(XEN) page_alloc.c:1148:d0 Over-allocation for domain 0: 2097153 > 2097152
(XEN) memory.c:133:d0 Could not allocate order=0 extent: id=0 memflags=0 (0 of 17)

Signed-off-by: David Vrabel <david.vrabel@...rix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
Signed-off-by: Daniel Kiper <daniel.kiper@...cle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>

---
 arch/x86/xen/enlighten.c |    1 -
 arch/x86/xen/mmu.c       |   23 -----------------------
 arch/x86/xen/setup.c     |   43 +++++++++++++++++++++++++++++++++++--------
 arch/x86/xen/xen-ops.h   |    1 -
 4 files changed, 35 insertions(+), 33 deletions(-)
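
For reviewers, a condensed sketch of the core of the change (not part
of the patch; it restates the xen_set_identity_and_release_chunk()
hunk in setup.c below, with the accounting boilerplate elided): any
PFN in the released range that is still covered by the kernel's linear
mapping first has its VA rewritten to a 1:1 mapping, so Xen drops the
remaining reference before the page is handed back:

    /* Rewrite live kernel VA mappings for the range to 1:1 first;
     * otherwise Xen keeps a reference and the page cannot be freed
     * and repopulated at a different PFN later. */
    for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
            (void)HYPERVISOR_update_va_mapping(
                    (unsigned long)__va(pfn << PAGE_SHIFT),
                    mfn_pte(pfn, PAGE_KERNEL_IO), 0);

    /* Then release the pages to Xen and mark the range 1:1 in the p2m. */
    if (start_pfn < nr_pages)
            *released += xen_release_chunk(start_pfn, min(end_pfn, nr_pages));
    *identity += set_phys_range_identity(start_pfn, end_pfn);
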
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1390,7 +1390,6 @@ asmlinkage void __init xen_start_kernel(
 
 	xen_raw_console_write("mapping kernel into physical memory\n");
 	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
-	xen_ident_map_ISA();
 
 	/* Allocate and initialize top and mid mfn levels for p2m structure */
 	xen_build_mfn_list_list();
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1953,29 +1953,6 @@ static void xen_set_fixmap(unsigned idx,
 #endif
 }
 
-void __init xen_ident_map_ISA(void)
-{
-	unsigned long pa;
-
-	/*
-	 * If we're dom0, then linear map the ISA machine addresses into
-	 * the kernel's address space.
-	 */
-	if (!xen_initial_domain())
-		return;
-
-	xen_raw_printk("Xen: setup ISA identity maps\n");
-
-	for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
-		pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
-
-		if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
-			BUG();
-	}
-
-	xen_flush_tlb();
-}
-
 static void __init xen_post_allocator_init(void)
 {
 	pv_mmu_ops.set_pte = xen_set_pte;
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -147,6 +147,13 @@ static unsigned long __init xen_do_chunk
 
 	return len;
 }
+
+static unsigned long __init xen_release_chunk(unsigned long start,
+					      unsigned long end)
+{
+	return xen_do_chunk(start, end, true);
+}
+
 static unsigned long __init xen_populate_chunk(
 	const struct e820entry *list, size_t map_size,
 	unsigned long max_pfn, unsigned long *last_pfn,
@@ -205,6 +212,29 @@ static unsigned long __init xen_populate
 	}
 	return done;
 }
+
+static void __init xen_set_identity_and_release_chunk(
+	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+	unsigned long *released, unsigned long *identity)
+{
+	unsigned long pfn;
+
+	/*
+	 * If the PFNs are currently mapped, the VA mapping also needs
+	 * to be updated to be 1:1.
+	 */
+	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+		(void)HYPERVISOR_update_va_mapping(
+			(unsigned long)__va(pfn << PAGE_SHIFT),
+			mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+	if (start_pfn < nr_pages)
+		*released += xen_release_chunk(
+			start_pfn, min(end_pfn, nr_pages));
+
+	*identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
 static unsigned long __init xen_set_identity_and_release(
 	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
 {
@@ -234,14 +264,11 @@ static unsigned long __init xen_set_iden
 			if (entry->type == E820_RAM)
 				end_pfn = PFN_UP(entry->addr);
 
-			if (start_pfn < end_pfn) {
-				if (start_pfn < nr_pages)
-					released += xen_do_chunk(
-						start_pfn, min(end_pfn, nr_pages), true);
-
-				identity += set_phys_range_identity(
-					start_pfn, end_pfn);
-			}
+			if (start_pfn < end_pfn)
+				xen_set_identity_and_release_chunk(
+					start_pfn, end_pfn, nr_pages,
+					&released, &identity);
+
 			start = end;
 		}
 	}
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
 void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
-void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
 
 extern unsigned long xen_max_p2m_pfn;