Message-Id: <1343731376-20658-2-git-send-email-konrad.wilk@oracle.com>
Date: Tue, 31 Jul 2012 06:42:54 -0400
From: Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
To: linux-kernel@...r.kernel.org, xen-devel@...ts.xensource.com,
Ian.Campbell@...rix.com, hpa@...or.com, x86@...nel.org
Cc: Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
Subject: [PATCH 1/3] xen/mmu/p2m: Check extend_brk for NULL
Check the return value of extend_brk() for NULL, which allows us to be a bit
smarter in case we exhaust the reserved virtual space.
[v1: Suggested by Ian Campbell]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
---
arch/x86/xen/enlighten.c | 2 ++
arch/x86/xen/mmu.c | 5 ++++-
arch/x86/xen/p2m.c | 35 ++++++++++++++++++++++++-----------
3 files changed, 30 insertions(+), 12 deletions(-)
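[Note for reviewers, not part of the patch to apply: every hunk below follows
the same pattern - treat a NULL return from extend_brk() as "the reserved brk
space is exhausted" and either skip the optional work, fall through to the
unavoidable cleanup, or BUG() where boot cannot continue anyway. Below is a
minimal userspace sketch of that check-and-degrade idea; the fake_brk pool,
the extend_brk() stub and its sizes are illustrative stand-ins only, not the
kernel implementation.]

/*
 * Illustrative sketch only: a fixed pool standing in for the brk
 * reservation, an extend_brk() stub that returns NULL once the pool
 * is exhausted, and a caller that degrades instead of overrunning it.
 */
#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define FAKE_PAGE_SIZE  4096
#define FAKE_BRK_PAGES  4

static char fake_brk[FAKE_PAGE_SIZE * FAKE_BRK_PAGES]
        __attribute__((aligned(FAKE_PAGE_SIZE)));
static size_t fake_brk_used;

/* Stand-in for the kernel's extend_brk(): NULL once the reservation runs out. */
static void *extend_brk(size_t size, size_t align)
{
        size_t off = (fake_brk_used + align - 1) & ~(align - 1);

        if (off + size > sizeof(fake_brk))
                return NULL;            /* reserved space exhausted */
        fake_brk_used = off + size;
        return fake_brk + off;
}

int main(void)
{
        int i;

        for (i = 0; i < FAKE_BRK_PAGES + 1; i++) {
                void *p = extend_brk(FAKE_PAGE_SIZE, FAKE_PAGE_SIZE);

                if (!p) {               /* the check this patch adds */
                        printf("page %d: pool exhausted, skipping optional setup\n", i);
                        continue;
                }
                memset(p, 0, FAKE_PAGE_SIZE);
                printf("page %d: allocated at %p\n", i, p);
        }
        return 0;
}

In the diff itself the same check appears as an early return in
xen_hvm_init_shared_info(), as a goto out in xen_map_identity_early(), and as
BUG_ON() (directly or via brk_alloc_page()) where a failed allocation is fatal
this early in boot.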
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 993e2a5..923d98e 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1519,6 +1519,8 @@ void __ref xen_hvm_init_shared_info(void)
if (!shared_info_page)
shared_info_page = (struct shared_info *)
extend_brk(PAGE_SIZE, PAGE_SIZE);
+ if (!shared_info_page)
+ return;
xatp.domid = DOMID_SELF;
xatp.idx = 0;
xatp.space = XENMAPSPACE_shared_info;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 993ba07..d7a2044 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1711,6 +1711,8 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
PAGE_SIZE);
+ if (!level1_ident_pgt)
+ goto out;
ident_pte = 0;
pfn = 0;
@@ -1750,7 +1752,7 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
-
+out:
set_page_prot(pmd, PAGE_KERNEL_RO);
}
#endif
@@ -1948,6 +1950,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
initial_kernel_pmd =
extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+ BUG_ON(!initial_kernel_pmd);
max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
xen_start_info->nr_pt_frames * PAGE_SIZE +
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index bbfd085..1658af0 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -258,6 +258,13 @@ static void p2m_init(unsigned long *p2m)
p2m[i] = INVALID_P2M_ENTRY;
}
+static __always_inline __init void *brk_alloc_page(void)
+{
+ void *p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ /* So early that printk does not function. */
+ BUG_ON(p == NULL);
+ return p;
+}
/*
* Build the parallel p2m_top_mfn and p2m_mid_mfn structures
*
@@ -274,13 +281,13 @@ void __ref xen_build_mfn_list_list(void)
/* Pre-initialize p2m_top_mfn to be completely missing */
if (p2m_top_mfn == NULL) {
- p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_missing_mfn = brk_alloc_page();
p2m_mid_mfn_init(p2m_mid_missing_mfn);
- p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_mfn_p = brk_alloc_page();
p2m_top_mfn_p_init(p2m_top_mfn_p);
- p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_top_mfn = brk_alloc_page();
p2m_top_mfn_init(p2m_top_mfn);
} else {
/* Reinitialise, mfn's all change after migration */
@@ -312,10 +319,10 @@ void __ref xen_build_mfn_list_list(void)
/*
* XXX boot-time only! We should never find
* missing parts of the mfn tree after
- * runtime. extend_brk() will BUG if we call
+ * runtime. brk_alloc_page() will BUG if we call
* it too late.
*/
- mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ mid_mfn_p = brk_alloc_page();
p2m_mid_mfn_init(mid_mfn_p);
p2m_top_mfn_p[topidx] = mid_mfn_p;
@@ -344,16 +351,16 @@ void __init xen_build_dynamic_phys_to_machine(void)
xen_max_p2m_pfn = max_pfn;
- p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_missing = brk_alloc_page();
p2m_init(p2m_missing);
- p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_missing = brk_alloc_page();
p2m_mid_init(p2m_mid_missing);
- p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_top = brk_alloc_page();
p2m_top_init(p2m_top);
- p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_identity = brk_alloc_page();
p2m_init(p2m_identity);
/*
@@ -366,7 +373,7 @@ void __init xen_build_dynamic_phys_to_machine(void)
unsigned mididx = p2m_mid_index(pfn);
if (p2m_top[topidx] == p2m_mid_missing) {
- unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ unsigned long **mid = brk_alloc_page();
p2m_mid_init(mid);
p2m_top[topidx] = mid;
@@ -600,6 +607,8 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary
/* Boundary cross-over for the edges: */
p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ if (!p2m)
+ return false;
p2m_init(p2m);
@@ -626,7 +635,8 @@ static bool __init early_alloc_p2m(unsigned long pfn)
mid_mfn_p = p2m_top_mfn_p[topidx];
if (mid == p2m_mid_missing) {
mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
+ if (!mid)
+ return false;
p2m_mid_init(mid);
p2m_top[topidx] = mid;
@@ -636,6 +646,8 @@ static bool __init early_alloc_p2m(unsigned long pfn)
/* And the save/restore P2M tables.. */
if (mid_mfn_p == p2m_mid_missing_mfn) {
mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ if (!mid_mfn_p)
+ return false;
p2m_mid_mfn_init(mid_mfn_p);
p2m_top_mfn_p[topidx] = mid_mfn_p;
@@ -762,6 +774,7 @@ static void __init m2p_override_init(void)
m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
sizeof(unsigned long));
+ BUG_ON(!m2p_overrides);
for (i = 0; i < M2P_OVERRIDE_HASH; i++)
INIT_LIST_HEAD(&m2p_overrides[i]);
--
1.7.7.6