lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Wed, 26 Dec 2012 19:33:37 -0800
From:	"H. Peter Anvin" <hpa@...or.com>
To:	Daniel Kiper <daniel.kiper@...cle.com>, andrew.cooper3@...rix.com,
	ebiederm@...ssion.com, jbeulich@...e.com, konrad.wilk@...cle.com,
	maxim.uvarov@...cle.com, mingo@...hat.com, tglx@...utronix.de,
	vgoyal@...hat.com, x86@...nel.org, kexec@...ts.infradead.org,
	linux-kernel@...r.kernel.org,
	virtualization@...ts.linux-foundation.org,
	xen-devel@...ts.xensource.com
Subject: Re: [PATCH v3 02/11] x86/kexec: Add extra pointers to transition page table PGD, PUD, PMD and PTE

Hmm... this code is being redone at the moment... this might conflict.

Daniel Kiper <daniel.kiper@...cle.com> wrote:

>Some implementations (e.g. Xen PVOPS) cannot reuse parts of the identity
>page table to construct the transition page table. This means they
>require separate PUDs, PMDs and PTEs for the virtual and the physical
>(identity) mappings. To satisfy that requirement, add extra pointers for
>the PGD, PUD, PMD and PTE levels and adjust the existing code
>accordingly.
>
>Signed-off-by: Daniel Kiper <daniel.kiper@...cle.com>
>---
> arch/x86/include/asm/kexec.h       |   10 +++++++---
> arch/x86/kernel/machine_kexec_64.c |   12 ++++++------
> 2 files changed, 13 insertions(+), 9 deletions(-)
>
>diff --git a/arch/x86/include/asm/kexec.h
>b/arch/x86/include/asm/kexec.h
>index 6080d26..cedd204 100644
>--- a/arch/x86/include/asm/kexec.h
>+++ b/arch/x86/include/asm/kexec.h
>@@ -157,9 +157,13 @@ struct kimage_arch {
> };
> #else
> struct kimage_arch {
>-	pud_t *pud;
>-	pmd_t *pmd;
>-	pte_t *pte;
>+	pgd_t *pgd;
>+	pud_t *pud0;
>+	pud_t *pud1;
>+	pmd_t *pmd0;
>+	pmd_t *pmd1;
>+	pte_t *pte0;
>+	pte_t *pte1;
> };
> #endif
> 
>diff --git a/arch/x86/kernel/machine_kexec_64.c
>b/arch/x86/kernel/machine_kexec_64.c
>index b3ea9db..976e54b 100644
>--- a/arch/x86/kernel/machine_kexec_64.c
>+++ b/arch/x86/kernel/machine_kexec_64.c
>@@ -137,9 +137,9 @@ out:
> 
> static void free_transition_pgtable(struct kimage *image)
> {
>-	free_page((unsigned long)image->arch.pud);
>-	free_page((unsigned long)image->arch.pmd);
>-	free_page((unsigned long)image->arch.pte);
>+	free_page((unsigned long)image->arch.pud0);
>+	free_page((unsigned long)image->arch.pmd0);
>+	free_page((unsigned long)image->arch.pte0);
> }
> 
> static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
>@@ -157,7 +157,7 @@ static int init_transition_pgtable(struct kimage
>*image, pgd_t *pgd)
> 		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
> 		if (!pud)
> 			goto err;
>-		image->arch.pud = pud;
>+		image->arch.pud0 = pud;
> 		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
> 	}
> 	pud = pud_offset(pgd, vaddr);
>@@ -165,7 +165,7 @@ static int init_transition_pgtable(struct kimage
>*image, pgd_t *pgd)
> 		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
> 		if (!pmd)
> 			goto err;
>-		image->arch.pmd = pmd;
>+		image->arch.pmd0 = pmd;
> 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
> 	}
> 	pmd = pmd_offset(pud, vaddr);
>@@ -173,7 +173,7 @@ static int init_transition_pgtable(struct kimage
>*image, pgd_t *pgd)
> 		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
> 		if (!pte)
> 			goto err;
>-		image->arch.pte = pte;
>+		image->arch.pte0 = pte;
> 		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
> 	}
> 	pte = pte_offset_kernel(pmd, vaddr);

-- 
Sent from my mobile phone. Please excuse brevity and lack of formatting.
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists