Message-Id: <1203042328.30010.2.camel@caritas-dev.intel.com>
Date:	Fri, 15 Feb 2008 10:25:28 +0800
From:	"Huang, Ying" <ying.huang@...el.com>
To:	Ingo Molnar <mingo@...hat.com>
Cc:	Thomas Gleixner <tglx@...utronix.de>,
	"H. Peter Anvin" <hpa@...or.com>, Andi Kleen <ak@...e.de>,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH] x86: EFI runtime code mapping enhancement

Hi, Ingo,

Please revert this patch, because it does not work with 1GB pages.
Although the original implementation does not work with 1GB pages
either, it can be fixed more easily than this one. I will compose a new
patch to fix the original implementation; a rough sketch of the idea is
below.
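
Roughly, the idea is to keep the lookup_address() based loop but also
step over 1GB mappings when lookup_address() reports PG_LEVEL_1G. The
following is only an illustrative sketch under that assumption, not the
final patch:

static void __init early_mapping_set_exec(unsigned long start,
					  unsigned long end,
					  int executable)
{
	pte_t *kpte;
	unsigned int level;

	while (start < end) {
		kpte = lookup_address((unsigned long)__va(start), &level);
		BUG_ON(!kpte);
		if (executable)
			set_pte(kpte, pte_mkexec(*kpte));
		else
			set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) &
					    __supported_pte_mask));
		/* Advance by the size of the mapping we just touched. */
		if (level == PG_LEVEL_4K)
			start = (start + PAGE_SIZE) & PAGE_MASK;
		else if (level == PG_LEVEL_2M)
			start = (start + PMD_SIZE) & PMD_MASK;
		else	/* PG_LEVEL_1G: sketch only, assumes gbpages */
			start = (start + PUD_SIZE) & PUD_MASK;
	}
}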

Best Regards,
Huang Ying

On Wed, 2008-02-13 at 17:22 +0800, Huang, Ying wrote:
> This patch enhances EFI runtime code memory mapping as follows:
> 
> - Move the __supported_pte_mask & _PAGE_NX check before the call to
>   runtime_code_page_mkexec(). This makes it possible for the compiler
>   to eliminate runtime_code_page_mkexec() on machines without NX
>   support.
> 
> - Use set_memory_x/nx in early_mapping_set_exec(). This eliminates the
>   duplicated implementation.
> 
> This patch has been tested on an Intel x86_64 platform with EFI64/32
> firmware.
> 
> Signed-off-by: Huang Ying <ying.huang@...el.com>
> 
> ---
>  arch/x86/kernel/efi.c    |    6 ++----
>  arch/x86/kernel/efi_64.c |   30 ++++++++++++------------------
>  2 files changed, 14 insertions(+), 22 deletions(-)
> 
> --- a/arch/x86/kernel/efi.c
> +++ b/arch/x86/kernel/efi.c
> @@ -384,9 +384,6 @@ static void __init runtime_code_page_mke
>  	efi_memory_desc_t *md;
>  	void *p;
>  
> -	if (!(__supported_pte_mask & _PAGE_NX))
> -		return;
> -
>  	/* Make EFI runtime service code area executable */
>  	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
>  		md = p;
> @@ -476,7 +473,8 @@ void __init efi_enter_virtual_mode(void)
>  	efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
>  	efi.reset_system = virt_efi_reset_system;
>  	efi.set_virtual_address_map = virt_efi_set_virtual_address_map;
> -	runtime_code_page_mkexec();
> +	if (__supported_pte_mask & _PAGE_NX)
> +		runtime_code_page_mkexec();
>  	early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
>  	memmap.map = NULL;
>  }
> --- a/arch/x86/kernel/efi_64.c
> +++ b/arch/x86/kernel/efi_64.c
> @@ -35,6 +35,7 @@
>  #include <asm/tlbflush.h>
>  #include <asm/proto.h>
>  #include <asm/efi.h>
> +#include <asm/cacheflush.h>
>  
>  static pgd_t save_pgd __initdata;
>  static unsigned long efi_flags __initdata;
> @@ -43,22 +44,15 @@ static void __init early_mapping_set_exe
>  					  unsigned long end,
>  					  int executable)
>  {
> -	pte_t *kpte;
> -	unsigned int level;
> +	unsigned long num_pages;
>  
> -	while (start < end) {
> -		kpte = lookup_address((unsigned long)__va(start), &level);
> -		BUG_ON(!kpte);
> -		if (executable)
> -			set_pte(kpte, pte_mkexec(*kpte));
> -		else
> -			set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
> -					    __supported_pte_mask));
> -		if (level == PG_LEVEL_4K)
> -			start = (start + PAGE_SIZE) & PAGE_MASK;
> -		else
> -			start = (start + PMD_SIZE) & PMD_MASK;
> -	}
> +	start &= PMD_MASK;
> +	end = (end + PMD_SIZE - 1) & PMD_MASK;
> +	num_pages = (end - start) >> PAGE_SHIFT;
> +	if (executable)
> +		set_memory_x((unsigned long)__va(start), num_pages);
> +	else
> +		set_memory_nx((unsigned long)__va(start), num_pages);
>  }
>  
>  static void __init early_runtime_code_mapping_set_exec(int executable)
> @@ -74,7 +68,7 @@ static void __init early_runtime_code_ma
>  		md = p;
>  		if (md->type == EFI_RUNTIME_SERVICES_CODE) {
>  			unsigned long end;
> -			end = md->phys_addr + (md->num_pages << PAGE_SHIFT);
> +			end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
>  			early_mapping_set_exec(md->phys_addr, end, executable);
>  		}
>  	}
> @@ -84,8 +78,8 @@ void __init efi_call_phys_prelog(void)
>  {
>  	unsigned long vaddress;
>  
> -	local_irq_save(efi_flags);
>  	early_runtime_code_mapping_set_exec(1);
> +	local_irq_save(efi_flags);
>  	vaddress = (unsigned long)__va(0x0UL);
>  	save_pgd = *pgd_offset_k(0x0UL);
>  	set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
> @@ -98,9 +92,9 @@ void __init efi_call_phys_epilog(void)
>  	 * After the lock is released, the original page table is restored.
>  	 */
>  	set_pgd(pgd_offset_k(0x0UL), save_pgd);
> -	early_runtime_code_mapping_set_exec(0);
>  	__flush_tlb_all();
>  	local_irq_restore(efi_flags);
> +	early_runtime_code_mapping_set_exec(0);
>  }
>  
>  void __init efi_reserve_bootmem(void)
> 
