Message-ID: <20110222193052.GB17866@dumpdata.com>
Date:	Tue, 22 Feb 2011 14:30:52 -0500
From:	Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
To:	stefano.stabellini@...citrix.com
Cc:	linux-kernel@...r.kernel.org, xen-devel@...ts.xensource.com,
	Jeremy.Fitzhardinge@...rix.com
Subject: Re: [PATCH 6/7] xen: enable event channels to send and receive
 IPIs for PV on HVM guests

On Wed, Feb 16, 2011 at 05:53:06PM +0000, stefano.stabellini@...citrix.com wrote:
> From: Stefano Stabellini <stefano.stabellini@...citrix.com>
> 
> Enable the usage of event channels to send and receive IPIs when
> running as a PV on HVM guest.

Why not squash this with the other patch? After this one, all the
other patch does is add a call in the 'xen_hvm_cpu_notify' function to
set up the spinlocks.
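
If they were squashed, the spinlock setup would just live in the
notifier. As a rough sketch (from memory, not code from either patch)
the combined 'xen_hvm_cpu_notify' could look something like:

static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		xen_vcpu_setup(cpu);
		/* the bit the other patch adds: */
		if (xen_have_vector_callback)
			xen_init_lock_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}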

> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@...citrix.com>
> ---
>  arch/x86/xen/enlighten.c |   16 +---------------
>  arch/x86/xen/smp.c       |   42 ++++++++++++++++++++++++++++++++++++++++++
>  arch/x86/xen/xen-ops.h   |    2 ++
>  3 files changed, 45 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
> index 9c1628b..fe02574 100644
> --- a/arch/x86/xen/enlighten.c
> +++ b/arch/x86/xen/enlighten.c
> @@ -1343,20 +1343,6 @@ static struct notifier_block __cpuinitdata xen_hvm_cpu_notifier = {
>  	.notifier_call	= xen_hvm_cpu_notify,
>  };
>  
> -static void xen_hvm_spinlock_init(void)
> -{
> -	if (!xen_have_vector_callback)
> -		return;
> -	xen_init_lock_cpu(0);
> -	xen_init_spinlocks();
> -}
> -
> -static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
> -{
> -	native_smp_prepare_cpus(max_cpus);
> -	xen_hvm_spinlock_init();
> -}
> -
>  static void __init xen_hvm_guest_init(void)
>  {
>  	int r;
> @@ -1370,13 +1356,13 @@ static void __init xen_hvm_guest_init(void)
>  
>  	if (xen_feature(XENFEAT_hvm_callback_vector))
>  		xen_have_vector_callback = 1;
> +	xen_hvm_smp_init();
>  	register_cpu_notifier(&xen_hvm_cpu_notifier);
>  	xen_unplug_emulated_devices();
>  	have_vcpu_info_placement = 0;
>  	x86_init.irqs.intr_init = xen_init_IRQ;
>  	xen_hvm_init_time_ops();
>  	xen_hvm_init_mmu_ops();
> -	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
>  }
>  
>  static bool __init xen_hvm_platform(void)
> diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
> index 72a4c79..2300d4b 100644
> --- a/arch/x86/xen/smp.c
> +++ b/arch/x86/xen/smp.c
> @@ -509,3 +509,45 @@ void __init xen_smp_init(void)
>  	xen_fill_possible_map();
>  	xen_init_spinlocks();
>  }
> +
> +static void xen_hvm_spinlock_init(void)
> +{
> +	if (!xen_have_vector_callback)
> +		return;
> +	xen_init_lock_cpu(0);
> +	xen_init_spinlocks();
> +}
> +
> +static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
> +{
> +	native_smp_prepare_cpus(max_cpus);
> +	WARN_ON(xen_smp_intr_init(0));
> +	xen_hvm_spinlock_init();

Why not merge 'xen_hvm_spinlock_init' in here?
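
That is, just fold its body in, something like:

static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));
	/* former xen_hvm_spinlock_init(): */
	if (!xen_have_vector_callback)
		return;
	xen_init_lock_cpu(0);
	xen_init_spinlocks();
}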

> +}
> +
> +static int __cpuinit xen_hvm_cpu_up(unsigned int cpu)
> +{
> +	int rc;
> +	rc = native_cpu_up(cpu);
> +	WARN_ON(xen_smp_intr_init(cpu));
> +	return rc;
> +}
> +
> +static void xen_hvm_cpu_die(unsigned int cpu)
> +{
> +	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
> +	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
> +	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
> +	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
> +	native_cpu_die(cpu);
> +}
> +
> +void __init xen_hvm_smp_init(void)
> +{
> +	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
> +	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
> +	smp_ops.cpu_up = xen_hvm_cpu_up;
> +	smp_ops.cpu_die = xen_hvm_cpu_die;
> +	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
> +	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
> +}
> diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
> index 9d41bf9..3112f55 100644
> --- a/arch/x86/xen/xen-ops.h
> +++ b/arch/x86/xen/xen-ops.h
> @@ -64,10 +64,12 @@ void xen_setup_vcpu_info_placement(void);
>  
>  #ifdef CONFIG_SMP
>  void xen_smp_init(void);
> +void __init xen_hvm_smp_init(void);
>  
>  extern cpumask_var_t xen_cpu_initialized_map;
>  #else
>  static inline void xen_smp_init(void) {}
> +static inline void xen_hvm_smp_init(void) {}
>  #endif
>  
>  #ifdef CONFIG_PARAVIRT_SPINLOCKS
> -- 
> 1.5.6.5
