Message-ID: <47C5CAFD.9030803@qualcomm.com>
Date:	Wed, 27 Feb 2008 12:41:33 -0800
From:	Max Krasnyanskiy <maxk@...lcomm.com>
To:	Thomas Gleixner <tglx@...utronix.de>
CC:	LKML <linux-kernel@...r.kernel.org>,
	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Subject: Re: [RFC] Genirq and CPU isolation

Thomas,
Any chance I can get your feedback on this?
Max

Max Krasnyansky wrote:
> Hi Thomas,
> 
> While reviewing the CPU isolation patches, Peter pointed out that instead of
> changing arch-specific irq handling I should be extending the genirq code,
> which makes perfect sense. Why didn't I think of that before :)
> Basically, the idea is that by default isolated CPUs must not get HW
> irqs routed to them (besides IPIs and the like, of course).
> Does the patch included below look like the right approach?
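> 
> To make the intent concrete, here is a rough standalone sketch of the
> default mask the genirq code would compute (a hypothetical helper for
> illustration only, assuming cpu_isolated_map is visible as in this
> series; the real change is the generic select_smp_affinity() below):
> 
> 	/*
> 	 * Default affinity for a newly set up irq: all online CPUs
> 	 * except the ones marked isolated via isolcpus=.
> 	 */
> 	static cpumask_t default_irq_affinity_mask(void)
> 	{
> 		cpumask_t mask;
> 
> 		/* online_cpus - isolated_cpus */
> 		cpus_andnot(mask, cpu_online_map, cpu_isolated_map);
> 		return mask;
> 	}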
> 
> By the way, select_smp_affinity(), which is currently used only by Alpha,
> seemed out of place: it is called multiple times for shared irqs, i.e. every
> time a new handler is registered the irq is moved to a different CPU.
> So I moved it under the "if (!shared)" check inside setup_irq().
> 
> The patch introduces a generic version of select_smp_affinity() that sets
> the affinity mask to "online_cpus - isolated_cpus", and updates the x86_32
> and Alpha irq load balancers to ignore isolated CPUs.
> I booted it on a Core2 laptop and on dual Opteron boxes, with and without
> the isolcpus= option, and everything seems to work as expected.
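> 
> For a quick sanity check from user space, something along these lines will
> dump an irq's affinity mask so you can confirm that the isolated CPUs'
> bits are cleared (a minimal sketch, it just reads /proc/irq/<N>/smp_affinity):
> 
> 	#include <stdio.h>
> 
> 	int main(int argc, char **argv)
> 	{
> 		char path[64], mask[256];
> 		FILE *f;
> 
> 		/* default to irq 0 if no irq number was given */
> 		snprintf(path, sizeof(path), "/proc/irq/%s/smp_affinity",
> 			 argc > 1 ? argv[1] : "0");
> 		f = fopen(path, "r");
> 		if (!f) {
> 			perror(path);
> 			return 1;
> 		}
> 		if (fgets(mask, sizeof(mask), f))
> 			printf("%s: %s", path, mask);
> 		fclose(f);
> 		return 0;
> 	}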
> 
> I wanted to run this by you before I include it in my patch series.
> Thanx
> Max
> 
> 
> diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
> index facf82a..6b01702 100644
> --- a/arch/alpha/kernel/irq.c
> +++ b/arch/alpha/kernel/irq.c
> @@ -51,7 +51,7 @@ select_smp_affinity(unsigned int irq)
>  	if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
>  		return 1;
>  
> -	while (!cpu_possible(cpu))
> +	while (!cpu_possible(cpu) || cpu_isolated(cpu))
>  		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
>  	last_cpu = cpu;
>  
> diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
> index e02e58c..07352b7 100644
> --- a/arch/x86/kernel/genapic_flat_64.c
> +++ b/arch/x86/kernel/genapic_flat_64.c
> @@ -21,9 +21,7 @@
>  
>  static cpumask_t flat_target_cpus(void)
>  {
> -	cpumask_t target;
> -	cpus_andnot(target, cpu_online_map, cpu_isolated_map);
> -	return target;
> +	return cpu_online_map;
>  }
>  
>  static cpumask_t flat_vector_allocation_domain(int cpu)
> diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
> index 4ca5486..9c8816f 100644
> --- a/arch/x86/kernel/io_apic_32.c
> +++ b/arch/x86/kernel/io_apic_32.c
> @@ -468,7 +468,7 @@ static void do_irq_balance(void)
>  	for_each_possible_cpu(i) {
>  		int package_index;
>  		CPU_IRQ(i) = 0;
> -		if (!cpu_online(i))
> +		if (!cpu_online(i) || cpu_isolated(i))
>  			continue;
>  		package_index = CPU_TO_PACKAGEINDEX(i);
>  		for (j = 0; j < NR_IRQS; j++) {
> diff --git a/include/linux/irq.h b/include/linux/irq.h
> index 176e5e7..287bc64 100644
> --- a/include/linux/irq.h
> +++ b/include/linux/irq.h
> @@ -253,14 +253,7 @@ static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
>  }
>  #endif
>  
> -#ifdef CONFIG_AUTO_IRQ_AFFINITY
>  extern int select_smp_affinity(unsigned int irq);
> -#else
> -static inline int select_smp_affinity(unsigned int irq)
> -{
> -	return 1;
> -}
> -#endif
>  
>  extern int no_irq_affinity;
>  
> diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
> index 438a014..e74db94 100644
> --- a/kernel/irq/manage.c
> +++ b/kernel/irq/manage.c
> @@ -376,6 +376,9 @@ int setup_irq(unsigned int irq, struct irqaction *new)
>  		} else
>  			/* Undo nested disables: */
>  			desc->depth = 1;
> +
> +		/* Set default affinity mask once everything is setup */
> +		select_smp_affinity(irq);
>  	}
>  	/* Reset broken irq detection when installing new handler */
>  	desc->irq_count = 0;
> @@ -488,6 +491,26 @@ void free_irq(unsigned int irq, void *dev_id)
>  }
>  EXPORT_SYMBOL(free_irq);
>  
> +#ifndef CONFIG_AUTO_IRQ_AFFINITY
> +/**
> + * Generic version of the affinity autoselector.
> + * Called under desc->lock from setup_irq().
> + * btw Should we rename this to select_irq_affinity() ?
> + */
> +int select_smp_affinity(unsigned int irq)
> +{
> +	cpumask_t usable_cpus;
> +
> +	if (!irq_can_set_affinity(irq))
> +		return 0;
> +
> +	cpus_andnot(usable_cpus, cpu_online_map, cpu_isolated_map);
> +	irq_desc[irq].affinity = usable_cpus;
> +	irq_desc[irq].chip->set_affinity(irq, usable_cpus);
> +	return 0;
> +}
> +#endif
> +
>  /**
>   *	request_irq - allocate an interrupt line
>   *	@irq: Interrupt line to allocate
> @@ -555,8 +578,6 @@ int request_irq(unsigned int irq, irq_handler_t handler,
>  	action->next = NULL;
>  	action->dev_id = dev_id;
>  
> -	select_smp_affinity(irq);
> -
>  #ifdef CONFIG_DEBUG_SHIRQ
>  	if (irqflags & IRQF_SHARED) {
>  		/*

