lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <753aef27d368410b1e5c5814b1e7c8a7@gmail.com>
Date:	Sun, 10 Nov 2013 21:33:10 +1300
From:	Michael Schmitz <schmitzmic@...il.com>
To:	Thomas Gleixner <tglx@...utronix.de>
Cc:	linux-m68k@...r.kernel.org, linux-kernel@...r.kernel.org,
	Geert Uytterhoeven <geert@...ux-m68k.org>
Subject: Re: [PATCH 09/17] [m68k] IRQ: add handle_polled_irq() for timer based soft interrupts

Thomas,

> Does the following patch solve the problem? You need to call
>
>      irq_set_status_flags(irq, IRQ_IS_POLLED);
>
> when setting up the interrupt controller for those polled interrupt
> lines.

Your patch works just fine, thanks.

I suppose setting the flag can be done in the corresponding irq startup 
function, instead of when setting up the irq controller?

Geert - I will send the patch to ataints.c implementing this as soon as 
Thomas' fix is merged.

Regards,

	Michael


> Thanks,
>
> 	tglx
> -----------------
> diff --git a/include/linux/irq.h b/include/linux/irq.h
> index 56bb0dc..7dc1003 100644
> --- a/include/linux/irq.h
> +++ b/include/linux/irq.h
> @@ -70,6 +70,9 @@ typedef	void (*irq_preflow_handler_t)(struct irq_data *data);
>   * IRQ_MOVE_PCNTXT		- Interrupt can be migrated from process context
>   * IRQ_NESTED_TRHEAD		- Interrupt nests into another thread
>   * IRQ_PER_CPU_DEVID		- Dev_id is a per-cpu variable
> + * IRQ_IS_POLLED		- Always polled by another interrupt. Exclude
> + *				  it from the spurious interrupt detection
> + *				  mechanism and from core side polling.
>   */
>  enum {
>  	IRQ_TYPE_NONE		= 0x00000000,
> @@ -94,12 +97,14 @@ enum {
>  	IRQ_NESTED_THREAD	= (1 << 15),
>  	IRQ_NOTHREAD		= (1 << 16),
>  	IRQ_PER_CPU_DEVID	= (1 << 17),
> +	IRQ_IS_POLLED		= (1 << 18),
>  };
>
>  #define IRQF_MODIFY_MASK	\
>  	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
>  	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
> -	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
> +	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
> +	 IRQ_IS_POLLED)
>
>  #define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
>
> diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
> index 1162f10..3320b84 100644
> --- a/kernel/irq/settings.h
> +++ b/kernel/irq/settings.h
> @@ -14,6 +14,7 @@ enum {
>  	_IRQ_NO_BALANCING	= IRQ_NO_BALANCING,
>  	_IRQ_NESTED_THREAD	= IRQ_NESTED_THREAD,
>  	_IRQ_PER_CPU_DEVID	= IRQ_PER_CPU_DEVID,
> +	_IRQ_IS_POLLED		= IRQ_IS_POLLED,
>  	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
>  };
>
> @@ -26,6 +27,7 @@ enum {
>  #define IRQ_NOAUTOEN		GOT_YOU_MORON
>  #define IRQ_NESTED_THREAD	GOT_YOU_MORON
>  #define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
> +#define IRQ_IS_POLLED		GOT_YOU_MORON
>  #undef IRQF_MODIFY_MASK
>  #define IRQF_MODIFY_MASK	GOT_YOU_MORON
>
> @@ -147,3 +149,8 @@ static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
>  {
>  	return desc->status_use_accessors & _IRQ_NESTED_THREAD;
>  }
> +
> +static inline bool irq_settings_is_polled(struct irq_desc *desc)
> +{
> +	return desc->status_use_accessors & _IRQ_IS_POLLED;
> +}
> diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
> index 7b5f012..a1d8cc6 100644
> --- a/kernel/irq/spurious.c
> +++ b/kernel/irq/spurious.c
> @@ -67,8 +67,13 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
>
>  	raw_spin_lock(&desc->lock);
>
> -	/* PER_CPU and nested thread interrupts are never polled */
> -	if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
> +	/*
> +	 * PER_CPU, nested thread interrupts and interrupts explicitly
> +	 * marked polled are excluded from polling.
> +	 */
> +	if (irq_settings_is_per_cpu(desc) ||
> +	    irq_settings_is_nested_thread(desc) ||
> +	    irq_settings_is_polled(desc))
>  		goto out;
>
>  	/*
> @@ -268,7 +273,8 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
>  void note_interrupt(unsigned int irq, struct irq_desc *desc,
>  		    irqreturn_t action_ret)
>  {
> -	if (desc->istate & IRQS_POLL_INPROGRESS)
> +	if (desc->istate & IRQS_POLL_INPROGRESS ||
> +	    irq_settings_is_polled(desc))
>  		return;
>
>  	/* we get here again via the threaded handler */
>
>
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-m68k" in
> the body of a message to majordomo@...r.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ