Message-ID: <alpine.DEB.2.11.1410030653040.8496@gentwo.org>
Date:	Fri, 3 Oct 2014 06:54:22 -0500 (CDT)
From:	Christoph Lameter <cl@...ux.com>
To:	Thomas Gleixner <tglx@...utronix.de>
cc:	linux-kernel@...r.kernel.org
Subject: Re: Why do we still have 32 bit counters? Interrupt counters overflow
 within 50 days

On Fri, 3 Oct 2014, Thomas Gleixner wrote:

> > Is this the way it's intended, or should the counters be expanded to 64 bit?
>
> There is no reason why we cannot or should not expand them.

Ok, here is a patch to do just that:


Subject: Increase irq counters to 64 bit


Irq counters can easily overflow if they are only 32 bit wide.

For example, the timer interrupt occurs 1000 times per second, so it is
predictable that the timer interrupt counter will overflow after

2^32 / 1000 [interrupts per second] / 86400 [seconds per day]

which comes to roughly 49.7 days.
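
To make the arithmetic concrete, here is a small userspace sketch
(illustrative only, not part of the patch) that reproduces the
calculation above:

#include <stdio.h>

int main(void)
{
	/* A 32-bit counter incremented 1000 times per second
	 * (the timer interrupt rate assumed above) wraps after
	 * 2^32 ticks. */
	double seconds = 4294967296.0 / 1000.0;	/* 2^32 / 1000 Hz */
	double days    = seconds / 86400.0;	/* seconds per day */

	printf("32-bit irq counter wraps after ~%.1f days\n", days);
	return 0;
}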

Other irq counters may wrap even faster, for example those for
high-speed networking devices.

This patch avoids such overflows by increasing the counters to
64 bit.
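
As a quick sanity check (illustrative only, assuming an LP64 target
such as x86_64), unsigned long is 8 bytes wide there, so switching the
counters from unsigned int to unsigned long does give them a 64-bit
range:

#include <stdio.h>

int main(void)
{
	/* On LP64 builds (e.g. x86_64 Linux) this prints 4 and 8. */
	printf("unsigned int:  %zu bytes\n", sizeof(unsigned int));
	printf("unsigned long: %zu bytes\n", sizeof(unsigned long));
	return 0;
}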

Signed-off-by: Christoph Lameter <cl@...ux.com>

Index: linux/arch/x86/include/asm/processor.h
===================================================================
--- linux.orig/arch/x86/include/asm/processor.h
+++ linux/arch/x86/include/asm/processor.h
@@ -432,7 +432,7 @@ DECLARE_PER_CPU_FIRST(union irq_stack_un
 DECLARE_INIT_PER_CPU(irq_stack_union);

 DECLARE_PER_CPU(char *, irq_stack_ptr);
-DECLARE_PER_CPU(unsigned int, irq_count);
+DECLARE_PER_CPU(unsigned long, irq_count);
 extern asmlinkage void ignore_sysret(void);
 #else	/* X86_64 */
 #ifdef CONFIG_CC_STACKPROTECTOR
Index: linux/arch/x86/kernel/cpu/common.c
===================================================================
--- linux.orig/arch/x86/kernel/cpu/common.c
+++ linux/arch/x86/kernel/cpu/common.c
@@ -1144,7 +1144,7 @@ EXPORT_PER_CPU_SYMBOL(current_task);
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

-DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
+DEFINE_PER_CPU(unsigned long, irq_count) __visible = -1;

 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
Index: linux/include/linux/irqdesc.h
===================================================================
--- linux.orig/include/linux/irqdesc.h
+++ linux/include/linux/irqdesc.h
@@ -41,7 +41,7 @@ struct irq_desc;
  */
 struct irq_desc {
 	struct irq_data		irq_data;
-	unsigned int __percpu	*kstat_irqs;
+	unsigned long __percpu	*kstat_irqs;
 	irq_flow_handler_t	handle_irq;
 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
 	irq_preflow_handler_t	preflow_handler;
@@ -51,7 +51,7 @@ struct irq_desc {
 	unsigned int		core_internal_state__do_not_mess_with_it;
 	unsigned int		depth;		/* nested irq disables */
 	unsigned int		wake_depth;	/* nested wake enables */
-	unsigned int		irq_count;	/* For detecting broken IRQs */
+	unsigned long		irq_count;	/* For detecting broken IRQs */
 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
 	unsigned int		irqs_unhandled;
 	atomic_t		threads_handled;
Index: linux/include/linux/kernel_stat.h
===================================================================
--- linux.orig/include/linux/kernel_stat.h
+++ linux/include/linux/kernel_stat.h
@@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct kernel_cpustat, k

 extern unsigned long long nr_context_switches(void);

-extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+extern unsigned long kstat_irqs_cpu(unsigned int irq, int cpu);
 extern void kstat_incr_irq_this_cpu(unsigned int irq);

 static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
Index: linux/kernel/irq/debug.h
===================================================================
--- linux.orig/kernel/irq/debug.h
+++ linux/kernel/irq/debug.h
@@ -11,7 +11,7 @@

 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
-	printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
+	printk("irq %d, desc: %p, depth: %d, count: %lu, unhandled: %d\n",
 		irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
 	printk("->handle_irq():  %p, ", desc->handle_irq);
 	print_symbol("%s\n", (unsigned long)desc->handle_irq);
Index: linux/kernel/irq/irqdesc.c
===================================================================
--- linux.orig/kernel/irq/irqdesc.c
+++ linux/kernel/irq/irqdesc.c
@@ -532,7 +532,7 @@ void kstat_incr_irq_this_cpu(unsigned in
 	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 }

-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+unsigned long kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);

Index: linux/kernel/irq/proc.c
===================================================================
--- linux.orig/kernel/irq/proc.c
+++ linux/kernel/irq/proc.c
@@ -248,7 +248,7 @@ static int irq_spurious_proc_show(struct
 {
 	struct irq_desc *desc = irq_to_desc((long) m->private);

-	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
+	seq_printf(m, "count %lu\n" "unhandled %u\n" "last_unhandled %u ms\n",
 		   desc->irq_count, desc->irqs_unhandled,
 		   jiffies_to_msecs(desc->last_unhandled));
 	return 0;
@@ -450,7 +450,7 @@ int show_interrupts(struct seq_file *p,

 	seq_printf(p, "%*d: ", prec, i);
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+		seq_printf(p, "%10lu ", kstat_irqs_cpu(i, j));

 	if (desc->irq_data.chip) {
 		if (desc->irq_data.chip->irq_print_chip)