Message-ID: <tip-1e74016370ec3d552a7f5df18bb2b0f1c80b5a9f@git.kernel.org>
Date:   Wed, 1 Feb 2017 02:12:55 -0800
From:   "tip-bot for travis@....com" <tipbot@...or.com>
To:     linux-tip-commits@...r.kernel.org
Cc:     mingo@...nel.org, linux-kernel@...r.kernel.org, travis@....com,
        hpa@...or.com, torvalds@...ux-foundation.org, peterz@...radead.org,
        tglx@...utronix.de, rja@....com
Subject: [tip:x86/platform] x86/platform/UV: Clean up the NMI code to match
 current coding style

Commit-ID:  1e74016370ec3d552a7f5df18bb2b0f1c80b5a9f
Gitweb:     http://git.kernel.org/tip/1e74016370ec3d552a7f5df18bb2b0f1c80b5a9f
Author:     travis@....com <travis@....com>
AuthorDate: Wed, 25 Jan 2017 10:35:24 -0600
Committer:  Ingo Molnar <mingo@...nel.org>
CommitDate: Wed, 1 Feb 2017 10:21:00 +0100

x86/platform/UV: Clean up the NMI code to match current coding style

Update UV NMI to current coding style.

Signed-off-by: Mike Travis <travis@....com>
Acked-by: Thomas Gleixner <tglx@...utronix.de>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Russ Anderson <rja@....com>
Link: http://lkml.kernel.org/r/20170125163518.419094259@asylum.americas.sgi.com
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
 arch/x86/platform/uv/uv_nmi.c | 74 +++++++++++++++++++++----------------------
 1 file changed, 37 insertions(+), 37 deletions(-)
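
The file-header hunks at the top of the diff describe the handler design this
cleanup preserves: the heavy primary handler sits on the NMI_UNKNOWN notifier
chain, so perf's high-rate NMIs (~4M/s with 1024 CPU threads) never pay for it,
while a very short secondary handler only tests for the IPI(NMI) "ping" used to
drag non-responding CPUs into the handler. As a minimal sketch of that split:
the sketch_*() names, handler bodies, and registration strings below are
placeholders (the patch does not show how uv_nmi.c itself registers); only
register_nmi_handler(), the NMI_UNKNOWN/NMI_LOCAL chains, and the
NMI_DONE/NMI_HANDLED return codes are the stock x86 <asm/nmi.h> interface.

#include <linux/init.h>
#include <asm/nmi.h>

static int sketch_uv_nmi(unsigned int reason, struct pt_regs *regs)
{
	/* Heavy-weight primary handler: field the 'power nmi' event. */
	return NMI_DONE;	/* NMI_HANDLED once it claims the NMI */
}

static int sketch_uv_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
	/* Cheap secondary handler: only test the IPI(NMI) "ping" flag. */
	return NMI_DONE;
}

static void __init sketch_register_nmi_notifiers(void)
{
	/*
	 * Registering the primary handler on the NMI_UNKNOWN chain keeps
	 * it out of the hot path taken by perf NMIs: it is consulted only
	 * for NMIs that no other handler has claimed.
	 */
	register_nmi_handler(NMI_UNKNOWN, sketch_uv_nmi, 0, "sketch_uv");

	/* The ping handler must see every local NMI, so it goes on NMI_LOCAL. */
	register_nmi_handler(NMI_LOCAL, sketch_uv_nmi_ping, 0, "sketch_uvping");
}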

diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 6a71b08..0ecd7bf 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -45,8 +45,8 @@
  *
  * Handle system-wide NMI events generated by the global 'power nmi' command.
  *
- * Basic operation is to field the NMI interrupt on each cpu and wait
- * until all cpus have arrived into the nmi handler.  If some cpus do not
+ * Basic operation is to field the NMI interrupt on each CPU and wait
+ * until all CPU's have arrived into the nmi handler.  If some CPU's do not
  * make it into the handler, try and force them in with the IPI(NMI) signal.
  *
  * We also have to lessen UV Hub MMR accesses as much as possible as this
@@ -56,7 +56,7 @@
  * To do this we register our primary NMI notifier on the NMI_UNKNOWN
  * chain.  This reduces the number of false NMI calls when the perf
  * tools are running which generate an enormous number of NMIs per
- * second (~4M/s for 1024 cpu threads).  Our secondary NMI handler is
+ * second (~4M/s for 1024 CPU threads).  Our secondary NMI handler is
  * very short as it only checks that if it has been "pinged" with the
  * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
  *
@@ -113,7 +113,7 @@ static int param_get_local64(char *buffer, const struct kernel_param *kp)
 
 static int param_set_local64(const char *val, const struct kernel_param *kp)
 {
-	/* clear on any write */
+	/* Clear on any write */
 	local64_set((local64_t *)kp->arg, 0);
 	return 0;
 }
@@ -322,7 +322,7 @@ static struct init_nmi {
 	.data = 0x0,	/* ACPI Mode */
 	},
 
-/* clear status */
+/* Clear status: */
 	{	/* GPI_INT_STS_GPP_D_0 */
 	.offset = 0x104,
 	.mask = 0x0,
@@ -344,29 +344,29 @@ static struct init_nmi {
 	.data = 0x1,	/* Clear Status */
 	},
 
-/* disable interrupts */
+/* Disable interrupts: */
 	{	/* GPI_INT_EN_GPP_D_0 */
 	.offset = 0x114,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_GPE_EN_GPP_D_0 */
 	.offset = 0x134,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_SMI_EN_GPP_D_0 */
 	.offset = 0x154,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 	{	/* GPI_NMI_EN_GPP_D_0 */
 	.offset = 0x174,
 	.mask = 0x1,
-	.data = 0x0,	/* disable interrupt generation */
+	.data = 0x0,	/* Disable interrupt generation */
 	},
 
-/* setup GPP_D_0 Pad Config */
+/* Setup GPP_D_0 Pad Config: */
 	{	/* PAD_CFG_DW0_GPP_D_0 */
 	.offset = 0x4c0,
 	.mask = 0xffffffff,
@@ -444,7 +444,7 @@ static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
 		return 0;
 
 	*pstat = STS_GPP_D_0_MASK;	/* Is a UV NMI: clear GPP_D_0 status */
-	(void)*pstat;			/* flush write */
+	(void)*pstat;			/* Flush write */
 
 	return 1;
 }
@@ -461,8 +461,8 @@ static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
 }
 
 /*
- * If first cpu in on this hub, set hub_nmi "in_nmi" and "owner" values and
- * return true.  If first cpu in on the system, set global "in_nmi" flag.
+ * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and
+ * return true.  If first CPU in on the system, set global "in_nmi" flag.
  */
 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
 {
@@ -496,7 +496,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
 			nmi_detected = uv_test_nmi(hub_nmi);
 
-			/* check flag for UV external NMI */
+			/* Check flag for UV external NMI */
 			if (nmi_detected > 0) {
 				uv_set_in_nmi(cpu, hub_nmi);
 				nmi = 1;
@@ -516,7 +516,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 slave_wait:		cpu_relax();
 			udelay(uv_nmi_slave_delay);
 
-			/* re-check hub in_nmi flag */
+			/* Re-check hub in_nmi flag */
 			nmi = atomic_read(&hub_nmi->in_nmi);
 			if (nmi)
 				break;
@@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu)
 	}
 }
 
-/* Ping non-responding cpus attemping to force them into the NMI handler */
+/* Ping non-responding CPU's attempting to force them into the NMI handler */
 static void uv_nmi_nr_cpus_ping(void)
 {
 	int cpu;
@@ -571,7 +571,7 @@ static void uv_nmi_nr_cpus_ping(void)
 	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }
 
-/* Clean up flags for cpus that ignored both NMI and ping */
+/* Clean up flags for CPU's that ignored both NMI and ping */
 static void uv_nmi_cleanup_mask(void)
 {
 	int cpu;
@@ -583,7 +583,7 @@ static void uv_nmi_cleanup_mask(void)
 	}
 }
 
-/* Loop waiting as cpus enter NMI handler */
+/* Loop waiting as CPU's enter NMI handler */
 static int uv_nmi_wait_cpus(int first)
 {
 	int i, j, k, n = num_online_cpus();
@@ -597,7 +597,7 @@ static int uv_nmi_wait_cpus(int first)
 		k = n - cpumask_weight(uv_nmi_cpu_mask);
 	}
 
-	/* PCH NMI causes only one cpu to respond */
+	/* PCH NMI causes only one CPU to respond */
 	if (first && uv_pch_intr_now_enabled) {
 		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
 		return n - k - 1;
@@ -618,13 +618,13 @@ static int uv_nmi_wait_cpus(int first)
 			k = n;
 			break;
 		}
-		if (last_k != k) {	/* abort if no new cpus coming in */
+		if (last_k != k) {	/* abort if no new CPU's coming in */
 			last_k = k;
 			waiting = 0;
 		} else if (++waiting > uv_nmi_wait_count)
 			break;
 
-		/* extend delay if waiting only for cpu 0 */
+		/* Extend delay if waiting only for CPU 0: */
 		if (waiting && (n - k) == 1 &&
 		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
 			loop_delay *= 100;
@@ -635,29 +635,29 @@ static int uv_nmi_wait_cpus(int first)
 	return n - k;
 }
 
-/* Wait until all slave cpus have entered UV NMI handler */
+/* Wait until all slave CPU's have entered UV NMI handler */
 static void uv_nmi_wait(int master)
 {
-	/* indicate this cpu is in */
+	/* Indicate this CPU is in: */
 	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
 
-	/* if not the first cpu in (the master), then we are a slave cpu */
+	/* If not the first CPU in (the master), then we are a slave CPU */
 	if (!master)
 		return;
 
 	do {
-		/* wait for all other cpus to gather here */
+		/* Wait for all other CPU's to gather here */
 		if (!uv_nmi_wait_cpus(1))
 			break;
 
-		/* if not all made it in, send IPI NMI to them */
+		/* If not all made it in, send IPI NMI to them */
 		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
 			 cpumask_weight(uv_nmi_cpu_mask),
 			 cpumask_pr_args(uv_nmi_cpu_mask));
 
 		uv_nmi_nr_cpus_ping();
 
-		/* if all cpus are in, then done */
+		/* If all CPU's are in, then done */
 		if (!uv_nmi_wait_cpus(0))
 			break;
 
@@ -709,7 +709,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
 	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
 }
 
-/* Trigger a slave cpu to dump it's state */
+/* Trigger a slave CPU to dump its state */
 static void uv_nmi_trigger_dump(int cpu)
 {
 	int retry = uv_nmi_trigger_delay;
@@ -730,7 +730,7 @@ static void uv_nmi_trigger_dump(int cpu)
 	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
 }
 
-/* Wait until all cpus ready to exit */
+/* Wait until all CPU's ready to exit */
 static void uv_nmi_sync_exit(int master)
 {
 	atomic_dec(&uv_nmi_cpus_in_nmi);
@@ -760,7 +760,7 @@ static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
 	uv_nmi_sync_exit(master);
 }
 
-/* Walk through cpu list and dump state of each */
+/* Walk through CPU list and dump state of each */
 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 {
 	if (master) {
@@ -872,7 +872,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 		if (reason < 0)
 			return;
 
-		/* call KGDB NMI handler as MASTER */
+		/* Call KGDB NMI handler as MASTER */
 		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
 				&uv_nmi_slave_continue);
 		if (ret) {
@@ -880,7 +880,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
 		}
 	} else {
-		/* wait for KGDB signal that it's ready for slaves to enter */
+		/* Wait for KGDB signal that it's ready for slaves to enter */
 		int sig;
 
 		do {
@@ -888,7 +888,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 			sig = atomic_read(&uv_nmi_slave_continue);
 		} while (!sig);
 
-		/* call KGDB as slave */
+		/* Call KGDB as slave */
 		if (sig == SLAVE_CONTINUE)
 			kgdb_nmicallback(cpu, regs);
 	}
@@ -932,7 +932,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 			strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
 	}
 
-	/* Pause as all cpus enter the NMI handler */
+	/* Pause as all CPU's enter the NMI handler */
 	uv_nmi_wait(master);
 
 	/* Process actions other than "kdump": */
@@ -972,7 +972,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 }
 
 /*
- * NMI handler for pulling in CPUs when perf events are grabbing our NMI
+ * NMI handler for pulling in CPU's when perf events are grabbing our NMI
  */
 static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
@@ -1005,7 +1005,7 @@ void uv_nmi_init(void)
 	unsigned int value;
 
 	/*
-	 * Unmask NMI on all cpus
+	 * Unmask NMI on all CPU's
 	 */
 	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
 	value &= ~APIC_LVT_MASKED;
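
For context on the param_set_local64() hunk near the top of the patch: that
helper is the .set side of a kernel_param_ops pair which exposes the driver's
NMI event counters as clear-on-write module parameters. A self-contained
sketch of the wiring follows; the counter name, the .get body, and the
module_param_cb() hookup are assumed reconstructions, and only the .set body
is taken from the patch:

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <asm/local64.h>

/* An illustrative clear-on-write event counter (not a name from uv_nmi.c). */
static local64_t sketch_nmi_count;

/* Read back as a plain number (body assumed; only the signature is in the patch). */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%ld\n", local64_read((local64_t *)kp->arg));
}

/* Body as in the patch: any write resets the counter. */
static int param_set_local64(const char *val, const struct kernel_param *kp)
{
	/* Clear on any write */
	local64_set((local64_t *)kp->arg, 0);
	return 0;
}

static const struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};

module_param_cb(nmi_count, &param_ops_local64, &sketch_nmi_count, 0644);

With that in place, reading the parameter file under
/sys/module/<owner>/parameters/ returns the current count, and writing
anything to it (e.g. "echo 0 > .../nmi_count") zeroes it, which is exactly
what the "Clear on any write" comment promises.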
