Message-Id: <1224664817.15846.1.camel@eight.analog.com>
Date: Wed, 22 Oct 2008 16:40:17 +0800
From: sonic zhang <sonic.adi@...il.com>
To: Jason Wessel <jason.wessel@...driver.com>
Cc: Linux Kernel <linux-kernel@...r.kernel.org>,
kgdb mailing list <kgdb-bugreport@...ts.sourceforge.net>
Subject: [PATCH] [kgdb] Switch master cpu after gdb thread command for SMP
(v3)
In the Blackfin SMP architecture, each core has its own L1 SRAM and MMR
space that code running on the other core cannot access. In the current
kgdb implementation, CPUs are represented as shadow threads with
negative thread IDs.

If the user runs the thread command in gdb to switch to the thread of
the other CPU, kgdb should (an illustrative sketch of the arch-side
hook follows this list):
1. send an IPI to the master CPU
2. release the specific passive CPU waiting in the IPI handler
3. exit the kgdb exception loop on the master CPU and trap it into the
   kgdb wait in the IPI handler
4. trap the released passive CPU into the kgdb exception from the IPI
   handler
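Not part of the patch itself, only to illustrate what the arch side is
expected to provide: a minimal sketch of a kgdb_roundup_cpu()
implementation. arch_send_kgdb_ipi() and kgdb_ipi_handler() below are
hypothetical names; a real port (Blackfin included) would wire this
into its own IPI or NMI machinery.

/*
 * Illustration only -- not the actual Blackfin implementation.
 * arch_send_kgdb_ipi() and kgdb_ipi_handler() are hypothetical;
 * each architecture hooks up its own IPI/NMI path.
 */
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/irq_regs.h>

extern void arch_send_kgdb_ipi(int cpu);	/* hypothetical arch primitive */

void kgdb_roundup_cpu(int cpu, unsigned long flags)
{
	/*
	 * Raise an IPI for @cpu (typically the current master CPU).
	 * The interrupt stays pending while IRQs are off; once the
	 * kgdb exception handler returns and re-enables interrupts,
	 * the handler below runs and parks the CPU in kgdb_wait().
	 */
	arch_send_kgdb_ipi(cpu);
}

/* Hypothetical IPI handler registered by the architecture code. */
static irqreturn_t kgdb_ipi_handler(int irq, void *dev_id)
{
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
	return IRQ_HANDLED;
}

The only contract the generic code relies on is that the CPU named in
kgdb_roundup_cpu() eventually calls kgdb_nmicallback() once interrupts
are enabled again.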
Signed-off-by: Sonic Zhang <sonic.adi@...il.com>
---
include/linux/kgdb.h | 16 +++++++++++-
kernel/kgdb.c | 66 +++++++++++++++++++++++++++++++++++++++++++-------
2 files changed, 72 insertions(+), 10 deletions(-)
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 6adcc29..664e396 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -106,7 +106,8 @@ struct kgdb_bkpt {
# define KGDB_MAX_BREAKPOINTS 1000
#endif
-#define KGDB_HW_BREAKPOINT 1
+#define KGDB_HW_BREAKPOINT 0x1
+#define KGDB_THR_PROC_SWAP 0x2
/*
* Functions each KGDB-supporting architecture must provide:
@@ -203,6 +204,19 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
*/
extern void kgdb_roundup_cpus(unsigned long flags);
+/**
+ * kgdb_roundup_cpu - Get a specific CPU into a holding pattern
+ * @cpu: Specific cpu id
+ * @flags: Current IRQ state
+ *
+ * On SMP systems, we need to switch the CPU from the currently active
+ * one to another passive one. This gets the current active CPU into a
+ * known state in kgdb_wait().
+ *
+ * On non-SMP systems, this is not called.
+ */
+extern void kgdb_roundup_cpu(int cpu, unsigned long flags);
+
/* Optional functions. */
extern int kgdb_validate_break_address(unsigned long addr);
extern int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr);
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index e4dcfb2..277986d 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -565,6 +565,7 @@ static void kgdb_wait(struct pt_regs *regs)
{
unsigned long flags;
int cpu;
+ struct task_struct *thread;
local_irq_save(flags);
cpu = raw_smp_processor_id();
@@ -577,6 +578,8 @@ static void kgdb_wait(struct pt_regs *regs)
smp_wmb();
atomic_set(&cpu_in_kgdb[cpu], 1);
+ kgdb_disable_hw_debug(regs);
+
/* Wait till primary CPU is done with debugging */
while (atomic_read(&passive_cpu_wait[cpu]))
cpu_relax();
@@ -584,6 +587,16 @@ static void kgdb_wait(struct pt_regs *regs)
kgdb_info[cpu].debuggerinfo = NULL;
kgdb_info[cpu].task = NULL;
+ /* Trap into kgdb as the active CPU if gdb asks to switch. */
+ thread = getthread(regs, -raw_smp_processor_id() - 2);
+ if ((arch_kgdb_ops.flags & KGDB_THR_PROC_SWAP) &&
+ kgdb_contthread && kgdb_contthread == thread) {
+ kgdb_breakpoint();
+ clocksource_touch_watchdog();
+ local_irq_restore(flags);
+ return;
+ }
+
/* fix up hardware debug registers on local cpu */
if (arch_kgdb_ops.correct_hw_break)
arch_kgdb_ops.correct_hw_break();
@@ -1066,13 +1079,16 @@ static void gdb_cmd_query(struct kgdb_state *ks)
sprintf(tmpstr, "shadowCPU%d",
(int)(-ks->threadid - 2));
kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
+
+ if (arch_kgdb_ops.flags & KGDB_THR_PROC_SWAP)
+ ks->thr_query = 1;
}
break;
}
}
/* Handle the 'H' task query packets */
-static void gdb_cmd_task(struct kgdb_state *ks)
+static int gdb_cmd_task(struct kgdb_state *ks)
{
struct task_struct *thread;
char *ptr;
@@ -1089,6 +1105,15 @@ static void gdb_cmd_task(struct kgdb_state *ks)
kgdb_usethread = thread;
ks->kgdb_usethreadid = ks->threadid;
strcpy(remcom_out_buffer, "OK");
+#ifdef CONFIG_SMP
+ if ((arch_kgdb_ops.flags & KGDB_THR_PROC_SWAP) &&
+ !ks->thr_query && ks->kgdb_usethreadid < -1 &&
+ - ks->kgdb_usethreadid - 2 != raw_smp_processor_id()) {
+ kgdb_roundup_cpu(raw_smp_processor_id(), 0);
+ kgdb_contthread = kgdb_usethread;
+ return 1;
+ }
+#endif
break;
case 'c':
ptr = &remcom_in_buffer[2];
@@ -1106,6 +1131,8 @@ static void gdb_cmd_task(struct kgdb_state *ks)
strcpy(remcom_out_buffer, "OK");
break;
}
+
+ return 0;
}
/* Handle the 'T' thread query packets */
@@ -1114,6 +1141,9 @@ static void gdb_cmd_thread(struct kgdb_state *ks)
char *ptr = &remcom_in_buffer[1];
struct task_struct *thread;
+ if (arch_kgdb_ops.flags & KGDB_THR_PROC_SWAP)
+ ks->thr_query = 0;
+
kgdb_hex2long(&ptr, &ks->threadid);
thread = getthread(ks->linux_regs, ks->threadid);
if (thread)
@@ -1223,7 +1253,12 @@ static int gdb_serial_stub(struct kgdb_state *ks)
/* Clear the out buffer. */
memset(remcom_out_buffer, 0, sizeof(remcom_out_buffer));
- if (kgdb_connected) {
+ if ((arch_kgdb_ops.flags & KGDB_THR_PROC_SWAP) && kgdb_contthread) {
+ remcom_out_buffer[0] = 'O';
+ remcom_out_buffer[1] = 'K';
+ remcom_out_buffer[2] = 0;
+ put_packet(remcom_out_buffer);
+ } else if (kgdb_connected) {
unsigned char thref[8];
char *ptr;
@@ -1238,6 +1273,7 @@ static int gdb_serial_stub(struct kgdb_state *ks)
put_packet(remcom_out_buffer);
}
+ kgdb_contthread = current;
kgdb_usethread = kgdb_info[ks->cpu].task;
ks->kgdb_usethreadid = shadow_pid(kgdb_info[ks->cpu].task->pid);
ks->pass_exception = 0;
@@ -1284,7 +1320,8 @@ static int gdb_serial_stub(struct kgdb_state *ks)
gdb_cmd_query(ks);
break;
case 'H': /* task related */
- gdb_cmd_task(ks);
+ if (gdb_cmd_task(ks))
+ goto kgdb_exit;
break;
case 'T': /* Query thread status */
gdb_cmd_thread(ks);
@@ -1324,6 +1361,7 @@ default_handle:
if (error >= 0 || remcom_in_buffer[0] == 'D' ||
remcom_in_buffer[0] == 'k') {
error = 0;
+ kgdb_contthread = NULL;
goto kgdb_exit;
}
@@ -1464,11 +1502,15 @@ acquirelock:
* Get the passive CPU lock which will hold all the non-primary
* CPU in a spin state while the debugger is active
*/
- if (!kgdb_single_step) {
+ if (!kgdb_single_step && !kgdb_contthread) {
for (i = 0; i < NR_CPUS; i++)
atomic_set(&passive_cpu_wait[i], 1);
}
+ if (kgdb_contthread)
+ atomic_set(&passive_cpu_wait[raw_smp_processor_id()], 1);
+
+
/*
* spin_lock code is good enough as a barrier so we don't
* need one here:
@@ -1477,7 +1519,7 @@ acquirelock:
#ifdef CONFIG_SMP
/* Signal the other CPUs to enter kgdb_wait() */
- if ((!kgdb_single_step) && kgdb_do_roundup)
+ if ((!kgdb_single_step && !kgdb_contthread) && kgdb_do_roundup)
kgdb_roundup_cpus(flags);
#endif
@@ -1496,7 +1538,6 @@ acquirelock:
kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code);
kgdb_deactivate_sw_breakpoints();
kgdb_single_step = 0;
- kgdb_contthread = current;
exception_level = 0;
/* Talk to debugger with gdbserial protocol */
@@ -1510,7 +1551,14 @@ acquirelock:
kgdb_info[ks->cpu].task = NULL;
atomic_set(&cpu_in_kgdb[ks->cpu], 0);
- if (!kgdb_single_step) {
+#ifdef CONFIG_SMP
+ i = -(ks->kgdb_usethreadid + 2);
+ if ((arch_kgdb_ops.flags & KGDB_THR_PROC_SWAP) &&
+ kgdb_contthread && i != cpu) {
+ atomic_set(&passive_cpu_wait[i], 0);
+ }
+#endif
+ if (!kgdb_single_step && !kgdb_contthread) {
for (i = NR_CPUS-1; i >= 0; i--)
atomic_set(&passive_cpu_wait[i], 0);
/*
@@ -1536,9 +1584,9 @@ kgdb_restore:
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
- if (!atomic_read(&cpu_in_kgdb[cpu]) &&
+ if (!atomic_read(&cpu_in_kgdb[cpu]) && (kgdb_contthread ||
atomic_read(&kgdb_active) != cpu &&
- atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
+ atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)]))) {
kgdb_wait((struct pt_regs *)regs);
return 0;
}
--
1.6.0
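For reference, the switch as seen from gdb would look roughly like this
(session abbreviated and purely illustrative; the shadowCPU names come
from the gdb_cmd_query() handling above, and the exact thread numbering
depends on the session):

(gdb) info threads
  ...
  3 Thread -3 (shadowCPU1)  0x... in ?? ()
  2 Thread -2 (shadowCPU0)  kgdb_breakpoint ()
  ...
(gdb) thread 3
[Switching to thread 3 (Thread -3)]

After the switch, core 1 becomes the active kgdb CPU and core 0 parks
in kgdb_wait(), so the L1 SRAM and MMRs local to core 1 can be
inspected.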