author    | Jason Wessel <jason.wessel@windriver.com> | 2008-04-01 16:55:27 -0500
committer | Ingo Molnar <mingo@elte.hu>               | 2008-04-17 20:05:43 +0200
commit    | 56fb70932964927597ce30bbd820471633c72adc (patch)
tree      | 72654913a07cbc83f174234d4fa0f91b79a2164c /kernel/kgdb.c
parent    | 225a4424ade24e913c081d5a4c4bd71a0fe2e0ac (diff)
download  | linux-56fb70932964927597ce30bbd820471633c72adc.tar.bz2
kgdb: fix SMP NMI kgdb_handle_exception exit race
Fix the protection of the kgdb_handle_exception() exit path, which had
an NMI race condition while restoring normal system operation.
There was a small window after the master processor clears its
cpu_in_kgdb entry but before it releases kgdb_active in which a
non-master processor in an SMP system could receive an NMI and
re-enter the kgdb_wait() loop.
As long as the master processor sets its cpu_in_kgdb entry before
sending the CPU roundup, the cpu_in_kgdb array can also be used to
guard against this race condition.
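To make the ordering argument concrete, here is a small user-space C sketch of the check, not the kernel code itself; the names debugger_owner, cpu_in_debugger and nmi_should_enter() are invented stand-ins for kgdb_active, cpu_in_kgdb[] and the test in kgdb_nmicallback() shown in the diff below.

```c
#include <stdatomic.h>
#include <stdbool.h>

#define MAX_CPUS 8

/* Illustrative stand-ins for kgdb_active and cpu_in_kgdb[]; the real
 * objects are atomic_t and live in kernel/kgdb.c. */
static atomic_int debugger_owner = -1;        /* which CPU is master, -1 if none */
static atomic_int cpu_in_debugger[MAX_CPUS];  /* nonzero while a CPU is in kgdb  */

/*
 * What a non-master CPU checks when an NMI arrives: enter only if a
 * master exists *and* that master has already published itself.  Since
 * the master publishes its entry before sending the roundup NMIs, a
 * stray NMI that lands while the master is tearing down is rejected
 * instead of re-entering the wait loop.
 */
static bool nmi_should_enter(int cpu)
{
	int master = atomic_load(&debugger_owner);

	if (atomic_load(&cpu_in_debugger[cpu]))
		return false;			/* this CPU is already inside    */
	if (master == -1 || master == cpu)
		return false;			/* no master, or we are the master */
	return atomic_load(&cpu_in_debugger[master]) != 0;
}
```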
The kgdb_wait() function no longer needs to check kgdb_active, because
that check is done in the arch-specific code and handled along with the
NMI traps at the low level. This also allows kgdb_wait() to exit
correctly if it was entered for some unknown reason via a spurious NMI
that could not be handled by the arch-specific code.
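Continuing the same illustrative sketch (same file; passive_wait and passive_cpu_loop() are again invented stand-ins for passive_cpu_wait[] and the simplified kgdb_wait() loop), the passive side no longer polls the master variable at all: it publishes itself and then simply spins until the master releases it.

```c
/* Continuation of the sketch above: the passive side of the handshake. */
static atomic_int passive_wait[MAX_CPUS];	/* nonzero: keep spinning */

static void passive_cpu_loop(int cpu)
{
	atomic_store(&cpu_in_debugger[cpu], 1);	/* publish: this CPU is in */

	while (atomic_load(&passive_wait[cpu]))
		;				/* cpu_relax() in the kernel */

	atomic_store(&cpu_in_debugger[cpu], 0);	/* and it is out again */
}
```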
Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/kgdb.c')
-rw-r--r-- | kernel/kgdb.c | 27
1 file changed, 8 insertions(+), 19 deletions(-)
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 85b7e5b934a7..4d1b3c232377 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -561,18 +561,6 @@ static void kgdb_wait(struct pt_regs *regs)
 	smp_wmb();
 	atomic_set(&cpu_in_kgdb[cpu], 1);
 
-	/*
-	 * The primary CPU must be active to enter here, but this is
-	 * guard in case the primary CPU had not been selected if
-	 * this was an entry via nmi.
-	 */
-	while (atomic_read(&kgdb_active) == -1)
-		cpu_relax();
-
-	/* Wait till primary CPU goes completely into the debugger. */
-	while (!atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)]))
-		cpu_relax();
-
 	/* Wait till primary CPU is done with debugging */
 	while (atomic_read(&passive_cpu_wait[cpu]))
 		cpu_relax();
@@ -1447,18 +1435,18 @@ acquirelock:
 			atomic_set(&passive_cpu_wait[i], 1);
 	}
 
-#ifdef CONFIG_SMP
-	/* Signal the other CPUs to enter kgdb_wait() */
-	if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup)
-		kgdb_roundup_cpus(flags);
-#endif
-
 	/*
 	 * spin_lock code is good enough as a barrier so we don't
 	 * need one here:
 	 */
 	atomic_set(&cpu_in_kgdb[ks->cpu], 1);
 
+#ifdef CONFIG_SMP
+	/* Signal the other CPUs to enter kgdb_wait() */
+	if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup)
+		kgdb_roundup_cpus(flags);
+#endif
+
 	/*
 	 * Wait for the other CPUs to be notified and be waiting for us:
 	 */
@@ -1514,7 +1502,8 @@ int kgdb_nmicallback(int cpu, void *regs)
 {
 #ifdef CONFIG_SMP
 	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-			atomic_read(&kgdb_active) != cpu) {
+			atomic_read(&kgdb_active) != cpu &&
+			atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
 		kgdb_wait((struct pt_regs *)regs);
 		return 0;
 	}
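The second hunk above moves the roundup to after the master has published its own cpu_in_kgdb entry. As a usage example for the sketch above (single-threaded, so it only walks the state machine and says nothing about memory ordering; master_enter() and the earlier invented definitions are assumed to be in the same file), the driver below mirrors that reordering and shows that a stray NMI arriving in the old teardown window is now rejected.

```c
#include <assert.h>

/* Master-side ordering from the second hunk: publish the master's
 * cpu_in_debugger[] slot *before* the roundup NMIs would be sent
 * (kgdb_roundup_cpus() in the kernel). */
static void master_enter(int cpu, int ncpus)
{
	atomic_store(&debugger_owner, cpu);

	for (int i = 0; i < ncpus; i++)
		if (i != cpu)
			atomic_store(&passive_wait[i], 1);

	atomic_store(&cpu_in_debugger[cpu], 1);	/* publish first ...          */
	/* ... then the roundup NMIs would go out here.                        */
}

int main(void)
{
	int master = 0, ncpus = 4;

	master_enter(master, ncpus);
	assert(nmi_should_enter(1));		/* roundup NMIs are honoured    */

	/* Exit path, mirroring kgdb_handle_exception(): the master clears
	 * its cpu_in_debugger[] slot first, debugger_owner only later. */
	atomic_store(&cpu_in_debugger[master], 0);
	assert(!nmi_should_enter(1));		/* old race window now rejected */

	atomic_store(&debugger_owner, -1);	/* teardown complete            */
	assert(!nmi_should_enter(1));
	return 0;
}
```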