kgdb patches for 4.21-rc1

Mostly clean ups, although whilst Doug was chasing down an odd
 lockdep warning he also did some work to improve debugger resilience
 when some CPUs fail to respond to the round up request.
 
 The main changes are:
 
  * Fixing a lockdep warning on architectures that cannot use an NMI for
    the round up plus related changes to make CPU round up and all CPU
    backtrace more resilient.
 
  * Constify the arch ops tables
 
  * A couple of other small clean ups
 
 Two of the three patchsets here include changes that spill over into
 arch/.  Changes in the arch space are relatively narrow in scope
 (and directly related to kgdb). Didn't get comprehensive acks but
 all impacted maintainers were Cc:ed in good time.
 
 Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQJPBAABCAA5FiEELzVBU1D3lWq6cKzwfOMlXTn3iKEFAlwonoUbHGRhbmllbC50
 aG9tcHNvbkBsaW5hcm8ub3JnAAoJEHzjJV0594ihmooP/1uzSMGQIoQMB8XeU/jT
 Da2iILybi6hGp7ILA27d0yN3tsJBxWGWs8wzNdzMo3NQ3J0o4foAUnS/R0Vjkg9w
 uphe5EA4HDsIrH05OouNb984BeEgNaC9HSqtyr9fXuh024NboULFKIm7REYm+QHT
 C5SrBtmonL1xE7FmAhudLWjl7ZlvxM6DJeoVViH4kKq0raTiILt6VJaGl9JfcAdL
 m9GEf9r/nh0sCq3GNgyc0y4BvHed+Kxzy1fsIi3jE6t8elaYYR72gNRQ5LaFxcnQ
 F04/UtH75qB4rqYsqqV1q0rFi+tj+p9wYTmxixaGWsVDX4Gb5KXuLWJhaRb5IvwC
 bdq/0IAXRr4vUL3y0tFWfCj7pHGaVc/gfXi8aieRXLGAZG+tdfuu99NCiulIZTfc
 QqZz12Z+99/qi6dK7dBQtaN8SyPeB1QXKWefeGo2Bt5QqiBmcKHxsQYMUo3nkf3J
 UXHpj4LG6Ldsi/w8VZfvXmM0/vbO/jrus9m+X2v+4tJyisjrsyv0FRnREI4avfbC
 l09P1ajv7RrAaxtab0smV9krqWZ/mSn0zcgcaD6RdKe0+SwsiP/CEx1z1Wb1MH9c
 wjEiClXjdVB39YVT0YVfG2Ho7qH8WRErxVyNb/f4QKHMXL1Mu91hFWhBBpUOGUj2
 7Jrq2zK1uWramtt7GBDpHYYH
 =Aqlc
 -----END PGP SIGNATURE-----

Merge tag 'kgdb-4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/danielt/linux

Pull kgdb updates from Daniel Thompson:
 "Mostly clean ups although while Doug's was chasing down a odd lockdep
  warning he also did some work to improved debugger resilience when
  some CPUs fail to respond to the round up request.

  The main changes are:

   - Fixing a lockdep warning on architectures that cannot use an NMI
     for the round up plus related changes to make CPU round up and all
     CPU backtrace more resilient.

   - Constify the arch ops tables

   - A couple of other small clean ups

  Two of the three patchsets here include changes that spill over into
  arch/. Changes in the arch space are relatively narrow in scope (and
  directly related to kgdb). Didn't get comprehensive acks but all
  impacted maintainers were Cc:ed in good time"

* tag 'kgdb-4.21-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/danielt/linux:
  kgdb/treewide: constify struct kgdb_arch arch_kgdb_ops
  mips/kgdb: prepare arch_kgdb_ops for constness
  kdb: use bool for binary state indicators
  kdb: Don't back trace on a cpu that didn't round up
  kgdb: Don't round up a CPU that failed rounding up before
  kgdb: Fix kgdb_roundup_cpus() for arches who used smp_call_function()
  kgdb: Remove irq flags from roundup
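
The shape of the round-up rework is easiest to see up front. Before this series roughly a dozen arches carried a copy of the pattern below, which must turn interrupts back on inside the debugger trap just so smp_call_function() can be used; that is what tripped lockdep on arches that cannot use an NMI. The generic replacement pre-allocates a per-CPU call_single_data_t and fires it with smp_call_function_single_async(), which is safe with interrupts off. A condensed sketch of old versus new, taken from the diffs below:

	/* Old per-arch pattern (removed below from arc, arm, arm64,
	 * hexagon, mips, sh, ...): IRQs must be re-enabled inside the
	 * debugger trap because smp_call_function() may not be called
	 * with interrupts disabled. */
	void kgdb_roundup_cpus(unsigned long flags)
	{
		local_irq_enable();
		smp_call_function(kgdb_call_nmi_hook, NULL, 0);
		local_irq_disable();
	}

	/* New weak default (kernel/debug/debug_core.c below), condensed:
	 * no IRQ juggling, and a CPU that never acked the previous round
	 * up is skipped rather than re-queued on a still-busy csd. */
	void __weak kgdb_roundup_cpus(void)
	{
		int cpu, this_cpu = raw_smp_processor_id();

		for_each_online_cpu(cpu) {
			call_single_data_t *csd = &per_cpu(kgdb_roundup_csd, cpu);

			if (cpu == this_cpu || kgdb_info[cpu].rounding_up)
				continue;
			kgdb_info[cpu].rounding_up = true;
			csd->func = kgdb_call_nmi_hook;
			if (smp_call_function_single_async(cpu, csd))
				kgdb_info[cpu].rounding_up = false;
		}
	}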
commit fcf010449e
Linus Torvalds, 2019-01-01 15:38:14 -08:00
20 changed files with 125 additions and 141 deletions

arch/arc/kernel/kgdb.c

@@ -192,19 +192,13 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 	instruction_pointer(regs) = ip;
 }
 
-static void kgdb_call_nmi_hook(void *ignored)
+void kgdb_call_nmi_hook(void *ignored)
 {
+	/* Default implementation passes get_irq_regs() but we don't */
 	kgdb_nmicallback(raw_smp_processor_id(), NULL);
 }
 
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
-
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* breakpoint instruction: TRAP_S 0x3 */
 #ifdef CONFIG_CPU_BIG_ENDIAN
 	.gdb_bpt_instr = {0x78, 0x7e},

arch/arm/kernel/kgdb.c

@@ -170,18 +170,6 @@ static struct undef_hook kgdb_compiled_brkpt_hook = {
 	.fn		= kgdb_compiled_brk_fn
 };
 
-static void kgdb_call_nmi_hook(void *ignored)
-{
-	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
-
 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
 	struct pt_regs *regs = args->regs;
@@ -274,7 +262,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
  * and we handle the normal undef case within the do_undefinstr
  * handler.
  */
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 #ifndef __ARMEB__
 	.gdb_bpt_instr		= {0xfe, 0xde, 0xff, 0xe7}
 #else /* ! __ARMEB__ */

arch/arm64/kernel/kgdb.c

@@ -284,18 +284,6 @@ static struct step_hook kgdb_step_hook = {
 	.fn		= kgdb_step_brk_fn
 };
 
-static void kgdb_call_nmi_hook(void *ignored)
-{
-	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
-
 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
 	struct pt_regs *regs = args->regs;
@@ -357,7 +345,7 @@ void kgdb_arch_exit(void)
 	unregister_die_notifier(&kgdb_notifier);
 }
 
-struct kgdb_arch arch_kgdb_ops;
+const struct kgdb_arch arch_kgdb_ops;
 
 int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 {

arch/h8300/kernel/kgdb.c

@@ -129,7 +129,7 @@ void kgdb_arch_exit(void)
 	/* Nothing to do */
 }
 
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: trapa #2 */
 	.gdb_bpt_instr = { 0x57, 0x20 },
 };

arch/hexagon/kernel/kgdb.c

@@ -83,7 +83,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
 	{ "syscall_nr", GDB_SIZEOF_REG, offsetof(struct pt_regs, syscall_nr)},
 };
 
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* trap0(#0xDB) 0x0cdb0054 */
 	.gdb_bpt_instr = {0x54, 0x00, 0xdb, 0x0c},
 };
@@ -115,38 +115,6 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
 	instruction_pointer(regs) = pc;
 }
 
-#ifdef CONFIG_SMP
-
-/**
- * kgdb_roundup_cpus - Get other CPUs into a holding pattern
- * @flags: Current IRQ state
- *
- * On SMP systems, we need to get the attention of the other CPUs
- * and get them be in a known state. This should do what is needed
- * to get the other CPUs to call kgdb_wait(). Note that on some arches,
- * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs. In
- * this case, we have to make sure that interrupts are enabled before
- * calling smp_call_function(). The argument to this function is
- * the flags that will be used when restoring the interrupts. There is
- * local_irq_save() call before kgdb_roundup_cpus().
- *
- * On non-SMP systems, this is not called.
- */
-
-static void hexagon_kgdb_nmi_hook(void *ignored)
-{
-	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(hexagon_kgdb_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
-#endif
-
 /* Not yet working */
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,

arch/microblaze/kernel/kgdb.c

@@ -143,7 +143,7 @@ void kgdb_arch_exit(void)
 /*
  * Global data
  */
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 #ifdef __MICROBLAZEEL__
 	.gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */
 #else

arch/mips/kernel/kgdb.c

@@ -207,7 +207,7 @@ void arch_kgdb_breakpoint(void)
 		".set\treorder");
 }
 
-static void kgdb_call_nmi_hook(void *ignored)
+void kgdb_call_nmi_hook(void *ignored)
 {
 	mm_segment_t old_fs;
 
@@ -219,13 +219,6 @@ static void kgdb_call_nmi_hook(void *ignored)
 	set_fs(old_fs);
 }
 
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
-
 static int compute_signal(int tt)
 {
 	struct hard_trap_info *ht;
@@ -394,18 +387,16 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 	return -1;
 }
 
-struct kgdb_arch arch_kgdb_ops;
+const struct kgdb_arch arch_kgdb_ops = {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	.gdb_bpt_instr = { spec_op << 2, 0x00, 0x00, break_op },
+#else
+	.gdb_bpt_instr = { break_op, 0x00, 0x00, spec_op << 2 },
+#endif
+};
 
 int kgdb_arch_init(void)
 {
-	union mips_instruction insn = {
-		.r_format = {
-			.opcode = spec_op,
-			.func	= break_op,
-		}
-	};
-	memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);
-
 	register_die_notifier(&kgdb_notifier);
 
 	return 0;
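
The tail of that hunk is the shortlog's "mips/kgdb: prepare arch_kgdb_ops for constness": a const-qualified ops table may be placed in a read-only section, so kgdb_arch_init() can no longer memcpy() the breakpoint encoding in at runtime, and the endian-dependent bytes move into a designated initializer. A minimal stand-alone illustration of the constraint (not kernel code; placeholder bytes):

	#include <string.h>

	struct ops { unsigned char gdb_bpt_instr[4]; };

	/* With a const object the bytes must be known at compile time... */
	static const struct ops kgdb_ops = {
		.gdb_bpt_instr = { 0x0d, 0x00, 0x00, 0x0d },	/* placeholder */
	};

	void init(void)
	{
		unsigned char insn[4] = { 0x0d, 0x00, 0x00, 0x0d };

		/* ...because the old runtime patching no longer compiles
		 * cleanly -- the compiler complains that memcpy()'s first
		 * argument discards the const qualifier, and the object may
		 * live in .rodata anyway:
		 *
		 *	memcpy(kgdb_ops.gdb_bpt_instr, insn, sizeof(insn));
		 */
		(void)insn;
	}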

arch/nios2/kernel/kgdb.c

@@ -165,7 +165,7 @@ void kgdb_arch_exit(void)
 	/* Nothing to do */
 }
 
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: trap 30 */
 	.gdb_bpt_instr = { 0xba, 0x6f, 0x3b, 0x00 },
 };

arch/powerpc/kernel/kgdb.c

@@ -117,14 +117,14 @@ int kgdb_skipexception(int exception, struct pt_regs *regs)
 	return kgdb_isremovedbreak(regs->nip);
 }
 
-static int kgdb_call_nmi_hook(struct pt_regs *regs)
+static int kgdb_debugger_ipi(struct pt_regs *regs)
 {
 	kgdb_nmicallback(raw_smp_processor_id(), regs);
 	return 0;
 }
 
 #ifdef CONFIG_SMP
-void kgdb_roundup_cpus(unsigned long flags)
+void kgdb_roundup_cpus(void)
 {
 	smp_send_debugger_break();
 }
@@ -477,7 +477,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 /*
  * Global data
  */
-struct kgdb_arch arch_kgdb_ops;
+const struct kgdb_arch arch_kgdb_ops;
 
 static int kgdb_not_implemented(struct pt_regs *regs)
 {
@@ -502,7 +502,7 @@ int kgdb_arch_init(void)
 	old__debugger_break_match = __debugger_break_match;
 	old__debugger_fault_handler = __debugger_fault_handler;
 
-	__debugger_ipi = kgdb_call_nmi_hook;
+	__debugger_ipi = kgdb_debugger_ipi;
 	__debugger = kgdb_debugger;
 	__debugger_bpt = kgdb_handle_breakpoint;
 	__debugger_sstep = kgdb_singlestep;

arch/sh/kernel/kgdb.c

@@ -311,18 +311,6 @@ BUILD_TRAP_HANDLER(singlestep)
 	local_irq_restore(flags);
 }
 
-static void kgdb_call_nmi_hook(void *ignored)
-{
-	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
-}
-
-void kgdb_roundup_cpus(unsigned long flags)
-{
-	local_irq_enable();
-	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
-	local_irq_disable();
-}
-
 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
 	int ret;
@@ -379,7 +367,7 @@ void kgdb_arch_exit(void)
 	unregister_die_notifier(&kgdb_notifier);
 }
 
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: trapa #0x3c */
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 	.gdb_bpt_instr = { 0x3c, 0xc3 },

arch/sparc/kernel/kgdb_32.c

@@ -166,7 +166,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 	regs->npc = regs->pc + 4;
 }
 
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: ta 0x7d */
 	.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
 };

arch/sparc/kernel/kgdb_64.c

@@ -195,7 +195,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 	regs->tnpc = regs->tpc + 4;
 }
 
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: ta 0x72 */
 	.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
 };

arch/sparc/kernel/smp_64.c

@@ -1014,7 +1014,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 }
 
 #ifdef CONFIG_KGDB
-void kgdb_roundup_cpus(unsigned long flags)
+void kgdb_roundup_cpus(void)
 {
 	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
 }

arch/x86/kernel/kgdb.c

@@ -422,21 +422,16 @@ static void kgdb_disable_hw_debug(struct pt_regs *regs)
 #ifdef CONFIG_SMP
 /**
  * kgdb_roundup_cpus - Get other CPUs into a holding pattern
- * @flags: Current IRQ state
  *
  * On SMP systems, we need to get the attention of the other CPUs
  * and get them be in a known state. This should do what is needed
  * to get the other CPUs to call kgdb_wait(). Note that on some arches,
  * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs. In
- * this case, we have to make sure that interrupts are enabled before
- * calling smp_call_function(). The argument to this function is
- * the flags that will be used when restoring the interrupts. There is
- * local_irq_save() call before kgdb_roundup_cpus().
+ * in case of MIPS, smp_call_function() is used to roundup CPUs.
  *
  * On non-SMP systems, this is not called.
  */
-void kgdb_roundup_cpus(unsigned long flags)
+void kgdb_roundup_cpus(void)
 {
 	apic->send_IPI_allbutself(APIC_DM_NMI);
 }
@@ -804,7 +799,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 			       (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
 
-struct kgdb_arch arch_kgdb_ops = {
+const struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: */
 	.gdb_bpt_instr		= { 0xcc },
 	.flags			= KGDB_HW_BREAKPOINT,

include/linux/kgdb.h

@@ -176,23 +176,29 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
 			   char *remcom_out_buffer,
 			   struct pt_regs *regs);
 
+/**
+ * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU
+ * @ignored: This parameter is only here to match the prototype.
+ *
+ * If you're using the default implementation of kgdb_roundup_cpus()
+ * this function will be called per CPU.  If you don't implement
+ * kgdb_call_nmi_hook() a default will be used.
+ */
+extern void kgdb_call_nmi_hook(void *ignored);
+
 /**
  * kgdb_roundup_cpus - Get other CPUs into a holding pattern
- * @flags: Current IRQ state
  *
  * On SMP systems, we need to get the attention of the other CPUs
  * and get them into a known state. This should do what is needed
  * to get the other CPUs to call kgdb_wait(). Note that on some arches,
- * the NMI approach is not used for rounding up all the CPUs. For example,
- * in case of MIPS, smp_call_function() is used to roundup CPUs. In
- * this case, we have to make sure that interrupts are enabled before
- * calling smp_call_function(). The argument to this function is
- * the flags that will be used when restoring the interrupts. There is
- * local_irq_save() call before kgdb_roundup_cpus().
+ * the NMI approach is not used for rounding up all the CPUs. Normally
+ * those architectures can just not implement this and get the default.
 *
 * On non-SMP systems, this is not called.
 */
-extern void kgdb_roundup_cpus(unsigned long flags);
+extern void kgdb_roundup_cpus(void);
@@ -281,7 +287,7 @@ struct kgdb_io {
 	int			is_console;
 };
 
-extern struct kgdb_arch		arch_kgdb_ops;
+extern const struct kgdb_arch	arch_kgdb_ops;
 
 extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
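
With the declarations above an architecture now has two choices: implement nothing and inherit the weak kgdb_call_nmi_hook()/kgdb_roundup_cpus() pair from debug_core.c, or keep a stronger mechanism of its own. A sketch of the second option, modeled on the powerpc hunk earlier in this commit (smp_send_debugger_break() is powerpc's helper; other arches would use their own equivalent):

	/* Arch override: a real debugger NMI/IPI reaches CPUs that are
	 * spinning with interrupts off, which the csd-based default
	 * cannot do. */
	void kgdb_roundup_cpus(void)
	{
		smp_send_debugger_break();
	}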

kernel/debug/debug_core.c

@@ -55,6 +55,7 @@
 #include <linux/mm.h>
 #include <linux/vmacache.h>
 #include <linux/rcupdate.h>
+#include <linux/irq.h>
 
 #include <asm/cacheflush.h>
 #include <asm/byteorder.h>
@@ -220,6 +221,62 @@ int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
 	return 0;
 }
 
+#ifdef CONFIG_SMP
+
+/*
+ * Default (weak) implementation for kgdb_roundup_cpus
+ */
+
+static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);
+
+void __weak kgdb_call_nmi_hook(void *ignored)
+{
+	/*
+	 * NOTE: get_irq_regs() is supposed to get the registers from
+	 * before the IPI interrupt happened and so is supposed to
+	 * show where the processor was.  In some situations it's
+	 * possible we might be called without an IPI, so it might be
+	 * safer to figure out how to make kgdb_breakpoint() work
+	 * properly here.
+	 */
+	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void __weak kgdb_roundup_cpus(void)
+{
+	call_single_data_t *csd;
+	int this_cpu = raw_smp_processor_id();
+	int cpu;
+	int ret;
+
+	for_each_online_cpu(cpu) {
+		/* No need to roundup ourselves */
+		if (cpu == this_cpu)
+			continue;
+
+		csd = &per_cpu(kgdb_roundup_csd, cpu);
+
+		/*
+		 * If it didn't round up last time, don't try again
+		 * since smp_call_function_single_async() will block.
+		 *
+		 * If rounding_up is false then we know that the
+		 * previous call must have at least started and that
+		 * means smp_call_function_single_async() won't block.
+		 */
+		if (kgdb_info[cpu].rounding_up)
+			continue;
+		kgdb_info[cpu].rounding_up = true;
+
+		csd->func = kgdb_call_nmi_hook;
+		ret = smp_call_function_single_async(cpu, csd);
+		if (ret)
+			kgdb_info[cpu].rounding_up = false;
+	}
+}
+
+#endif
+
 /*
  * Some architectures need cache flushes when we set/clear a
  * breakpoint:
@@ -535,6 +592,8 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 			arch_kgdb_ops.correct_hw_break();
 		if (trace_on)
 			tracing_on();
+		kgdb_info[cpu].debuggerinfo = NULL;
+		kgdb_info[cpu].task = NULL;
 		kgdb_info[cpu].exception_state &=
 			~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
 		kgdb_info[cpu].enter_kgdb--;
@@ -593,7 +652,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 	/* Signal the other CPUs to enter kgdb_wait() */
 	else if ((!kgdb_single_step) && kgdb_do_roundup)
-		kgdb_roundup_cpus(flags);
+		kgdb_roundup_cpus();
 #endif
 
 	/*
@@ -667,6 +726,8 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 		if (trace_on)
 			tracing_on();
+		kgdb_info[cpu].debuggerinfo = NULL;
+		kgdb_info[cpu].task = NULL;
 		kgdb_info[cpu].exception_state &=
 			~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
 		kgdb_info[cpu].enter_kgdb--;
@@ -747,6 +808,8 @@ int kgdb_nmicallback(int cpu, void *regs)
 	struct kgdb_state kgdb_var;
 	struct kgdb_state *ks = &kgdb_var;
 
+	kgdb_info[cpu].rounding_up = false;
+
 	memset(ks, 0, sizeof(struct kgdb_state));
 	ks->cpu = cpu;
 	ks->linux_regs = regs;

kernel/debug/debug_core.h

@@ -42,6 +42,7 @@ struct debuggerinfo_struct {
 	int			ret_state;
 	int			irq_depth;
 	int			enter_kgdb;
+	bool			rounding_up;
 };
 
 extern struct debuggerinfo_struct kgdb_info[];

kernel/debug/kdb/kdb_bt.c

@@ -186,7 +186,16 @@ kdb_bt(int argc, const char **argv)
 			kdb_printf("btc: cpu status: ");
 			kdb_parse("cpu\n");
 			for_each_online_cpu(cpu) {
-				sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
+				void *kdb_tsk = KDB_TSK(cpu);
+
+				/* If a CPU failed to round up we could be here */
+				if (!kdb_tsk) {
+					kdb_printf("WARNING: no task for cpu %ld\n",
+						   cpu);
+					continue;
+				}
+
+				sprintf(buf, "btt 0x%px\n", kdb_tsk);
 				kdb_parse(buf);
 				touch_nmi_watchdog();
 			}

kernel/debug/kdb/kdb_debugger.c

@@ -118,13 +118,6 @@ int kdb_stub(struct kgdb_state *ks)
 	kdb_bp_remove();
 	KDB_STATE_CLEAR(DOING_SS);
 	KDB_STATE_SET(PAGER);
-	/* zero out any offline cpu data */
-	for_each_present_cpu(i) {
-		if (!cpu_online(i)) {
-			kgdb_info[i].debuggerinfo = NULL;
-			kgdb_info[i].task = NULL;
-		}
-	}
 	if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
 		ks->pass_exception = 1;
 		KDB_FLAG_SET(CATASTROPHIC);

kernel/debug/kdb/kdb_main.c

@@ -658,7 +658,7 @@ static void kdb_cmderror(int diag)
  */
 struct defcmd_set {
 	int count;
-	int usable;
+	bool usable;
 	char *name;
 	char *usage;
 	char *help;
@@ -666,7 +666,7 @@ struct defcmd_set {
 };
 static struct defcmd_set *defcmd_set;
 static int defcmd_set_count;
-static int defcmd_in_progress;
+static bool defcmd_in_progress;
 
 /* Forward references */
 static int kdb_exec_defcmd(int argc, const char **argv);
@@ -676,9 +676,9 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
 	struct defcmd_set *s = defcmd_set + defcmd_set_count - 1;
 	char **save_command = s->command;
 
 	if (strcmp(argv0, "endefcmd") == 0) {
-		defcmd_in_progress = 0;
+		defcmd_in_progress = false;
 		if (!s->count)
-			s->usable = 0;
+			s->usable = false;
 		if (s->usable)
 			/* macros are always safe because when executed each
 			 * internal command re-enters kdb_parse() and is
@@ -695,7 +695,7 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
 	if (!s->command) {
 		kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
 			   cmdstr);
-		s->usable = 0;
+		s->usable = false;
 		return KDB_NOTIMP;
 	}
 	memcpy(s->command, save_command, s->count * sizeof(*(s->command)));
@@ -737,7 +737,7 @@ static int kdb_defcmd(int argc, const char **argv)
 	       defcmd_set_count * sizeof(*defcmd_set));
 	s = defcmd_set + defcmd_set_count;
 	memset(s, 0, sizeof(*s));
-	s->usable = 1;
+	s->usable = true;
 	s->name = kdb_strdup(argv[1], GFP_KDB);
 	if (!s->name)
 		goto fail_name;
@@ -756,7 +756,7 @@ static int kdb_defcmd(int argc, const char **argv)
 		s->help[strlen(s->help)-1] = '\0';
 	}
 	++defcmd_set_count;
-	defcmd_in_progress = 1;
+	defcmd_in_progress = true;
 	kfree(save_defcmd_set);
 	return 0;
 fail_help: