Skip to content

Commit 9b38cc7

Browse files
Jiri Olsa authored and rostedt committed
kretprobe: Prevent triggering kretprobe from within kprobe_flush_task
Ziqian reported lockup when adding retprobe on _raw_spin_lock_irqsave. My test was also able to trigger lockdep output: ============================================ WARNING: possible recursive locking detected 5.6.0-rc6+ #6 Not tainted -------------------------------------------- sched-messaging/2767 is trying to acquire lock: ffffffff9a492798 (&(kretprobe_table_locks[i].lock)){-.-.}, at: kretprobe_hash_lock+0x52/0xa0 but task is already holding lock: ffffffff9a491a18 (&(kretprobe_table_locks[i].lock)){-.-.}, at: kretprobe_trampoline+0x0/0x50 other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(&(kretprobe_table_locks[i].lock)); lock(&(kretprobe_table_locks[i].lock)); *** DEADLOCK *** May be due to missing lock nesting notation 1 lock held by sched-messaging/2767: #0: ffffffff9a491a18 (&(kretprobe_table_locks[i].lock)){-.-.}, at: kretprobe_trampoline+0x0/0x50 stack backtrace: CPU: 3 PID: 2767 Comm: sched-messaging Not tainted 5.6.0-rc6+ #6 Call Trace: dump_stack+0x96/0xe0 __lock_acquire.cold.57+0x173/0x2b7 ? native_queued_spin_lock_slowpath+0x42b/0x9e0 ? lockdep_hardirqs_on+0x590/0x590 ? __lock_acquire+0xf63/0x4030 lock_acquire+0x15a/0x3d0 ? kretprobe_hash_lock+0x52/0xa0 _raw_spin_lock_irqsave+0x36/0x70 ? kretprobe_hash_lock+0x52/0xa0 kretprobe_hash_lock+0x52/0xa0 trampoline_handler+0xf8/0x940 ? kprobe_fault_handler+0x380/0x380 ? find_held_lock+0x3a/0x1c0 kretprobe_trampoline+0x25/0x50 ? lock_acquired+0x392/0xbc0 ? _raw_spin_lock_irqsave+0x50/0x70 ? __get_valid_kprobe+0x1f0/0x1f0 ? _raw_spin_unlock_irqrestore+0x3b/0x40 ? finish_task_switch+0x4b9/0x6d0 ? __switch_to_asm+0x34/0x70 ? __switch_to_asm+0x40/0x70 The code within the kretprobe handler checks for probe reentrancy, so we won't trigger any _raw_spin_lock_irqsave probe in there. 
The problem is in outside kprobe_flush_task, where we call: kprobe_flush_task kretprobe_table_lock raw_spin_lock_irqsave _raw_spin_lock_irqsave where _raw_spin_lock_irqsave triggers the kretprobe and installs kretprobe_trampoline handler on _raw_spin_lock_irqsave return. The kretprobe_trampoline handler is then executed with already locked kretprobe_table_locks, and first thing it does is to lock kretprobe_table_locks ;-) the whole lockup path like: kprobe_flush_task kretprobe_table_lock raw_spin_lock_irqsave _raw_spin_lock_irqsave ---> probe triggered, kretprobe_trampoline installed ---> kretprobe_table_locks locked kretprobe_trampoline trampoline_handler kretprobe_hash_lock(current, &head, &flags); <--- deadlock Adding kprobe_busy_begin/end helpers that mark code with fake probe installed to prevent triggering of another kprobe within this code. Using these helpers in kprobe_flush_task, so the probe recursion protection check is hit and the probe is never set to prevent above lockup. Link: http://lkml.kernel.org/r/158927059835.27680.7011202830041561604.stgit@devnote2 Fixes: ef53d9c ("kprobes: improve kretprobe scalability with hashed locking") Cc: Ingo Molnar <mingo@kernel.org> Cc: "Gustavo A . R . Silva" <gustavoars@kernel.org> Cc: Anders Roxell <anders.roxell@linaro.org> Cc: "Naveen N . Rao" <naveen.n.rao@linux.ibm.com> Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> Cc: David Miller <davem@davemloft.net> Cc: Ingo Molnar <mingo@elte.hu> Cc: Peter Zijlstra <peterz@infradead.org> Cc: stable@vger.kernel.org Reported-by: "Ziqian SUN (Zamir)" <zsun@redhat.com> Acked-by: Masami Hiramatsu <mhiramat@kernel.org> Signed-off-by: Jiri Olsa <jolsa@kernel.org> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
1 parent 75ddf64 commit 9b38cc7

3 files changed

Lines changed: 31 additions & 13 deletions

File tree

arch/x86/kernel/kprobes/core.c

Lines changed: 3 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -753,16 +753,11 @@ asm(
753753
NOKPROBE_SYMBOL(kretprobe_trampoline);
754754
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
755755

756-
static struct kprobe kretprobe_kprobe = {
757-
.addr = (void *)kretprobe_trampoline,
758-
};
759-
760756
/*
761757
* Called from kretprobe_trampoline
762758
*/
763759
__used __visible void *trampoline_handler(struct pt_regs *regs)
764760
{
765-
struct kprobe_ctlblk *kcb;
766761
struct kretprobe_instance *ri = NULL;
767762
struct hlist_head *head, empty_rp;
768763
struct hlist_node *tmp;
@@ -772,16 +767,12 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
772767
void *frame_pointer;
773768
bool skipped = false;
774769

775-
preempt_disable();
776-
777770
/*
778771
* Set a dummy kprobe for avoiding kretprobe recursion.
779772
* Since kretprobe never run in kprobe handler, kprobe must not
780773
* be running at this point.
781774
*/
782-
kcb = get_kprobe_ctlblk();
783-
__this_cpu_write(current_kprobe, &kretprobe_kprobe);
784-
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
775+
kprobe_busy_begin();
785776

786777
INIT_HLIST_HEAD(&empty_rp);
787778
kretprobe_hash_lock(current, &head, &flags);
@@ -857,7 +848,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
857848
__this_cpu_write(current_kprobe, &ri->rp->kp);
858849
ri->ret_addr = correct_ret_addr;
859850
ri->rp->handler(ri, regs);
860-
__this_cpu_write(current_kprobe, &kretprobe_kprobe);
851+
__this_cpu_write(current_kprobe, &kprobe_busy);
861852
}
862853

863854
recycle_rp_inst(ri, &empty_rp);
@@ -873,8 +864,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
873864

874865
kretprobe_hash_unlock(current, &flags);
875866

876-
__this_cpu_write(current_kprobe, NULL);
877-
preempt_enable();
867+
kprobe_busy_end();
878868

879869
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
880870
hlist_del(&ri->hlist);

include/linux/kprobes.h

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -350,6 +350,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
350350
return this_cpu_ptr(&kprobe_ctlblk);
351351
}
352352

353+
extern struct kprobe kprobe_busy;
354+
void kprobe_busy_begin(void);
355+
void kprobe_busy_end(void);
356+
353357
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
354358
int register_kprobe(struct kprobe *p);
355359
void unregister_kprobe(struct kprobe *p);

kernel/kprobes.c

Lines changed: 24 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1241,6 +1241,26 @@ __releases(hlist_lock)
12411241
}
12421242
NOKPROBE_SYMBOL(kretprobe_table_unlock);
12431243

1244+
struct kprobe kprobe_busy = {
1245+
.addr = (void *) get_kprobe,
1246+
};
1247+
1248+
void kprobe_busy_begin(void)
1249+
{
1250+
struct kprobe_ctlblk *kcb;
1251+
1252+
preempt_disable();
1253+
__this_cpu_write(current_kprobe, &kprobe_busy);
1254+
kcb = get_kprobe_ctlblk();
1255+
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1256+
}
1257+
1258+
void kprobe_busy_end(void)
1259+
{
1260+
__this_cpu_write(current_kprobe, NULL);
1261+
preempt_enable();
1262+
}
1263+
12441264
/*
12451265
* This function is called from finish_task_switch when task tk becomes dead,
12461266
* so that we can recycle any function-return probe instances associated
@@ -1258,6 +1278,8 @@ void kprobe_flush_task(struct task_struct *tk)
12581278
/* Early boot. kretprobe_table_locks not yet initialized. */
12591279
return;
12601280

1281+
kprobe_busy_begin();
1282+
12611283
INIT_HLIST_HEAD(&empty_rp);
12621284
hash = hash_ptr(tk, KPROBE_HASH_BITS);
12631285
head = &kretprobe_inst_table[hash];
@@ -1271,6 +1293,8 @@ void kprobe_flush_task(struct task_struct *tk)
12711293
hlist_del(&ri->hlist);
12721294
kfree(ri);
12731295
}
1296+
1297+
kprobe_busy_end();
12741298
}
12751299
NOKPROBE_SYMBOL(kprobe_flush_task);
12761300

0 commit comments

Comments
 (0)