/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
19 #include <linux/kasan.h>
20 #include <linux/kernel.h>
21 #include <linux/kprobes.h>
22 #include <linux/module.h>
23 #include <linux/slab.h>
24 #include <linux/stop_machine.h>
25 #include <linux/stringify.h>
26 #include <asm/traps.h>
27 #include <asm/ptrace.h>
28 #include <asm/cacheflush.h>
29 #include <asm/debug-monitors.h>
30 #include <asm/system_misc.h>
32 #include <asm/uaccess.h>
34 #include <asm-generic/sections.h>
36 #include "decode-insn.h"
38 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
39 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
42 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
44 static inline unsigned long min_stack_size(unsigned long addr)
48 size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
50 return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
53 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
55 /* prepare insn slot */
56 p->ainsn.insn[0] = cpu_to_le32(p->opcode);
58 flush_icache_range((uintptr_t) (p->ainsn.insn),
59 (uintptr_t) (p->ainsn.insn) +
60 MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
63 * Needs restoring of return address after stepping xol.
65 p->ainsn.restore = (unsigned long) p->addr +
66 sizeof(kprobe_opcode_t);
69 static void __kprobes arch_prepare_simulate(struct kprobe *p)
71 /* This instructions is not executed xol. No need to adjust the PC */
75 static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
77 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
80 p->ainsn.handler((u32)p->opcode, (long)p->addr, regs);
82 /* single step simulated, now go for post processing */
83 post_kprobe_handler(kcb, regs);
86 int __kprobes arch_prepare_kprobe(struct kprobe *p)
88 unsigned long probe_addr = (unsigned long)p->addr;
89 extern char __start_rodata[];
90 extern char __end_rodata[];
95 /* copy instruction */
96 p->opcode = le32_to_cpu(*p->addr);
98 if (in_exception_text(probe_addr))
100 if (probe_addr >= (unsigned long) __start_rodata &&
101 probe_addr <= (unsigned long) __end_rodata)
104 /* decode instruction */
105 switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
106 case INSN_REJECTED: /* insn not supported */
109 case INSN_GOOD_NO_SLOT: /* insn need simulation */
110 p->ainsn.insn = NULL;
113 case INSN_GOOD: /* instruction uses slot */
114 p->ainsn.insn = get_insn_slot();
120 /* prepare the instruction */
122 arch_prepare_ss_slot(p);
124 arch_prepare_simulate(p);
129 static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
134 addrs[0] = (void *)addr;
135 insns[0] = (u32)opcode;
137 return aarch64_insn_patch_text(addrs, insns, 1);
140 /* arm kprobe: install breakpoint in text */
141 void __kprobes arch_arm_kprobe(struct kprobe *p)
143 patch_text(p->addr, BRK64_OPCODE_KPROBES);
146 /* disarm kprobe: remove breakpoint from text */
147 void __kprobes arch_disarm_kprobe(struct kprobe *p)
149 patch_text(p->addr, p->opcode);
152 void __kprobes arch_remove_kprobe(struct kprobe *p)
155 free_insn_slot(p->ainsn.insn, 0);
156 p->ainsn.insn = NULL;
160 static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
162 kcb->prev_kprobe.kp = kprobe_running();
163 kcb->prev_kprobe.status = kcb->kprobe_status;
166 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
168 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
169 kcb->kprobe_status = kcb->prev_kprobe.status;
172 static void __kprobes set_current_kprobe(struct kprobe *p)
174 __this_cpu_write(current_kprobe, p);
178 * The D-flag (Debug mask) is set (masked) upon debug exception entry.
179 * Kprobes needs to clear (unmask) D-flag -ONLY- in case of recursive
180 * probe i.e. when probe hit from kprobe handler context upon
181 * executing the pre/post handlers. In this case we return with
182 * D-flag clear so that single-stepping can be carried-out.
184 * Leave D-flag set in all other cases.
186 static void __kprobes
187 spsr_set_debug_flag(struct pt_regs *regs, int mask)
189 unsigned long spsr = regs->pstate;
200 * Interrupts need to be disabled before single-step mode is set, and not
201 * reenabled until after single-step mode ends.
202 * Without disabling interrupt on local CPU, there is a chance of
203 * interrupt occurrence in the period of exception return and start of
204 * out-of-line single-step, that result in wrongly single stepping
205 * into the interrupt handler.
207 static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
208 struct pt_regs *regs)
210 kcb->saved_irqflag = regs->pstate;
211 regs->pstate |= PSR_I_BIT;
214 static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
215 struct pt_regs *regs)
217 if (kcb->saved_irqflag & PSR_I_BIT)
218 regs->pstate |= PSR_I_BIT;
220 regs->pstate &= ~PSR_I_BIT;
223 static void __kprobes
224 set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
226 kcb->ss_ctx.ss_pending = true;
227 kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
230 static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
232 kcb->ss_ctx.ss_pending = false;
233 kcb->ss_ctx.match_addr = 0;
236 static void __kprobes setup_singlestep(struct kprobe *p,
237 struct pt_regs *regs,
238 struct kprobe_ctlblk *kcb, int reenter)
243 save_previous_kprobe(kcb);
244 set_current_kprobe(p);
245 kcb->kprobe_status = KPROBE_REENTER;
247 kcb->kprobe_status = KPROBE_HIT_SS;
252 /* prepare for single stepping */
253 slot = (unsigned long)p->ainsn.insn;
255 set_ss_context(kcb, slot); /* mark pending ss */
257 if (kcb->kprobe_status == KPROBE_REENTER)
258 spsr_set_debug_flag(regs, 0);
260 WARN_ON(regs->pstate & PSR_D_BIT);
262 /* IRQs and single stepping do not mix well. */
263 kprobes_save_local_irqflag(kcb, regs);
264 kernel_enable_single_step(regs);
265 instruction_pointer_set(regs, slot);
267 /* insn simulation */
268 arch_simulate_insn(p, regs);
272 static int __kprobes reenter_kprobe(struct kprobe *p,
273 struct pt_regs *regs,
274 struct kprobe_ctlblk *kcb)
276 switch (kcb->kprobe_status) {
277 case KPROBE_HIT_SSDONE:
278 case KPROBE_HIT_ACTIVE:
279 kprobes_inc_nmissed_count(p);
280 setup_singlestep(p, regs, kcb, 1);
284 pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
296 static void __kprobes
297 post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
299 struct kprobe *cur = kprobe_running();
304 /* return addr restore if non-branching insn */
305 if (cur->ainsn.restore != 0)
306 instruction_pointer_set(regs, cur->ainsn.restore);
308 /* restore back original saved kprobe variables and continue */
309 if (kcb->kprobe_status == KPROBE_REENTER) {
310 restore_previous_kprobe(kcb);
313 /* call post handler */
314 kcb->kprobe_status = KPROBE_HIT_SSDONE;
315 if (cur->post_handler) {
316 /* post_handler can hit breakpoint and single step
317 * again, so we enable D-flag for recursive exception.
319 cur->post_handler(cur, regs, 0);
322 reset_current_kprobe();
325 int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
327 struct kprobe *cur = kprobe_running();
328 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
330 switch (kcb->kprobe_status) {
334 * We are here because the instruction being single
335 * stepped caused a page fault. We reset the current
336 * kprobe and the ip points back to the probe address
337 * and allow the page fault handler to continue as a
340 instruction_pointer_set(regs, (unsigned long) cur->addr);
341 if (!instruction_pointer(regs))
344 kernel_disable_single_step();
345 if (kcb->kprobe_status == KPROBE_REENTER)
346 spsr_set_debug_flag(regs, 1);
348 if (kcb->kprobe_status == KPROBE_REENTER)
349 restore_previous_kprobe(kcb);
351 reset_current_kprobe();
354 case KPROBE_HIT_ACTIVE:
355 case KPROBE_HIT_SSDONE:
357 * We increment the nmissed count for accounting,
358 * we can also use npre/npostfault count for accounting
359 * these specific fault cases.
361 kprobes_inc_nmissed_count(cur);
364 * We come here because instructions in the pre/post
365 * handler caused the page_fault, this could happen
366 * if handler tries to access user space by
367 * copy_from_user(), get_user() etc. Let the
368 * user-specified handler try to fix it first.
370 if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
374 * In case the user-specified fault handler returned
375 * zero, try to fix up.
377 if (fixup_exception(regs))
383 int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
384 unsigned long val, void *data)
389 static void __kprobes kprobe_handler(struct pt_regs *regs)
391 struct kprobe *p, *cur_kprobe;
392 struct kprobe_ctlblk *kcb;
393 unsigned long addr = instruction_pointer(regs);
395 kcb = get_kprobe_ctlblk();
396 cur_kprobe = kprobe_running();
398 p = get_kprobe((kprobe_opcode_t *) addr);
402 if (reenter_kprobe(p, regs, kcb))
406 set_current_kprobe(p);
407 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
410 * If we have no pre-handler or it returned 0, we
411 * continue with normal processing. If we have a
412 * pre-handler and it returned non-zero, it prepped
413 * for calling the break_handler below on re-entry,
414 * so get out doing nothing more here.
416 * pre_handler can hit a breakpoint and can step thru
417 * before return, keep PSTATE D-flag enabled until
418 * pre_handler return back.
420 if (!p->pre_handler || !p->pre_handler(p, regs)) {
421 setup_singlestep(p, regs, kcb, 0);
425 } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
426 BRK64_OPCODE_KPROBES) && cur_kprobe) {
427 /* We probably hit a jprobe. Call its break handler. */
428 if (cur_kprobe->break_handler &&
429 cur_kprobe->break_handler(cur_kprobe, regs)) {
430 setup_singlestep(cur_kprobe, regs, kcb, 0);
435 * The breakpoint instruction was removed right
436 * after we hit it. Another cpu has removed
437 * either a probepoint or a debugger breakpoint
438 * at this address. In either case, no further
439 * handling of this interrupt is appropriate.
440 * Return back to original instruction, and continue.
445 kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
447 if ((kcb->ss_ctx.ss_pending)
448 && (kcb->ss_ctx.match_addr == addr)) {
449 clear_ss_context(kcb); /* clear pending ss */
450 return DBG_HOOK_HANDLED;
452 /* not ours, kprobes should ignore it */
453 return DBG_HOOK_ERROR;
457 kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
459 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
462 /* return error if this is not our step */
463 retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
465 if (retval == DBG_HOOK_HANDLED) {
466 kprobes_restore_local_irqflag(kcb, regs);
467 kernel_disable_single_step();
469 if (kcb->kprobe_status == KPROBE_REENTER)
470 spsr_set_debug_flag(regs, 1);
472 post_kprobe_handler(kcb, regs);
479 kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
481 kprobe_handler(regs);
482 return DBG_HOOK_HANDLED;
485 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
487 struct jprobe *jp = container_of(p, struct jprobe, kp);
488 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
489 long stack_ptr = kernel_stack_pointer(regs);
491 kcb->jprobe_saved_regs = *regs;
493 * As Linus pointed out, gcc assumes that the callee
494 * owns the argument space and could overwrite it, e.g.
495 * tailcall optimization. So, to be absolutely safe
496 * we also save and restore enough stack bytes to cover
499 kasan_disable_current();
500 memcpy(kcb->jprobes_stack, (void *)stack_ptr,
501 min_stack_size(stack_ptr));
502 kasan_enable_current();
504 instruction_pointer_set(regs, (unsigned long) jp->entry);
506 pause_graph_tracing();
510 void __kprobes jprobe_return(void)
512 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
515 * Jprobe handler return by entering break exception,
516 * encoded same as kprobe, but with following conditions
517 * -a special PC to identify it from the other kprobes.
518 * -restore stack addr to original saved pt_regs
520 asm volatile(" mov sp, %0 \n"
521 "jprobe_return_break: brk %1 \n"
523 : "r" (kcb->jprobe_saved_regs.sp),
524 "I" (BRK64_ESR_KPROBES)
530 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
532 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
533 long stack_addr = kcb->jprobe_saved_regs.sp;
534 long orig_sp = kernel_stack_pointer(regs);
535 struct jprobe *jp = container_of(p, struct jprobe, kp);
536 extern const char jprobe_return_break[];
538 if (instruction_pointer(regs) != (u64) jprobe_return_break)
541 if (orig_sp != stack_addr) {
542 struct pt_regs *saved_regs =
543 (struct pt_regs *)kcb->jprobe_saved_regs.sp;
544 pr_err("current sp %lx does not match saved sp %lx\n",
545 orig_sp, stack_addr);
546 pr_err("Saved registers for jprobe %p\n", jp);
547 show_regs(saved_regs);
548 pr_err("Current registers\n");
552 unpause_graph_tracing();
553 *regs = kcb->jprobe_saved_regs;
554 kasan_disable_current();
555 memcpy((void *)stack_addr, kcb->jprobes_stack,
556 min_stack_size(stack_addr));
557 kasan_enable_current();
558 preempt_enable_no_resched();
562 bool arch_within_kprobe_blacklist(unsigned long addr)
564 extern char __idmap_text_start[], __idmap_text_end[];
566 if ((addr >= (unsigned long)__kprobes_text_start &&
567 addr < (unsigned long)__kprobes_text_end) ||
568 (addr >= (unsigned long)__entry_text_start &&
569 addr < (unsigned long)__entry_text_end) ||
570 (addr >= (unsigned long)__idmap_text_start &&
571 addr < (unsigned long)__idmap_text_end) ||
572 !!search_exception_tables(addr))
579 void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
581 struct kretprobe_instance *ri = NULL;
582 struct hlist_head *head, empty_rp;
583 struct hlist_node *tmp;
584 unsigned long flags, orig_ret_address = 0;
585 unsigned long trampoline_address =
586 (unsigned long)&kretprobe_trampoline;
587 kprobe_opcode_t *correct_ret_addr = NULL;
589 INIT_HLIST_HEAD(&empty_rp);
590 kretprobe_hash_lock(current, &head, &flags);
593 * It is possible to have multiple instances associated with a given
594 * task either because multiple functions in the call path have
595 * return probes installed on them, and/or more than one
596 * return probe was registered for a target function.
598 * We can handle this because:
599 * - instances are always pushed into the head of the list
600 * - when multiple return probes are registered for the same
601 * function, the (chronologically) first instance's ret_addr
602 * will be the real return address, and all the rest will
603 * point to kretprobe_trampoline.
605 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
606 if (ri->task != current)
607 /* another task is sharing our hash bucket */
610 orig_ret_address = (unsigned long)ri->ret_addr;
612 if (orig_ret_address != trampoline_address)
614 * This is the real return address. Any other
615 * instances associated with this task are for
616 * other calls deeper on the call stack
621 kretprobe_assert(ri, orig_ret_address, trampoline_address);
623 correct_ret_addr = ri->ret_addr;
624 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
625 if (ri->task != current)
626 /* another task is sharing our hash bucket */
629 orig_ret_address = (unsigned long)ri->ret_addr;
630 if (ri->rp && ri->rp->handler) {
631 __this_cpu_write(current_kprobe, &ri->rp->kp);
632 get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
633 ri->ret_addr = correct_ret_addr;
634 ri->rp->handler(ri, regs);
635 __this_cpu_write(current_kprobe, NULL);
638 recycle_rp_inst(ri, &empty_rp);
640 if (orig_ret_address != trampoline_address)
642 * This is the real return address. Any other
643 * instances associated with this task are for
644 * other calls deeper on the call stack
649 kretprobe_hash_unlock(current, &flags);
651 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
652 hlist_del(&ri->hlist);
655 return (void *)orig_ret_address;
658 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
659 struct pt_regs *regs)
661 ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];
663 /* replace return addr (x30) with trampoline */
664 regs->regs[30] = (long)&kretprobe_trampoline;
667 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
672 int __init arch_init_kprobes(void)