/*
 * Dynamic Ftrace based Kprobes Optimization
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Hitachi Ltd., 2012
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *                IBM Corporation
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/ftrace.h>

static nokprobe_inline
int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
                      struct kprobe_ctlblk *kcb, unsigned long orig_nip)
{
        /*
         * Emulate singlestep (and also recover regs->nip)
         * as if there is a nop
         */
        regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
        if (unlikely(p->post_handler)) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                p->post_handler(p, regs, 0);
        }
        __this_cpu_write(current_kprobe, NULL);
        if (orig_nip)
                regs->nip = orig_nip;
        return 1;
}

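/*
 * Entry point intended for the regular kprobe handling code (outside this
 * file): emulate the single-step and return 1 if the probe is ftrace-based,
 * or return 0 so the caller falls back to the usual single-stepping.
 */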
int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
                    struct kprobe_ctlblk *kcb)
{
        if (kprobe_ftrace(p))
                return __skip_singlestep(p, regs, kcb, 0);
        else
                return 0;
}
NOKPROBE_SYMBOL(skip_singlestep);

/* Ftrace callback handler for kprobes */
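/*
 * With interrupts disabled, look up the kprobe registered at this ftrace
 * call site, rewind NIP so the handlers see the probed address, run the
 * pre-handler and, unless it changed NIP itself, emulate the single-step
 * over the nop-equivalent ftrace location.
 */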
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
                           struct ftrace_ops *ops, struct pt_regs *regs)
{
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;
        unsigned long flags;

        /* Disable irq for emulating a breakpoint and avoiding preempt */
        local_irq_save(flags);
        hard_irq_disable();

        p = get_kprobe((kprobe_opcode_t *)nip);
        if (unlikely(!p) || kprobe_disabled(p))
                goto end;

        kcb = get_kprobe_ctlblk();
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(p);
        } else {
                unsigned long orig_nip = regs->nip;

                /*
                 * On powerpc, NIP is *before* this instruction for the
                 * pre handler
                 */
                regs->nip -= MCOUNT_INSN_SIZE;

                __this_cpu_write(current_kprobe, p);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                if (!p->pre_handler || !p->pre_handler(p, regs))
                        __skip_singlestep(p, regs, kcb, orig_nip);
                /*
                 * If pre_handler returns !0, it sets regs->nip and
                 * resets current kprobe.
                 */
        }
end:
        local_irq_restore(flags);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

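/*
 * An ftrace-based kprobe needs no out-of-line instruction slot: the probed
 * instruction is the ftrace call site itself, so nothing is copied and the
 * probe is marked as not boostable.
 */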
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
        p->ainsn.insn = NULL;
        p->ainsn.boostable = -1;
        return 0;
}