This source file includes the following definitions:
- __ppc_alloc_insn_page
- __ppc_free_insn_page
- can_optimize
- optimized_callback
- arch_remove_optimized_kprobe
- patch_imm32_load_insns
- patch_imm64_load_insns
- arch_prepare_optimized_kprobe
- arch_prepared_optinsn
- arch_check_optimized_kprobe
- arch_optimize_kprobes
- arch_unoptimize_kprobe
- arch_unoptimize_kprobes
- arch_within_optimized_kprobe
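// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code to prepare detour buffer for optprobes in Kernel.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */
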
#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		\
	(optprobe_template_end - optprobe_template_entry)

DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
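	/* insn_size initialized later */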
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};
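
/*
 * Check if we can optimize this probe. Returns the post-emulation NIP
 * if the probe can be optimized, and 0 otherwise.
 */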
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;
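
	/*
	 * A kprobe placed on the kretprobe trampoline during boot
	 * covers a 'nop' instruction, which can always be emulated,
	 * so further checks can be skipped.
	 */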
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);
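
	/*
	 * We only support optimizing kernel addresses, not
	 * module addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */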
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;
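
	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the nip ahead of time with a
	 * dummy pt_regs, and can't ensure that the return branch from
	 * the detour buffer falls within the branch range (i.e. 32MB).
	 * A branch back from the trampoline is set up in the detour
	 * buffer to the nip returned by analyse_instr() here.
	 *
	 * Ensure the instruction is not a conditional branch, and that
	 * it can be emulated.
	 */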
	if (!is_conditional_branch(*p->ainsn.insn) &&
	    analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
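	/* This is possible if op is under delayed unoptimizing */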
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}
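
/*
 * emulate_step() requires the instruction to be emulated as its
 * second parameter. Load register 'r4' with the instruction.
 */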
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
			  ((val >> 16) & 0xffff));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
			  ___PPC_RS(4) | (val & 0xffff));
}
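
/*
 * Generate instructions to load the provided immediate 64-bit value
 * into register 'r3' and patch these instructions at 'addr'.
 */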
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
	/* lis r3,(op)@highest */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
			  ((val >> 48) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@higher */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
	addr++;

	/* rldicr r3,r3,32,31 */
	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
	addr++;

	/* oris r3,r3,(op)@h */
	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | (val & 0xffff));
}
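
/*
 * Rough layout of the detour buffer built below, a sketch based on the
 * template indices above (the template itself lives in optprobes_head.S):
 * after saving registers, the buffer
 * - loads the address of this 'struct optimized_kprobe' into r3
 *   (TMPL_OP_IDX) and branches-and-links to optimized_callback()
 *   (TMPL_CALL_HDLR_IDX),
 * - loads the probed instruction into r4 (TMPL_INSN_IDX) and
 *   branches-and-links to emulate_step() (TMPL_EMULATE_IDX),
 * - restores registers and branches back to the post-emulation nip
 *   (TMPL_RET_IDX).
 */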
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;
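
	/* Allocate an instruction slot for the detour buffer */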
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;
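
	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively close, since it is
	 * encoded in an immediate field of the branch opcode itself:
	 * 24 bits of the opcode specify the address, so the target must
	 * be within 32MB on either side of the current instruction.
	 */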
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;
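
	/* Check if the return address is also within 32MB range */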
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
			(unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;
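
	/* Set up the template in the detour buffer */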
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
		if (rc < 0)
			goto error;
	}
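
	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */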
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);
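
	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */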
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
				(unsigned long)op_callback_addr,
				BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
				(unsigned long)emulate_step_addr,
				BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
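
	/*
	 * 3. load the instruction to be emulated into the relevant register
	 */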
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);
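
	/*
	 * 4. branch back from the trampoline
	 */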
	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}
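
/*
 * On powerpc, optprobes always replace exactly one instruction
 * (4 bytes aligned, 4 bytes long), so another kprobe can never lie
 * within the replaced range. Always return 0.
 */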
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
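		/*
		 * Back up the instructions which will be replaced
		 * by the jump address.
		 */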
		memcpy(op->optinsn.copied_insn, op->kp.addr,
					       RELATIVEJUMP_SIZE);
		patch_instruction(op->kp.addr,
				  create_branch((unsigned int *)op->kp.addr,
						(unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}