This source file includes the following definitions:
- perf_output_wakeup
- perf_output_get_handle
- perf_output_put_handle
- ring_buffer_has_space
- __perf_output_begin
- perf_output_begin_forward
- perf_output_begin_backward
- perf_output_begin
- perf_output_copy
- perf_output_skip
- perf_output_end
- ring_buffer_init
- perf_aux_output_flag
- perf_aux_output_begin
- rb_need_aux_wakeup
- perf_aux_output_end
- perf_aux_output_skip
- perf_get_aux
- rb_alloc_aux_page
- rb_free_aux_page
- __rb_free_aux
- rb_alloc_aux
- rb_free_aux
- __perf_mmap_to_page
- perf_mmap_alloc_page
- rb_alloc
- perf_mmap_free_page
- rb_free
- data_page_nr
- __perf_mmap_to_page
- perf_mmap_unmark_page
- rb_free_work
- rb_free
- rb_alloc
- perf_mmap_to_page
11 #include <linux/perf_event.h>
12 #include <linux/vmalloc.h>
13 #include <linux/slab.h>
14 #include <linux/circ_buf.h>
15 #include <linux/poll.h>
16 #include <linux/nospec.h>
17
18 #include "internal.h"
19
20 static void perf_output_wakeup(struct perf_output_handle *handle)
21 {
22 atomic_set(&handle->rb->poll, EPOLLIN);
23
24 handle->event->pending_wakeup = 1;
25 irq_work_queue(&handle->event->pending);
26 }
27
28 /*
29 * We need to ensure a later event_id doesn't publish a head when a
30 * former event isn't done writing. However, since we need to deal with
31 * NMIs we cannot fully serialize things.
32 *
33 * We only publish the head (and generate a wakeup) when the outer-most
34 * event completes.
35 */
36 static void perf_output_get_handle(struct perf_output_handle *handle)
37 {
38 struct ring_buffer *rb = handle->rb;
39
40 preempt_disable();
41
42 /*
43 * Avoid an explicit LOAD/STORE such that architectures with memops
44 * can use them.
45 */
46 (*(volatile unsigned int *)&rb->nest)++;
47 handle->wakeup = local_read(&rb->wakeup);
48 }
49
50 static void perf_output_put_handle(struct perf_output_handle *handle)
51 {
52 struct ring_buffer *rb = handle->rb;
53 unsigned long head;
54 unsigned int nest;
55
56 /*
57 * If this isn't the outermost nesting, we don't have to update
58 * @rb->user_page->data_head.
59 */
60 nest = READ_ONCE(rb->nest);
61 if (nest > 1) {
62 WRITE_ONCE(rb->nest, nest - 1);
63 goto out;
64 }
65
66 again:
67 /*
68 * In order to avoid publishing a head value that goes backwards,
69 * we must ensure the load of @rb->head happens after we've
70 * incremented @rb->nest.
71 *
72 * Otherwise we can observe a @rb->head value before one published
73 * by an IRQ/NMI that ran between the load and the increment.
74 */
75 barrier();
76 head = local_read(&rb->head);
77
78 /*
79 * An IRQ/NMI can happen here and advance @rb->head, causing our
80 * load above to be stale.
81 */
82
83 /*
84 * Since the mmap() consumer (userspace) can run on a different CPU:
85 *
86 *   kernel                             user
87 *
88 *   if (LOAD ->data_tail) {            LOAD ->data_head
89 *                      (A)             smp_rmb()       (C)
90 *      STORE $data                     LOAD $data
91 *      smp_wmb()       (B)             smp_mb()        (D)
92 *      STORE ->data_head               STORE ->data_tail
93 *   }
94 *
95 *   Where A pairs with D, and B pairs with C.
96 *
97 *   In our case (A) is a control dependency that separates the load of
98 *   ->data_tail from the stores of $data: if ->data_tail indicates
99 *   there is no room in the buffer to store $data, we do not store.
100 *
101 *   D needs to be a full barrier since it separates the data READ
102 *   from the tail WRITE.
103 *
104 *   For B a WMB is sufficient since it separates two WRITEs, and for
105 *   C an RMB is sufficient since it separates two READs.
106 *
107 *   See perf_output_begin().
108 */
109 smp_wmb();
110 WRITE_ONCE(rb->user_page->data_head, head);
111
112 /*
113 * We must publish the head before decrementing the nest count,
114 * otherwise an IRQ/NMI can publish a more recent head value and our
115 * write will (temporarily) publish a stale head.
116 */
117 barrier();
118 WRITE_ONCE(rb->nest, 0);
119
120 /*
121 * Ensure we decrement @rb->nest before we validate @rb->head.
122 * Otherwise we cannot be sure we caught the 'last' nested update.
123 */
124 barrier();
125 if (unlikely(head != local_read(&rb->head))) {
126 WRITE_ONCE(rb->nest, 1);
127 goto again;
128 }
129
130 if (handle->wakeup != local_read(&rb->wakeup))
131 perf_output_wakeup(handle);
132
133 out:
134 preempt_enable();
135 }
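
/*
 * Illustrative user-space sketch, not part of this file: the consumer side
 * of the ordering scheme that perf_output_put_handle() implements above.
 * The acquire on the ->data_head load provides (C), and the release on the
 * ->data_tail store provides (D); real consumers (e.g. tools/lib/perf)
 * carry per-arch helpers for this. __atomic_load_n()/__atomic_store_n()
 * are the GCC/Clang builtins; everything else here is a made-up example.
 */
#include <linux/perf_event.h>

#define load_acquire_head(pg)     __atomic_load_n(&(pg)->data_head, __ATOMIC_ACQUIRE)
#define store_release_tail(pg, v) __atomic_store_n(&(pg)->data_tail, (v), __ATOMIC_RELEASE)

static void example_consume(struct perf_event_mmap_page *pg,
			    unsigned char *base, __u64 mask)
{
	__u64 tail = pg->data_tail;
	__u64 head = load_acquire_head(pg);	/* LOAD ->data_head, pairs with smp_wmb() (B) */

	while (tail != head) {
		struct perf_event_header *hdr;

		hdr = (struct perf_event_header *)(base + (tail & mask));
		/* ... process one record (wrap-around handling omitted) ... */
		tail += hdr->size;
	}

	/* Order the data reads before publishing the new tail (D). */
	store_release_tail(pg, tail);
}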
136
137 static __always_inline bool
138 ring_buffer_has_space(unsigned long head, unsigned long tail,
139 unsigned long data_size, unsigned int size,
140 bool backward)
141 {
142 if (!backward)
143 return CIRC_SPACE(head, tail, data_size) >= size;
144 else
145 return CIRC_SPACE(tail, head, data_size) >= size;
146 }
147
148 static __always_inline int
149 __perf_output_begin(struct perf_output_handle *handle,
150 struct perf_event *event, unsigned int size,
151 bool backward)
152 {
153 struct ring_buffer *rb;
154 unsigned long tail, offset, head;
155 int have_lost, page_shift;
156 struct {
157 struct perf_event_header header;
158 u64 id;
159 u64 lost;
160 } lost_event;
161
162 rcu_read_lock();
163 /*
164 * For inherited events we send all the output towards the parent.
165 */
166 if (event->parent)
167 event = event->parent;
168
169 rb = rcu_dereference(event->rb);
170 if (unlikely(!rb))
171 goto out;
172
173 if (unlikely(rb->paused)) {
174 if (rb->nr_pages)
175 local_inc(&rb->lost);
176 goto out;
177 }
178
179 handle->rb = rb;
180 handle->event = event;
181
182 have_lost = local_read(&rb->lost);
183 if (unlikely(have_lost)) {
184 size += sizeof(lost_event);
185 if (event->attr.sample_id_all)
186 size += event->id_header_size;
187 }
188
189 perf_output_get_handle(handle);
190
191 do {
192 tail = READ_ONCE(rb->user_page->data_tail);
193 offset = head = local_read(&rb->head);
194 if (!rb->overwrite) {
195 if (unlikely(!ring_buffer_has_space(head, tail,
196 perf_data_size(rb),
197 size, backward)))
198 goto fail;
199 }
200
201 /*
202 * The above forms a control dependency barrier separating the
203 * @tail load above from the data stores below, since the @tail
204 * load is required to compute the branch to the fail label below.
205 *
206 * It ensures we do not observe a data store before the @tail
207 * load -- this is fundamental, as a control dependency can only
208 * order a load against a later store.
209 *
210 * See the comment in perf_output_put_handle().
211 */
212
213 if (!backward)
214 head += size;
215 else
216 head -= size;
217 } while (local_cmpxchg(&rb->head, offset, head) != offset);
218
219 if (backward) {
220 offset = head;
221 head = (u64)(-head);
222 }
223
224 /*
225 * We rely on the implied barrier() of local_cmpxchg() to ensure
226 * none of the data stores below can be lifted up by the compiler.
227 */
228
229 if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
230 local_add(rb->watermark, &rb->wakeup);
231
232 page_shift = PAGE_SHIFT + page_order(rb);
233
234 handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
235 offset &= (1UL << page_shift) - 1;
236 handle->addr = rb->data_pages[handle->page] + offset;
237 handle->size = (1UL << page_shift) - offset;
238
239 if (unlikely(have_lost)) {
240 struct perf_sample_data sample_data;
241
242 lost_event.header.size = sizeof(lost_event);
243 lost_event.header.type = PERF_RECORD_LOST;
244 lost_event.header.misc = 0;
245 lost_event.id = event->id;
246 lost_event.lost = local_xchg(&rb->lost, 0);
247
248 perf_event_header__init_id(&lost_event.header,
249 &sample_data, event);
250 perf_output_put(handle, lost_event);
251 perf_event__output_id_sample(event, handle, &sample_data);
252 }
253
254 return 0;
255
256 fail:
257 local_inc(&rb->lost);
258 perf_output_put_handle(handle);
259 out:
260 rcu_read_unlock();
261
262 return -ENOSPC;
263 }
264
265 int perf_output_begin_forward(struct perf_output_handle *handle,
266 struct perf_event *event, unsigned int size)
267 {
268 return __perf_output_begin(handle, event, size, false);
269 }
270
271 int perf_output_begin_backward(struct perf_output_handle *handle,
272 struct perf_event *event, unsigned int size)
273 {
274 return __perf_output_begin(handle, event, size, true);
275 }
276
277 int perf_output_begin(struct perf_output_handle *handle,
278 struct perf_event *event, unsigned int size)
279 {
280
281 return __perf_output_begin(handle, event, size,
282 unlikely(is_write_backward(event)));
283 }
284
285 unsigned int perf_output_copy(struct perf_output_handle *handle,
286 const void *buf, unsigned int len)
287 {
288 return __output_copy(handle, buf, len);
289 }
290
291 unsigned int perf_output_skip(struct perf_output_handle *handle,
292 unsigned int len)
293 {
294 return __output_skip(handle, NULL, len);
295 }
296
297 void perf_output_end(struct perf_output_handle *handle)
298 {
299 perf_output_put_handle(handle);
300 rcu_read_unlock();
301 }
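
/*
 * Illustrative sketch only, not part of this file: how the output handle
 * above is typically driven by a record emitter (perf_event_output() in
 * kernel/events/core.c is the real caller). The payload layout here is a
 * made-up single u64, just to show the begin/put/end sequence.
 */
static void example_emit(struct perf_event *event, u64 value)
{
	struct perf_output_handle handle;
	struct perf_event_header header = {
		.type = PERF_RECORD_SAMPLE,
		.misc = 0,
		.size = sizeof(header) + sizeof(value),
	};

	/* Reserve space; returns -ENOSPC and accounts a lost record on failure. */
	if (perf_output_begin(&handle, event, header.size))
		return;

	perf_output_put(&handle, header);
	perf_output_put(&handle, value);

	/* Drop the handle; the outermost writer publishes ->data_head. */
	perf_output_end(&handle);
}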
302
303 static void
304 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
305 {
306 long max_size = perf_data_size(rb);
307
308 if (watermark)
309 rb->watermark = min(max_size, watermark);
310
311 if (!rb->watermark)
312 rb->watermark = max_size / 2;
313
314 if (flags & RING_BUFFER_WRITABLE)
315 rb->overwrite = 0;
316 else
317 rb->overwrite = 1;
318
319 refcount_set(&rb->refcount, 1);
320
321 INIT_LIST_HEAD(&rb->event_list);
322 spin_lock_init(&rb->event_lock);
323
324 /*
325 * perf_output_begin() only checks rb->paused, therefore
326 * rb->paused must be true if we have no pages for output.
327 */
328 if (!rb->nr_pages)
329 rb->paused = 1;
330 }
331
332 void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
333 {
334 /*
335 * OVERWRITE is determined by perf_aux_output_end() and can't be
336 * passed in directly.
337 */
338 if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
339 return;
340
341 handle->aux_flags |= flags;
342 }
343 EXPORT_SYMBOL_GPL(perf_aux_output_flag);
344
345 /*
346 * This is called before hardware starts writing to the AUX area to
347 * obtain an output handle and make sure there's room in the buffer.
348 * When the capture completes, call perf_aux_output_end() to commit
349 * the recorded data to the buffer.
350 *
351 * The ordering is similar to that of perf_output_{begin,end}, with
352 * the exception of (B), which should be taken care of by the pmu
353 * driver, since ordering rules will differ depending on hardware.
354 *
355 * Call this from pmu::start(); see the comment in perf_aux_output_end()
356 * about its use in pmu callbacks.
357 */
358
359 void *perf_aux_output_begin(struct perf_output_handle *handle,
360 struct perf_event *event)
361 {
362 struct perf_event *output_event = event;
363 unsigned long aux_head, aux_tail;
364 struct ring_buffer *rb;
365 unsigned int nest;
366
367 if (output_event->parent)
368 output_event = output_event->parent;
369
370 /*
371 * Since this will typically be open across pmu::add/pmu::del, we
372 * grab ring_buffer's refcount instead of holding the rcu read lock
373 * to make sure it doesn't disappear under us.
374 */
375 rb = ring_buffer_get(output_event);
376 if (!rb)
377 return NULL;
378
379 if (!rb_has_aux(rb))
380 goto err;
381
382 /*
383 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close()
384 * and about to get freed, so we leave immediately.
385 *
386 * Checking rb::aux_mmap_count and rb::refcount has to be done in
387 * the same order, see perf_mmap_close(). Otherwise we end up freeing
388 * aux pages in this path, which is a bug, because in_atomic().
389 */
390 if (!atomic_read(&rb->aux_mmap_count))
391 goto err;
392
393 if (!refcount_inc_not_zero(&rb->aux_refcount))
394 goto err;
395
396 nest = READ_ONCE(rb->aux_nest);
397
398 /*
399 * Nesting is not supported for the AUX area; catch nested writers early.
400 */
401 if (WARN_ON_ONCE(nest))
402 goto err_put;
403
404 WRITE_ONCE(rb->aux_nest, nest + 1);
405
406 aux_head = rb->aux_head;
407
408 handle->rb = rb;
409 handle->event = event;
410 handle->head = aux_head;
411 handle->size = 0;
412 handle->aux_flags = 0;
413
414
415 /*
416 * In overwrite mode the AUX data stores do not depend on aux_tail, so
417 * the control-dependency barrier (A) does not exist on this path.
418 */
419 if (!rb->aux_overwrite) {
420 aux_tail = READ_ONCE(rb->user_page->aux_tail);
421 handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
422 if (aux_head - aux_tail < perf_aux_size(rb))
423 handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
424
425 /*
426 * The handle->size computation depends on the aux_tail load above;
427 * this forms the control dependency separating that load from the
428 * AUX data stores done by the hardware/pmu driver.
429 */
430 if (!handle->size) {
431 event->pending_disable = smp_processor_id();
432 perf_output_wakeup(handle);
433 WRITE_ONCE(rb->aux_nest, 0);
434 goto err_put;
435 }
436 }
437
438 return handle->rb->aux_priv;
439
440 err_put:
441 /* can't be last */
442 rb_free_aux(rb);
443
444 err:
445 ring_buffer_put(rb);
446 handle->event = NULL;
447
448 return NULL;
449 }
450 EXPORT_SYMBOL_GPL(perf_aux_output_begin);
451
452 static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
453 {
454 if (rb->aux_overwrite)
455 return false;
456
457 if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
458 rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
459 return true;
460 }
461
462 return false;
463 }
464
465 /*
466 * Commit the data written by hardware into the ring buffer by adjusting
467 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
468 * pmu driver's responsibility to observe ordering rules of the hardware,
469 * so that all the data is externally visible before this is called.
470 *
471 * Note: this has to be called from the pmu::stop() callback, as the
472 * AUX buffer management code assumes that after pmu::stop() the AUX
473 * transaction is stopped and the AUX reference count is dropped.
474 */
475 void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
476 {
477 bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
478 struct ring_buffer *rb = handle->rb;
479 unsigned long aux_head;
480
481
482 if (rb->aux_overwrite) {
483 handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;
484
485 aux_head = handle->head;
486 rb->aux_head = aux_head;
487 } else {
488 handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;
489
490 aux_head = rb->aux_head;
491 rb->aux_head += size;
492 }
493
494 /*
495 * Only send a RECORD_AUX if we have something useful to communicate.
496 *
497 * Note: the OVERWRITE records by themselves are not considered
498 * useful, as they don't communicate any *new* information,
499 * aside from the short-lived offset, which becomes history at
500 * the next event sched-in and therefore isn't useful.
501 * Userspace that needs to copy out AUX data in overwrite
502 * mode should know to use user_page::aux_head for the actual
503 * offset. So, from now on we don't output AUX records that
504 * have *only* the OVERWRITE flag set.
505 */
506 if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
507 perf_event_aux_event(handle->event, aux_head, size,
508 handle->aux_flags);
509
510 WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
511 if (rb_need_aux_wakeup(rb))
512 wakeup = true;
513
514 if (wakeup) {
515 if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
516 handle->event->pending_disable = smp_processor_id();
517 perf_output_wakeup(handle);
518 }
519
520 handle->event = NULL;
521
522 WRITE_ONCE(rb->aux_nest, 0);
523
524 rb_free_aux(rb);
525 ring_buffer_put(rb);
526 }
527 EXPORT_SYMBOL_GPL(perf_aux_output_end);
528
529 /*
530 * Skip over a given number of bytes in the AUX buffer, due to, for
531 * example, hardware's alignment constraints.
532 */
533 int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
534 {
535 struct ring_buffer *rb = handle->rb;
536
537 if (size > handle->size)
538 return -ENOSPC;
539
540 rb->aux_head += size;
541
542 WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
543 if (rb_need_aux_wakeup(rb)) {
544 perf_output_wakeup(handle);
545 handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
546 }
547
548 handle->head = rb->aux_head;
549 handle->size -= size;
550
551 return 0;
552 }
553 EXPORT_SYMBOL_GPL(perf_aux_output_skip);
554
555 void *perf_get_aux(struct perf_output_handle *handle)
556 {
557 /* this is only valid between perf_aux_output_begin() and perf_aux_output_end() */
558 if (!handle->event)
559 return NULL;
560
561 return handle->rb->aux_priv;
562 }
563 EXPORT_SYMBOL_GPL(perf_get_aux);
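
/*
 * Illustrative sketch only, not part of this file: how a PMU driver with an
 * AUX area (e.g. a hardware trace unit) typically brackets a transaction
 * with the helpers above. hw_trace_start()/hw_trace_stop() are hypothetical
 * driver helpers, not real kernel APIs.
 */
extern void hw_trace_start(void *priv, unsigned long head, unsigned long size);
extern unsigned long hw_trace_stop(void *priv);

static void example_aux_transaction(struct perf_event *event)
{
	struct perf_output_handle handle;
	unsigned long filled;
	void *priv;

	/* pmu::start() path: returns the pmu's setup_aux() context, or NULL. */
	priv = perf_aux_output_begin(&handle, event);
	if (!priv)
		return;

	/* The hardware may write into [handle.head, handle.head + handle.size). */
	hw_trace_start(priv, handle.head, handle.size);

	/* pmu::stop() path: stop the hardware and commit what it produced. */
	filled = hw_trace_stop(priv);
	if (filled >= handle.size)
		perf_aux_output_flag(&handle, PERF_AUX_FLAG_TRUNCATED);

	perf_aux_output_end(&handle, filled);
}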
564
565 #define PERF_AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
566
567 static struct page *rb_alloc_aux_page(int node, int order)
568 {
569 struct page *page;
570
571 if (order > MAX_ORDER)
572 order = MAX_ORDER;
573
574 do {
575 page = alloc_pages_node(node, PERF_AUX_GFP, order);
576 } while (!page && order--);
577
578 if (page && order) {
579 /*
580 * Communicate the allocation size to the caller:
581 * if we managed to secure a high-order allocation,
582 * record the order in the first page's private field
583 * so rb_alloc_aux() knows how many contiguous pages it got.
584 */
585 split_page(page, order);
586 SetPagePrivate(page);
587 set_page_private(page, order);
588 }
589
590 return page;
591 }
592
593 static void rb_free_aux_page(struct ring_buffer *rb, int idx)
594 {
595 struct page *page = virt_to_page(rb->aux_pages[idx]);
596
597 ClearPagePrivate(page);
598 page->mapping = NULL;
599 __free_page(page);
600 }
601
602 static void __rb_free_aux(struct ring_buffer *rb)
603 {
604 int pg;
605
606 /*
607 * Should never happen; the last reference should be dropped from
608 * the perf_mmap_close() path, which first stops aux transactions
609 * (which in turn are the atomic holders of aux_refcount) and then
610 * does the last rb_free_aux().
611 */
612 WARN_ON_ONCE(in_atomic());
613
614 if (rb->aux_priv) {
615 rb->free_aux(rb->aux_priv);
616 rb->free_aux = NULL;
617 rb->aux_priv = NULL;
618 }
619
620 if (rb->aux_nr_pages) {
621 for (pg = 0; pg < rb->aux_nr_pages; pg++)
622 rb_free_aux_page(rb, pg);
623
624 kfree(rb->aux_pages);
625 rb->aux_nr_pages = 0;
626 }
627 }
628
629 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
630 pgoff_t pgoff, int nr_pages, long watermark, int flags)
631 {
632 bool overwrite = !(flags & RING_BUFFER_WRITABLE);
633 int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
634 int ret = -ENOMEM, max_order;
635
636 if (!has_aux(event))
637 return -EOPNOTSUPP;
638
639 /*
640 * We need to start with the max_order that fits in nr_pages,
641 * not the other way around, hence ilog2() and not get_order().
642 */
643 max_order = ilog2(nr_pages);
644
645 /*
646 * Non-overwrite mode requires more than one contiguous chunk of
647 * memory for SW double buffering, hence the max_order-- below.
648 */
649 if (!overwrite) {
650 if (!max_order)
651 return -EINVAL;
652
653 max_order--;
654 }
655
656 rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
657 node);
658 if (!rb->aux_pages)
659 return -ENOMEM;
660
661 rb->free_aux = event->pmu->free_aux;
662 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
663 struct page *page;
664 int last, order;
665
666 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
667 page = rb_alloc_aux_page(node, order);
668 if (!page)
669 goto out;
670
671 for (last = rb->aux_nr_pages + (1 << page_private(page));
672 last > rb->aux_nr_pages; rb->aux_nr_pages++)
673 rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
674 }
675
676 /*
677 * In overwrite mode, PMUs that don't support SG may not handle more
678 * than one contiguous allocation, since they rely on PMI to do double
679 * buffering instead. Make sure the whole buffer came out as a single
680 * high-order allocation.
681 */
682 if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
683 overwrite) {
684 struct page *page = virt_to_page(rb->aux_pages[0]);
685
686 if (page_private(page) != max_order)
687 goto out;
688 }
689
690 rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
691 overwrite);
692 if (!rb->aux_priv)
693 goto out;
694
695 ret = 0;
696
697 /*
698 * aux_refcount starts at 1 and pins the AUX pages (and the pmu's
699 * aux_priv) across AUX transactions; the last reference is dropped
700 * via rb_free_aux() from the perf_mmap_close() path.
701 */
702
703 refcount_set(&rb->aux_refcount, 1);
704
705 rb->aux_overwrite = overwrite;
706 rb->aux_watermark = watermark;
707
708 if (!rb->aux_watermark && !rb->aux_overwrite)
709 rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);
710
711 out:
712 if (!ret)
713 rb->aux_pgoff = pgoff;
714 else
715 __rb_free_aux(rb);
716
717 return ret;
718 }
719
720 void rb_free_aux(struct ring_buffer *rb)
721 {
722 if (refcount_dec_and_test(&rb->aux_refcount))
723 __rb_free_aux(rb);
724 }
725
726 #ifndef CONFIG_PERF_USE_VMALLOC
727
728 /*
729 * Back perf mmap with individually allocated pages; this is the
730 * default when CONFIG_PERF_USE_VMALLOC is not set.
731 */
732 static struct page *
733 __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
734 {
735 if (pgoff > rb->nr_pages)
736 return NULL;
737
738 if (pgoff == 0)
739 return virt_to_page(rb->user_page);
740
741 return virt_to_page(rb->data_pages[pgoff - 1]);
742 }
743
744 static void *perf_mmap_alloc_page(int cpu)
745 {
746 struct page *page;
747 int node;
748
749 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
750 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
751 if (!page)
752 return NULL;
753
754 return page_address(page);
755 }
756
757 struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
758 {
759 struct ring_buffer *rb;
760 unsigned long size;
761 int i;
762
763 size = sizeof(struct ring_buffer);
764 size += nr_pages * sizeof(void *);
765
766 if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
767 goto fail;
768
769 rb = kzalloc(size, GFP_KERNEL);
770 if (!rb)
771 goto fail;
772
773 rb->user_page = perf_mmap_alloc_page(cpu);
774 if (!rb->user_page)
775 goto fail_user_page;
776
777 for (i = 0; i < nr_pages; i++) {
778 rb->data_pages[i] = perf_mmap_alloc_page(cpu);
779 if (!rb->data_pages[i])
780 goto fail_data_pages;
781 }
782
783 rb->nr_pages = nr_pages;
784
785 ring_buffer_init(rb, watermark, flags);
786
787 return rb;
788
789 fail_data_pages:
790 for (i--; i >= 0; i--)
791 free_page((unsigned long)rb->data_pages[i]);
792
793 free_page((unsigned long)rb->user_page);
794
795 fail_user_page:
796 kfree(rb);
797
798 fail:
799 return NULL;
800 }
801
802 static void perf_mmap_free_page(unsigned long addr)
803 {
804 struct page *page = virt_to_page((void *)addr);
805
806 page->mapping = NULL;
807 __free_page(page);
808 }
809
810 void rb_free(struct ring_buffer *rb)
811 {
812 int i;
813
814 perf_mmap_free_page((unsigned long)rb->user_page);
815 for (i = 0; i < rb->nr_pages; i++)
816 perf_mmap_free_page((unsigned long)rb->data_pages[i]);
817 kfree(rb);
818 }
819
820 #else
821 static int data_page_nr(struct ring_buffer *rb)
822 {
823 return rb->nr_pages << page_order(rb);
824 }
825
826 static struct page *
827 __perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
828 {
829
830 if (pgoff > data_page_nr(rb))
831 return NULL;
832
833 return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
834 }
835
836 static void perf_mmap_unmark_page(void *addr)
837 {
838 struct page *page = vmalloc_to_page(addr);
839
840 page->mapping = NULL;
841 }
842
843 static void rb_free_work(struct work_struct *work)
844 {
845 struct ring_buffer *rb;
846 void *base;
847 int i, nr;
848
849 rb = container_of(work, struct ring_buffer, work);
850 nr = data_page_nr(rb);
851
852 base = rb->user_page;
853
854 for (i = 0; i <= nr; i++)
855 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
856
857 vfree(base);
858 kfree(rb);
859 }
860
861 void rb_free(struct ring_buffer *rb)
862 {
863 schedule_work(&rb->work);
864 }
865
866 struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
867 {
868 struct ring_buffer *rb;
869 unsigned long size;
870 void *all_buf;
871
872 size = sizeof(struct ring_buffer);
873 size += sizeof(void *);
874
875 rb = kzalloc(size, GFP_KERNEL);
876 if (!rb)
877 goto fail;
878
879 INIT_WORK(&rb->work, rb_free_work);
880
881 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
882 if (!all_buf)
883 goto fail_all_buf;
884
885 rb->user_page = all_buf;
886 rb->data_pages[0] = all_buf + PAGE_SIZE;
887 if (nr_pages) {
888 rb->nr_pages = 1;
889 rb->page_order = ilog2(nr_pages);
890 }
891
892 ring_buffer_init(rb, watermark, flags);
893
894 return rb;
895
896 fail_all_buf:
897 kfree(rb);
898
899 fail:
900 return NULL;
901 }
902
903 #endif
904
905 struct page *
906 perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
907 {
908 if (rb->aux_nr_pages) {
909 /* above AUX space */
910 if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
911 return NULL;
912
913 /* AUX space */
914 if (pgoff >= rb->aux_pgoff) {
915 int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
916 return virt_to_page(rb->aux_pages[aux_pgoff]);
917 }
918 }
919
920 return __perf_mmap_to_page(rb, pgoff);
921 }