#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

#define pr_N(n, fmt, ...) \
	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
	struct ordered_event *last = oe->last;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++oe->nr_events;
	oe->last = new;

	pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

	if (!last) {
		list_add(&new->list, &oe->events);
		oe->max_timestamp = timestamp;
		return;
	}

	/*
	 * The last queued event can sit anywhere in the time-sorted list.
	 * Since events usually arrive roughly in order, the new event is
	 * expected to land close to it, so start the search there rather
	 * than at the list head.
	 */
	if (last->timestamp <= timestamp) {
		while (last->timestamp <= timestamp) {
			p = last->list.next;
			if (p == &oe->events) {
				list_add_tail(&new->list, &oe->events);
				oe->max_timestamp = timestamp;
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add_tail(&new->list, &last->list);
	} else {
		while (last->timestamp > timestamp) {
			p = last->list.prev;
			if (p == &oe->events) {
				list_add(&new->list, &oe->events);
				return;
			}
			last = list_entry(p, struct ordered_event, list);
		}
		list_add(&new->list, &last->list);
	}
}
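
/*
 * Worked example of the insertion above (illustrative values): with
 * queued timestamps [10, 20, 30, 40] and oe->last pointing at 30,
 * queueing timestamp 35 walks one node forward and inserts before 40,
 * while queueing 15 walks two nodes backward and inserts between 10
 * and 20. The walk only degrades towards O(n) when the new timestamp
 * is far from the previously queued one.
 */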

static union perf_event *__dup_event(struct ordered_events *oe,
				     union perf_event *event)
{
	union perf_event *new_event = NULL;

	if (oe->cur_alloc_size < oe->max_alloc_size) {
		new_event = memdup(event, event->header.size);
		if (new_event)
			oe->cur_alloc_size += event->header.size;
	}

	return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
				   union perf_event *event)
{
	return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
	/* The event pointer may be NULL, e.g. for an unused buffer entry. */
	if (event && oe->copy_on_queue) {
		oe->cur_alloc_size -= event->header.size;
		free(event);
	}
}
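
/*
 * A minimal usage sketch for copy_on_queue (illustrative; the field
 * belongs to struct ordered_events, the deliver callback is
 * hypothetical):
 *
 *	struct ordered_events oe;
 *
 *	ordered_events__init(&oe, deliver);
 *	oe.copy_on_queue = true;
 *
 * With copy_on_queue set, every queued event is duplicated via
 * memdup(), which matters when the source storage (e.g. a mmap'ed
 * ring buffer) may be overwritten before the event gets delivered.
 * Without it, the caller must keep the original event alive until
 * delivery.
 */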

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
					 union perf_event *event)
{
	struct list_head *cache = &oe->cache;
	struct ordered_event *new = NULL;
	union perf_event *new_event;

	new_event = dup_event(oe, event);
	if (!new_event)
		return NULL;

	if (!list_empty(cache)) {
		new = list_entry(cache->next, struct ordered_event, list);
		list_del(&new->list);
	} else if (oe->buffer) {
		new = oe->buffer + oe->buffer_idx;
		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
			oe->buffer = NULL;
	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

		oe->buffer = malloc(size);
		if (!oe->buffer) {
			free_dup_event(oe, new_event);
			return NULL;
		}

		pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
		   oe->cur_alloc_size, size, oe->max_alloc_size);

		oe->cur_alloc_size += size;
		list_add(&oe->buffer->list, &oe->to_free);

		/*
		 * The first entry is abused to maintain the to_free list;
		 * clear its event pointer so ordered_events__free() does
		 * not pass uninitialized memory to free_dup_event().
		 */
		oe->buffer->event = NULL;
		oe->buffer_idx = 2;
		new = oe->buffer + 1;
	} else {
		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
		/* No entry available: drop the duplicate and signal failure. */
		free_dup_event(oe, new_event);
		return NULL;
	}

	new->event = new_event;
	return new;
}
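
/*
 * Allocation scheme recap (illustrative): entries are recycled from
 * oe->cache first, then carved out of the current (roughly 64KB)
 * buffer, and a new buffer is only malloc'ed while cur_alloc_size
 * stays below max_alloc_size. Layout of a fresh buffer:
 *
 *	oe->buffer[0]	abused as the link node on oe->to_free
 *	oe->buffer[1]	the entry returned from this call
 *	oe->buffer[2..]	handed out by subsequent calls via buffer_idx
 */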

static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
			  union perf_event *event)
{
	struct ordered_event *new;

	new = alloc_event(oe, event);
	if (new) {
		new->timestamp = timestamp;
		queue_event(oe, new);
	}

	return new;
}

void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
	list_move(&event->list, &oe->cache);
	oe->nr_events--;
	free_dup_event(oe, event->event);
}

int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
			  struct perf_sample *sample, u64 file_offset)
{
	u64 timestamp = sample->time;
	struct ordered_event *oevent;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp,      "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		oe->nr_unordered_events++;
	}

	oevent = ordered_events__new_event(oe, timestamp, event);
	if (!oevent) {
		/* Allocation limit hit: flush the older half and retry. */
		ordered_events__flush(oe, OE_FLUSH__HALF);
		oevent = ordered_events__new_event(oe, timestamp, event);
	}

	if (!oevent)
		return -ENOMEM;

	oevent->file_offset = file_offset;
	return 0;
}
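
/*
 * Typical caller flow (a sketch; the deliver callback and round
 * boundary detection, e.g. PERF_RECORD_FINISHED_ROUND handling in
 * perf session, live in the caller):
 *
 *	ordered_events__queue(oe, event, &sample, file_offset);
 *	...
 *	ordered_events__flush(oe, OE_FLUSH__ROUND);	// round boundary
 *	...
 *	ordered_events__flush(oe, OE_FLUSH__FINAL);	// end of input
 */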

static int __ordered_events__flush(struct ordered_events *oe)
{
	struct list_head *head = &oe->events;
	struct ordered_event *tmp, *iter;
	u64 limit = oe->next_flush;
	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;
		ret = oe->deliver(oe, iter);
		if (ret)
			return ret;

		ordered_events__delete(oe, iter);
		oe->last_flush = iter->timestamp;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	/*
	 * If the last queued event was flushed as well, repoint oe->last
	 * at the new list tail (or clear it once the queue drained).
	 */
	if (list_empty(head))
		oe->last = NULL;
	else if (last_ts <= limit)
		oe->last = list_entry(head->prev, struct ordered_event, list);

	return 0;
}
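
/*
 * Example (illustrative values): with queued timestamps [5, 10, 15, 20]
 * and limit = 12, the events at 5 and 10 are delivered in order and
 * recycled onto the cache list, last_flush becomes 10, and 15 and 20
 * stay queued for a later flush.
 */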

int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
	static const char * const str[] = {
		"NONE",
		"FINAL",
		"ROUND",
		"HALF ",
	};
	int err;

	if (oe->nr_events == 0)
		return 0;

	switch (how) {
	case OE_FLUSH__FINAL:
		oe->next_flush = ULLONG_MAX;
		break;

	case OE_FLUSH__HALF:
	{
		struct ordered_event *first, *last;
		struct list_head *head = &oe->events;

		first = list_entry(head->next, struct ordered_event, list);
		last = oe->last;

		/* Warn if we are called before any event got allocated. */
		if (WARN_ONCE(!last || list_empty(head), "empty queue"))
			return 0;

		/* Flush up to the midpoint of the queued time range. */
		oe->next_flush  = first->timestamp;
		oe->next_flush += (last->timestamp - first->timestamp) / 2;
		break;
	}

	case OE_FLUSH__ROUND:
	case OE_FLUSH__NONE:
	default:
		break;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->max_timestamp, "max_timestamp\n");

	err = __ordered_events__flush(oe);

	if (!err) {
		if (how == OE_FLUSH__ROUND)
			oe->next_flush = oe->max_timestamp;

		oe->last_flush_type = how;
	}

	pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
		   str[how], oe->nr_events);
	pr_oe_time(oe->last_flush, "last_flush\n");

	return err;
}
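
/*
 * Flush strategy summary (for reference):
 *
 *	FINAL	flush everything (next_flush = ULLONG_MAX)
 *	ROUND	flush up to the max_timestamp recorded at the previous
 *		round boundary; the current max_timestamp becomes the
 *		limit for the next round
 *	HALF	flush roughly the older half of the queued time range,
 *		used when the allocation limit is hit in
 *		ordered_events__queue()
 *	NONE	reuse whatever next_flush is already set to
 */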

void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
	INIT_LIST_HEAD(&oe->events);
	INIT_LIST_HEAD(&oe->cache);
	INIT_LIST_HEAD(&oe->to_free);
	oe->max_alloc_size = (u64) -1;
	oe->cur_alloc_size = 0;
	oe->deliver	   = deliver;
}
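
/*
 * A minimal deliver callback sketch (hypothetical names; real users
 * such as perf session decode the sample and route it to the tool).
 * The signature follows the oe->deliver(oe, iter) call above:
 *
 *	static int deliver_event(struct ordered_events *oe,
 *				 struct ordered_event *event)
 *	{
 *		// event->event is handed out in timestamp order
 *		return 0;	// a non-zero return aborts the flush
 *	}
 *
 *	ordered_events__init(&oe, deliver_event);
 */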

void ordered_events__free(struct ordered_events *oe)
{
	while (!list_empty(&oe->to_free)) {
		struct ordered_event *event;

		event = list_entry(oe->to_free.next, struct ordered_event, list);
		list_del(&event->list);
		free_dup_event(oe, event->event);
		free(event);
	}
}