1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM writeback
3 
4 #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_WRITEBACK_H
6 
7 #include <linux/tracepoint.h>
8 #include <linux/backing-dev.h>
9 #include <linux/writeback.h>
10 
/*
 * Decode an inode->i_state bitmask into a "|"-separated list of flag
 * names (e.g. "I_DIRTY_SYNC|I_SYNC") for TP_printk() output, using the
 * tracing __print_flags() helper.
 */
#define show_inode_state(state)					\
	__print_flags(state, "|",				\
		{I_DIRTY_SYNC,		"I_DIRTY_SYNC"},	\
		{I_DIRTY_DATASYNC,	"I_DIRTY_DATASYNC"},	\
		{I_DIRTY_PAGES,		"I_DIRTY_PAGES"},	\
		{I_NEW,			"I_NEW"},		\
		{I_WILL_FREE,		"I_WILL_FREE"},		\
		{I_FREEING,		"I_FREEING"},		\
		{I_CLEAR,		"I_CLEAR"},		\
		{I_SYNC,		"I_SYNC"},		\
		{I_DIRTY_TIME,		"I_DIRTY_TIME"},	\
		{I_DIRTY_TIME_EXPIRED,	"I_DIRTY_TIME_EXPIRED"}, \
		{I_REFERENCED,		"I_REFERENCED"}		\
	)
25 
/* enums need to be exported to user space */
#undef EM
#undef EMe
#define EM(a,b) 	TRACE_DEFINE_ENUM(a);
#define EMe(a,b)	TRACE_DEFINE_ENUM(a);

/*
 * Master list of writeback work reasons.  It is expanded twice: first
 * (immediately below) with EM()/EMe() defined as TRACE_DEFINE_ENUM() so
 * the enum values are exported to user space, and again further down as
 * { value, "name" } pairs for __print_symbolic() in TP_printk().
 */
#define WB_WORK_REASON							\
	EM( WB_REASON_BACKGROUND,		"background")		\
	EM( WB_REASON_TRY_TO_FREE_PAGES,	"try_to_free_pages")	\
	EM( WB_REASON_SYNC,			"sync")			\
	EM( WB_REASON_PERIODIC,			"periodic")		\
	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")

/* First expansion: emit TRACE_DEFINE_ENUM() for every reason. */
WB_WORK_REASON
43 
44 /*
45  * Now redefine the EM() and EMe() macros to map the enums to the strings
46  * that will be printed in the output.
47  */
#undef EM
#undef EMe
#define EM(a,b)		{ a, b },
#define EMe(a,b)	{ a, b }

/* Opaque here; the full definition lives in fs/fs-writeback.c. */
struct wb_writeback_work;
54 
/*
 * Fired when a page of a file-backed mapping is dirtied.  Records the
 * backing device name, the owning inode number and the page index.
 */
TRACE_EVENT(writeback_dirty_page,

	TP_PROTO(struct page *page, struct address_space *mapping),

	TP_ARGS(page, mapping),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(pgoff_t, index)
	),

	TP_fast_assign(
		/* mapping may be NULL (e.g. no host inode); report "(unknown)"
		 * and ino=0 in that case.
		 * NOTE(review): strncpy() leaves name unterminated when the
		 * device name is >= 32 bytes (other events here use strlcpy) —
		 * confirm trace readers tolerate a full, unterminated array. */
		strncpy(__entry->name,
			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
		__entry->ino = mapping ? mapping->host->i_ino : 0;
		__entry->index = page->index;
	),

	TP_printk("bdi %s: ino=%lu index=%lu",
		__entry->name,
		__entry->ino,
		__entry->index
	)
);
80 
/*
 * Common shape for inode-dirtying events: bdi name, inode number, the
 * inode's current i_state bits and the dirtying flags passed by the
 * caller (also I_DIRTY_* bits, hence decoded with show_inode_state()).
 */
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		struct backing_dev_info *bdi = inode_to_bdi(inode);

		/* may be called for files on pseudo FSes w/ unregistered bdi */
		/* NOTE(review): strncpy() leaves name unterminated for device
		 * names >= 32 bytes — confirm readers tolerate it. */
		strncpy(__entry->name,
			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->flags		= flags;
	),

	TP_printk("bdi %s: ino=%lu state=%s flags=%s",
		__entry->name,
		__entry->ino,
		show_inode_state(__entry->state),
		show_inode_state(__entry->flags)
	)
);
112 
/* Entry into __mark_inode_dirty() — presumably; confirm against caller. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

/* Fired before the inode is actually marked dirty. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);

/* Fired once the inode has been marked dirty. */
DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,

	TP_PROTO(struct inode *inode, int flags),

	TP_ARGS(inode, flags)
);
133 
/*
 * Common shape for write_inode events: bdi name, inode number and the
 * writeback_control sync mode (WB_SYNC_NONE/WB_SYNC_ALL).
 */
DECLARE_EVENT_CLASS(writeback_write_inode_template,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc),

	TP_STRUCT__entry (
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(int, sync_mode)
	),

	TP_fast_assign(
		/* NOTE(review): unlike writeback_dirty_inode_template, no NULL
		 * check on the bdi's dev — assumes a registered bdi; also
		 * strncpy() may leave name unterminated.  TODO confirm. */
		strncpy(__entry->name,
			dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->sync_mode	= wbc->sync_mode;
	),

	TP_printk("bdi %s: ino=%lu sync_mode=%d",
		__entry->name,
		__entry->ino,
		__entry->sync_mode
	)
);
159 
/* Fired before ->write_inode() is invoked. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode_start,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);

/* Fired after ->write_inode() completes. */
DEFINE_EVENT(writeback_write_inode_template, writeback_write_inode,

	TP_PROTO(struct inode *inode, struct writeback_control *wbc),

	TP_ARGS(inode, wbc)
);
173 
/*
 * Common shape for events tracking a wb_writeback_work item through its
 * lifecycle (queue/exec/start/written/wait).  Captures the work's page
 * budget, target superblock device, sync mode, mode flags and reason.
 */
DECLARE_EVENT_CLASS(writeback_work_class,
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
	TP_ARGS(bdi, work),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_pages)
		__field(dev_t, sb_dev)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, range_cyclic)
		__field(int, for_background)
		__field(int, reason)
	),
	TP_fast_assign(
		/* bdi->dev may be NULL for an unregistered bdi.
		 * NOTE(review): strncpy() may leave name unterminated. */
		strncpy(__entry->name,
			bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
		__entry->nr_pages = work->nr_pages;
		/* sb_dev is 0 when the work is not pinned to a superblock */
		__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
		__entry->sync_mode = work->sync_mode;
		__entry->for_kupdate = work->for_kupdate;
		__entry->range_cyclic = work->range_cyclic;
		__entry->for_background	= work->for_background;
		__entry->reason = work->reason;
	),
	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
		  "kupdate=%d range_cyclic=%d background=%d reason=%s",
		  __entry->name,
		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
		  __entry->nr_pages,
		  __entry->sync_mode,
		  __entry->for_kupdate,
		  __entry->range_cyclic,
		  __entry->for_background,
		  __print_symbolic(__entry->reason, WB_WORK_REASON)
	)
);
/* Instantiate one writeback_work_class event per lifecycle stage. */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
	TP_ARGS(bdi, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
DEFINE_WRITEBACK_WORK_EVENT(writeback_start);
DEFINE_WRITEBACK_WORK_EVENT(writeback_written);
DEFINE_WRITEBACK_WORK_EVENT(writeback_wait);
219 
/* Reports the number of pages written by a writeback pass. */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long,		pages)
	),
	TP_fast_assign(
		__entry->pages		= pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
231 
/*
 * Minimal event class: records only the bdi device name.  Used for
 * bdi-level notifications with no extra payload.
 */
DECLARE_EVENT_CLASS(writeback_class,
	TP_PROTO(struct backing_dev_info *bdi),
	TP_ARGS(bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
	),
	TP_fast_assign(
		/* NOTE(review): no NULL check on bdi->dev here (unlike
		 * writeback_work_class) and strncpy() may leave name
		 * unterminated — TODO confirm callers guarantee a dev. */
		strncpy(__entry->name, dev_name(bdi->dev), 32);
	),
	TP_printk("bdi %s",
		  __entry->name
	)
);
/* Instantiate a payload-free bdi event. */
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct backing_dev_info *bdi), \
	TP_ARGS(bdi))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_background);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
253 
/*
 * Snapshot of a writeback_control at a point of interest: remaining
 * page budget, skipped pages, sync mode, mode flags and byte range.
 */
DECLARE_EVENT_CLASS(wbc_class,
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
	TP_ARGS(wbc, bdi),
	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(long, nr_to_write)
		__field(long, pages_skipped)
		__field(int, sync_mode)
		__field(int, for_kupdate)
		__field(int, for_background)
		__field(int, for_reclaim)
		__field(int, range_cyclic)
		__field(long, range_start)
		__field(long, range_end)
	),

	TP_fast_assign(
		/* NOTE(review): unchecked bdi->dev; strncpy() may leave name
		 * unterminated. */
		strncpy(__entry->name, dev_name(bdi->dev), 32);
		__entry->nr_to_write	= wbc->nr_to_write;
		__entry->pages_skipped	= wbc->pages_skipped;
		__entry->sync_mode	= wbc->sync_mode;
		__entry->for_kupdate	= wbc->for_kupdate;
		__entry->for_background	= wbc->for_background;
		__entry->for_reclaim	= wbc->for_reclaim;
		__entry->range_cyclic	= wbc->range_cyclic;
		/* range_start/range_end are narrowed to long for the record;
		 * values beyond LONG_MAX would be truncated here. */
		__entry->range_start	= (long)wbc->range_start;
		__entry->range_end	= (long)wbc->range_end;
	),

	TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
		"bgrd=%d reclm=%d cyclic=%d "
		"start=0x%lx end=0x%lx",
		__entry->name,
		__entry->nr_to_write,
		__entry->pages_skipped,
		__entry->sync_mode,
		__entry->for_kupdate,
		__entry->for_background,
		__entry->for_reclaim,
		__entry->range_cyclic,
		__entry->range_start,
		__entry->range_end)
)

/* Instantiate a wbc_class event. */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writepage);
303 
/*
 * Fired when dirty inodes are moved onto the IO list.  Records the
 * expiry cutoff (older_than_this), its age relative to now, and how
 * many inodes were moved.
 */
TRACE_EVENT(writeback_queue_io,
	TP_PROTO(struct bdi_writeback *wb,
		 struct wb_writeback_work *work,
		 int moved),
	TP_ARGS(wb, work, moved),
	TP_STRUCT__entry(
		__array(char,		name, 32)
		__field(unsigned long,	older)
		__field(long,		age)
		__field(int,		moved)
		__field(int,		reason)
	),
	TP_fast_assign(
		/* older_than_this may be NULL: older=0, age=-1 then. */
		unsigned long *older_than_this = work->older_than_this;
		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
		__entry->older	= older_than_this ?  *older_than_this : 0;
		__entry->age	= older_than_this ?
				  (jiffies - *older_than_this) * 1000 / HZ : -1;
		__entry->moved	= moved;
		__entry->reason	= work->reason;
	),
	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
		__entry->name,
		__entry->older,	/* older_than_this in jiffies */
		__entry->age,	/* older_than_this in relative milliseconds */
		__entry->moved,
		__print_symbolic(__entry->reason, WB_WORK_REASON)
	)
);
333 
/*
 * Snapshot of global dirty-page accounting.  Only the two thresholds
 * are passed in; the page counters and dirty_limit are sampled from
 * global state at trace time.
 */
TRACE_EVENT(global_dirty_state,

	TP_PROTO(unsigned long background_thresh,
		 unsigned long dirty_thresh
	),

	TP_ARGS(background_thresh,
		dirty_thresh
	),

	TP_STRUCT__entry(
		__field(unsigned long,	nr_dirty)
		__field(unsigned long,	nr_writeback)
		__field(unsigned long,	nr_unstable)
		__field(unsigned long,	background_thresh)
		__field(unsigned long,	dirty_thresh)
		__field(unsigned long,	dirty_limit)
		__field(unsigned long,	nr_dirtied)
		__field(unsigned long,	nr_written)
	),

	TP_fast_assign(
		__entry->nr_dirty	= global_page_state(NR_FILE_DIRTY);
		__entry->nr_writeback	= global_page_state(NR_WRITEBACK);
		__entry->nr_unstable	= global_page_state(NR_UNSTABLE_NFS);
		__entry->nr_dirtied	= global_page_state(NR_DIRTIED);
		__entry->nr_written	= global_page_state(NR_WRITTEN);
		__entry->background_thresh = background_thresh;
		__entry->dirty_thresh	= dirty_thresh;
		__entry->dirty_limit = global_dirty_limit;
	),

	TP_printk("dirty=%lu writeback=%lu unstable=%lu "
		  "bg_thresh=%lu thresh=%lu limit=%lu "
		  "dirtied=%lu written=%lu",
		  __entry->nr_dirty,
		  __entry->nr_writeback,
		  __entry->nr_unstable,
		  __entry->background_thresh,
		  __entry->dirty_thresh,
		  __entry->dirty_limit,
		  __entry->nr_dirtied,
		  __entry->nr_written
	)
);
379 
/* Convert a page count to KiB (x << (PAGE_SHIFT - 10)); used below to
 * report bandwidths/ratelimits in KB-per-second units. */
#define KBps(x)			((x) << (PAGE_SHIFT - 10))
381 
/*
 * Tracks dirty-ratelimit recomputation for a bdi.  All rate fields are
 * converted from pages to KB/s via KBps() at assign time.
 */
TRACE_EVENT(bdi_dirty_ratelimit,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long dirty_rate,
		 unsigned long task_ratelimit),

	TP_ARGS(bdi, dirty_rate, task_ratelimit),

	TP_STRUCT__entry(
		__array(char,		bdi, 32)
		__field(unsigned long,	write_bw)
		__field(unsigned long,	avg_write_bw)
		__field(unsigned long,	dirty_rate)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned long,	balanced_dirty_ratelimit)
	),

	TP_fast_assign(
		/* strlcpy() guarantees NUL termination (cf. strncpy above). */
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
		__entry->write_bw	= KBps(bdi->write_bandwidth);
		__entry->avg_write_bw	= KBps(bdi->avg_write_bandwidth);
		__entry->dirty_rate	= KBps(dirty_rate);
		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->balanced_dirty_ratelimit =
					  KBps(bdi->balanced_dirty_ratelimit);
	),

	TP_printk("bdi %s: "
		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "balanced_dirty_ratelimit=%lu",
		  __entry->bdi,
		  __entry->write_bw,		/* write bandwidth */
		  __entry->avg_write_bw,	/* avg write bandwidth */
		  __entry->dirty_rate,		/* bdi dirty rate */
		  __entry->dirty_ratelimit,	/* base ratelimit */
		  __entry->task_ratelimit, /* ratelimit with position control */
		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
	)
);
424 
/*
 * Detailed snapshot of one balance_dirty_pages() throttling decision:
 * global/bdi setpoints and dirty counts, rate limits (KB/s), and the
 * pause/period/think timings (all converted to milliseconds).
 */
TRACE_EVENT(balance_dirty_pages,

	TP_PROTO(struct backing_dev_info *bdi,
		 unsigned long thresh,
		 unsigned long bg_thresh,
		 unsigned long dirty,
		 unsigned long bdi_thresh,
		 unsigned long bdi_dirty,
		 unsigned long dirty_ratelimit,
		 unsigned long task_ratelimit,
		 unsigned long dirtied,
		 unsigned long period,
		 long pause,
		 unsigned long start_time),

	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
		dirty_ratelimit, task_ratelimit,
		dirtied, period, pause, start_time),

	TP_STRUCT__entry(
		__array(	 char,	bdi, 32)
		__field(unsigned long,	limit)
		__field(unsigned long,	setpoint)
		__field(unsigned long,	dirty)
		__field(unsigned long,	bdi_setpoint)
		__field(unsigned long,	bdi_dirty)
		__field(unsigned long,	dirty_ratelimit)
		__field(unsigned long,	task_ratelimit)
		__field(unsigned int,	dirtied)
		__field(unsigned int,	dirtied_pause)
		__field(unsigned long,	paused)
		__field(	 long,	pause)
		__field(unsigned long,	period)
		__field(	 long,	think)
	),

	TP_fast_assign(
		/* freerun: midpoint of the two thresholds, below which no
		 * throttling happens; setpoint is midway between freerun and
		 * the global limit. */
		unsigned long freerun = (thresh + bg_thresh) / 2;
		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);

		__entry->limit		= global_dirty_limit;
		__entry->setpoint	= (global_dirty_limit + freerun) / 2;
		__entry->dirty		= dirty;
		/* scale the global setpoint by this bdi's share of thresh;
		 * +1 avoids division by zero */
		__entry->bdi_setpoint	= __entry->setpoint *
						bdi_thresh / (thresh + 1);
		__entry->bdi_dirty	= bdi_dirty;
		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
		__entry->task_ratelimit	= KBps(task_ratelimit);
		__entry->dirtied	= dirtied;
		__entry->dirtied_pause	= current->nr_dirtied_pause;
		/* time spent "thinking" since the task was last paused;
		 * 0 when it has never been paused */
		__entry->think		= current->dirty_paused_when == 0 ? 0 :
			 (long)(jiffies - current->dirty_paused_when) * 1000/HZ;
		__entry->period		= period * 1000 / HZ;
		__entry->pause		= pause * 1000 / HZ;
		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
	),


	TP_printk("bdi %s: "
		  "limit=%lu setpoint=%lu dirty=%lu "
		  "bdi_setpoint=%lu bdi_dirty=%lu "
		  "dirty_ratelimit=%lu task_ratelimit=%lu "
		  "dirtied=%u dirtied_pause=%u "
		  "paused=%lu pause=%ld period=%lu think=%ld",
		  __entry->bdi,
		  __entry->limit,
		  __entry->setpoint,
		  __entry->dirty,
		  __entry->bdi_setpoint,
		  __entry->bdi_dirty,
		  __entry->dirty_ratelimit,
		  __entry->task_ratelimit,
		  __entry->dirtied,
		  __entry->dirtied_pause,
		  __entry->paused,	/* ms */
		  __entry->pause,	/* ms */
		  __entry->period,	/* ms */
		  __entry->think	/* ms */
	  )
);
505 
/*
 * Fired when writeback_sb_inodes() requeues an inode instead of writing
 * it; records the inode state and how long it has been dirty.
 */
TRACE_EVENT(writeback_sb_inodes_requeue,

	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
	),

	TP_fast_assign(
		/* NOTE(review): unchecked bdi dev; strncpy() may leave name
		 * unterminated. */
		strncpy(__entry->name,
		        dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ
	)
);
534 
/*
 * Common shape for congestion-wait events: the requested timeout and
 * the actual delay, both in microseconds.
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);
555 
/* Unconditional congestion wait. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);

/* Conditional ("wait iff congested") variant. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
569 
/*
 * Common shape for single-inode writeback events: inode identity/state,
 * dirty age, mapping writeback cursor, the page budget handed in, and
 * how many pages were consumed (budget minus what wbc has left).
 */
DECLARE_EVENT_CLASS(writeback_single_inode_template,

	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write
	),

	TP_ARGS(inode, wbc, nr_to_write),

	TP_STRUCT__entry(
		__array(char, name, 32)
		__field(unsigned long, ino)
		__field(unsigned long, state)
		__field(unsigned long, dirtied_when)
		__field(unsigned long, writeback_index)
		__field(long, nr_to_write)
		__field(unsigned long, wrote)
	),

	TP_fast_assign(
		/* NOTE(review): unchecked bdi dev; strncpy() may leave name
		 * unterminated. */
		strncpy(__entry->name,
			dev_name(inode_to_bdi(inode)->dev), 32);
		__entry->ino		= inode->i_ino;
		__entry->state		= inode->i_state;
		__entry->dirtied_when	= inode->dirtied_when;
		__entry->writeback_index = inode->i_mapping->writeback_index;
		__entry->nr_to_write	= nr_to_write;
		/* pages consumed = original budget - remaining budget */
		__entry->wrote		= nr_to_write - wbc->nr_to_write;
	),

	TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
		  "index=%lu to_write=%ld wrote=%lu",
		  __entry->name,
		  __entry->ino,
		  show_inode_state(__entry->state),
		  __entry->dirtied_when,
		  (jiffies - __entry->dirtied_when) / HZ,
		  __entry->writeback_index,
		  __entry->nr_to_write,
		  __entry->wrote
	)
);
612 
/* Fired before writing a single inode. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_start,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);

/* Fired after writing a single inode. */
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
	TP_PROTO(struct inode *inode,
		 struct writeback_control *wbc,
		 unsigned long nr_to_write),
	TP_ARGS(inode, wbc, nr_to_write)
);
626 
/*
 * Common shape for lazytime events: identifies the inode by superblock
 * device + inode number (not bdi name, unlike the classes above) and
 * records its state, mode and dirty timestamp.
 */
DECLARE_EVENT_CLASS(writeback_lazytime_template,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode),

	TP_STRUCT__entry(
		__field(	dev_t,	dev			)
		__field(unsigned long,	ino			)
		__field(unsigned long,	state			)
		__field(	__u16, mode			)
		__field(unsigned long, dirtied_when		)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->state	= inode->i_state;
		__entry->mode	= inode->i_mode;
		__entry->dirtied_when = inode->dirtied_when;
	),

	TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->dirtied_when,
		  show_inode_state(__entry->state), __entry->mode)
);
653 
/* Lazytime update flushed to disk. */
DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/* Lazytime handling at final iput. */
DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput,
	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);

/* Inode queued for writeback due to a dirty-time expiry. */
DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue,

	TP_PROTO(struct inode *inode),

	TP_ARGS(inode)
);
672 
673 #endif /* _TRACE_WRITEBACK_H */
674 
675 /* This part must be outside protection */
676 #include <trace/define_trace.h>
677