1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * libcfs/libcfs/tracefile.c
37  *
38  * Author: Zach Brown <zab@clusterfs.com>
39  * Author: Phil Schwan <phil@clusterfs.com>
40  */
41 
42 
43 #define DEBUG_SUBSYSTEM S_LNET
44 #define LUSTRE_TRACEFILE_PRIVATE
45 #include "tracefile.h"
46 
47 #include "../../include/linux/libcfs/libcfs.h"
48 
49 /* XXX move things up to the top, comment */
50 union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS] __cacheline_aligned;
51 
52 char cfs_tracefile[TRACEFILE_NAME_SIZE];
53 long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
54 static struct tracefiled_ctl trace_tctl;
55 struct mutex cfs_trace_thread_mutex;
56 static int thread_running;
57 
58 static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
59 
60 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
61 					 struct cfs_trace_cpu_data *tcd);
62 
63 static inline struct cfs_trace_page *
64 cfs_tage_from_list(struct list_head *list)
65 {
66 	return list_entry(list, struct cfs_trace_page, linkage);
67 }
68 
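/*
 * Allocate a trace page descriptor and its backing page.  Refuses to
 * allocate under memory pressure (outside interrupt context) and returns
 * NULL on failure.
 */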
69 static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
70 {
71 	struct page	    *page;
72 	struct cfs_trace_page *tage;
73 
74 	/* My caller is trying to free memory */
75 	if (!in_interrupt() && memory_pressure_get())
76 		return NULL;
77 
78 	/*
79 	 * Don't spam console with allocation failures: they will be reported
80 	 * by upper layer anyway.
81 	 */
82 	gfp |= __GFP_NOWARN;
83 	page = alloc_page(gfp);
84 	if (page == NULL)
85 		return NULL;
86 
87 	tage = kmalloc(sizeof(*tage), gfp);
88 	if (tage == NULL) {
89 		__free_page(page);
90 		return NULL;
91 	}
92 
93 	tage->page = page;
94 	atomic_inc(&cfs_tage_allocated);
95 	return tage;
96 }
97 
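/* Free a trace page descriptor and its backing page. */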
98 static void cfs_tage_free(struct cfs_trace_page *tage)
99 {
100 	__LASSERT(tage != NULL);
101 	__LASSERT(tage->page != NULL);
102 
103 	__free_page(tage->page);
104 	kfree(tage);
105 	atomic_dec(&cfs_tage_allocated);
106 }
107 
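/* Move a trace page to the tail of 'queue'. */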
108 static void cfs_tage_to_tail(struct cfs_trace_page *tage,
109 			     struct list_head *queue)
110 {
111 	__LASSERT(tage != NULL);
112 	__LASSERT(queue != NULL);
113 
114 	list_move_tail(&tage->linkage, queue);
115 }
116 
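/*
 * Top up 'stock' with freshly allocated pages until the per-CPU stock
 * would hold TCD_STOCK_PAGES pages.  Returns the number of pages added.
 */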
117 int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
118 			   struct list_head *stock)
119 {
120 	int i;
121 
122 	/*
123 	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
124 	 * from here: this will lead to infinite recursion.
125 	 */
126 
127 	for (i = 0; i + tcd->tcd_cur_stock_pages < TCD_STOCK_PAGES; ++i) {
128 		struct cfs_trace_page *tage;
129 
130 		tage = cfs_tage_alloc(gfp);
131 		if (tage == NULL)
132 			break;
133 		list_add_tail(&tage->linkage, stock);
134 	}
135 	return i;
136 }
137 
138 /* return a page that has 'len' bytes left at the end */
139 static struct cfs_trace_page *
140 cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
141 {
142 	struct cfs_trace_page *tage;
143 
144 	if (tcd->tcd_cur_pages > 0) {
145 		__LASSERT(!list_empty(&tcd->tcd_pages));
146 		tage = cfs_tage_from_list(tcd->tcd_pages.prev);
147 		if (tage->used + len <= PAGE_CACHE_SIZE)
148 			return tage;
149 	}
150 
151 	if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
152 		if (tcd->tcd_cur_stock_pages > 0) {
153 			tage = cfs_tage_from_list(tcd->tcd_stock_pages.prev);
154 			--tcd->tcd_cur_stock_pages;
155 			list_del_init(&tage->linkage);
156 		} else {
157 			tage = cfs_tage_alloc(GFP_ATOMIC);
158 			if (unlikely(tage == NULL)) {
159 				if ((!memory_pressure_get() ||
160 				     in_interrupt()) && printk_ratelimit())
161 					printk(KERN_WARNING
162 					       "cannot allocate a tage (%ld)\n",
163 					       tcd->tcd_cur_pages);
164 				return NULL;
165 			}
166 		}
167 
168 		tage->used = 0;
169 		tage->cpu = smp_processor_id();
170 		tage->type = tcd->tcd_type;
171 		list_add_tail(&tage->linkage, &tcd->tcd_pages);
172 		tcd->tcd_cur_pages++;
173 
174 		if (tcd->tcd_cur_pages > 8 && thread_running) {
175 			struct tracefiled_ctl *tctl = &trace_tctl;
176 			/*
177 			 * wake up tracefiled to process some pages.
178 			 */
179 			wake_up(&tctl->tctl_waitq);
180 		}
181 		return tage;
182 	}
183 	return NULL;
184 }
185 
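/*
 * The per-CPU buffer is full: move the oldest ~10% of its pages onto the
 * daemon list so new messages can keep being recorded.
 */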
186 static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
187 {
188 	int pgcount = tcd->tcd_cur_pages / 10;
189 	struct page_collection pc;
190 	struct cfs_trace_page *tage;
191 	struct cfs_trace_page *tmp;
192 
193 	/*
194 	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
195 	 * from here: this will lead to infinite recursion.
196 	 */
197 
198 	if (printk_ratelimit())
199 		printk(KERN_WARNING "debug daemon buffer overflowed; discarding 10%% of pages (%d of %ld)\n",
200 		       pgcount + 1, tcd->tcd_cur_pages);
201 
202 	INIT_LIST_HEAD(&pc.pc_pages);
203 	spin_lock_init(&pc.pc_lock);
204 
205 	list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
206 		if (pgcount-- == 0)
207 			break;
208 
209 		list_move_tail(&tage->linkage, &pc.pc_pages);
210 		tcd->tcd_cur_pages--;
211 	}
212 	put_pages_on_tcd_daemon_list(&pc, tcd);
213 }
214 
215 /* return a page that has 'len' bytes left at the end */
216 static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
217 						 unsigned long len)
218 {
219 	struct cfs_trace_page *tage;
220 
221 	/*
222 	 * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
223 	 * from here: this will lead to infinite recursion.
224 	 */
225 
226 	if (len > PAGE_CACHE_SIZE) {
227 		pr_err("cowardly refusing to write %lu bytes in a page\n", len);
228 		return NULL;
229 	}
230 
231 	tage = cfs_trace_get_tage_try(tcd, len);
232 	if (tage != NULL)
233 		return tage;
234 	if (thread_running)
235 		cfs_tcd_shrink(tcd);
236 	if (tcd->tcd_cur_pages > 0) {
237 		tage = cfs_tage_from_list(tcd->tcd_pages.next);
238 		tage->used = 0;
239 		cfs_tage_to_tail(tage, &tcd->tcd_pages);
240 	}
241 	return tage;
242 }
243 
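/* Simple varargs wrapper around libcfs_debug_vmsg2(). */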
244 int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
245 		     const char *format, ...)
246 {
247 	va_list args;
248 	int     rc;
249 
250 	va_start(args, format);
251 	rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
252 	va_end(args);
253 
254 	return rc;
255 }
256 EXPORT_SYMBOL(libcfs_debug_msg);
257 
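/*
 * Core debug message path: format the message into the per-CPU trace
 * buffer when one is available and, subject to the console mask and the
 * rate limiter, echo it to the console.
 */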
258 int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
259 		       const char *format1, va_list args,
260 		       const char *format2, ...)
261 {
262 	struct cfs_trace_cpu_data *tcd = NULL;
263 	struct ptldebug_header     header = {0};
264 	struct cfs_trace_page     *tage;
265 	/* string_buf is used only if tcd != NULL, and is always set then */
266 	char		      *string_buf = NULL;
267 	char		      *debug_buf;
268 	int			known_size;
269 	int			needed = 85; /* average message length */
270 	int			max_nob;
271 	va_list		    ap;
272 	int			depth;
273 	int			i;
274 	int			remain;
275 	int			mask = msgdata->msg_mask;
276 	const char		*file = kbasename(msgdata->msg_file);
277 	struct cfs_debug_limit_state   *cdls = msgdata->msg_cdls;
278 
279 	tcd = cfs_trace_get_tcd();
280 
281 	/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
282 	 * pins us to a particular CPU.  This avoids an smp_processor_id()
283 	 * warning on Linux when debugging is enabled. */
284 	cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
285 
286 	if (tcd == NULL)		/* arch may not log in IRQ context */
287 		goto console;
288 
289 	if (tcd->tcd_cur_pages == 0)
290 		header.ph_flags |= PH_FLAG_FIRST_RECORD;
291 
292 	if (tcd->tcd_shutting_down) {
293 		cfs_trace_put_tcd(tcd);
294 		tcd = NULL;
295 		goto console;
296 	}
297 
298 	depth = __current_nesting_level();
299 	known_size = strlen(file) + 1 + depth;
300 	if (msgdata->msg_fn)
301 		known_size += strlen(msgdata->msg_fn) + 1;
302 
303 	if (libcfs_debug_binary)
304 		known_size += sizeof(header);
305 
306 	/*
307 	 * Two passes: vsnprintf() returns the size required for the output
308 	 * _without_ the terminating NUL, so if 'needed' turns out to be too
309 	 * small for this format we retry once with a fresh page.
310 	 */
311 	for (i = 0; i < 2; i++) {
312 		tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
313 		if (tage == NULL) {
314 			if (needed + known_size > PAGE_CACHE_SIZE)
315 				mask |= D_ERROR;
316 
317 			cfs_trace_put_tcd(tcd);
318 			tcd = NULL;
319 			goto console;
320 		}
321 
322 		string_buf = (char *)page_address(tage->page) +
323 					tage->used + known_size;
324 
325 		max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
326 		if (max_nob <= 0) {
327 			printk(KERN_EMERG "negative max_nob: %d\n",
328 			       max_nob);
329 			mask |= D_ERROR;
330 			cfs_trace_put_tcd(tcd);
331 			tcd = NULL;
332 			goto console;
333 		}
334 
335 		needed = 0;
336 		if (format1) {
337 			va_copy(ap, args);
338 			needed = vsnprintf(string_buf, max_nob, format1, ap);
339 			va_end(ap);
340 		}
341 
342 		if (format2) {
343 			remain = max_nob - needed;
344 			if (remain < 0)
345 				remain = 0;
346 
347 			va_start(ap, format2);
348 			needed += vsnprintf(string_buf + needed, remain,
349 					    format2, ap);
350 			va_end(ap);
351 		}
352 
353 		if (needed < max_nob) /* well. printing ok.. */
354 			break;
355 	}
356 
357 	if (*(string_buf+needed-1) != '\n')
358 		printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
359 		       file, msgdata->msg_line, msgdata->msg_fn);
360 
361 	header.ph_len = known_size + needed;
362 	debug_buf = (char *)page_address(tage->page) + tage->used;
363 
364 	if (libcfs_debug_binary) {
365 		memcpy(debug_buf, &header, sizeof(header));
366 		tage->used += sizeof(header);
367 		debug_buf += sizeof(header);
368 	}
369 
370 	/* indent message according to the nesting level */
371 	while (depth-- > 0) {
372 		*(debug_buf++) = '.';
373 		++tage->used;
374 	}
375 
376 	strcpy(debug_buf, file);
377 	tage->used += strlen(file) + 1;
378 	debug_buf += strlen(file) + 1;
379 
380 	if (msgdata->msg_fn) {
381 		strcpy(debug_buf, msgdata->msg_fn);
382 		tage->used += strlen(msgdata->msg_fn) + 1;
383 		debug_buf += strlen(msgdata->msg_fn) + 1;
384 	}
385 
386 	__LASSERT(debug_buf == string_buf);
387 
388 	tage->used += needed;
389 	__LASSERT(tage->used <= PAGE_CACHE_SIZE);
390 
391 console:
392 	if ((mask & libcfs_printk) == 0) {
393 		/* no console output requested */
394 		if (tcd != NULL)
395 			cfs_trace_put_tcd(tcd);
396 		return 1;
397 	}
398 
399 	if (cdls != NULL) {
400 		if (libcfs_console_ratelimit &&
401 		    cdls->cdls_next != 0 &&     /* not first time ever */
402 		    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
403 			/* skipping a console message */
404 			cdls->cdls_count++;
405 			if (tcd != NULL)
406 				cfs_trace_put_tcd(tcd);
407 			return 1;
408 		}
409 
410 		if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
411 						       libcfs_console_max_delay
412 						       + cfs_time_seconds(10))) {
413 			/* last timeout was a long time ago */
414 			cdls->cdls_delay /= libcfs_console_backoff * 4;
415 		} else {
416 			cdls->cdls_delay *= libcfs_console_backoff;
417 		}
418 
419 		if (cdls->cdls_delay < libcfs_console_min_delay)
420 			cdls->cdls_delay = libcfs_console_min_delay;
421 		else if (cdls->cdls_delay > libcfs_console_max_delay)
422 			cdls->cdls_delay = libcfs_console_max_delay;
423 
424 		/* ensure cdls_next is never zero after it's been seen */
425 		cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
426 	}
427 
428 	if (tcd != NULL) {
429 		cfs_print_to_console(&header, mask, string_buf, needed, file,
430 				     msgdata->msg_fn);
431 		cfs_trace_put_tcd(tcd);
432 	} else {
433 		string_buf = cfs_trace_get_console_buffer();
434 
435 		needed = 0;
436 		if (format1 != NULL) {
437 			va_copy(ap, args);
438 			needed = vsnprintf(string_buf,
439 					   CFS_TRACE_CONSOLE_BUFFER_SIZE,
440 					   format1, ap);
441 			va_end(ap);
442 		}
443 		if (format2 != NULL) {
444 			remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
445 			if (remain > 0) {
446 				va_start(ap, format2);
447 				needed += vsnprintf(string_buf+needed, remain,
448 						    format2, ap);
449 				va_end(ap);
450 			}
451 		}
452 		cfs_print_to_console(&header, mask,
453 				     string_buf, needed, file, msgdata->msg_fn);
454 
455 		cfs_trace_put_console_buffer(string_buf);
456 	}
457 
458 	if (cdls != NULL && cdls->cdls_count != 0) {
459 		string_buf = cfs_trace_get_console_buffer();
460 
461 		needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
462 				  "Skipped %d previous similar message%s\n",
463 				  cdls->cdls_count,
464 				  (cdls->cdls_count > 1) ? "s" : "");
465 
466 		cfs_print_to_console(&header, mask,
467 				     string_buf, needed, file, msgdata->msg_fn);
468 
469 		cfs_trace_put_console_buffer(string_buf);
470 		cdls->cdls_count = 0;
471 	}
472 
473 	return 0;
474 }
475 EXPORT_SYMBOL(libcfs_debug_vmsg2);
476 
477 void
478 cfs_trace_assertion_failed(const char *str,
479 			   struct libcfs_debug_msg_data *msgdata)
480 {
481 	struct ptldebug_header hdr;
482 
483 	libcfs_panic_in_progress = 1;
484 	libcfs_catastrophe = 1;
485 	mb();
486 
487 	cfs_set_ptldebug_header(&hdr, msgdata, CDEBUG_STACK());
488 
489 	cfs_print_to_console(&hdr, D_EMERG, str, strlen(str),
490 			     msgdata->msg_file, msgdata->msg_fn);
491 
492 	panic("Lustre debug assertion failure\n");
493 
494 	/* not reached */
495 }
496 
497 static void
498 panic_collect_pages(struct page_collection *pc)
499 {
500 	/* Do the collect_pages job on a single CPU: assumes that all other
501 	 * CPUs have been stopped during a panic.  If this isn't true for some
502 	 * arch, this will have to be implemented separately in each arch.  */
503 	int			i;
504 	int			j;
505 	struct cfs_trace_cpu_data *tcd;
506 
507 	INIT_LIST_HEAD(&pc->pc_pages);
508 
509 	cfs_tcd_for_each(tcd, i, j) {
510 		list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
511 		tcd->tcd_cur_pages = 0;
512 
513 		if (pc->pc_want_daemon_pages) {
514 			list_splice_init(&tcd->tcd_daemon_pages,
515 					     &pc->pc_pages);
516 			tcd->tcd_cur_daemon_pages = 0;
517 		}
518 	}
519 }
520 
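/* Move the trace pages (and optionally the daemon pages) of every CPU
 * onto the collection list, under pc_lock. */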
521 static void collect_pages_on_all_cpus(struct page_collection *pc)
522 {
523 	struct cfs_trace_cpu_data *tcd;
524 	int i, cpu;
525 
526 	spin_lock(&pc->pc_lock);
527 	for_each_possible_cpu(cpu) {
528 		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
529 			list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
530 			tcd->tcd_cur_pages = 0;
531 			if (pc->pc_want_daemon_pages) {
532 				list_splice_init(&tcd->tcd_daemon_pages,
533 						     &pc->pc_pages);
534 				tcd->tcd_cur_daemon_pages = 0;
535 			}
536 		}
537 	}
538 	spin_unlock(&pc->pc_lock);
539 }
540 
541 static void collect_pages(struct page_collection *pc)
542 {
543 	INIT_LIST_HEAD(&pc->pc_pages);
544 
545 	if (libcfs_panic_in_progress)
546 		panic_collect_pages(pc);
547 	else
548 		collect_pages_on_all_cpus(pc);
549 }
550 
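/* Return collected pages to the per-CPU lists they came from, matching
 * on the CPU and buffer type recorded in each page. */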
551 static void put_pages_back_on_all_cpus(struct page_collection *pc)
552 {
553 	struct cfs_trace_cpu_data *tcd;
554 	struct list_head *cur_head;
555 	struct cfs_trace_page *tage;
556 	struct cfs_trace_page *tmp;
557 	int i, cpu;
558 
559 	spin_lock(&pc->pc_lock);
560 	for_each_possible_cpu(cpu) {
561 		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
562 			cur_head = tcd->tcd_pages.next;
563 
564 			list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
565 						 linkage) {
566 
567 				__LASSERT_TAGE_INVARIANT(tage);
568 
569 				if (tage->cpu != cpu || tage->type != i)
570 					continue;
571 
572 				cfs_tage_to_tail(tage, cur_head);
573 				tcd->tcd_cur_pages++;
574 			}
575 		}
576 	}
577 	spin_unlock(&pc->pc_lock);
578 }
579 
580 static void put_pages_back(struct page_collection *pc)
581 {
582 	if (!libcfs_panic_in_progress)
583 		put_pages_back_on_all_cpus(pc);
584 }
585 
586 /* Add pages to a per-cpu debug daemon ringbuffer.  This buffer makes sure that
587  * we have a good amount of data at all times for dumping during an LBUG, even
588  * if we have been steadily writing (and otherwise discarding) pages via the
589  * debug daemon. */
590 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
591 					 struct cfs_trace_cpu_data *tcd)
592 {
593 	struct cfs_trace_page *tage;
594 	struct cfs_trace_page *tmp;
595 
596 	spin_lock(&pc->pc_lock);
597 	list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
598 
599 		__LASSERT_TAGE_INVARIANT(tage);
600 
601 		if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
602 			continue;
603 
604 		cfs_tage_to_tail(tage, &tcd->tcd_daemon_pages);
605 		tcd->tcd_cur_daemon_pages++;
606 
607 		if (tcd->tcd_cur_daemon_pages > tcd->tcd_max_pages) {
608 			struct cfs_trace_page *victim;
609 
610 			__LASSERT(!list_empty(&tcd->tcd_daemon_pages));
611 			victim = cfs_tage_from_list(tcd->tcd_daemon_pages.next);
612 
613 			__LASSERT_TAGE_INVARIANT(victim);
614 
615 			list_del(&victim->linkage);
616 			cfs_tage_free(victim);
617 			tcd->tcd_cur_daemon_pages--;
618 		}
619 	}
620 	spin_unlock(&pc->pc_lock);
621 }
622 
623 static void put_pages_on_daemon_list(struct page_collection *pc)
624 {
625 	struct cfs_trace_cpu_data *tcd;
626 	int i, cpu;
627 
628 	for_each_possible_cpu(cpu) {
629 		cfs_tcd_for_each_type_lock(tcd, i, cpu)
630 			put_pages_on_tcd_daemon_list(pc, tcd);
631 	}
632 }
633 
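/* Print every buffered trace record to the console and free the pages. */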
634 void cfs_trace_debug_print(void)
635 {
636 	struct page_collection pc;
637 	struct cfs_trace_page *tage;
638 	struct cfs_trace_page *tmp;
639 
640 	spin_lock_init(&pc.pc_lock);
641 
642 	pc.pc_want_daemon_pages = 1;
643 	collect_pages(&pc);
644 	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
645 		char *p, *file, *fn;
646 		struct page *page;
647 
648 		__LASSERT_TAGE_INVARIANT(tage);
649 
650 		page = tage->page;
651 		p = page_address(page);
652 		while (p < ((char *)page_address(page) + tage->used)) {
653 			struct ptldebug_header *hdr;
654 			int len;
655 			hdr = (void *)p;
656 			p += sizeof(*hdr);
657 			file = p;
658 			p += strlen(file) + 1;
659 			fn = p;
660 			p += strlen(fn) + 1;
661 			len = hdr->ph_len - (int)(p - (char *)hdr);
662 
663 			cfs_print_to_console(hdr, D_EMERG, p, len, file, fn);
664 
665 			p += len;
666 		}
667 
668 		list_del(&tage->linkage);
669 		cfs_tage_free(tage);
670 	}
671 }
672 
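/* Dump all buffered trace pages to 'filename' (created exclusively),
 * fsync the file and free the pages. */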
673 int cfs_tracefile_dump_all_pages(char *filename)
674 {
675 	struct page_collection	pc;
676 	struct file		*filp;
677 	struct cfs_trace_page	*tage;
678 	struct cfs_trace_page	*tmp;
679 	char			*buf;
680 	int rc;
681 
682 	DECL_MMSPACE;
683 
684 	cfs_tracefile_write_lock();
685 
686 	filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
687 	if (IS_ERR(filp)) {
688 		rc = PTR_ERR(filp);
689 		filp = NULL;
690 		pr_err("LustreError: can't open %s for dump: rc %d\n",
691 			filename, rc);
692 		goto out;
693 	}
694 
695 	spin_lock_init(&pc.pc_lock);
696 	pc.pc_want_daemon_pages = 1;
697 	collect_pages(&pc);
698 	if (list_empty(&pc.pc_pages)) {
699 		rc = 0;
700 		goto close;
701 	}
702 
703 	/* ok, for now, just write the pages.  in the future we'll be building
704 	 * iobufs with the pages and calling generic_direct_IO */
705 	MMSPACE_OPEN;
706 	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
707 
708 		__LASSERT_TAGE_INVARIANT(tage);
709 
710 		buf = kmap(tage->page);
711 		rc = vfs_write(filp, (__force const char __user *)buf,
712 			       tage->used, &filp->f_pos);
713 		kunmap(tage->page);
714 
715 		if (rc != (int)tage->used) {
716 			printk(KERN_WARNING "wanted to write %u but wrote %d\n",
717 			       tage->used, rc);
718 			put_pages_back(&pc);
719 			__LASSERT(list_empty(&pc.pc_pages));
720 			break;
721 		}
722 		list_del(&tage->linkage);
723 		cfs_tage_free(tage);
724 	}
725 	MMSPACE_CLOSE;
726 	rc = vfs_fsync(filp, 1);
727 	if (rc)
728 		pr_err("sync returns %d\n", rc);
729 close:
730 	filp_close(filp, NULL);
731 out:
732 	cfs_tracefile_write_unlock();
733 	return rc;
734 }
735 
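/* Discard all buffered trace pages, including the daemon pages. */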
736 void cfs_trace_flush_pages(void)
737 {
738 	struct page_collection pc;
739 	struct cfs_trace_page *tage;
740 	struct cfs_trace_page *tmp;
741 
742 	spin_lock_init(&pc.pc_lock);
743 
744 	pc.pc_want_daemon_pages = 1;
745 	collect_pages(&pc);
746 	list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
747 
748 		__LASSERT_TAGE_INVARIANT(tage);
749 
750 		list_del(&tage->linkage);
751 		cfs_tage_free(tage);
752 	}
753 }
754 
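/* Copy a string in from userspace, strip trailing whitespace and make
 * sure the result is non-empty and NUL-terminated. */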
755 int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
756 			    const char __user *usr_buffer, int usr_buffer_nob)
757 {
758 	int    nob;
759 
760 	if (usr_buffer_nob > knl_buffer_nob)
761 		return -EOVERFLOW;
762 
763 	if (copy_from_user((void *)knl_buffer,
764 			   usr_buffer, usr_buffer_nob))
765 		return -EFAULT;
766 
767 	nob = strnlen(knl_buffer, usr_buffer_nob);
768 	while (nob-- >= 0)		      /* strip trailing whitespace */
769 		if (!isspace(knl_buffer[nob]))
770 			break;
771 
772 	if (nob < 0)			    /* empty string */
773 		return -EINVAL;
774 
775 	if (nob == knl_buffer_nob)	      /* no space to terminate */
776 		return -EOVERFLOW;
777 
778 	knl_buffer[nob + 1] = 0;		/* terminate */
779 	return 0;
780 }
781 EXPORT_SYMBOL(cfs_trace_copyin_string);
782 
783 int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
784 			     const char *knl_buffer, char *append)
785 {
786 	/* NB if 'append' != NULL, it's a single character to append to the
787 	 * copied out string - usually "\n", for /proc entries and "" (i.e. a
788 	 * terminating zero byte) for sysctl entries */
789 	int   nob = strlen(knl_buffer);
790 
791 	if (nob > usr_buffer_nob)
792 		nob = usr_buffer_nob;
793 
794 	if (copy_to_user(usr_buffer, knl_buffer, nob))
795 		return -EFAULT;
796 
797 	if (append != NULL && nob < usr_buffer_nob) {
798 		if (copy_to_user(usr_buffer + nob, append, 1))
799 			return -EFAULT;
800 
801 		nob++;
802 	}
803 
804 	return nob;
805 }
806 EXPORT_SYMBOL(cfs_trace_copyout_string);
807 
808 int cfs_trace_allocate_string_buffer(char **str, int nob)
809 {
810 	if (nob > 2 * PAGE_CACHE_SIZE)	    /* string must be "sensible" */
811 		return -EINVAL;
812 
813 	*str = kmalloc(nob, GFP_IOFS | __GFP_ZERO);
814 	if (*str == NULL)
815 		return -ENOMEM;
816 
817 	return 0;
818 }
819 
820 void cfs_trace_free_string_buffer(char *str, int nob)
821 {
822 	kfree(str);
823 }
824 
825 int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
826 {
827 	char	 *str;
828 	int	   rc;
829 
830 	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
831 	if (rc != 0)
832 		return rc;
833 
834 	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
835 				     usr_str, usr_str_nob);
836 	if (rc != 0)
837 		goto out;
838 
839 	if (str[0] != '/') {
840 		rc = -EINVAL;
841 		goto out;
842 	}
843 	rc = cfs_tracefile_dump_all_pages(str);
844 out:
845 	cfs_trace_free_string_buffer(str, usr_str_nob + 1);
846 	return rc;
847 }
848 
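/* Handle a debug daemon control string: "stop", "size=<MB>" or an
 * absolute path to the trace file to start writing to. */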
849 int cfs_trace_daemon_command(char *str)
850 {
851 	int       rc = 0;
852 
853 	cfs_tracefile_write_lock();
854 
855 	if (strcmp(str, "stop") == 0) {
856 		cfs_tracefile_write_unlock();
857 		cfs_trace_stop_thread();
858 		cfs_tracefile_write_lock();
859 		memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
860 
861 	} else if (strncmp(str, "size=", 5) == 0) {
862 		cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
863 		if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
864 			cfs_tracefile_size = CFS_TRACEFILE_SIZE;
865 		else
866 			cfs_tracefile_size <<= 20;
867 
868 	} else if (strlen(str) >= sizeof(cfs_tracefile)) {
869 		rc = -ENAMETOOLONG;
870 	} else if (str[0] != '/') {
871 		rc = -EINVAL;
872 	} else {
873 		strcpy(cfs_tracefile, str);
874 
875 		printk(KERN_INFO
876 		       "Lustre: debug daemon will attempt to start writing to %s (%lukB max)\n",
877 		       cfs_tracefile,
878 		       (long)(cfs_tracefile_size >> 10));
879 
880 		cfs_trace_start_thread();
881 	}
882 
883 	cfs_tracefile_write_unlock();
884 	return rc;
885 }
886 
887 int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
888 {
889 	char *str;
890 	int   rc;
891 
892 	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
893 	if (rc != 0)
894 		return rc;
895 
896 	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
897 				 usr_str, usr_str_nob);
898 	if (rc == 0)
899 		rc = cfs_trace_daemon_command(str);
900 
901 	cfs_trace_free_string_buffer(str, usr_str_nob + 1);
902 	return rc;
903 }
904 
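/* Resize the trace buffers: clamp 'mb' to a sane range, divide it across
 * the possible CPUs and apply each CPU's tcd_pages_factor. */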
905 int cfs_trace_set_debug_mb(int mb)
906 {
907 	int i;
908 	int j;
909 	int pages;
910 	int limit = cfs_trace_max_debug_mb();
911 	struct cfs_trace_cpu_data *tcd;
912 
913 	if (mb < num_possible_cpus()) {
914 		printk(KERN_WARNING
915 		       "Lustre: %d MB is too small for debug buffer size, setting it to %d MB.\n",
916 		       mb, num_possible_cpus());
917 		mb = num_possible_cpus();
918 	}
919 
920 	if (mb > limit) {
921 		printk(KERN_WARNING
922 		       "Lustre: %d MB is too large for debug buffer size, setting it to %d MB.\n",
923 		       mb, limit);
924 		mb = limit;
925 	}
926 
927 	mb /= num_possible_cpus();
928 	pages = mb << (20 - PAGE_CACHE_SHIFT);
929 
930 	cfs_tracefile_write_lock();
931 
932 	cfs_tcd_for_each(tcd, i, j)
933 		tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
934 
935 	cfs_tracefile_write_unlock();
936 
937 	return 0;
938 }
939 
940 int cfs_trace_set_debug_mb_usrstr(void __user *usr_str, int usr_str_nob)
941 {
942 	char     str[32];
943 	int      rc;
944 
945 	rc = cfs_trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
946 	if (rc < 0)
947 		return rc;
948 
949 	return cfs_trace_set_debug_mb(simple_strtoul(str, NULL, 0));
950 }
951 
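/* Report the current trace buffer limit, rounded up to whole megabytes. */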
952 int cfs_trace_get_debug_mb(void)
953 {
954 	int i;
955 	int j;
956 	struct cfs_trace_cpu_data *tcd;
957 	int total_pages = 0;
958 
959 	cfs_tracefile_read_lock();
960 
961 	cfs_tcd_for_each(tcd, i, j)
962 		total_pages += tcd->tcd_max_pages;
963 
964 	cfs_tracefile_read_unlock();
965 
966 	return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
967 }
968 
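/*
 * Debug daemon thread: wakes up periodically (or when prodded), collects
 * the trace pages and, if a trace file is configured, appends them to it,
 * wrapping at cfs_tracefile_size.  Pages then go onto the per-CPU daemon
 * lists so data stays available for an LBUG dump.
 */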
969 static int tracefiled(void *arg)
970 {
971 	struct page_collection pc;
972 	struct tracefiled_ctl *tctl = arg;
973 	struct cfs_trace_page *tage;
974 	struct cfs_trace_page *tmp;
975 	struct file *filp;
976 	char *buf;
977 	int last_loop = 0;
978 	int rc;
979 
980 	DECL_MMSPACE;
981 
982 	/* we're started late enough that we pick up init's fs context */
983 	/* this is so broken in uml?  what on earth is going on? */
984 
985 	spin_lock_init(&pc.pc_lock);
986 	complete(&tctl->tctl_start);
987 
988 	while (1) {
989 		wait_queue_t __wait;
990 
991 		pc.pc_want_daemon_pages = 0;
992 		collect_pages(&pc);
993 		if (list_empty(&pc.pc_pages))
994 			goto end_loop;
995 
996 		filp = NULL;
997 		cfs_tracefile_read_lock();
998 		if (cfs_tracefile[0] != 0) {
999 			filp = filp_open(cfs_tracefile,
1000 					 O_CREAT | O_RDWR | O_LARGEFILE,
1001 					 0600);
1002 			if (IS_ERR(filp)) {
1003 				rc = PTR_ERR(filp);
1004 				filp = NULL;
1005 				printk(KERN_WARNING "couldn't open %s: %d\n",
1006 				       cfs_tracefile, rc);
1007 			}
1008 		}
1009 		cfs_tracefile_read_unlock();
1010 		if (filp == NULL) {
1011 			put_pages_on_daemon_list(&pc);
1012 			__LASSERT(list_empty(&pc.pc_pages));
1013 			goto end_loop;
1014 		}
1015 
1016 		MMSPACE_OPEN;
1017 
1018 		list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
1019 						   linkage) {
1020 			static loff_t f_pos;
1021 
1022 			__LASSERT_TAGE_INVARIANT(tage);
1023 
1024 			if (f_pos >= (off_t)cfs_tracefile_size)
1025 				f_pos = 0;
1026 			else if (f_pos > i_size_read(file_inode(filp)))
1027 				f_pos = i_size_read(file_inode(filp));
1028 
1029 			buf = kmap(tage->page);
1030 			rc = vfs_write(filp, (__force const char __user *)buf,
1031 				       tage->used, &f_pos);
1032 			kunmap(tage->page);
1033 
1034 			if (rc != (int)tage->used) {
1035 				printk(KERN_WARNING "wanted to write %u but wrote %d\n",
1036 				       tage->used, rc);
1037 				put_pages_back(&pc);
1038 				__LASSERT(list_empty(&pc.pc_pages));
1039 				break;
1040 			}
1041 		}
1042 		MMSPACE_CLOSE;
1043 
1044 		filp_close(filp, NULL);
1045 		put_pages_on_daemon_list(&pc);
1046 		if (!list_empty(&pc.pc_pages)) {
1047 			int i;
1048 
1049 			printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
1050 			pr_err("total cpus(%d): ",
1051 				num_possible_cpus());
1052 			for (i = 0; i < num_possible_cpus(); i++)
1053 				if (cpu_online(i))
1054 					pr_cont("%d(on) ", i);
1055 				else
1056 					pr_cont("%d(off) ", i);
1057 			pr_cont("\n");
1058 
1059 			i = 0;
1060 			list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
1061 						     linkage)
1062 				pr_err("page %d belongs to cpu %d\n",
1063 					++i, tage->cpu);
1064 			pr_err("There are %d pages unwritten\n", i);
1065 		}
1066 		__LASSERT(list_empty(&pc.pc_pages));
1067 end_loop:
1068 		if (atomic_read(&tctl->tctl_shutdown)) {
1069 			if (last_loop == 0) {
1070 				last_loop = 1;
1071 				continue;
1072 			} else {
1073 				break;
1074 			}
1075 		}
1076 		init_waitqueue_entry(&__wait, current);
1077 		add_wait_queue(&tctl->tctl_waitq, &__wait);
1078 		set_current_state(TASK_INTERRUPTIBLE);
1079 		schedule_timeout(cfs_time_seconds(1));
1080 		remove_wait_queue(&tctl->tctl_waitq, &__wait);
1081 	}
1082 	complete(&tctl->tctl_stop);
1083 	return 0;
1084 }
1085 
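/* Start the ktracefiled daemon unless it is already running. */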
1086 int cfs_trace_start_thread(void)
1087 {
1088 	struct tracefiled_ctl *tctl = &trace_tctl;
1089 	int rc = 0;
1090 
1091 	mutex_lock(&cfs_trace_thread_mutex);
1092 	if (thread_running)
1093 		goto out;
1094 
1095 	init_completion(&tctl->tctl_start);
1096 	init_completion(&tctl->tctl_stop);
1097 	init_waitqueue_head(&tctl->tctl_waitq);
1098 	atomic_set(&tctl->tctl_shutdown, 0);
1099 
1100 	if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
1101 		rc = -ECHILD;
1102 		goto out;
1103 	}
1104 
1105 	wait_for_completion(&tctl->tctl_start);
1106 	thread_running = 1;
1107 out:
1108 	mutex_unlock(&cfs_trace_thread_mutex);
1109 	return rc;
1110 }
1111 
1112 void cfs_trace_stop_thread(void)
1113 {
1114 	struct tracefiled_ctl *tctl = &trace_tctl;
1115 
1116 	mutex_lock(&cfs_trace_thread_mutex);
1117 	if (thread_running) {
1118 		printk(KERN_INFO
1119 		       "Lustre: shutting down debug daemon thread...\n");
1120 		atomic_set(&tctl->tctl_shutdown, 1);
1121 		wait_for_completion(&tctl->tctl_stop);
1122 		thread_running = 0;
1123 	}
1124 	mutex_unlock(&cfs_trace_thread_mutex);
1125 }
1126 
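/* Set up the per-CPU trace buffers, sizing each CPU's page limit from
 * 'max_pages' and its tcd_pages_factor. */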
1127 int cfs_tracefile_init(int max_pages)
1128 {
1129 	struct cfs_trace_cpu_data *tcd;
1130 	int		    i;
1131 	int		    j;
1132 	int		    rc;
1133 	int		    factor;
1134 
1135 	rc = cfs_tracefile_init_arch();
1136 	if (rc != 0)
1137 		return rc;
1138 
1139 	cfs_tcd_for_each(tcd, i, j) {
1140 		/* tcd_pages_factor is initialized in cfs_tracefile_init_arch(). */
1141 		factor = tcd->tcd_pages_factor;
1142 		INIT_LIST_HEAD(&tcd->tcd_pages);
1143 		INIT_LIST_HEAD(&tcd->tcd_stock_pages);
1144 		INIT_LIST_HEAD(&tcd->tcd_daemon_pages);
1145 		tcd->tcd_cur_pages = 0;
1146 		tcd->tcd_cur_stock_pages = 0;
1147 		tcd->tcd_cur_daemon_pages = 0;
1148 		tcd->tcd_max_pages = (max_pages * factor) / 100;
1149 		LASSERT(tcd->tcd_max_pages > 0);
1150 		tcd->tcd_shutting_down = 0;
1151 	}
1152 
1153 	return 0;
1154 }
1155 
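/* Mark every per-CPU buffer as shutting down and free its pages. */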
1156 static void trace_cleanup_on_all_cpus(void)
1157 {
1158 	struct cfs_trace_cpu_data *tcd;
1159 	struct cfs_trace_page *tage;
1160 	struct cfs_trace_page *tmp;
1161 	int i, cpu;
1162 
1163 	for_each_possible_cpu(cpu) {
1164 		cfs_tcd_for_each_type_lock(tcd, i, cpu) {
1165 			tcd->tcd_shutting_down = 1;
1166 
1167 			list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
1168 							   linkage) {
1169 				__LASSERT_TAGE_INVARIANT(tage);
1170 
1171 				list_del(&tage->linkage);
1172 				cfs_tage_free(tage);
1173 			}
1174 
1175 			tcd->tcd_cur_pages = 0;
1176 		}
1177 	}
1178 }
1179 
1180 static void cfs_trace_cleanup(void)
1181 {
1182 	struct page_collection pc;
1183 
1184 	INIT_LIST_HEAD(&pc.pc_pages);
1185 	spin_lock_init(&pc.pc_lock);
1186 
1187 	trace_cleanup_on_all_cpus();
1188 
1189 	cfs_tracefile_fini_arch();
1190 }
1191 
1192 void cfs_tracefile_exit(void)
1193 {
1194 	cfs_trace_stop_thread();
1195 	cfs_trace_cleanup();
1196 }
1197