1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 */
36 #define DEBUG_SUBSYSTEM S_LLITE
37
38 #include "../include/lustre_lite.h"
39 #include "../include/lprocfs_status.h"
40 #include <linux/seq_file.h>
41 #include "../include/obd_support.h"
42
43 #include "llite_internal.h"
44 #include "vvp_internal.h"
45
46 /* /proc/lustre/llite mount point registration */
47 static struct file_operations ll_rw_extents_stats_fops;
48 static struct file_operations ll_rw_extents_stats_pp_fops;
49 static struct file_operations ll_rw_offset_stats_fops;
50
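/*
 * The *_show() handlers below back the read-only sysfs attributes with
 * statfs data: container_of() maps the embedded ll_kobj back to its
 * ll_sb_info, and ll_statfs_internal() is called with OBD_STATFS_NODELAY
 * and a maximum age of OBD_STATFS_CACHE_SECONDS, so a sufficiently recent
 * cached result can be reused instead of forcing a fresh statfs.
 */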
51 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
52 char *buf)
53 {
54 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
55 ll_kobj);
56 struct obd_statfs osfs;
57 int rc;
58
59 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
60 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
61 OBD_STATFS_NODELAY);
62 if (!rc)
63 return sprintf(buf, "%u\n", osfs.os_bsize);
64
65 return rc;
66 }
67 LUSTRE_RO_ATTR(blocksize);
68
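/*
 * Conversion note for the kbytes* attributes: blk_size = os_bsize >> 10 is
 * the number of KiB per block (assuming a power-of-two block size), and the
 * shift loop multiplies the block count by that factor without a 64-bit
 * division.  For example, os_bsize = 4096 gives blk_size = 4, the loop runs
 * twice, and the result is os_blocks * 4, i.e. the capacity in KiB.
 */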
69 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
70 char *buf)
71 {
72 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
73 ll_kobj);
74 struct obd_statfs osfs;
75 int rc;
76
77 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
78 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
79 OBD_STATFS_NODELAY);
80 if (!rc) {
81 __u32 blk_size = osfs.os_bsize >> 10;
82 __u64 result = osfs.os_blocks;
83
84 while (blk_size >>= 1)
85 result <<= 1;
86
87 rc = sprintf(buf, "%llu\n", result);
88 }
89
90 return rc;
91 }
92 LUSTRE_RO_ATTR(kbytestotal);
93
94 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
95 char *buf)
96 {
97 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
98 ll_kobj);
99 struct obd_statfs osfs;
100 int rc;
101
102 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
103 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
104 OBD_STATFS_NODELAY);
105 if (!rc) {
106 __u32 blk_size = osfs.os_bsize >> 10;
107 __u64 result = osfs.os_bfree;
108
109 while (blk_size >>= 1)
110 result <<= 1;
111
112 rc = sprintf(buf, "%llu\n", result);
113 }
114
115 return rc;
116 }
117 LUSTRE_RO_ATTR(kbytesfree);
118
119 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
120 char *buf)
121 {
122 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
123 ll_kobj);
124 struct obd_statfs osfs;
125 int rc;
126
127 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
128 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
129 OBD_STATFS_NODELAY);
130 if (!rc) {
131 __u32 blk_size = osfs.os_bsize >> 10;
132 __u64 result = osfs.os_bavail;
133
134 while (blk_size >>= 1)
135 result <<= 1;
136
137 rc = sprintf(buf, "%llu\n", result);
138 }
139
140 return rc;
141 }
142 LUSTRE_RO_ATTR(kbytesavail);
143
144 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
145 char *buf)
146 {
147 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
148 ll_kobj);
149 struct obd_statfs osfs;
150 int rc;
151
152 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
153 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
154 OBD_STATFS_NODELAY);
155 if (!rc)
156 return sprintf(buf, "%llu\n", osfs.os_files);
157
158 return rc;
159 }
160 LUSTRE_RO_ATTR(filestotal);
161
162 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
163 char *buf)
164 {
165 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
166 ll_kobj);
167 struct obd_statfs osfs;
168 int rc;
169
170 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
171 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
172 OBD_STATFS_NODELAY);
173 if (!rc)
174 return sprintf(buf, "%llu\n", osfs.os_ffree);
175
176 return rc;
177 }
178 LUSTRE_RO_ATTR(filesfree);
179
180 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
181 char *buf)
182 {
183 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
184 ll_kobj);
185
186 return sprintf(buf, "%s client\n",
187 sbi->ll_flags & LL_SBI_RMT_CLIENT ? "remote" : "local");
188 }
189 LUSTRE_RO_ATTR(client_type);
190
191 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
192 char *buf)
193 {
194 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
195 ll_kobj);
196
197 return sprintf(buf, "%s\n", sbi->ll_sb->s_type->name);
198 }
199 LUSTRE_RO_ATTR(fstype);
200
201 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
202 char *buf)
203 {
204 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
205 ll_kobj);
206
207 return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
208 }
209 LUSTRE_RO_ATTR(uuid);
210
211 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
212 {
213 struct super_block *sb = m->private;
214
215 /*
216 * See description of statistical counters in struct cl_site, and
217 * struct lu_site.
218 */
219 return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
220 }
221
222 LPROC_SEQ_FOPS_RO(ll_site_stats);
223
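/*
 * The read-ahead tunables are kept in pages internally but exposed in
 * megabytes: mult = 1 << (20 - PAGE_CACHE_SHIFT) is the number of pages per
 * MiB (256 with 4 KiB pages), and lprocfs_read_frac_helper() prints the
 * stored page count divided by that factor.
 */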
224 static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
225 struct attribute *attr, char *buf)
226 {
227 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
228 ll_kobj);
229 long pages_number;
230 int mult;
231
232 spin_lock(&sbi->ll_lock);
233 pages_number = sbi->ll_ra_info.ra_max_pages;
234 spin_unlock(&sbi->ll_lock);
235
236 mult = 1 << (20 - PAGE_CACHE_SHIFT);
237 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
238 }
239
240 static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
241 struct attribute *attr,
242 const char *buffer,
243 size_t count)
244 {
245 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
246 ll_kobj);
247 int rc;
248 unsigned long pages_number;
249
250 rc = kstrtoul(buffer, 10, &pages_number);
251 if (rc)
252 return rc;
253
254 pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
255
256 if (pages_number > totalram_pages / 2) {
257
258 CERROR("can't set file readahead more than %lu MB\n",
259 totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
260 return -ERANGE;
261 }
262
263 spin_lock(&sbi->ll_lock);
264 sbi->ll_ra_info.ra_max_pages = pages_number;
265 spin_unlock(&sbi->ll_lock);
266
267 return count;
268 }
269 LUSTRE_RW_ATTR(max_read_ahead_mb);
270
271 static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
272 struct attribute *attr,
273 char *buf)
274 {
275 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
276 ll_kobj);
277 long pages_number;
278 int mult;
279
280 spin_lock(&sbi->ll_lock);
281 pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
282 spin_unlock(&sbi->ll_lock);
283
284 mult = 1 << (20 - PAGE_CACHE_SHIFT);
285 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
286 }
287
288 static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
289 struct attribute *attr,
290 const char *buffer,
291 size_t count)
292 {
293 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
294 ll_kobj);
295 int rc;
296 unsigned long pages_number;
297
298 rc = kstrtoul(buffer, 10, &pages_number);
299 if (rc)
300 return rc;
301
302 if (pages_number > sbi->ll_ra_info.ra_max_pages) {
303 CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n",
304 sbi->ll_ra_info.ra_max_pages >> (20 - PAGE_CACHE_SHIFT));
305 return -ERANGE;
306 }
307
308 spin_lock(&sbi->ll_lock);
309 sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
310 spin_unlock(&sbi->ll_lock);
311
312 return count;
313 }
314 LUSTRE_RW_ATTR(max_read_ahead_per_file_mb);
315
316 static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
317 struct attribute *attr,
318 char *buf)
319 {
320 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
321 ll_kobj);
322 long pages_number;
323 int mult;
324
325 spin_lock(&sbi->ll_lock);
326 pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
327 spin_unlock(&sbi->ll_lock);
328
329 mult = 1 << (20 - PAGE_CACHE_SHIFT);
330 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
331 }
332
333 static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
334 struct attribute *attr,
335 const char *buffer,
336 size_t count)
337 {
338 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
339 ll_kobj);
340 int rc;
341 unsigned long pages_number;
342
343 rc = kstrtoul(buffer, 10, &pages_number);
344 if (rc)
345 return rc;
346
347 /* Cap this at the current max readahead window size; the readahead
348 * algorithm does this anyway, so it's pointless to set it larger. */
349 if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
350 CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
351 sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
352 return -ERANGE;
353 }
354
355 spin_lock(&sbi->ll_lock);
356 sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
357 spin_unlock(&sbi->ll_lock);
358
359 return count;
360 }
361 LUSTRE_RW_ATTR(max_read_ahead_whole_mb);
362
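/*
 * max_cached_mb: the client page-cache (LRU) budget.  The show side reports
 * the user count, the configured maximum, the used and unused amounts (all
 * converted from pages to MiB) and the reclaim count; the write side below
 * grows or shrinks the budget by adjusting ccc_lru_left.
 */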
363 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
364 {
365 struct super_block *sb = m->private;
366 struct ll_sb_info *sbi = ll_s2sbi(sb);
367 struct cl_client_cache *cache = &sbi->ll_cache;
368 int shift = 20 - PAGE_CACHE_SHIFT;
369 int max_cached_mb;
370 int unused_mb;
371
372 max_cached_mb = cache->ccc_lru_max >> shift;
373 unused_mb = atomic_read(&cache->ccc_lru_left) >> shift;
374 seq_printf(m,
375 "users: %d\n"
376 "max_cached_mb: %d\n"
377 "used_mb: %d\n"
378 "unused_mb: %d\n"
379 "reclaim_count: %u\n",
380 atomic_read(&cache->ccc_users),
381 max_cached_mb,
382 max_cached_mb - unused_mb,
383 unused_mb,
384 cache->ccc_lru_shrinkers);
385 return 0;
386 }
387
388 static ssize_t ll_max_cached_mb_seq_write(struct file *file,
389 const char __user *buffer,
390 size_t count, loff_t *off)
391 {
392 struct super_block *sb = ((struct seq_file *)file->private_data)->private;
393 struct ll_sb_info *sbi = ll_s2sbi(sb);
394 struct cl_client_cache *cache = &sbi->ll_cache;
395 int mult, rc, pages_number;
396 int diff = 0;
397 int nrpages = 0;
398 char kernbuf[128];
399
400 if (count >= sizeof(kernbuf))
401 return -EINVAL;
402
403 if (copy_from_user(kernbuf, buffer, count))
404 return -EFAULT;
405 kernbuf[count] = 0;
406
407 mult = 1 << (20 - PAGE_CACHE_SHIFT);
408 buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
409 kernbuf;
410 rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
411 if (rc)
412 return rc;
413
414 if (pages_number < 0 || pages_number > totalram_pages) {
415 CERROR("%s: can't set max cache more than %lu MB\n",
416 ll_get_fsname(sb, NULL, 0),
417 totalram_pages >> (20 - PAGE_CACHE_SHIFT));
418 return -ERANGE;
419 }
420
421 spin_lock(&sbi->ll_lock);
422 diff = pages_number - cache->ccc_lru_max;
423 spin_unlock(&sbi->ll_lock);
424
425 /* easy - add more LRU slots. */
426 if (diff >= 0) {
427 atomic_add(diff, &cache->ccc_lru_left);
428 rc = 0;
429 goto out;
430 }
431
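/*
 * Shrinking: diff is the number of LRU slots that must be given back.
 * First take them from the unused pool with a cmpxchg loop; if that is not
 * enough, ask the OSCs to drop slots via KEY_CACHE_LRU_SHRINK.  Slots taken
 * from the pool are returned by the error path below if the shrink fails.
 */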
432 diff = -diff;
433 while (diff > 0) {
434 int tmp;
435
436 /* reduce LRU budget from free slots. */
437 do {
438 int ov, nv;
439
440 ov = atomic_read(&cache->ccc_lru_left);
441 if (ov == 0)
442 break;
443
444 nv = ov > diff ? ov - diff : 0;
445 rc = atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
446 if (likely(ov == rc)) {
447 diff -= ov - nv;
448 nrpages += ov - nv;
449 break;
450 }
451 } while (1);
452
453 if (diff <= 0)
454 break;
455
456 if (sbi->ll_dt_exp == NULL) { /* being initialized */
457 rc = -ENODEV;
458 break;
459 }
460
461 /* difficult - have to ask OSCs to drop LRU slots. */
462 tmp = diff << 1;
463 rc = obd_set_info_async(NULL, sbi->ll_dt_exp,
464 sizeof(KEY_CACHE_LRU_SHRINK),
465 KEY_CACHE_LRU_SHRINK,
466 sizeof(tmp), &tmp, NULL);
467 if (rc < 0)
468 break;
469 }
470
471 out:
472 if (rc >= 0) {
473 spin_lock(&sbi->ll_lock);
474 cache->ccc_lru_max = pages_number;
475 spin_unlock(&sbi->ll_lock);
476 rc = count;
477 } else {
478 atomic_add(nrpages, &cache->ccc_lru_left);
479 }
480 return rc;
481 }
482
483 LPROC_SEQ_FOPS(ll_max_cached_mb);
484
485 static ssize_t checksum_pages_show(struct kobject *kobj, struct attribute *attr,
486 char *buf)
487 {
488 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
489 ll_kobj);
490
491 return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
492 }
493
494 static ssize_t checksum_pages_store(struct kobject *kobj,
495 struct attribute *attr,
496 const char *buffer,
497 size_t count)
498 {
499 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
500 ll_kobj);
501 int rc;
502 unsigned long val;
503
504 if (!sbi->ll_dt_exp)
505 /* Not set up yet */
506 return -EAGAIN;
507
508 rc = kstrtoul(buffer, 10, &val);
509 if (rc)
510 return rc;
511 if (val)
512 sbi->ll_flags |= LL_SBI_CHECKSUM;
513 else
514 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
515
516 rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
517 KEY_CHECKSUM, sizeof(val), &val, NULL);
518 if (rc)
519 CWARN("Failed to set OSC checksum flags: %d\n", rc);
520
521 return count;
522 }
523 LUSTRE_RW_ATTR(checksum_pages);
524
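/*
 * stats_track_{pid,ppid,gid} share the two helpers below.  Reading reports
 * the tracked id when the current track type matches, "0 (all)" when all
 * processes are tracked, or "untracked" otherwise.  Writing a non-zero id
 * selects that track type, writing 0 switches back to tracking everything,
 * and either way ll_stats is cleared so the counters restart.
 */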
525 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
526 enum stats_track_type type)
527 {
528 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
529 ll_kobj);
530
531 if (sbi->ll_stats_track_type == type)
532 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
533 else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
534 return sprintf(buf, "0 (all)\n");
535 else
536 return sprintf(buf, "untracked\n");
537 }
538
539 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
540 size_t count,
541 enum stats_track_type type)
542 {
543 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
544 ll_kobj);
545 int rc;
546 unsigned long pid;
547
548 rc = kstrtoul(buffer, 10, &pid);
549 if (rc)
550 return rc;
551 sbi->ll_stats_track_id = pid;
552 if (pid == 0)
553 sbi->ll_stats_track_type = STATS_TRACK_ALL;
554 else
555 sbi->ll_stats_track_type = type;
556 lprocfs_clear_stats(sbi->ll_stats);
557 return count;
558 }
559
560 static ssize_t stats_track_pid_show(struct kobject *kobj,
561 struct attribute *attr,
562 char *buf)
563 {
564 return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
565 }
566
567 static ssize_t stats_track_pid_store(struct kobject *kobj,
568 struct attribute *attr,
569 const char *buffer,
570 size_t count)
571 {
572 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
573 }
574 LUSTRE_RW_ATTR(stats_track_pid);
575
576 static ssize_t stats_track_ppid_show(struct kobject *kobj,
577 struct attribute *attr,
578 char *buf)
579 {
580 return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
581 }
582
583 static ssize_t stats_track_ppid_store(struct kobject *kobj,
584 struct attribute *attr,
585 const char *buffer,
586 size_t count)
587 {
588 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
589 }
590 LUSTRE_RW_ATTR(stats_track_ppid);
591
592 static ssize_t stats_track_gid_show(struct kobject *kobj,
593 struct attribute *attr,
594 char *buf)
595 {
596 return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
597 }
598
599 static ssize_t stats_track_gid_store(struct kobject *kobj,
600 struct attribute *attr,
601 const char *buffer,
602 size_t count)
603 {
604 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
605 }
606 LUSTRE_RW_ATTR(stats_track_gid);
607
608 static ssize_t statahead_max_show(struct kobject *kobj,
609 struct attribute *attr,
610 char *buf)
611 {
612 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
613 ll_kobj);
614
615 return sprintf(buf, "%u\n", sbi->ll_sa_max);
616 }
617
618 static ssize_t statahead_max_store(struct kobject *kobj,
619 struct attribute *attr,
620 const char *buffer,
621 size_t count)
622 {
623 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
624 ll_kobj);
625 int rc;
626 unsigned long val;
627
628 rc = kstrtoul(buffer, 10, &val);
629 if (rc)
630 return rc;
631
632 if (val <= LL_SA_RPC_MAX)
633 sbi->ll_sa_max = val;
634 else
635 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
636 val, LL_SA_RPC_MAX);
637
638 return count;
639 }
640 LUSTRE_RW_ATTR(statahead_max);
641
642 static ssize_t statahead_agl_show(struct kobject *kobj,
643 struct attribute *attr,
644 char *buf)
645 {
646 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
647 ll_kobj);
648
649 return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
650 }
651
652 static ssize_t statahead_agl_store(struct kobject *kobj,
653 struct attribute *attr,
654 const char *buffer,
655 size_t count)
656 {
657 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
658 ll_kobj);
659 int rc;
660 unsigned long val;
661
662 rc = kstrtoul(buffer, 10, &val);
663 if (rc)
664 return rc;
665
666 if (val)
667 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
668 else
669 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
670
671 return count;
672 }
673 LUSTRE_RW_ATTR(statahead_agl);
674
675 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
676 {
677 struct super_block *sb = m->private;
678 struct ll_sb_info *sbi = ll_s2sbi(sb);
679
680 seq_printf(m,
681 "statahead total: %u\n"
682 "statahead wrong: %u\n"
683 "agl total: %u\n",
684 atomic_read(&sbi->ll_sa_total),
685 atomic_read(&sbi->ll_sa_wrong),
686 atomic_read(&sbi->ll_agl_total));
687 return 0;
688 }
689
690 LPROC_SEQ_FOPS_RO(ll_statahead_stats);
691
692 static ssize_t lazystatfs_show(struct kobject *kobj,
693 struct attribute *attr,
694 char *buf)
695 {
696 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
697 ll_kobj);
698
699 return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_LAZYSTATFS ? 1 : 0);
700 }
701
702 static ssize_t lazystatfs_store(struct kobject *kobj,
703 struct attribute *attr,
704 const char *buffer,
705 size_t count)
706 {
707 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
708 ll_kobj);
709 int rc;
710 unsigned long val;
711
712 rc = kstrtoul(buffer, 10, &val);
713 if (rc)
714 return rc;
715
716 if (val)
717 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
718 else
719 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
720
721 return count;
722 }
723 LUSTRE_RW_ATTR(lazystatfs);
724
725 static ssize_t max_easize_show(struct kobject *kobj,
726 struct attribute *attr,
727 char *buf)
728 {
729 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
730 ll_kobj);
731 unsigned int ealen;
732 int rc;
733
734 rc = ll_get_max_mdsize(sbi, &ealen);
735 if (rc)
736 return rc;
737
738 return sprintf(buf, "%u\n", ealen);
739 }
740 LUSTRE_RO_ATTR(max_easize);
741
742 static ssize_t default_easize_show(struct kobject *kobj,
743 struct attribute *attr,
744 char *buf)
745 {
746 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
747 ll_kobj);
748 unsigned int ealen;
749 int rc;
750
751 rc = ll_get_default_mdsize(sbi, &ealen);
752 if (rc)
753 return rc;
754
755 return sprintf(buf, "%u\n", ealen);
756 }
757 LUSTRE_RO_ATTR(default_easize);
758
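/*
 * sbi_flags: print the name of every bit set in ll_flags using the
 * LL_SBI_FLAGS string table; a set bit without a matching name means the
 * table is out of date, so -EINVAL is returned to make that visible.
 */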
759 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
760 {
761 const char *str[] = LL_SBI_FLAGS;
762 struct super_block *sb = m->private;
763 int flags = ll_s2sbi(sb)->ll_flags;
764 int i = 0;
765
766 while (flags != 0) {
767 if (ARRAY_SIZE(str) <= i) {
768 CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags please.\n",
769 ll_get_fsname(sb, NULL, 0));
770 return -EINVAL;
771 }
772
773 if (flags & 0x1)
774 seq_printf(m, "%s ", str[i]);
775 flags >>= 1;
776 ++i;
777 }
778 seq_printf(m, "\b\n");
779 return 0;
780 }
781
782 LPROC_SEQ_FOPS_RO(ll_sbi_flags);
783
784 static ssize_t xattr_cache_show(struct kobject *kobj,
785 struct attribute *attr,
786 char *buf)
787 {
788 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
789 ll_kobj);
790
791 return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
792 }
793
794 static ssize_t xattr_cache_store(struct kobject *kobj,
795 struct attribute *attr,
796 const char *buffer,
797 size_t count)
798 {
799 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
800 ll_kobj);
801 int rc;
802 unsigned long val;
803
804 rc = kstrtoul(buffer, 10, &val);
805 if (rc)
806 return rc;
807
808 if (val != 0 && val != 1)
809 return -ERANGE;
810
811 if (val == 1 && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
812 return -ENOTSUPP;
813
814 sbi->ll_xattr_cache_enabled = val;
815
816 return count;
817 }
818 LUSTRE_RW_ATTR(xattr_cache);
819
820 static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
821 /* { "mntpt_path", ll_rd_path, 0, 0 }, */
822 { "site", &ll_site_stats_fops, NULL, 0 },
823 /* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
824 { "max_cached_mb", &ll_max_cached_mb_fops, NULL },
825 { "statahead_stats", &ll_statahead_stats_fops, NULL, 0 },
826 { "sbi_flags", &ll_sbi_flags_fops, NULL, 0 },
827 { NULL }
828 };
829
830 #define MAX_STRING_SIZE 128
831
832 static struct attribute *llite_attrs[] = {
833 &lustre_attr_blocksize.attr,
834 &lustre_attr_kbytestotal.attr,
835 &lustre_attr_kbytesfree.attr,
836 &lustre_attr_kbytesavail.attr,
837 &lustre_attr_filestotal.attr,
838 &lustre_attr_filesfree.attr,
839 &lustre_attr_client_type.attr,
840 &lustre_attr_fstype.attr,
841 &lustre_attr_uuid.attr,
842 &lustre_attr_max_read_ahead_mb.attr,
843 &lustre_attr_max_read_ahead_per_file_mb.attr,
844 &lustre_attr_max_read_ahead_whole_mb.attr,
845 &lustre_attr_checksum_pages.attr,
846 &lustre_attr_stats_track_pid.attr,
847 &lustre_attr_stats_track_ppid.attr,
848 &lustre_attr_stats_track_gid.attr,
849 &lustre_attr_statahead_max.attr,
850 &lustre_attr_statahead_agl.attr,
851 &lustre_attr_lazystatfs.attr,
852 &lustre_attr_max_easize.attr,
853 &lustre_attr_default_easize.attr,
854 &lustre_attr_xattr_cache.attr,
855 NULL,
856 };
857
858 static void llite_sb_release(struct kobject *kobj)
859 {
860 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
861 ll_kobj);
862 complete(&sbi->ll_kobj_unregister);
863 }
864
865 static struct kobj_type llite_ktype = {
866 .default_attrs = llite_attrs,
867 .sysfs_ops = &lustre_sysfs_ops,
868 .release = llite_sb_release,
869 };
870
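/*
 * Map of LPROC_LL_* opcodes to counter flags and the names shown in the
 * "stats" file.  ldebugfs_register_mountpoint() walks this table to
 * initialize the per-mount counters and ll_stats_ops_tally() increments
 * them at runtime.
 */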
871 static const struct llite_file_opcode {
872 __u32 opcode;
873 __u32 type;
874 const char *opname;
875 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
876 /* file operation */
877 { LPROC_LL_DIRTY_HITS, LPROCFS_TYPE_REGS, "dirty_pages_hits" },
878 { LPROC_LL_DIRTY_MISSES, LPROCFS_TYPE_REGS, "dirty_pages_misses" },
879 { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
880 "read_bytes" },
881 { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
882 "write_bytes" },
883 { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
884 "brw_read" },
885 { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
886 "brw_write" },
887 { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
888 "osc_read" },
889 { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
890 "osc_write" },
891 { LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
892 { LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
893 { LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" },
894 { LPROC_LL_MAP, LPROCFS_TYPE_REGS, "mmap" },
895 { LPROC_LL_LLSEEK, LPROCFS_TYPE_REGS, "seek" },
896 { LPROC_LL_FSYNC, LPROCFS_TYPE_REGS, "fsync" },
897 { LPROC_LL_READDIR, LPROCFS_TYPE_REGS, "readdir" },
898 /* inode operation */
899 { LPROC_LL_SETATTR, LPROCFS_TYPE_REGS, "setattr" },
900 { LPROC_LL_TRUNC, LPROCFS_TYPE_REGS, "truncate" },
901 { LPROC_LL_FLOCK, LPROCFS_TYPE_REGS, "flock" },
902 { LPROC_LL_GETATTR, LPROCFS_TYPE_REGS, "getattr" },
903 /* dir inode operation */
904 { LPROC_LL_CREATE, LPROCFS_TYPE_REGS, "create" },
905 { LPROC_LL_LINK, LPROCFS_TYPE_REGS, "link" },
906 { LPROC_LL_UNLINK, LPROCFS_TYPE_REGS, "unlink" },
907 { LPROC_LL_SYMLINK, LPROCFS_TYPE_REGS, "symlink" },
908 { LPROC_LL_MKDIR, LPROCFS_TYPE_REGS, "mkdir" },
909 { LPROC_LL_RMDIR, LPROCFS_TYPE_REGS, "rmdir" },
910 { LPROC_LL_MKNOD, LPROCFS_TYPE_REGS, "mknod" },
911 { LPROC_LL_RENAME, LPROCFS_TYPE_REGS, "rename" },
912 /* special inode operation */
913 { LPROC_LL_STAFS, LPROCFS_TYPE_REGS, "statfs" },
914 { LPROC_LL_ALLOC_INODE, LPROCFS_TYPE_REGS, "alloc_inode" },
915 { LPROC_LL_SETXATTR, LPROCFS_TYPE_REGS, "setxattr" },
916 { LPROC_LL_GETXATTR, LPROCFS_TYPE_REGS, "getxattr" },
917 { LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REGS, "getxattr_hits" },
918 { LPROC_LL_LISTXATTR, LPROCFS_TYPE_REGS, "listxattr" },
919 { LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_REGS, "removexattr" },
920 { LPROC_LL_INODE_PERM, LPROCFS_TYPE_REGS, "inode_permission" },
921 };
922
923 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
924 {
925 if (!sbi->ll_stats)
926 return;
927 if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
928 lprocfs_counter_add(sbi->ll_stats, op, count);
929 else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
930 sbi->ll_stats_track_id == current->pid)
931 lprocfs_counter_add(sbi->ll_stats, op, count);
932 else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
933 sbi->ll_stats_track_id == current->real_parent->pid)
934 lprocfs_counter_add(sbi->ll_stats, op, count);
935 else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
936 sbi->ll_stats_track_id ==
937 from_kgid(&init_user_ns, current_gid()))
938 lprocfs_counter_add(sbi->ll_stats, op, count);
939 }
940 EXPORT_SYMBOL(ll_stats_ops_tally);
941
942 static const char *ra_stat_string[] = {
943 [RA_STAT_HIT] = "hits",
944 [RA_STAT_MISS] = "misses",
945 [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
946 [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
947 [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
948 [RA_STAT_FAILED_MATCH] = "failed lock match",
949 [RA_STAT_DISCARDED] = "read but discarded",
950 [RA_STAT_ZERO_LEN] = "zero length file",
951 [RA_STAT_ZERO_WINDOW] = "zero size window",
952 [RA_STAT_EOF] = "read-ahead to EOF",
953 [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
954 [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
955 };
956
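/*
 * Register the per-mount entries: a debugfs directory named
 * "<fsname>-<superblock pointer>" holding the seq files and statistics,
 * plus a sysfs kobject (added to llite_kset) that exposes the attributes
 * above and symlinks to the MDC and OSC obd devices.  Any failure tears
 * down whatever was already set up.
 */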
957 int ldebugfs_register_mountpoint(struct dentry *parent,
958 struct super_block *sb, char *osc, char *mdc)
959 {
960 struct lustre_sb_info *lsi = s2lsi(sb);
961 struct ll_sb_info *sbi = ll_s2sbi(sb);
962 struct obd_device *obd;
963 struct dentry *dir;
964 char name[MAX_STRING_SIZE + 1], *ptr;
965 int err, id, len, rc;
966
967 name[MAX_STRING_SIZE] = '\0';
968
969 LASSERT(sbi != NULL);
970 LASSERT(mdc != NULL);
971 LASSERT(osc != NULL);
972
973 /* Get fsname */
974 len = strlen(lsi->lsi_lmd->lmd_profile);
975 ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
976 if (ptr && (strcmp(ptr, "-client") == 0))
977 len -= 7;
978
979 /* Mount info */
980 snprintf(name, MAX_STRING_SIZE, "%.*s-%p", len,
981 lsi->lsi_lmd->lmd_profile, sb);
982
983 dir = ldebugfs_register(name, parent, NULL, NULL);
984 if (IS_ERR_OR_NULL(dir)) {
985 err = dir ? PTR_ERR(dir) : -ENOMEM;
986 sbi->ll_debugfs_entry = NULL;
987 return err;
988 }
989 sbi->ll_debugfs_entry = dir;
990
991 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache", 0444,
992 &vvp_dump_pgcache_file_ops, sbi);
993 if (rc)
994 CWARN("Error adding the dump_page_cache file\n");
995
996 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
997 &ll_rw_extents_stats_fops, sbi);
998 if (rc)
999 CWARN("Error adding the extent_stats file\n");
1000
1001 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
1002 "extents_stats_per_process",
1003 0644, &ll_rw_extents_stats_pp_fops, sbi);
1004 if (rc)
1005 CWARN("Error adding the extents_stats_per_process file\n");
1006
1007 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
1008 &ll_rw_offset_stats_fops, sbi);
1009 if (rc)
1010 CWARN("Error adding the offset_stats file\n");
1011
1012 /* File operations stats */
1013 sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1014 LPROCFS_STATS_FLAG_NONE);
1015 if (sbi->ll_stats == NULL) {
1016 err = -ENOMEM;
1017 goto out;
1018 }
1019 /* do counter init */
1020 for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1021 __u32 type = llite_opcode_table[id].type;
1022 void *ptr = NULL;
1023
1024 if (type & LPROCFS_TYPE_REGS)
1025 ptr = "regs";
1026 else if (type & LPROCFS_TYPE_BYTES)
1027 ptr = "bytes";
1028 else if (type & LPROCFS_TYPE_PAGES)
1029 ptr = "pages";
1030 lprocfs_counter_init(sbi->ll_stats,
1031 llite_opcode_table[id].opcode,
1032 (type & LPROCFS_CNTR_AVGMINMAX),
1033 llite_opcode_table[id].opname, ptr);
1034 }
1035 err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
1036 sbi->ll_stats);
1037 if (err)
1038 goto out;
1039
1040 sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1041 LPROCFS_STATS_FLAG_NONE);
1042 if (sbi->ll_ra_stats == NULL) {
1043 err = -ENOMEM;
1044 goto out;
1045 }
1046
1047 for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1048 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1049 ra_stat_string[id], "pages");
1050
1051 err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
1052 sbi->ll_ra_stats);
1053 if (err)
1054 goto out;
1055
1056 err = ldebugfs_add_vars(sbi->ll_debugfs_entry,
1057 lprocfs_llite_obd_vars, sb);
1058 if (err)
1059 goto out;
1060
1061 sbi->ll_kobj.kset = llite_kset;
1062 init_completion(&sbi->ll_kobj_unregister);
1063 err = kobject_init_and_add(&sbi->ll_kobj, &llite_ktype, NULL,
1064 "%s", name);
1065 if (err)
1066 goto out;
1067
1068 /* MDC info */
1069 obd = class_name2obd(mdc);
1070
1071 err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
1072 obd->obd_type->typ_name);
1073 if (err)
1074 goto out;
1075
1076 /* OSC */
1077 obd = class_name2obd(osc);
1078
1079 err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
1080 obd->obd_type->typ_name);
1081 out:
1082 if (err) {
1083 ldebugfs_remove(&sbi->ll_debugfs_entry);
1084 lprocfs_free_stats(&sbi->ll_ra_stats);
1085 lprocfs_free_stats(&sbi->ll_stats);
1086 }
1087 return err;
1088 }
1089
1090 void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi)
1091 {
1092 if (sbi->ll_debugfs_entry) {
1093 ldebugfs_remove(&sbi->ll_debugfs_entry);
1094 kobject_put(&sbi->ll_kobj);
1095 wait_for_completion(&sbi->ll_kobj_unregister);
1096 lprocfs_free_stats(&sbi->ll_ra_stats);
1097 lprocfs_free_stats(&sbi->ll_stats);
1098 }
1099 }
1100
1101 #undef MAX_STRING_SIZE
1102
1103 #define pct(a, b) (b ? a * 100 / b : 0)
1104
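/*
 * Print one read/write size histogram.  Bucket i counts I/Os smaller than
 * 1 << (i + LL_HIST_START) bytes (see ll_rw_stats_tally()); ranges are
 * labelled in K/M/G/... units and each row shows the call count, its
 * percentage and the cumulative percentage for reads and for writes.
 */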
1105 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1106 struct seq_file *seq, int which)
1107 {
1108 unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1109 unsigned long start, end, r, w;
1110 char *unitp = "KMGTPEZY";
1111 int i, units = 10;
1112 struct per_process_info *pp_info = &io_extents->pp_extents[which];
1113
1114 read_cum = 0;
1115 write_cum = 0;
1116 start = 0;
1117
1118 for (i = 0; i < LL_HIST_MAX; i++) {
1119 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1120 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1121 }
1122
1123 for (i = 0; i < LL_HIST_MAX; i++) {
1124 r = pp_info->pp_r_hist.oh_buckets[i];
1125 w = pp_info->pp_w_hist.oh_buckets[i];
1126 read_cum += r;
1127 write_cum += w;
1128 end = 1 << (i + LL_HIST_START - units);
1129 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | %14lu %4lu %4lu\n",
1130 start, *unitp, end, *unitp,
1131 (i == LL_HIST_MAX - 1) ? '+' : ' ',
1132 r, pct(r, read_tot), pct(read_cum, read_tot),
1133 w, pct(w, write_tot), pct(write_cum, write_tot));
1134 start = end;
1135 if (start == 1<<10) {
1136 start = 1;
1137 units += 10;
1138 unitp++;
1139 }
1140 if (read_cum == read_tot && write_cum == write_tot)
1141 break;
1142 }
1143 }
1144
1145 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1146 {
1147 struct timespec64 now;
1148 struct ll_sb_info *sbi = seq->private;
1149 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1150 int k;
1151
1152 ktime_get_real_ts64(&now);
1153
1154 if (!sbi->ll_rw_stats_on) {
1155 seq_printf(seq, "disabled\n"
1156 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1157 return 0;
1158 }
1159 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.nsecs)\n",
1160 (u64)now.tv_sec, (unsigned long)now.tv_nsec);
1161 seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
1162 seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
1163 "extents", "calls", "%", "cum%",
1164 "calls", "%", "cum%");
1165 spin_lock(&sbi->ll_pp_extent_lock);
1166 for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1167 if (io_extents->pp_extents[k].pid != 0) {
1168 seq_printf(seq, "\nPID: %d\n",
1169 io_extents->pp_extents[k].pid);
1170 ll_display_extents_info(io_extents, seq, k);
1171 }
1172 }
1173 spin_unlock(&sbi->ll_pp_extent_lock);
1174 return 0;
1175 }
1176
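/*
 * Write handler pattern shared by the rw stats files: a value that parses
 * as 0, or the literal "disabled"/"Disabled", turns collection off, any
 * other input turns it on, and the accumulated histograms are cleared in
 * both cases.
 */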
1177 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1178 const char __user *buf,
1179 size_t len,
1180 loff_t *off)
1181 {
1182 struct seq_file *seq = file->private_data;
1183 struct ll_sb_info *sbi = seq->private;
1184 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1185 int i;
1186 int value = 1, rc = 0;
1187
1188 if (len == 0)
1189 return -EINVAL;
1190
1191 rc = lprocfs_write_helper(buf, len, &value);
1192 if (rc < 0 && len < 16) {
1193 char kernbuf[16];
1194
1195 if (copy_from_user(kernbuf, buf, len))
1196 return -EFAULT;
1197 kernbuf[len] = 0;
1198
1199 if (kernbuf[len - 1] == '\n')
1200 kernbuf[len - 1] = 0;
1201
1202 if (strcmp(kernbuf, "disabled") == 0 ||
1203 strcmp(kernbuf, "Disabled") == 0)
1204 value = 0;
1205 }
1206
1207 if (value == 0)
1208 sbi->ll_rw_stats_on = 0;
1209 else
1210 sbi->ll_rw_stats_on = 1;
1211
1212 spin_lock(&sbi->ll_pp_extent_lock);
1213 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1214 io_extents->pp_extents[i].pid = 0;
1215 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1216 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1217 }
1218 spin_unlock(&sbi->ll_pp_extent_lock);
1219 return len;
1220 }
1221
1222 LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
1223
1224 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1225 {
1226 struct timespec64 now;
1227 struct ll_sb_info *sbi = seq->private;
1228 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1229
1230 ktime_get_real_ts64(&now);
1231
1232 if (!sbi->ll_rw_stats_on) {
1233 seq_printf(seq, "disabled\n"
1234 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1235 return 0;
1236 }
1237 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.nsecs)\n",
1238 (u64)now.tv_sec, (unsigned long)now.tv_nsec);
1239
1240 seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
1241 seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
1242 "extents", "calls", "%", "cum%",
1243 "calls", "%", "cum%");
1244 spin_lock(&sbi->ll_lock);
1245 ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1246 spin_unlock(&sbi->ll_lock);
1247
1248 return 0;
1249 }
1250
1251 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1252 const char __user *buf,
1253 size_t len, loff_t *off)
1254 {
1255 struct seq_file *seq = file->private_data;
1256 struct ll_sb_info *sbi = seq->private;
1257 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1258 int i;
1259 int value = 1, rc = 0;
1260
1261 if (len == 0)
1262 return -EINVAL;
1263
1264 rc = lprocfs_write_helper(buf, len, &value);
1265 if (rc < 0 && len < 16) {
1266 char kernbuf[16];
1267
1268 if (copy_from_user(kernbuf, buf, len))
1269 return -EFAULT;
1270 kernbuf[len] = 0;
1271
1272 if (kernbuf[len - 1] == '\n')
1273 kernbuf[len - 1] = 0;
1274
1275 if (strcmp(kernbuf, "disabled") == 0 ||
1276 strcmp(kernbuf, "Disabled") == 0)
1277 value = 0;
1278 }
1279
1280 if (value == 0)
1281 sbi->ll_rw_stats_on = 0;
1282 else
1283 sbi->ll_rw_stats_on = 1;
1284
1285 spin_lock(&sbi->ll_pp_extent_lock);
1286 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1287 io_extents->pp_extents[i].pid = 0;
1288 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1289 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1290 }
1291 spin_unlock(&sbi->ll_pp_extent_lock);
1292
1293 return len;
1294 }
1295
1296 LPROC_SEQ_FOPS(ll_rw_extents_stats);
1297
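/*
 * Accounting hook for the llite I/O path: bump the per-PID and global
 * extent-size histograms for this request and, under ll_process_lock,
 * track per-process offsets so that non-sequential accesses are recorded
 * in the ll_rw_offset_info ring reported by offset_stats.
 */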
1298 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1299 struct ll_file_data *file, loff_t pos,
1300 size_t count, int rw)
1301 {
1302 int i, cur = -1;
1303 struct ll_rw_process_info *process;
1304 struct ll_rw_process_info *offset;
1305 int *off_count = &sbi->ll_rw_offset_entry_count;
1306 int *process_count = &sbi->ll_offset_process_count;
1307 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1308
1309 if (!sbi->ll_rw_stats_on)
1310 return;
1311 process = sbi->ll_rw_process_info;
1312 offset = sbi->ll_rw_offset_info;
1313
1314 spin_lock(&sbi->ll_pp_extent_lock);
1315 /* Extent statistics */
1316 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1317 if (io_extents->pp_extents[i].pid == pid) {
1318 cur = i;
1319 break;
1320 }
1321 }
1322
1323 if (cur == -1) {
1324 /* new process */
1325 sbi->ll_extent_process_count =
1326 (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1327 cur = sbi->ll_extent_process_count;
1328 io_extents->pp_extents[cur].pid = pid;
1329 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1330 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1331 }
1332
1333 for (i = 0; (count >= (1 << LL_HIST_START << i)) &&
1334 (i < (LL_HIST_MAX - 1)); i++)
1335 ;
1336 if (rw == 0) {
1337 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1338 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1339 } else {
1340 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1341 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1342 }
1343 spin_unlock(&sbi->ll_pp_extent_lock);
1344
1345 spin_lock(&sbi->ll_process_lock);
1346 /* Offset statistics */
1347 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1348 if (process[i].rw_pid == pid) {
1349 if (process[i].rw_last_file != file) {
1350 process[i].rw_range_start = pos;
1351 process[i].rw_last_file_pos = pos + count;
1352 process[i].rw_smallest_extent = count;
1353 process[i].rw_largest_extent = count;
1354 process[i].rw_offset = 0;
1355 process[i].rw_last_file = file;
1356 spin_unlock(&sbi->ll_process_lock);
1357 return;
1358 }
1359 if (process[i].rw_last_file_pos != pos) {
1360 *off_count =
1361 (*off_count + 1) % LL_OFFSET_HIST_MAX;
1362 offset[*off_count].rw_op = process[i].rw_op;
1363 offset[*off_count].rw_pid = pid;
1364 offset[*off_count].rw_range_start =
1365 process[i].rw_range_start;
1366 offset[*off_count].rw_range_end =
1367 process[i].rw_last_file_pos;
1368 offset[*off_count].rw_smallest_extent =
1369 process[i].rw_smallest_extent;
1370 offset[*off_count].rw_largest_extent =
1371 process[i].rw_largest_extent;
1372 offset[*off_count].rw_offset =
1373 process[i].rw_offset;
1374 process[i].rw_op = rw;
1375 process[i].rw_range_start = pos;
1376 process[i].rw_smallest_extent = count;
1377 process[i].rw_largest_extent = count;
1378 process[i].rw_offset = pos -
1379 process[i].rw_last_file_pos;
1380 }
1381 if (process[i].rw_smallest_extent > count)
1382 process[i].rw_smallest_extent = count;
1383 if (process[i].rw_largest_extent < count)
1384 process[i].rw_largest_extent = count;
1385 process[i].rw_last_file_pos = pos + count;
1386 spin_unlock(&sbi->ll_process_lock);
1387 return;
1388 }
1389 }
1390 *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
1391 process[*process_count].rw_pid = pid;
1392 process[*process_count].rw_op = rw;
1393 process[*process_count].rw_range_start = pos;
1394 process[*process_count].rw_last_file_pos = pos + count;
1395 process[*process_count].rw_smallest_extent = count;
1396 process[*process_count].rw_largest_extent = count;
1397 process[*process_count].rw_offset = 0;
1398 process[*process_count].rw_last_file = file;
1399 spin_unlock(&sbi->ll_process_lock);
1400 }
1401
1402 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
1403 {
1404 struct timespec64 now;
1405 struct ll_sb_info *sbi = seq->private;
1406 struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
1407 struct ll_rw_process_info *process = sbi->ll_rw_process_info;
1408 int i;
1409
1410 ktime_get_real_ts64(&now);
1411
1412 if (!sbi->ll_rw_stats_on) {
1413 seq_printf(seq, "disabled\n"
1414 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1415 return 0;
1416 }
1417 spin_lock(&sbi->ll_process_lock);
1418
1419 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.nsecs)\n",
1420 (u64)now.tv_sec, (unsigned long)now.tv_nsec);
1421 seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
1422 "R/W", "PID", "RANGE START", "RANGE END",
1423 "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1424 /* We stored the discontiguous offsets here; print them first */
1425 for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
1426 if (offset[i].rw_pid != 0)
1427 seq_printf(seq,
1428 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu\n",
1429 offset[i].rw_op == READ ? 'R' : 'W',
1430 offset[i].rw_pid,
1431 offset[i].rw_range_start,
1432 offset[i].rw_range_end,
1433 (unsigned long)offset[i].rw_smallest_extent,
1434 (unsigned long)offset[i].rw_largest_extent,
1435 offset[i].rw_offset);
1436 }
1437 /* Then print the current offsets for each process */
1438 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1439 if (process[i].rw_pid != 0)
1440 seq_printf(seq,
1441 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu\n",
1442 process[i].rw_op == READ ? 'R' : 'W',
1443 process[i].rw_pid,
1444 process[i].rw_range_start,
1445 process[i].rw_last_file_pos,
1446 (unsigned long)process[i].rw_smallest_extent,
1447 (unsigned long)process[i].rw_largest_extent,
1448 process[i].rw_offset);
1449 }
1450 spin_unlock(&sbi->ll_process_lock);
1451
1452 return 0;
1453 }
1454
1455 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
1456 const char __user *buf,
1457 size_t len, loff_t *off)
1458 {
1459 struct seq_file *seq = file->private_data;
1460 struct ll_sb_info *sbi = seq->private;
1461 struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
1462 struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
1463 int value = 1, rc = 0;
1464
1465 if (len == 0)
1466 return -EINVAL;
1467
1468 rc = lprocfs_write_helper(buf, len, &value);
1469
1470 if (rc < 0 && len < 16) {
1471 char kernbuf[16];
1472
1473 if (copy_from_user(kernbuf, buf, len))
1474 return -EFAULT;
1475 kernbuf[len] = 0;
1476
1477 if (kernbuf[len - 1] == '\n')
1478 kernbuf[len - 1] = 0;
1479
1480 if (strcmp(kernbuf, "disabled") == 0 ||
1481 strcmp(kernbuf, "Disabled") == 0)
1482 value = 0;
1483 }
1484
1485 if (value == 0)
1486 sbi->ll_rw_stats_on = 0;
1487 else
1488 sbi->ll_rw_stats_on = 1;
1489
1490 spin_lock(&sbi->ll_process_lock);
1491 sbi->ll_offset_process_count = 0;
1492 sbi->ll_rw_offset_entry_count = 0;
1493 memset(process_info, 0, sizeof(struct ll_rw_process_info) *
1494 LL_PROCESS_HIST_MAX);
1495 memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
1496 LL_OFFSET_HIST_MAX);
1497 spin_unlock(&sbi->ll_process_lock);
1498
1499 return len;
1500 }
1501
1502 LPROC_SEQ_FOPS(ll_rw_offset_stats);
1503
1504 void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
1505 {
1506 lvars->obd_vars = lprocfs_llite_obd_vars;
1507 }
1508