Lines matching refs: s  (references to the identifier s in drivers/md/dm-stats.c)

176 struct dm_stat *s = container_of(head, struct dm_stat, rcu_head); in dm_stat_free() local
178 kfree(s->program_id); in dm_stat_free()
179 kfree(s->aux_data); in dm_stat_free()
181 dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size); in dm_stat_free()
182 dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size); in dm_stat_free()
184 dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size); in dm_stat_free()
185 dm_kvfree(s, s->shared_alloc_size); in dm_stat_free()
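Taken together with the lines that follow, the dm_stat_free() references above already name most fields of the object being torn down. A rough reconstruction of the layout they imply, assembled from these references rather than copied from dm-stats.c (field order and exact types are approximate):

	struct dm_stat {
		struct list_head list_entry;		/* node on the stats list, kept sorted by id */
		int id;
		unsigned stat_flags;			/* e.g. STAT_PRECISE_TIMESTAMPS */
		size_t n_entries;			/* number of step-sized areas */
		sector_t start, end, step;
		unsigned n_histogram_entries;
		unsigned long long *histogram_boundaries;
		const char *program_id;
		const char *aux_data;
		struct rcu_head rcu_head;		/* deferred free via call_rcu() */
		size_t shared_alloc_size;
		size_t percpu_alloc_size;
		size_t histogram_alloc_size;
		struct dm_stat_percpu *stat_percpu[NR_CPUS];	/* per-CPU counters, one array per area */
		struct dm_stat_shared stat_shared[];		/* in-flight counts and tmp totals per area */
	};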
212 struct dm_stat *s; in dm_stats_cleanup() local
216 s = container_of(stats->list.next, struct dm_stat, list_entry); in dm_stats_cleanup()
217 list_del(&s->list_entry); in dm_stats_cleanup()
218 for (ni = 0; ni < s->n_entries; ni++) { in dm_stats_cleanup()
219 shared = &s->stat_shared[ni]; in dm_stats_cleanup()
224 (unsigned long long)s->start, in dm_stats_cleanup()
225 (unsigned long long)s->end, in dm_stats_cleanup()
226 (unsigned long long)s->step, in dm_stats_cleanup()
231 dm_stat_free(&s->rcu_head); in dm_stats_cleanup()
246 struct dm_stat *s, *tmp_s; in dm_stats_create() local
283 s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE); in dm_stats_create()
284 if (!s) in dm_stats_create()
287 s->stat_flags = stat_flags; in dm_stats_create()
288 s->n_entries = n_entries; in dm_stats_create()
289 s->start = start; in dm_stats_create()
290 s->end = end; in dm_stats_create()
291 s->step = step; in dm_stats_create()
292 s->shared_alloc_size = shared_alloc_size; in dm_stats_create()
293 s->percpu_alloc_size = percpu_alloc_size; in dm_stats_create()
294 s->histogram_alloc_size = histogram_alloc_size; in dm_stats_create()
296 s->n_histogram_entries = n_histogram_entries; in dm_stats_create()
297 s->histogram_boundaries = kmemdup(histogram_boundaries, in dm_stats_create()
298 s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL); in dm_stats_create()
299 if (!s->histogram_boundaries) { in dm_stats_create()
304 s->program_id = kstrdup(program_id, GFP_KERNEL); in dm_stats_create()
305 if (!s->program_id) { in dm_stats_create()
309 s->aux_data = kstrdup(aux_data, GFP_KERNEL); in dm_stats_create()
310 if (!s->aux_data) { in dm_stats_create()
316 atomic_set(&s->stat_shared[ni].in_flight[READ], 0); in dm_stats_create()
317 atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0); in dm_stats_create()
320 if (s->n_histogram_entries) { in dm_stats_create()
322 hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE); in dm_stats_create()
328 s->stat_shared[ni].tmp.histogram = hi; in dm_stats_create()
329 hi += s->n_histogram_entries + 1; in dm_stats_create()
339 s->stat_percpu[cpu] = p; in dm_stats_create()
340 if (s->n_histogram_entries) { in dm_stats_create()
342 hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu)); in dm_stats_create()
349 hi += s->n_histogram_entries + 1; in dm_stats_create()
364 s->id = 0; in dm_stats_create()
367 if (WARN_ON(tmp_s->id < s->id)) { in dm_stats_create()
371 if (tmp_s->id > s->id) in dm_stats_create()
373 if (unlikely(s->id == INT_MAX)) { in dm_stats_create()
377 s->id++; in dm_stats_create()
379 ret_id = s->id; in dm_stats_create()
380 list_add_tail_rcu(&s->list_entry, l); in dm_stats_create()
391 dm_stat_free(&s->rcu_head); in dm_stats_create()
397 struct dm_stat *s; in __dm_stats_find() local
399 list_for_each_entry(s, &stats->list, list_entry) { in __dm_stats_find()
400 if (s->id > id) in __dm_stats_find()
402 if (s->id == id) in __dm_stats_find()
403 return s; in __dm_stats_find()
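The dm_stats_create() fragments at lines 364-380 and the __dm_stats_find() loop above describe one invariant: the stats list stays sorted by id, so a new id is allocated by walking to the first gap, and a lookup can stop at the first id larger than the one requested. A rough reconstruction of the allocation walk, stitched from those fragments (error handling elided, the out_free label is illustrative):

	s->id = 0;
	list_for_each(l, &stats->list) {
		struct dm_stat *tmp_s = container_of(l, struct dm_stat, list_entry);

		if (WARN_ON(tmp_s->id < s->id))
			goto out_free;		/* list no longer sorted: bail out */
		if (tmp_s->id > s->id)
			break;			/* gap found: s->id is free */
		if (unlikely(s->id == INT_MAX))
			goto out_free;		/* id space exhausted */
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);	/* insert before the larger id, keeping order */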
411 struct dm_stat *s; in dm_stats_delete() local
416 s = __dm_stats_find(stats, id); in dm_stats_delete()
417 if (!s) { in dm_stats_delete()
422 list_del_rcu(&s->list_entry); in dm_stats_delete()
429 if (is_vmalloc_addr(s->stat_percpu) || in dm_stats_delete()
430 is_vmalloc_addr(s->stat_percpu[cpu][0].histogram)) in dm_stats_delete()
432 if (is_vmalloc_addr(s) || in dm_stats_delete()
433 is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) { in dm_stats_delete()
436 dm_stat_free(&s->rcu_head); in dm_stats_delete()
439 call_rcu(&s->rcu_head, dm_stat_free); in dm_stats_delete()
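Lines 429-439 are the teardown decision in dm_stats_delete(): the buffers may have been allocated with vmalloc (via dm_kvzalloc()), and vfree() cannot be called from an RCU callback, so in that case the region is freed synchronously after a grace period, while the kmalloc-only case is deferred with call_rcu(). A condensed sketch of that branch (the exact grace-period primitive is illustrative):

	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu) ||
		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
			goto do_sync_free;
	if (is_vmalloc_addr(s) ||
	    is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
		synchronize_rcu();			/* wait out readers, then free inline */
		dm_stat_free(&s->rcu_head);
	} else {
		call_rcu(&s->rcu_head, dm_stat_free);	/* kfree-only path may run in the callback */
	}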
447 struct dm_stat *s; in dm_stats_list() local
457 list_for_each_entry(s, &stats->list, list_entry) { in dm_stats_list()
458 if (!program || !strcmp(program, s->program_id)) { in dm_stats_list()
459 len = s->end - s->start; in dm_stats_list()
460 DMEMIT("%d: %llu+%llu %llu %s %s", s->id, in dm_stats_list()
461 (unsigned long long)s->start, in dm_stats_list()
463 (unsigned long long)s->step, in dm_stats_list()
464 s->program_id, in dm_stats_list()
465 s->aux_data); in dm_stats_list()
466 if (s->stat_flags & STAT_PRECISE_TIMESTAMPS) in dm_stats_list()
468 if (s->n_histogram_entries) { in dm_stats_list()
471 for (i = 0; i < s->n_histogram_entries; i++) { in dm_stats_list()
474 DMEMIT("%llu", s->histogram_boundaries[i]); in dm_stats_list()
485 static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared, in dm_stat_round() argument
494 if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS))) in dm_stat_round()
516 static void dm_stat_for_entry(struct dm_stat *s, size_t entry, in dm_stat_for_entry() argument
522 struct dm_stat_shared *shared = &s->stat_shared[entry]; in dm_stat_for_entry()
546 p = &s->stat_percpu[smp_processor_id()][entry]; in dm_stat_for_entry()
549 dm_stat_round(s, shared, p); in dm_stat_for_entry()
553 dm_stat_round(s, shared, p); in dm_stat_for_entry()
558 if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) { in dm_stat_for_entry()
565 if (s->n_histogram_entries) { in dm_stat_for_entry()
566 unsigned lo = 0, hi = s->n_histogram_entries + 1; in dm_stat_for_entry()
569 if (s->histogram_boundaries[mid - 1] > duration) { in dm_stat_for_entry()
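Lines 565-569 are a binary search that maps a request duration onto one of n_histogram_entries + 1 buckets using the sorted boundary array. A stand-alone version of that search (the function name and types here are illustrative, not the dm-stats originals):

	#include <stddef.h>

	/*
	 * Bucket 0 holds everything below boundaries[0], bucket i holds
	 * [boundaries[i-1], boundaries[i]), and bucket n_boundaries holds
	 * everything at or above the last boundary.
	 */
	static size_t histogram_bucket(const unsigned long long *boundaries,
				       size_t n_boundaries,
				       unsigned long long duration)
	{
		size_t lo = 0, hi = n_boundaries + 1;

		while (lo + 1 < hi) {
			size_t mid = (lo + hi) / 2;

			if (boundaries[mid - 1] > duration)
				hi = mid;
			else
				lo = mid;
		}
		return lo;	/* index of the histogram slot to increment */
	}

dm_stat_for_entry() then increments the current CPU's histogram counter at the returned index.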
587 static void __dm_stat_bio(struct dm_stat *s, unsigned long bi_rw, in __dm_stat_bio() argument
595 if (end_sector <= s->start || bi_sector >= s->end) in __dm_stat_bio()
597 if (unlikely(bi_sector < s->start)) { in __dm_stat_bio()
599 todo = end_sector - s->start; in __dm_stat_bio()
601 rel_sector = bi_sector - s->start; in __dm_stat_bio()
604 if (unlikely(end_sector > s->end)) in __dm_stat_bio()
605 todo -= (end_sector - s->end); in __dm_stat_bio()
607 offset = dm_sector_div64(rel_sector, s->step); in __dm_stat_bio()
610 if (WARN_ON_ONCE(entry >= s->n_entries)) { in __dm_stat_bio()
611 DMCRIT("Invalid area access in region id %d", s->id); in __dm_stat_bio()
615 if (fragment_len > s->step - offset) in __dm_stat_bio()
616 fragment_len = s->step - offset; in __dm_stat_bio()
617 dm_stat_for_entry(s, entry, bi_rw, fragment_len, in __dm_stat_bio()
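Lines 595-617 clip the bio to the region [start, end) and then walk it one step-sized area at a time, crediting each area with its share of sectors. An illustrative stand-alone version of that walk, where account() and split_over_areas() are hypothetical stand-ins (dm-stats also checks the computed entry against n_entries before accounting):

	#include <stdint.h>
	#include <stdio.h>

	static void account(uint64_t entry, uint64_t sectors)
	{
		printf("area %llu: %llu sectors\n",
		       (unsigned long long)entry, (unsigned long long)sectors);
	}

	static void split_over_areas(uint64_t start, uint64_t end, uint64_t step,
				     uint64_t bi_sector, uint64_t end_sector)
	{
		uint64_t rel_sector, todo, entry, offset;

		if (end_sector <= start || bi_sector >= end)
			return;				/* no overlap with the region */

		if (bi_sector < start) {		/* clip the head of the bio */
			rel_sector = 0;
			todo = end_sector - start;
		} else {
			rel_sector = bi_sector - start;
			todo = end_sector - bi_sector;
		}
		if (end_sector > end)			/* clip the tail of the bio */
			todo -= end_sector - end;

		offset = rel_sector % step;		/* position inside the first area */
		entry = rel_sector / step;		/* index of the first area */
		do {
			uint64_t fragment_len = todo;

			if (fragment_len > step - offset)
				fragment_len = step - offset;
			account(entry, fragment_len);
			todo -= fragment_len;
			entry++;
			offset = 0;			/* later areas start at their beginning */
		} while (todo);
	}

For example, split_over_areas(0, 1024, 128, 100, 400) credits 28 sectors to area 0, 128 to areas 1 and 2, and 16 to area 3.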
630 struct dm_stat *s; in dm_stats_account_io() local
658 list_for_each_entry_rcu(s, &stats->list, list_entry) { in dm_stats_account_io()
659 if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) { in dm_stats_account_io()
666 __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux); in dm_stats_account_io()
673 struct dm_stat *s, size_t x) in __dm_stat_init_temporary_percpu_totals() argument
679 p = &s->stat_percpu[smp_processor_id()][x]; in __dm_stat_init_temporary_percpu_totals()
680 dm_stat_round(s, shared, p); in __dm_stat_init_temporary_percpu_totals()
696 if (s->n_histogram_entries) in __dm_stat_init_temporary_percpu_totals()
697 memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long)); in __dm_stat_init_temporary_percpu_totals()
700 p = &s->stat_percpu[cpu][x]; in __dm_stat_init_temporary_percpu_totals()
713 if (s->n_histogram_entries) { in __dm_stat_init_temporary_percpu_totals()
715 for (i = 0; i < s->n_histogram_entries + 1; i++) in __dm_stat_init_temporary_percpu_totals()
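Lines 673-719 show that reporting never reads a global counter: each CPU owns a struct dm_stat_percpu per area, and a snapshot is built on demand by summing every CPU's counters into shared->tmp. A trimmed sketch of that accumulation (the sectors/ios fields are not visible in the listing and are named from memory; the remaining tmp counters are zeroed and summed the same way):

	if (s->n_histogram_entries)
		memset(shared->tmp.histogram, 0,
		       (s->n_histogram_entries + 1) * sizeof(unsigned long long));
	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ]  += READ_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ]      += READ_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE]     += READ_ONCE(p->ios[WRITE]);
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
		}
	}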
721 static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end, in __dm_stat_clear() argument
729 shared = &s->stat_shared[x]; in __dm_stat_clear()
731 __dm_stat_init_temporary_percpu_totals(shared, s, x); in __dm_stat_clear()
733 p = &s->stat_percpu[smp_processor_id()][x]; in __dm_stat_clear()
747 if (s->n_histogram_entries) { in __dm_stat_clear()
749 for (i = 0; i < s->n_histogram_entries + 1; i++) { in __dm_stat_clear()
751 p = &s->stat_percpu[smp_processor_id()][x]; in __dm_stat_clear()
761 struct dm_stat *s; in dm_stats_clear() local
765 s = __dm_stats_find(stats, id); in dm_stats_clear()
766 if (!s) { in dm_stats_clear()
771 __dm_stat_clear(s, 0, s->n_entries, true); in dm_stats_clear()
781 static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j) in dm_jiffies_to_msec64() argument
786 if (s->stat_flags & STAT_PRECISE_TIMESTAMPS) in dm_jiffies_to_msec64()
807 struct dm_stat *s; in dm_stats_print() local
820 s = __dm_stats_find(stats, id); in dm_stats_print()
821 if (!s) { in dm_stats_print()
828 idx_end > s->n_entries) in dm_stats_print()
829 idx_end = s->n_entries; in dm_stats_print()
834 step = s->step; in dm_stats_print()
835 start = s->start + (step * idx_start); in dm_stats_print()
838 shared = &s->stat_shared[x]; in dm_stats_print()
840 if (unlikely(end > s->end)) in dm_stats_print()
841 end = s->end; in dm_stats_print()
843 __dm_stat_init_temporary_percpu_totals(shared, s, x); in dm_stats_print()
851 dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]), in dm_stats_print()
855 dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]), in dm_stats_print()
857 dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total), in dm_stats_print()
858 dm_jiffies_to_msec64(s, shared->tmp.time_in_queue), in dm_stats_print()
859 dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]), in dm_stats_print()
860 dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE])); in dm_stats_print()
861 if (s->n_histogram_entries) { in dm_stats_print()
863 for (i = 0; i < s->n_histogram_entries + 1; i++) { in dm_stats_print()
874 __dm_stat_clear(s, idx_start, idx_end, false); in dm_stats_print()
884 struct dm_stat *s; in dm_stats_set_aux() local
889 s = __dm_stats_find(stats, id); in dm_stats_set_aux()
890 if (!s) { in dm_stats_set_aux()
901 kfree(s->aux_data); in dm_stats_set_aux()
902 s->aux_data = new_aux_data; in dm_stats_set_aux()
929 int s; in parse_histogram() local
931 s = sscanf(h, "%llu%c", &hi, &ch); in parse_histogram()
932 if (!s || (s == 2 && ch != ',')) in parse_histogram()
938 if (s == 1) in parse_histogram()
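Lines 929-938 use a compact sscanf idiom to parse the comma-separated histogram boundaries: read one number plus the character that follows it; two conversions with a separator other than ',' is a syntax error, and a single conversion means the final value has been read. A stand-alone sketch of that idiom (the function name and error convention are illustrative; the kernel code additionally requires the boundaries to be strictly increasing):

	#include <stdio.h>
	#include <string.h>

	static int parse_u64_list(const char *h, unsigned long long *out, int max)
	{
		int n = 0;

		while (n < max) {
			unsigned long long v;
			char ch;
			int s = sscanf(h, "%llu%c", &v, &ch);

			/* s <= 0 also covers userspace sscanf returning EOF on empty input */
			if (s <= 0 || (s == 2 && ch != ','))
				return -1;		/* malformed input */
			out[n++] = v;
			if (s == 1)
				return n;		/* no trailing ',': done */
			h = strchr(h, ',') + 1;		/* continue after the separator */
		}
		return n;
	}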