Identifier search: "iter" under /linux-4.1.27/kernel/trace/
(hits are grouped per file; each hit shows its source line number, the matching line, and, where it helps, the enclosing function)
trace_kdb.c (all hits in ftrace_dump_buf()):

    21  /* use static because iter can be a bit big for the stack */
    22  static struct trace_iterator iter;                        (function-local)
    27  trace_init_global_iter(&iter);
    28  iter.buffer_iter = buffer_iter;
    31  atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);   (in for_each_tracing_cpu())
    42  memset(&iter.seq, 0,
    45  iter.iter_flags |= TRACE_FILE_LAT_FMT;
    46  iter.pos = -1;
    50  iter.buffer_iter[cpu] =                                   (in for_each_tracing_cpu())
    51          ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
    52  ring_buffer_read_start(iter.buffer_iter[cpu]);
    53  tracing_iter_reset(&iter, cpu);
    56  iter.cpu_file = cpu_file;
    57  iter.buffer_iter[cpu_file] =
    58          ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
    59  ring_buffer_read_start(iter.buffer_iter[cpu_file]);
    60  tracing_iter_reset(&iter, cpu_file);
    63  while (trace_find_next_entry_inc(&iter)) {
    69          print_trace_line(&iter);
    70          trace_printk_seq(&iter.seq);
    88  atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);   (in for_each_tracing_cpu())
    92  if (iter.buffer_iter[cpu]) {                              (in for_each_tracing_cpu())
    93          ring_buffer_read_finish(iter.buffer_iter[cpu]);
    94          iter.buffer_iter[cpu] = NULL;
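Taken together, these hits walk the entire read-side lifecycle of a trace_iterator: quiesce the writers, prepare and start a ring-buffer read iterator, loop find-next/print, then tear everything down. A condensed sketch, assembled only from the calls visible above; the helper name dump_one_cpu and its single-CPU scope are illustrative (the real ftrace_dump_buf() also handles the all-CPUs case and kdb output):

    /*
     * Sketch of the iterator lifecycle shown in the hits above; illustrative,
     * not the verbatim kernel function.
     */
    static void dump_one_cpu(struct trace_iterator *iter, int cpu)
    {
            trace_init_global_iter(iter);   /* bind iter to the global trace_array */
            iter->cpu_file = cpu;

            /* stop new writes on this CPU while we read */
            atomic_inc(&per_cpu_ptr(iter->trace_buffer->data, cpu)->disabled);

            iter->iter_flags |= TRACE_FILE_LAT_FMT;
            iter->pos = -1;

            iter->buffer_iter[cpu] =
                    ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
            ring_buffer_read_start(iter->buffer_iter[cpu]);
            tracing_iter_reset(iter, cpu);

            /* advance entry by entry, formatting each into iter->seq */
            while (trace_find_next_entry_inc(iter)) {
                    print_trace_line(iter);
                    trace_printk_seq(&iter->seq);
            }

            /* tear down in reverse order and re-enable tracing */
            ring_buffer_read_finish(iter->buffer_iter[cpu]);
            iter->buffer_iter[cpu] = NULL;
            atomic_dec(&per_cpu_ptr(iter->trace_buffer->data, cpu)->disabled);
    }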
trace_output.c:

    23  enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
    25          struct trace_seq *s = &iter->seq;
    26          struct trace_entry *entry = iter->ent;

    36  enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
    38          struct trace_seq *s = &iter->seq;
    39          struct trace_entry *entry = iter->ent;

    49  enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
    51          struct trace_seq *s = &iter->seq;
    52          struct trace_entry *entry = iter->ent;

   225  int ftrace_raw_output_prep(struct trace_iterator *iter,
   229          struct trace_seq *s = &iter->seq;
   230          struct trace_seq *p = &iter->tmp_seq;
   234          entry = iter->ent;

   248  static int ftrace_output_raw(struct trace_iterator *iter, char *name,
   251          struct trace_seq *s = &iter->seq;

   259  int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
   265          ret = ftrace_output_raw(iter, name, fmt, ap);

   519  lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
   522          unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
   523          unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
   524          unsigned long long rel_ts = next_ts - iter->ts;
   525          struct trace_seq *s = &iter->seq;
   540          ns2usecs(iter->ts),
   547          iter->ts, abs_ts, rel_ts);

   562  int trace_print_context(struct trace_iterator *iter)
   564          struct trace_seq *s = &iter->seq;
   565          struct trace_entry *entry = iter->ent;
   573          comm, entry->pid, iter->cpu);
   578          if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
   579                  t = ns2usecs(iter->ts);
   584          trace_seq_printf(s, " %12llu: ", iter->ts);

   589  int trace_print_lat_context(struct trace_iterator *iter)
   593          int ent_size = iter->ent_size;
   594          struct trace_seq *s = &iter->seq;
   595          struct trace_entry *entry = iter->ent,
   596                  *next_entry = trace_find_next_entry(iter, NULL,
   601          iter->ent_size = ent_size;
   604          next_ts = iter->ts;
   613          comm, entry->pid, iter->cpu, entry->flags,
   614          entry->preempt_count, iter->idx);
   616          lat_print_generic(s, entry, iter->cpu);
   619          lat_print_timestamp(iter, next_ts);

   804  enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
   807          trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type);
   809          return trace_handle_return(&iter->seq);

   813  static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
   817          struct trace_seq *s = &iter->seq;
   819          trace_assign_type(field, iter->ent);

   833  static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
   838          trace_assign_type(field, iter->ent);
   840          trace_seq_printf(&iter->seq, "%lx %lx\n",
   844          return trace_handle_return(&iter->seq);

   847  static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
   851          struct trace_seq *s = &iter->seq;
   853          trace_assign_type(field, iter->ent);

   861  static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
   865          struct trace_seq *s = &iter->seq;
   867          trace_assign_type(field, iter->ent);

   888  static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
   896          trace_assign_type(field, iter->ent);
   901          trace_seq_printf(&iter->seq,
   911          return trace_handle_return(&iter->seq);

   914  static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
   917          return trace_ctxwake_print(iter, "==>");

   920  static enum print_line_t trace_wake_print(struct trace_iterator *iter,
   923          return trace_ctxwake_print(iter, " +");

   926  static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
   931          trace_assign_type(field, iter->ent);
   936          trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
   945          return trace_handle_return(&iter->seq);

   948  static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
   951          return trace_ctxwake_raw(iter, 0);

   954  static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
   957          return trace_ctxwake_raw(iter, '+');

   961  static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
   964          struct trace_seq *s = &iter->seq;
   967          trace_assign_type(field, iter->ent);

   984  static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
   987          return trace_ctxwake_hex(iter, 0);

   990  static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
   993          return trace_ctxwake_hex(iter, '+');

   996  static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
  1000          struct trace_seq *s = &iter->seq;
  1002          trace_assign_type(field, iter->ent);

  1041  static enum print_line_t trace_stack_print(struct trace_iterator *iter,
  1045          struct trace_seq *s = &iter->seq;
  1049          trace_assign_type(field, iter->ent);
  1050          end = (unsigned long *)((long)iter->ent + iter->ent_size);

  1077  static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
  1081          struct trace_seq *s = &iter->seq;
  1083          trace_assign_type(field, iter->ent);

  1102  trace_bputs_print(struct trace_iterator *iter, int flags,
  1105          struct trace_entry *entry = iter->ent;
  1106          struct trace_seq *s = &iter->seq;

  1120  trace_bputs_raw(struct trace_iterator *iter, int flags,
  1124          struct trace_seq *s = &iter->seq;
  1126          trace_assign_type(field, iter->ent);

  1146  trace_bprint_print(struct trace_iterator *iter, int flags,
  1149          struct trace_entry *entry = iter->ent;
  1150          struct trace_seq *s = &iter->seq;

  1164  trace_bprint_raw(struct trace_iterator *iter, int flags,
  1168          struct trace_seq *s = &iter->seq;
  1170          trace_assign_type(field, iter->ent);

  1189  static enum print_line_t trace_print_print(struct trace_iterator *iter,
  1193          struct trace_seq *s = &iter->seq;
  1195          trace_assign_type(field, iter->ent);

  1203  static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
  1208          trace_assign_type(field, iter->ent);
  1210          trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf);
  1212          return trace_handle_return(&iter->seq);
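Nearly every handler in this file has the same three-step shape: cast iter->ent to the event's field struct with trace_assign_type(), format into iter->seq, and let trace_handle_return() report whether the sequence overflowed. A sketch reconstructing the shape of trace_fn_raw() (lines 833-844); the exact format arguments are an assumption, though ip/parent_ip are the fields of the function-trace entry type:

    static enum print_line_t my_fn_raw(struct trace_iterator *iter, int flags,
                                       struct trace_event *event)
    {
            struct ftrace_entry *field;     /* entry type of function-trace events */

            trace_assign_type(field, iter->ent);    /* checked cast of the raw entry */

            trace_seq_printf(&iter->seq, "%lx %lx\n",
                             field->ip, field->parent_ip);

            return trace_handle_return(&iter->seq); /* HANDLED or PARTIAL_LINE */
    }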
trace_output.h (declarations):

     8  trace_print_bputs_msg_only(struct trace_iterator *iter);
    10  trace_print_bprintk_msg_only(struct trace_iterator *iter);
    12  trace_print_printk_msg_only(struct trace_iterator *iter);
    22  extern int trace_print_context(struct trace_iterator *iter);
    23  extern int trace_print_lat_context(struct trace_iterator *iter);
    29  extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
trace_mmiotrace.c:

   102  static void mmio_pipe_open(struct trace_iterator *iter)
   105          struct trace_seq *s = &iter->seq;
   114          iter->private = hiter;

   118  static void mmio_close(struct trace_iterator *iter)
   120          struct header_iter *hiter = iter->private;
   122          iter->private = NULL;

   125  static unsigned long count_overruns(struct trace_iterator *iter)
   128          unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);

   136  static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
   140          struct header_iter *hiter = iter->private;
   141          struct trace_seq *s = &iter->seq;
   144          n = count_overruns(iter);
   162          iter->private = NULL;

   170  static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
   172          struct trace_entry *entry = iter->ent;
   175          struct trace_seq *s = &iter->seq;
   176          unsigned long long t = ns2usecs(iter->ts);

   215  static enum print_line_t mmio_print_map(struct trace_iterator *iter)
   217          struct trace_entry *entry = iter->ent;
   220          struct trace_seq *s = &iter->seq;
   221          unsigned long long t = ns2usecs(iter->ts);

   249  static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
   251          struct trace_entry *entry = iter->ent;
   254          struct trace_seq *s = &iter->seq;
   255          unsigned long long t = ns2usecs(iter->ts);

   265  static enum print_line_t mmio_print_line(struct trace_iterator *iter)
   267          switch (iter->ent->type) {
   269                  return mmio_print_rw(iter);
   271                  return mmio_print_map(iter);
   273                  return mmio_print_mark(iter);
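mmio_print_line() is the tracer's print_line callback: it switches on the entry type and forwards to one formatter per type. A sketch of that dispatch; the case labels and the default return value are assumptions filled in around the returns shown at lines 267-273:

    static enum print_line_t mmio_print_line_sketch(struct trace_iterator *iter)
    {
            switch (iter->ent->type) {
            case TRACE_MMIO_RW:             /* assumed label for line 269 */
                    return mmio_print_rw(iter);
            case TRACE_MMIO_MAP:            /* assumed label for line 271 */
                    return mmio_print_map(iter);
            case TRACE_PRINT:               /* assumed label for line 273 */
                    return mmio_print_mark(iter);
            default:
                    return TRACE_TYPE_HANDLED;      /* skip entries we don't format */
            }
    }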
trace.c:

   1130  static int wait_on_pipe(struct trace_iterator *iter, bool full)
   1133          if (trace_buffer_iter(iter, iter->cpu_file))
   1136          return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,

   2278  static void trace_iterator_increment(struct trace_iterator *iter)
   2280          struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
   2282          iter->idx++;

   2288  peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
   2292          struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
   2297          event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
   2301          iter->ent_size = ring_buffer_event_length(event);
   2304          iter->ent_size = 0;

   2309  __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
   2312          struct ring_buffer *buffer = iter->trace_buffer->buffer;
   2315          int cpu_file = iter->cpu_file;
   2328          ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
   2340          ent = peek_next_entry(iter, cpu, &ts, &lost_events);          (in for_each_tracing_cpu())
   2350          next_size = iter->ent_size;                                   (in for_each_tracing_cpu())
   2354          iter->ent_size = next_size;

   2369  struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
   2372          return __find_next_entry(iter, ent_cpu, NULL, ent_ts);

   2376  void *trace_find_next_entry_inc(struct trace_iterator *iter)
   2378          iter->ent = __find_next_entry(iter, &iter->cpu,
   2379                                        &iter->lost_events, &iter->ts);
   2381          if (iter->ent)
   2382                  trace_iterator_increment(iter);
   2384          return iter->ent ? iter : NULL;

   2387  static void trace_consume(struct trace_iterator *iter)
   2389          ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
   2390                              &iter->lost_events);

         in s_next():
   2395          struct trace_iterator *iter = m->private;         (local)
   2399          WARN_ON_ONCE(iter->leftover);
   2404          if (iter->idx > i)
   2407          if (iter->idx < 0)
   2408                  ent = trace_find_next_entry_inc(iter);
   2410                  ent = iter;
   2412          while (ent && iter->idx < i)
   2413                  ent = trace_find_next_entry_inc(iter);
   2415          iter->pos = *pos;

   2420  void tracing_iter_reset(struct trace_iterator *iter, int cpu)
   2427          per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
   2429          buf_iter = trace_buffer_iter(iter, cpu);
   2441          if (ts >= iter->trace_buffer->time_start)
   2447          per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;

         in s_start():
   2456          struct trace_iterator *iter = m->private;         (local)
   2457          struct trace_array *tr = iter->tr;
   2458          int cpu_file = iter->cpu_file;
   2465           * iter->trace is a copy of current_trace, the pointer to the
   2466           * name may be used instead of a strcmp(), as iter->trace->name
   2470          if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
   2471                  *iter->trace = *tr->current_trace;
   2475          if (iter->snapshot && iter->trace->use_max_tr)
   2479          if (!iter->snapshot)
   2482          if (*pos != iter->pos) {
   2483                  iter->ent = NULL;
   2484                  iter->cpu = 0;
   2485                  iter->idx = -1;
   2489                  tracing_iter_reset(iter, cpu);
   2491                  tracing_iter_reset(iter, cpu_file);
   2493          iter->leftover = 0;
   2494          for (p = iter; p && l < *pos; p = s_next(m, p, &l))
   2502          if (iter->leftover)
   2503                  p = iter;

         in s_stop():
   2517          struct trace_iterator *iter = m->private;         (local)
   2520          if (iter->snapshot && iter->trace->use_max_tr)
   2524          if (!iter->snapshot)
   2527          trace_access_unlock(iter->cpu_file);

   2602  print_trace_header(struct seq_file *m, struct trace_iterator *iter)
   2605          struct trace_buffer *buf = iter->trace_buffer;
   2607          struct tracer *type = iter->trace;
   2652          seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
   2653          trace_print_seq(m, &iter->seq);
   2655          seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
   2656          trace_print_seq(m, &iter->seq);

   2663  static void test_cpu_buff_start(struct trace_iterator *iter)
   2665          struct trace_seq *s = &iter->seq;
   2670          if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
   2673          if (cpumask_test_cpu(iter->cpu, iter->started))
   2676          if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
   2679          cpumask_set_cpu(iter->cpu, iter->started);
   2682          if (iter->idx > 1)
   2684                  iter->cpu);

   2687  static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
   2689          struct trace_seq *s = &iter->seq;
   2694          entry = iter->ent;
   2696          test_cpu_buff_start(iter);
   2701          if (iter->iter_flags & TRACE_FILE_LAT_FMT)
   2702                  trace_print_lat_context(iter);
   2704                  trace_print_context(iter);
   2711          return event->funcs->trace(iter, sym_flags, event);

   2718  static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
   2720          struct trace_seq *s = &iter->seq;
   2724          entry = iter->ent;
   2728                  entry->pid, iter->cpu, iter->ts);
   2735          return event->funcs->raw(iter, 0, event);

   2742  static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
   2744          struct trace_seq *s = &iter->seq;
   2749          entry = iter->ent;
   2753          SEQ_PUT_HEX_FIELD(s, iter->cpu);
   2754          SEQ_PUT_HEX_FIELD(s, iter->ts);
   2761          enum print_line_t ret = event->funcs->hex(iter, 0, event);

   2771  static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
   2773          struct trace_seq *s = &iter->seq;
   2777          entry = iter->ent;
   2781          SEQ_PUT_FIELD(s, iter->cpu);
   2782          SEQ_PUT_FIELD(s, iter->ts);
   2788          return event ? event->funcs->binary(iter, 0, event) :

   2792  int trace_empty(struct trace_iterator *iter)
   2798          if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
   2799                  cpu = iter->cpu_file;
   2800                  buf_iter = trace_buffer_iter(iter, cpu);
   2805                  if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
   2812          buf_iter = trace_buffer_iter(iter, cpu);                      (in for_each_tracing_cpu())
   2817          if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))  (in for_each_tracing_cpu())

   2826  enum print_line_t print_trace_line(struct trace_iterator *iter)
   2830          if (iter->lost_events) {
   2831                  trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
   2832                                   iter->cpu, iter->lost_events);
   2833                  if (trace_seq_has_overflowed(&iter->seq))
   2837          if (iter->trace && iter->trace->print_line) {
   2838                  ret = iter->trace->print_line(iter);
   2843          if (iter->ent->type == TRACE_BPUTS &&
   2846                  return trace_print_bputs_msg_only(iter);
   2848          if (iter->ent->type == TRACE_BPRINT &&
   2851                  return trace_print_bprintk_msg_only(iter);
   2853          if (iter->ent->type == TRACE_PRINT &&
   2856                  return trace_print_printk_msg_only(iter);
   2859                  return print_bin_fmt(iter);
   2862                  return print_hex_fmt(iter);
   2865                  return print_raw_fmt(iter);
   2867          return print_trace_fmt(iter);

         in trace_latency_header():
   2872          struct trace_iterator *iter = m->private;         (local)
   2875          if (trace_empty(iter))
   2878          if (iter->iter_flags & TRACE_FILE_LAT_FMT)
   2879                  print_trace_header(m, iter);

         in trace_default_header():
   2887          struct trace_iterator *iter = m->private;         (local)
   2892          if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
   2894                  if (trace_empty(iter))
   2896                  print_trace_header(m, iter);
   2902          print_func_help_header_irq(iter->trace_buffer, m);
   2904          print_func_help_header(iter->trace_buffer, m);

   2943  static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
   2945          if (iter->tr->allocated_snapshot)
   2951          if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
   2958  static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }

         in s_show():
   2963          struct trace_iterator *iter = v;                  (local)
   2966          if (iter->ent == NULL) {
   2967                  if (iter->tr) {
   2968                          seq_printf(m, "# tracer: %s\n", iter->trace->name);
   2972                  if (iter->snapshot && trace_empty(iter))
   2973                          print_snapshot_help(m, iter);
   2974                  else if (iter->trace && iter->trace->print_header)
   2975                          iter->trace->print_header(m);
   2979          } else if (iter->leftover) {
   2984                  ret = trace_print_seq(m, &iter->seq);
   2987                  iter->leftover = ret;
   2990                  print_trace_line(iter);
   2991                  ret = trace_print_seq(m, &iter->seq);
   2999                  iter->leftover = ret;

         in __tracing_open():
   3027          struct trace_iterator *iter;                      (local)
   3033          iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
   3034          if (!iter)
   3037          iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
   3039          if (!iter->buffer_iter)
   3047          iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
   3048          if (!iter->trace)
   3051          *iter->trace = *tr->current_trace;
   3053          if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
   3056          iter->tr = tr;
   3061          iter->trace_buffer = &tr->max_buffer;
   3064          iter->trace_buffer = &tr->trace_buffer;
   3065          iter->snapshot = snapshot;
   3066          iter->pos = -1;
   3067          iter->cpu_file = tracing_get_cpu(inode);
   3068          mutex_init(&iter->mutex);
   3071          if (iter->trace && iter->trace->open)
   3072                  iter->trace->open(iter);
   3075          if (ring_buffer_overruns(iter->trace_buffer->buffer))
   3076                  iter->iter_flags |= TRACE_FILE_ANNOTATE;
   3080                  iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
   3083          if (!iter->snapshot)
   3086          if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {     (in for_each_tracing_cpu())
   3088                  iter->buffer_iter[cpu] =
   3089                          ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
   3093                  ring_buffer_read_start(iter->buffer_iter[cpu]);
   3094                  tracing_iter_reset(iter, cpu);
   3097          cpu = iter->cpu_file;
   3098          iter->buffer_iter[cpu] =
   3099                  ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
   3101          ring_buffer_read_start(iter->buffer_iter[cpu]);
   3102          tracing_iter_reset(iter, cpu);
   3107          return iter;
   3111          kfree(iter->trace);
   3112          kfree(iter->buffer_iter);

         in tracing_release():
   3155          struct trace_iterator *iter;                      (local)
   3164          iter = m->private;
   3168          if (iter->buffer_iter[cpu])                       (in for_each_tracing_cpu())
   3169                  ring_buffer_read_finish(iter->buffer_iter[cpu]);
   3172          if (iter->trace && iter->trace->close)
   3173                  iter->trace->close(iter);
   3175          if (!iter->snapshot)
   3183          mutex_destroy(&iter->mutex);
   3184          free_cpumask_var(iter->started);
   3185          kfree(iter->trace);
   3186          kfree(iter->buffer_iter);

         in tracing_open():
   3212          struct trace_iterator *iter;                      (local)
   3229          iter = __tracing_open(inode, file, false);
   3230          if (IS_ERR(iter))
   3231                  ret = PTR_ERR(iter);
   3233          iter->iter_flags |= TRACE_FILE_LAT_FMT;

         in tracing_open_pipe():
   4541          struct trace_iterator *iter;                      (local)
   4553          iter = kzalloc(sizeof(*iter), GFP_KERNEL);
   4554          if (!iter) {
   4560          trace_seq_init(&iter->seq);
   4561          iter->trace = tr->current_trace;
   4563          if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
   4569          cpumask_setall(iter->started);
   4572          iter->iter_flags |= TRACE_FILE_LAT_FMT;
   4576          iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
   4578          iter->tr = tr;
   4579          iter->trace_buffer = &tr->trace_buffer;
   4580          iter->cpu_file = tracing_get_cpu(inode);
   4581          mutex_init(&iter->mutex);
   4582          filp->private_data = iter;
   4584          if (iter->trace->pipe_open)
   4585                  iter->trace->pipe_open(iter);
   4595          kfree(iter->trace);
   4596          kfree(iter);

         in tracing_release_pipe():
   4604          struct trace_iterator *iter = file->private_data; (local)
   4611          if (iter->trace->pipe_close)
   4612                  iter->trace->pipe_close(iter);
   4616          free_cpumask_var(iter->started);
   4617          mutex_destroy(&iter->mutex);
   4618          kfree(iter);

   4626  trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
   4629          if (trace_buffer_iter(iter, iter->cpu_file))
   4638          return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,

         in tracing_poll_pipe():
   4645          struct trace_iterator *iter = filp->private_data; (local)
   4647          return trace_poll(iter, filp, poll_table);

   4650  /* Must be called with iter->mutex held. */
         in tracing_wait_pipe():
   4653          struct trace_iterator *iter = filp->private_data; (local)
   4656          while (trace_empty(iter)) {
   4669           * iter->pos will be 0 if we haven't read anything.
   4671          if (!tracing_is_on() && iter->pos)
   4674          mutex_unlock(&iter->mutex);
   4676          ret = wait_on_pipe(iter, false);
   4678          mutex_lock(&iter->mutex);

         in tracing_read_pipe():
   4694          struct trace_iterator *iter = filp->private_data; (local)
   4698          sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
   4702          trace_seq_init(&iter->seq);
   4709          mutex_lock(&iter->mutex);
   4710          if (iter->trace->read) {
   4711                  sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
   4722          if (trace_empty(iter)) {
   4731          memset(&iter->seq, 0,
   4734          cpumask_clear(iter->started);
   4735          iter->pos = -1;
   4738          trace_access_lock(iter->cpu_file);
   4739          while (trace_find_next_entry_inc(iter) != NULL) {
   4741                  int save_len = iter->seq.seq.len;
   4743                  ret = print_trace_line(iter);
   4746                          iter->seq.seq.len = save_len;
   4750                  trace_consume(iter);
   4752                  if (trace_seq_used(&iter->seq) >= cnt)
   4760          WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
   4761                    iter->ent->type);
   4763          trace_access_unlock(iter->cpu_file);
   4767          sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
   4768          if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
   4769                  trace_seq_init(&iter->seq);
   4779          mutex_unlock(&iter->mutex);

   4799  tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
   4807          save_len = iter->seq.seq.len;
   4808          ret = print_trace_line(iter);
   4810          if (trace_seq_has_overflowed(&iter->seq)) {
   4811                  iter->seq.seq.len = save_len;
   4817           * be set if the iter->seq overflowed. But check it
   4821          iter->seq.seq.len = save_len;
   4825          count = trace_seq_used(&iter->seq) - save_len;
   4828          iter->seq.seq.len = save_len;
   4833          trace_consume(iter);
   4835          if (!trace_find_next_entry_inc(iter)) {
   4837                  iter->ent = NULL;

         in tracing_splice_read_pipe():
   4853          struct trace_iterator *iter = filp->private_data; (local)
   4870          mutex_lock(&iter->mutex);
   4872          if (iter->trace->splice_read) {
   4873                  ret = iter->trace->splice_read(iter, filp,
   4883          if (!iter->ent && !trace_find_next_entry_inc(iter)) {
   4889          trace_access_lock(iter->cpu_file);
   4897          rem = tracing_fill_pipe_page(rem, iter);
   4900          ret = trace_seq_to_buffer(&iter->seq,
   4902                                    trace_seq_used(&iter->seq));
   4908          spd.partial[i].len = trace_seq_used(&iter->seq);
   4910          trace_seq_init(&iter->seq);
   4913          trace_access_unlock(iter->cpu_file);
   4915          mutex_unlock(&iter->mutex);
   4928          mutex_unlock(&iter->mutex);

   5263          struct trace_iterator iter;                       (member of struct ftrace_buffer_info)

         in tracing_snapshot_open():
   5272          struct trace_iterator *iter;                      (local)
   5280          iter = __tracing_open(inode, file, true);
   5281          if (IS_ERR(iter))
   5282                  ret = PTR_ERR(iter);
   5289          iter = kzalloc(sizeof(*iter), GFP_KERNEL);
   5290          if (!iter) {
   5296          iter->tr = tr;
   5297          iter->trace_buffer = &tr->max_buffer;
   5298          iter->cpu_file = tracing_get_cpu(inode);
   5299          m->private = iter;

         in tracing_snapshot_write():
   5314          struct trace_iterator *iter = m->private;         (local)
   5315          struct trace_array *tr = iter->tr;
   5336          if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
   5346          if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
   5358          if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
   5361                  update_max_tr_single(tr, current, iter->cpu_file);
   5366          if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
   5369                  tracing_reset(&tr->max_buffer, iter->cpu_file);

         in snapshot_raw_open():
   5419          if (info->iter.trace->use_max_tr) {
   5424          info->iter.snapshot = true;
   5425          info->iter.trace_buffer = &info->iter.tr->max_buffer;

         in tracing_buffers_open():
   5538          info->iter.tr = tr;
   5539          info->iter.cpu_file = tracing_get_cpu(inode);
   5540          info->iter.trace = tr->current_trace;
   5541          info->iter.trace_buffer = &tr->trace_buffer;

         in tracing_buffers_poll():
   5563          struct trace_iterator *iter = &info->iter;        (local)
   5565          return trace_poll(iter, filp, poll_table);

         in tracing_buffers_read():
   5573          struct trace_iterator *iter = &info->iter;        (local)
   5581          if (iter->snapshot && iter->tr->current_trace->use_max_tr)
   5586          info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
   5587                                                    iter->cpu_file);
   5596          trace_access_lock(iter->cpu_file);
   5597          ret = ring_buffer_read_page(iter->trace_buffer->buffer,
   5600                                      iter->cpu_file, 0);
   5601          trace_access_unlock(iter->cpu_file);
   5604          if (trace_empty(iter)) {
   5608          ret = wait_on_pipe(iter, false);

         in tracing_buffers_release():
   5638          struct trace_iterator *iter = &info->iter;        (local)
   5642          iter->tr->current_trace->ref--;
   5644          __trace_array_put(iter->tr);
   5647          ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);

         in tracing_buffers_splice_read():
   5714          struct trace_iterator *iter = &info->iter;        (local)
   5730          if (iter->snapshot && iter->tr->current_trace->use_max_tr)
   5747          trace_access_lock(iter->cpu_file);
   5748          entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
   5761          ref->buffer = iter->trace_buffer->buffer;
   5762          ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
   5770                  len, iter->cpu_file, 1);
   5794          entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
   5797          trace_access_unlock(iter->cpu_file);
   5808          ret = wait_on_pipe(iter, true);

   6945  void trace_init_global_iter(struct trace_iterator *iter)
   6947          iter->tr = &global_trace;
   6948          iter->trace = iter->tr->current_trace;
   6949          iter->cpu_file = RING_BUFFER_ALL_CPUS;
   6950          iter->trace_buffer = &global_trace.trace_buffer;
   6952          if (iter->trace && iter->trace->open)
   6953                  iter->trace->open(iter);
   6956          if (ring_buffer_overruns(iter->trace_buffer->buffer))
   6957                  iter->iter_flags |= TRACE_FILE_ANNOTATE;
   6960          if (trace_clocks[iter->tr->clock_id].in_ns)
   6961                  iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

         in ftrace_dump():
   6966          /* use static because iter can be a bit big for the stack */
   6967          static struct trace_iterator iter;                (local)
   6992          trace_init_global_iter(&iter);
   6995          atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);  (in for_each_tracing_cpu())
   7005          iter.cpu_file = RING_BUFFER_ALL_CPUS;
   7008          iter.cpu_file = raw_smp_processor_id();
   7014          iter.cpu_file = RING_BUFFER_ALL_CPUS;
   7032          while (!trace_empty(&iter)) {
   7040          memset(&iter.seq, 0,
   7043          iter.iter_flags |= TRACE_FILE_LAT_FMT;
   7044          iter.pos = -1;
   7046          if (trace_find_next_entry_inc(&iter) != NULL) {
   7049                  ret = print_trace_line(&iter);
   7051                  trace_consume(&iter);
   7055          trace_printk_seq(&iter.seq);
   7067          atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);    (in for_each_tracing_cpu())
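The s_start/s_next/s_stop/s_show hits above are the standard seq_file iterator contract: seq_read() calls start(pos) once, then alternates show() and next() until next() returns NULL, and finally stop(). A minimal self-contained sketch of that contract over a static array; all names here (my_seq_ops, items, ...) are illustrative, not from trace.c:

    #include <linux/seq_file.h>
    #include <linux/kernel.h>

    static const char *items[] = { "alpha", "beta", "gamma" };

    static void *my_start(struct seq_file *m, loff_t *pos)
    {
            /* take any locks here; return the element at *pos, or NULL at end */
            return *pos < ARRAY_SIZE(items) ? (void *)&items[*pos] : NULL;
    }

    static void *my_next(struct seq_file *m, void *v, loff_t *pos)
    {
            (*pos)++;
            return my_start(m, pos);        /* NULL terminates the walk */
    }

    static void my_stop(struct seq_file *m, void *v)
    {
            /* release whatever my_start() took; nothing to do here */
    }

    static int my_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "%s\n", *(const char **)v);       /* format one element */
            return 0;
    }

    static const struct seq_operations my_seq_ops = {
            .start = my_start,
            .next  = my_next,
            .stop  = my_stop,
            .show  = my_show,
    };

trace.c's twist on this pattern is the iter->leftover field visible in s_show(): when a formatted line does not fit in the seq_file page, the partially printed entry is carried over and re-emitted on the next s_show() call instead of being advanced past.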
trace_functions_graph.c:

   571  get_return_for_leaf(struct trace_iterator *iter,
   574          struct fgraph_data *data = iter->private;
   588          ring_iter = trace_buffer_iter(iter, iter->cpu);
   598          ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
   600          event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,

   653  print_graph_irq(struct trace_iterator *iter, unsigned long addr,
   656          struct trace_seq *s = &iter->seq;
   657          struct trace_entry *ent = iter->ent;
   666          print_graph_abs_time(iter->ts, s);

   761  print_graph_entry_leaf(struct trace_iterator *iter,
   766          struct fgraph_data *data = iter->private;
   778          int cpu = iter->cpu;

   807  print_graph_entry_nested(struct trace_iterator *iter,
   812          struct fgraph_data *data = iter->private;
   817          int cpu = iter->cpu;

   847  print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
   850          struct fgraph_data *data = iter->private;
   851          struct trace_entry *ent = iter->ent;
   852          int cpu = iter->cpu;
   859          print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
   866          print_graph_abs_time(iter->ts, s);

   897  check_irq_entry(struct trace_iterator *iter, u32 flags,
   900          int cpu = iter->cpu;
   902          struct fgraph_data *data = iter->private;

   944  check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
   946          int cpu = iter->cpu;
   948          struct fgraph_data *data = iter->private;

   988  print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
   989                    struct trace_iterator *iter, u32 flags)
   991          struct fgraph_data *data = iter->private;
   995          int cpu = iter->cpu;
   997          if (check_irq_entry(iter, flags, call->func, call->depth))
  1000          print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
  1002          leaf_ret = get_return_for_leaf(iter, field);
  1004          ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
  1006          ret = print_graph_entry_nested(iter, field, s, cpu, flags);

  1024  print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
  1025                     struct trace_entry *ent, struct trace_iterator *iter, u32 flags)
  1029          struct fgraph_data *data = iter->private;
  1031          int cpu = iter->cpu;
  1035          if (check_irq_return(iter, flags, trace->depth))
  1040          int cpu = iter->cpu;
  1058          print_graph_prologue(iter, s, 0, 0, flags);
  1084          print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,

  1091  print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
  1092                      struct trace_iterator *iter, u32 flags)
  1095          struct fgraph_data *data = iter->private;
  1102          depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
  1104          print_graph_prologue(iter, s, 0, 0, flags);
  1117          switch (iter->ent->type) {
  1119                  ret = trace_print_bprintk_msg_only(iter);
  1124                  ret = trace_print_printk_msg_only(iter);
  1133          ret = event->funcs->trace(iter, sym_flags, event);

  1154  print_graph_function_flags(struct trace_iterator *iter, u32 flags)
  1157          struct fgraph_data *data = iter->private;
  1158          struct trace_entry *entry = iter->ent;
  1159          struct trace_seq *s = &iter->seq;
  1160          int cpu = iter->cpu;
  1174          iter->cpu = data->cpu;
  1175          ret = print_graph_entry(field, s, iter, flags);
  1176          if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
  1177                  per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
  1180          iter->cpu = cpu;
  1195          return print_graph_entry(&saved, s, iter, flags);
  1200          return print_graph_return(&field->ret, s, entry, iter, flags);
  1208          return print_graph_comment(s, entry, iter, flags);

  1215  print_graph_function(struct trace_iterator *iter)
  1217          return print_graph_function_flags(iter, tracer_flags.val);

  1221  print_graph_function_event(struct trace_iterator *iter, int flags,
  1224          return print_graph_function(iter);

        in print_graph_headers_flags():
  1291          struct trace_iterator *iter = s->private;         (local)
  1298          if (trace_empty(iter))
  1301          print_trace_header(s, iter);

  1307  void graph_trace_open(struct trace_iterator *iter)
  1314          iter->private = NULL;
  1339          iter->private = data;

  1349  void graph_trace_close(struct trace_iterator *iter)
  1351          struct fgraph_data *data = iter->private;
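The interesting move here is get_return_for_leaf(): before printing a function-entry event, it peeks at the next event on the same CPU; if that is the matching return, the call had no children and can be rendered on one line ("func();") rather than as a block ("func() {"). A simplified sketch of that decision, assuming a non-consuming read (a real implementation, like the one above, must also handle the consuming ring_buffer_consume/ring_buffer_peek path and nested data):

    static bool is_leaf_call(struct trace_iterator *iter,
                             struct ftrace_graph_ent_entry *ent)
    {
            struct ring_buffer_iter *ring_iter = trace_buffer_iter(iter, iter->cpu);
            struct ring_buffer_event *event;
            struct ftrace_graph_ret_entry *next;

            if (!ring_iter)
                    return false;   /* consuming read path elided in this sketch */

            event = ring_buffer_iter_peek(ring_iter, NULL);
            if (!event)
                    return false;

            next = ring_buffer_event_data(event);

            /* leaf iff the very next event is this function's return */
            return next->ent.type == TRACE_GRAPH_RET &&
                   next->ret.func == ent->graph_ent.func;
    }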
ftrace.c:

   2357  struct ftrace_rec_iter *iter = &ftrace_rec_iter;        (local, in ftrace_rec_iter_start())
   2359          iter->pg = ftrace_pages_start;
   2360          iter->index = 0;
   2363          while (iter->pg && !iter->pg->index)
   2364                  iter->pg = iter->pg->next;
   2366          if (!iter->pg)
   2369          return iter;

   2374   * @iter: The handle to the iterator.
   2376   * Returns the next iterator after the given iterator @iter.
   2378  struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
   2380          iter->index++;
   2382          if (iter->index >= iter->pg->index) {
   2383                  iter->pg = iter->pg->next;
   2384                  iter->index = 0;
   2387          while (iter->pg && !iter->pg->index)
   2388                  iter->pg = iter->pg->next;
   2391          if (!iter->pg)
   2394          return iter;

   2399   * @iter: The current iterator location
   2401   * Returns the record that the current @iter is at.
   2403  struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
   2405          return &iter->pg->records[iter->index];

         in t_hash_next():
   3006          struct ftrace_iterator *iter = m->private;        (local)
   3011          iter->pos = *pos;
   3013          if (iter->probe)
   3014                  hnd = &iter->probe->node;
   3016          if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
   3019          hhd = &ftrace_func_hash[iter->hidx];
   3022          iter->hidx++;
   3032          iter->hidx++;
   3040          iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
   3042          return iter;

         in t_hash_start():
   3047          struct ftrace_iterator *iter = m->private;        (local)
   3051          if (!(iter->flags & FTRACE_ITER_DO_HASH))
   3054          if (iter->func_pos > *pos)
   3057          iter->hidx = 0;
   3058          for (l = 0; l <= (*pos - iter->func_pos); ) {
   3067          iter->flags |= FTRACE_ITER_HASH;
   3069          return iter;

   3073  t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
   3077          rec = iter->probe;

         in t_next():
   3096          struct ftrace_iterator *iter = m->private;        (local)
   3097          struct ftrace_ops *ops = iter->ops;
   3103          if (iter->flags & FTRACE_ITER_HASH)
   3107          iter->pos = iter->func_pos = *pos;
   3109          if (iter->flags & FTRACE_ITER_PRINTALL)
   3113          if (iter->idx >= iter->pg->index) {
   3114                  if (iter->pg->next) {
   3115                          iter->pg = iter->pg->next;
   3116                          iter->idx = 0;
   3120          rec = &iter->pg->records[iter->idx++];
   3121          if (((iter->flags & FTRACE_ITER_FILTER) &&
   3124              ((iter->flags & FTRACE_ITER_NOTRACE) &&
   3127              ((iter->flags & FTRACE_ITER_ENABLED) &&
   3138          iter->func = rec;
   3140          return iter;

   3143  static void reset_iter_read(struct ftrace_iterator *iter)
   3145          iter->pos = 0;
   3146          iter->func_pos = 0;
   3147          iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);

         in t_start():
   3152          struct ftrace_iterator *iter = m->private;        (local)
   3153          struct ftrace_ops *ops = iter->ops;
   3165          if (*pos < iter->pos)
   3166                  reset_iter_read(iter);
   3173          if ((iter->flags & FTRACE_ITER_FILTER &&
   3175              (iter->flags & FTRACE_ITER_NOTRACE &&
   3179          iter->flags |= FTRACE_ITER_PRINTALL;
   3181          iter->flags &= ~FTRACE_ITER_HASH;
   3182          return iter;
   3185          if (iter->flags & FTRACE_ITER_HASH)
   3193          iter->pg = ftrace_pages_start;
   3194          iter->idx = 0;
   3204          return iter;

         in t_show():
   3230          struct ftrace_iterator *iter = m->private;        (local)
   3233          if (iter->flags & FTRACE_ITER_HASH)
   3234                  return t_hash_show(m, iter);
   3236          if (iter->flags & FTRACE_ITER_PRINTALL) {
   3237                  if (iter->flags & FTRACE_ITER_NOTRACE)
   3244          rec = iter->func;
   3250          if (iter->flags & FTRACE_ITER_ENABLED) {

         in ftrace_avail_open():
   3284          struct ftrace_iterator *iter;                     (local)
   3289          iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
   3290          if (iter) {
   3291                  iter->pg = ftrace_pages_start;
   3292                  iter->ops = &global_ops;
   3295          return iter ? 0 : -ENOMEM;

         in ftrace_enabled_open():
   3301          struct ftrace_iterator *iter;                     (local)
   3303          iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
   3304          if (iter) {
   3305                  iter->pg = ftrace_pages_start;
   3306                  iter->flags = FTRACE_ITER_ENABLED;
   3307                  iter->ops = &global_ops;
   3310          return iter ? 0 : -ENOMEM;

         in ftrace_regex_open():
   3333          struct ftrace_iterator *iter;                     (local)
   3342          iter = kzalloc(sizeof(*iter), GFP_KERNEL);
   3343          if (!iter)
   3346          if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
   3347                  kfree(iter);
   3351          iter->ops = ops;
   3352          iter->flags = flag;
   3365          iter->hash = alloc_ftrace_hash(size_bits);
   3367          iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
   3369          if (!iter->hash) {
   3370                  trace_parser_put(&iter->parser);
   3371                  kfree(iter);
   3378          iter->pg = ftrace_pages_start;
   3383          m->private = iter;
   3386          free_ftrace_hash(iter->hash);
   3387          trace_parser_put(&iter->parser);
   3388          kfree(iter);
   3391          file->private_data = iter;

         in ftrace_regex_write():
   4000          struct ftrace_iterator *iter;                     (local)
   4009          iter = m->private;
   4011          iter = file->private_data;
   4016          /* iter->hash is a local copy, so we don't need regex_lock */
   4018          parser = &iter->parser;
   4023          ret = ftrace_process_regex(iter->hash, parser->buffer,

         in ftrace_regex_release():
   4358          struct ftrace_iterator *iter;                     (local)
   4366          iter = m->private;
   4369          iter = file->private_data;
   4371          parser = &iter->parser;
   4374          ftrace_match_records(iter->hash, parser->buffer, parser->idx);
   4379          mutex_lock(&iter->ops->func_hash->regex_lock);
   4382          filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
   4385          orig_hash = &iter->ops->func_hash->filter_hash;
   4387          orig_hash = &iter->ops->func_hash->notrace_hash;
   4391          old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
   4392          old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
   4393          ret = ftrace_hash_move(iter->ops, filter_hash,
   4394                                 orig_hash, iter->hash);
   4396          ftrace_ops_update_code(iter->ops, &old_hash_ops);
   4402          mutex_unlock(&iter->ops->func_hash->regex_lock);
   4403          free_ftrace_hash(iter->hash);
   4404          kfree(iter);
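The ftrace_rec_iter_start/next/record triple at the top of this listing is the iterator that architecture code uses to walk every patchable mcount call site. Its canonical use is the loop below; the loop body comment is an illustrative placeholder, and the caller is expected to hold the appropriate ftrace locking:

    struct ftrace_rec_iter *iter;
    struct dyn_ftrace *rec;

    for (iter = ftrace_rec_iter_start(); iter; iter = ftrace_rec_iter_next(iter)) {
            rec = ftrace_rec_iter_record(iter);
            /* e.g. patch or inspect the call site at rec->ip here */
    }

Note the design: the iterator skips empty record pages in both start() and next() (the "while (iter->pg && !iter->pg->index)" loops), so the caller never sees a page boundary.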
trace_printk.c (all hits in hold_module_trace_bprintk_format()):

    49  const char **iter;                                        (function-local)
    57  for (iter = start; iter < end; iter++) {
    58          struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
    60          *iter = tb_fmt->fmt;
    67          fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL);
    70          strcpy(fmt, *iter);
    75          *iter = fmt;
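This loop "interns" a module's bprintk format strings: each pointer in the [start, end) array is redirected either at a copy already held in a lookup table or at a freshly duplicated string, so the pointers stay valid after the module is unloaded. A self-contained userspace analogue of the same pointer-rewriting pattern (the fixed-size linear table is an assumption for brevity):

    #include <stdlib.h>
    #include <string.h>

    #define TABLE_MAX 256

    static const char *table[TABLE_MAX];
    static size_t table_len;

    /* Return the stored copy of s, or NULL if s has not been seen yet. */
    static const char *lookup_intern(const char *s)
    {
            for (size_t i = 0; i < table_len; i++)
                    if (strcmp(table[i], s) == 0)
                            return table[i];
            return NULL;
    }

    /* Redirect every pointer in [start, end) at an interned copy, as the
     * kernel loop above does with its trace_bprintk_fmt list. */
    void intern_all(const char **start, const char **end)
    {
            for (const char **iter = start; iter < end; iter++) {
                    const char *stored = lookup_intern(*iter);

                    if (!stored && table_len < TABLE_MAX) {
                            char *dup = malloc(strlen(*iter) + 1);
                            if (!dup)
                                    continue;   /* leave the pointer unchanged on failure */
                            strcpy(dup, *iter);
                            table[table_len++] = dup;
                            stored = dup;
                    }
                    if (stored)
                            *iter = stored;     /* now points at the stable copy */
            }
    }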
trace_irqsoff.c:

   210  static void irqsoff_trace_open(struct trace_iterator *iter)
   213          graph_trace_open(iter);
   217  static void irqsoff_trace_close(struct trace_iterator *iter)
   219          if (iter->private)
   220                  graph_trace_close(iter);
   228  static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
   235          return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

   (stub versions for builds without the graph tracer:)
   273  static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
   279  static void irqsoff_trace_open(struct trace_iterator *iter) { }
   280  static void irqsoff_trace_close(struct trace_iterator *iter) { }
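The duplicate definitions at lines 273-280 are the config-dependent split: with the function-graph tracer built in, open/close delegate to graph_trace_open()/graph_trace_close(); without it, empty stubs keep the rest of the tracer compiling. The pattern, condensed (the real open() path also checks at runtime whether graph output is actually enabled):

    #ifdef CONFIG_FUNCTION_GRAPH_TRACER
    static void irqsoff_trace_open(struct trace_iterator *iter)
    {
            graph_trace_open(iter);     /* allocates per-cpu state in iter->private */
    }
    static void irqsoff_trace_close(struct trace_iterator *iter)
    {
            if (iter->private)
                    graph_trace_close(iter);
    }
    #else
    static void irqsoff_trace_open(struct trace_iterator *iter) { }
    static void irqsoff_trace_close(struct trace_iterator *iter) { }
    #endif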
ring_buffer.c:

   1884  rb_iter_head_event(struct ring_buffer_iter *iter)
   1886          return __rb_page_index(iter->head_page, iter->head);

   1987  static void rb_inc_iter(struct ring_buffer_iter *iter)
   1989          struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
   1997          if (iter->head_page == cpu_buffer->reader_page)
   1998                  iter->head_page = rb_set_head_page(cpu_buffer);
   2000                  rb_inc_page(cpu_buffer, &iter->head_page);
   2002          iter->read_stamp = iter->head_page->page->time_stamp;
   2003          iter->head = 0;

   3402  static void rb_iter_reset(struct ring_buffer_iter *iter)
   3404          struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
   3407          iter->head_page = cpu_buffer->reader_page;
   3408          iter->head = cpu_buffer->reader_page->read;
   3410          iter->cache_reader_page = iter->head_page;
   3411          iter->cache_read = cpu_buffer->read;
   3413          if (iter->head)
   3414                  iter->read_stamp = cpu_buffer->read_stamp;
   3416                  iter->read_stamp = iter->head_page->page->time_stamp;

   3421   * @iter: The iterator to reset
   3426  void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
   3431          if (!iter)
   3434          cpu_buffer = iter->cpu_buffer;
   3437          rb_iter_reset(iter);

   3444   * @iter: The iterator to check
   3446  int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
   3450          cpu_buffer = iter->cpu_buffer;
   3452          return iter->head_page == cpu_buffer->commit_page &&
   3453                 iter->head == rb_commit_index(cpu_buffer);

   3489  rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
   3502          iter->read_stamp += delta;
   3510          iter->read_stamp += event->time_delta;

   3671  static void rb_advance_iter(struct ring_buffer_iter *iter)
   3677          cpu_buffer = iter->cpu_buffer;
   3682          if (iter->head >= rb_page_size(iter->head_page)) {
   3684                  if (iter->head_page == cpu_buffer->commit_page)
   3686                  rb_inc_iter(iter);
   3690          event = rb_iter_head_event(iter);
   3699                  (iter->head_page == cpu_buffer->commit_page) &&
   3700                  (iter->head + length > rb_commit_index(cpu_buffer))))
   3703          rb_update_iter_read_stamp(iter, event);
   3705          iter->head += length;
   3708          if ((iter->head >= rb_page_size(iter->head_page)) &&
   3709              (iter->head_page != cpu_buffer->commit_page))
   3710                  rb_inc_iter(iter);

   3785  rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
   3792          cpu_buffer = iter->cpu_buffer;
   3800          if (unlikely(iter->cache_read != cpu_buffer->read ||
   3801                       iter->cache_reader_page != cpu_buffer->reader_page))
   3802                  rb_iter_reset(iter);
   3805          if (ring_buffer_iter_empty(iter))
   3822          if (iter->head >= rb_page_size(iter->head_page)) {
   3823                  rb_inc_iter(iter);
   3827          event = rb_iter_head_event(iter);
   3832                  rb_inc_iter(iter);
   3835                  rb_advance_iter(iter);
   3840                  rb_advance_iter(iter);
   3845                  rb_advance_iter(iter);
   3850          *ts = iter->read_stamp + event->time_delta;

   3921   * @iter: The ring buffer iterator
   3928  ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
   3930          struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
   3936          event = rb_iter_peek(iter, ts);

         in ring_buffer_read_prepare():
   4023          struct ring_buffer_iter *iter;                    (local)
   4028          iter = kmalloc(sizeof(*iter), GFP_KERNEL);
   4029          if (!iter)
   4034          iter->cpu_buffer = cpu_buffer;
   4039          return iter;

   4059   * @iter: The iterator returned by ring_buffer_read_prepare
   4069  ring_buffer_read_start(struct ring_buffer_iter *iter)
   4074          if (!iter)
   4077          cpu_buffer = iter->cpu_buffer;
   4081          rb_iter_reset(iter);

   4089   * @iter: The iterator retrieved by ring_buffer_start
   4095  ring_buffer_read_finish(struct ring_buffer_iter *iter)
   4097          struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
   4112          kfree(iter);

   4118   * @iter: The ring buffer iterator
   4124  ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
   4127          struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
   4132          event = rb_iter_peek(iter, ts);
   4139          rb_advance_iter(iter);
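The public, non-consuming read API whose internals appear above is used as a prepare/start/read/finish sequence. A minimal consumer sketch, assuming an existing "buffer" and target "cpu"; note that real callers such as __tracing_open() also synchronize between prepare and start across all CPUs, which is elided here:

    static void read_all_events(struct ring_buffer *buffer, int cpu)
    {
            struct ring_buffer_iter *iter;
            struct ring_buffer_event *event;
            u64 ts;

            iter = ring_buffer_read_prepare(buffer, cpu);   /* allocate, quiesce */
            if (!iter)
                    return;
            ring_buffer_read_start(iter);   /* reset iterator to the oldest event */

            /* ring_buffer_read() returns the next event and advances the iterator */
            while ((event = ring_buffer_read(iter, &ts)) != NULL)
                    pr_info("event at %llu, len %u\n",
                            ts, ring_buffer_event_length(event));

            ring_buffer_read_finish(iter);  /* release the buffer and free iter */
    }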
blktrace.c:

   1145  typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act);

   1147  static void blk_log_action_classic(struct trace_iterator *iter, const char *act)
   1150          unsigned long long ts = iter->ts;
   1153          const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
   1157          trace_seq_printf(&iter->seq,
   1159                  MAJOR(t->device), MINOR(t->device), iter->cpu,
   1160                  secs, nsec_rem, iter->ent->pid, act, rwbs);

   1163  static void blk_log_action(struct trace_iterator *iter, const char *act)
   1166          const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
   1169          trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",

   1349  static enum print_line_t print_one_line(struct trace_iterator *iter,
   1352          struct trace_seq *s = &iter->seq;
   1358          t = te_blk_io_trace(iter->ent);
   1364          log_action(iter, long_act ? "message" : "m");
   1365          blk_log_msg(s, iter->ent);
   1371          log_action(iter, what2act[what].act[long_act]);
   1372          what2act[what].print(s, iter->ent);

   1378  static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
   1381          return print_one_line(iter, false);

   1384  static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
   1386          struct trace_seq *s = &iter->seq;
   1387          struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
   1391          .time = iter->ts,

   1400  blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
   1403          blk_trace_synthesize_old_trace(iter);
   1405          return trace_handle_return(&iter->seq);

   1408  static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
   1413          return print_one_line(iter, true);
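print_one_line() avoids a long switch by dispatching through the what2act[] table, indexed by the trace action: each entry pairs the short and long action names with a formatter. An illustrative reduction of that shape; the struct name, field layout, and sample rows here are assumptions, not blktrace's exact definitions:

    struct action_entry {
            const char *act[2];     /* act[0] = terse name, act[1] = long name */
            void (*print)(struct trace_seq *s, const struct trace_entry *ent);
    };

    static void log_generic(struct trace_seq *s, const struct trace_entry *ent)
    {
            /* placeholder formatter */
    }

    static const struct action_entry what2act_sketch[] = {
            { { "Q", "queue" },     log_generic },
            { { "M", "backmerge" }, log_generic },
    };

    /* dispatch, as at lines 1371-1372:
     *   log_action(iter, what2act_sketch[what].act[long_act]);
     *   what2act_sketch[what].print(s, iter->ent);
     */

The long_act flag selects between the two name columns, which is how the same table serves both the classic one-letter format and the verbose one.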
|
H A D | trace.h | 363 void (*open)(struct trace_iterator *iter); 364 void (*pipe_open)(struct trace_iterator *iter); 365 void (*close)(struct trace_iterator *iter); 366 void (*pipe_close)(struct trace_iterator *iter); 367 ssize_t (*read)(struct trace_iterator *iter, 370 ssize_t (*splice_read)(struct trace_iterator *iter, 381 enum print_line_t (*print_line)(struct trace_iterator *iter); 525 trace_buffer_iter(struct trace_iterator *iter, int cpu) trace_buffer_iter() argument 527 if (iter->buffer_iter && iter->buffer_iter[cpu]) trace_buffer_iter() 528 return iter->buffer_iter[cpu]; trace_buffer_iter() 560 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 566 int trace_empty(struct trace_iterator *iter); 568 void *trace_find_next_entry_inc(struct trace_iterator *iter); 570 void trace_init_global_iter(struct trace_iterator *iter); 572 void tracing_iter_reset(struct trace_iterator *iter, int cpu); 584 void print_trace_header(struct seq_file *m, struct trace_iterator *iter); 585 int trace_empty(struct trace_iterator *iter); 708 enum print_line_t print_trace_line(struct trace_iterator *iter); 730 print_graph_function_flags(struct trace_iterator *iter, u32 flags); 734 extern void graph_trace_open(struct trace_iterator *iter); 735 extern void graph_trace_close(struct trace_iterator *iter); 804 print_graph_function_flags(struct trace_iterator *iter, u32 flags) print_graph_function_flags() argument
|
H A D | trace_branch.c | 149 static enum print_line_t trace_branch_print(struct trace_iterator *iter, trace_branch_print() argument 154 trace_assign_type(field, iter->ent); trace_branch_print() 156 trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n", trace_branch_print() 162 return trace_handle_return(&iter->seq); trace_branch_print()
|
H A D | trace_sched_wakeup.c | 260 static void wakeup_trace_open(struct trace_iterator *iter) wakeup_trace_open() argument 263 graph_trace_open(iter); wakeup_trace_open() 266 static void wakeup_trace_close(struct trace_iterator *iter) wakeup_trace_close() argument 268 if (iter->private) wakeup_trace_close() 269 graph_trace_close(iter); wakeup_trace_close() 276 static enum print_line_t wakeup_print_line(struct trace_iterator *iter) wakeup_print_line() argument 283 return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); wakeup_print_line() 320 static enum print_line_t wakeup_print_line(struct trace_iterator *iter) wakeup_print_line() argument 326 static void wakeup_trace_open(struct trace_iterator *iter) { } wakeup_trace_close() argument 327 static void wakeup_trace_close(struct trace_iterator *iter) { } wakeup_trace_close() argument
|
H A D | trace_syscalls.c | 110 print_syscall_enter(struct trace_iterator *iter, int flags, print_syscall_enter() argument 113 struct trace_seq *s = &iter->seq; print_syscall_enter() 114 struct trace_entry *ent = iter->ent; print_syscall_enter() 156 print_syscall_exit(struct trace_iterator *iter, int flags, print_syscall_exit() argument 159 struct trace_seq *s = &iter->seq; print_syscall_exit() 160 struct trace_entry *ent = iter->ent; print_syscall_exit()
|
/linux-4.1.27/block/ |
H A D | t10-pi.c | 49 static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn, t10_pi_generate() argument 54 for (i = 0 ; i < iter->data_size ; i += iter->interval) { t10_pi_generate() 55 struct t10_pi_tuple *pi = iter->prot_buf; t10_pi_generate() 57 pi->guard_tag = fn(iter->data_buf, iter->interval); t10_pi_generate() 61 pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed)); t10_pi_generate() 65 iter->data_buf += iter->interval; t10_pi_generate() 66 iter->prot_buf += sizeof(struct t10_pi_tuple); t10_pi_generate() 67 iter->seed++; t10_pi_generate() 73 static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn, t10_pi_verify() argument 78 for (i = 0 ; i < iter->data_size ; i += iter->interval) { t10_pi_verify() 79 struct t10_pi_tuple *pi = iter->prot_buf; t10_pi_verify() 89 lower_32_bits(iter->seed)) { t10_pi_verify() 91 "(rcvd %u)\n", iter->disk_name, t10_pi_verify() 93 iter->seed, be32_to_cpu(pi->ref_tag)); t10_pi_verify() 104 csum = fn(iter->data_buf, iter->interval); t10_pi_verify() 108 "(rcvd %04x, want %04x)\n", iter->disk_name, t10_pi_verify() 109 (unsigned long long)iter->seed, t10_pi_verify() 115 iter->data_buf += iter->interval; t10_pi_verify() 116 iter->prot_buf += sizeof(struct t10_pi_tuple); t10_pi_verify() 117 iter->seed++; t10_pi_verify() 123 static int t10_pi_type1_generate_crc(struct blk_integrity_iter *iter) t10_pi_type1_generate_crc() argument 125 return t10_pi_generate(iter, t10_pi_crc_fn, 1); t10_pi_type1_generate_crc() 128 static int t10_pi_type1_generate_ip(struct blk_integrity_iter *iter) t10_pi_type1_generate_ip() argument 130 return t10_pi_generate(iter, t10_pi_ip_fn, 1); t10_pi_type1_generate_ip() 133 static int t10_pi_type1_verify_crc(struct blk_integrity_iter *iter) t10_pi_type1_verify_crc() argument 135 return t10_pi_verify(iter, t10_pi_crc_fn, 1); t10_pi_type1_verify_crc() 138 static int t10_pi_type1_verify_ip(struct blk_integrity_iter *iter) t10_pi_type1_verify_ip() argument 140 return t10_pi_verify(iter, t10_pi_ip_fn, 1); t10_pi_type1_verify_ip() 143 static int t10_pi_type3_generate_crc(struct blk_integrity_iter *iter) t10_pi_type3_generate_crc() argument 145 return t10_pi_generate(iter, t10_pi_crc_fn, 3); t10_pi_type3_generate_crc() 148 static int t10_pi_type3_generate_ip(struct blk_integrity_iter *iter) t10_pi_type3_generate_ip() argument 150 return t10_pi_generate(iter, t10_pi_ip_fn, 3); t10_pi_type3_generate_ip() 153 static int t10_pi_type3_verify_crc(struct blk_integrity_iter *iter) t10_pi_type3_verify_crc() argument 155 return t10_pi_verify(iter, t10_pi_crc_fn, 3); t10_pi_type3_verify_crc() 158 static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter) t10_pi_type3_verify_ip() argument 160 return t10_pi_verify(iter, t10_pi_ip_fn, 3); t10_pi_type3_verify_ip()
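Each pass of the loops above consumes one 'interval' of data and one 8-byte t10_pi_tuple, so the protection buffer must hold data_size / interval tuples and 'seed' supplies the Type 1 reference tag. A hedged sketch of sizing and seeding the iterator ('data', 'nr_bytes', 'lbs', and 'first_lba' are hypothetical; in-tree callers reach these functions through a blk_integrity profile, not directly):

    #include <linux/blkdev.h>
    #include <linux/t10-pi.h>
    #include <linux/slab.h>

    unsigned int intervals = nr_bytes / lbs;   /* lbs = logical block size */
    struct blk_integrity_iter it = {
            .data_buf  = data,
            .prot_buf  = kzalloc(intervals * sizeof(struct t10_pi_tuple),
                                 GFP_KERNEL),
            .data_size = nr_bytes,
            .interval  = lbs,
            .seed      = first_lba,   /* becomes the Type 1 ref tag */
            .disk_name = "sketch",
    };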
|
H A D | blk-map.c | 47 * @iter: iovec iterator 65 const struct iov_iter *iter, gfp_t gfp_mask) blk_rq_map_user_iov() 72 if (!iter || !iter->count) blk_rq_map_user_iov() 75 iov_for_each(iov, i, *iter) { blk_rq_map_user_iov() 88 if (unaligned || (q->dma_pad_mask & iter->count) || map_data) blk_rq_map_user_iov() 89 bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); blk_rq_map_user_iov() 91 bio = bio_map_user_iov(q, iter, gfp_mask); blk_rq_map_user_iov() 99 if (bio->bi_iter.bi_size != iter->count) { blk_rq_map_user_iov() 63 blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct rq_map_data *map_data, const struct iov_iter *iter, gfp_t gfp_mask) blk_rq_map_user_iov() argument
|
H A D | bio.c | 506 struct bvec_iter iter; zero_fill_bio() local 508 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment() 618 struct bvec_iter iter; bio_clone_bioset() local 661 bio_for_each_segment(bv, bio_src, iter) bio_clone_bioset() 1025 struct iov_iter iter; member in struct:bio_map_data 1042 * @iter: iov_iter as source 1047 static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter) bio_copy_from_iter() argument 1058 &iter); bio_for_each_segment_all() 1060 if (!iov_iter_count(&iter)) bio_for_each_segment_all() 1073 * @iter: iov_iter as destination 1078 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) bio_copy_to_iter() argument 1089 &iter); bio_for_each_segment_all() 1091 if (!iov_iter_count(&iter)) bio_for_each_segment_all() 1131 ret = bio_copy_to_iter(bio, bmd->iter); bio_uncopy_user() 1145 * @iter: iovec iterator 1154 const struct iov_iter *iter, bio_copy_user_iov() 1162 unsigned int len = iter->count; bio_copy_user_iov() 1165 for (i = 0; i < iter->nr_segs; i++) { bio_copy_user_iov() 1170 uaddr = (unsigned long) iter->iov[i].iov_base; bio_copy_user_iov() 1171 end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1) bio_copy_user_iov() 1187 bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask); bio_copy_user_iov() 1197 memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs); bio_copy_user_iov() 1198 iov_iter_init(&bmd->iter, iter->type, bmd->iov, bio_copy_user_iov() 1199 iter->nr_segs, iter->count); bio_copy_user_iov() 1206 if (iter->type & WRITE) bio_copy_user_iov() 1254 if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) || bio_copy_user_iov() 1256 ret = bio_copy_from_iter(bio, *iter); bio_copy_user_iov() 1275 * @iter: iovec iterator 1282 const struct iov_iter *iter, bio_map_user_iov() 1294 iov_for_each(iov, i, *iter) { bio_map_user_iov() 1326 iov_for_each(iov, i, *iter) { bio_map_user_iov() 1335 (iter->type & WRITE) != WRITE, bio_map_user_iov() 1376 if (iter->type & WRITE) bio_map_user_iov() 1739 struct bvec_iter iter; bio_flush_dcache_pages() local 1741 bio_for_each_segment(bvec, bi, iter) bio_flush_dcache_pages() 1152 bio_copy_user_iov(struct request_queue *q, struct rq_map_data *map_data, const struct iov_iter *iter, gfp_t gfp_mask) bio_copy_user_iov() argument 1281 bio_map_user_iov(struct request_queue *q, const struct iov_iter *iter, gfp_t gfp_mask) bio_map_user_iov() argument
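The bvec pattern above (a struct bio_vec value plus a struct bvec_iter cursor driving bio_for_each_segment()) is all a segment walk needs. A minimal sketch, with a hypothetical helper name:

    #include <linux/bio.h>

    /* Minimal sketch: total up a bio's payload, segment by segment. */
    static unsigned int bio_payload_bytes(struct bio *bio)
    {
            struct bio_vec bv;
            struct bvec_iter iter;
            unsigned int bytes = 0;

            bio_for_each_segment(bv, bio, iter)
                    bytes += bv.bv_len;

            return bytes;
    }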
|
H A D | bio-integrity.c | 218 struct blk_integrity_iter iter; bio_integrity_process() local 226 iter.disk_name = bio->bi_bdev->bd_disk->disk_name; bio_integrity_process() 227 iter.interval = bi->interval; bio_integrity_process() 228 iter.seed = bip_get_seed(bip); bio_integrity_process() 229 iter.prot_buf = prot_buf; bio_integrity_process() 234 iter.data_buf = kaddr + bv.bv_offset; bio_for_each_segment() 235 iter.data_size = bv.bv_len; bio_for_each_segment() 237 ret = proc_fn(&iter); bio_for_each_segment()
|
H A D | bounce.c | 105 struct bvec_iter iter; copy_to_high_bio_irq() local 107 bio_for_each_segment(tovec, to, iter) { bio_for_each_segment() 205 struct bvec_iter iter; __blk_queue_bounce() local 210 bio_for_each_segment(from, *bio_orig, iter) __blk_queue_bounce()
|
H A D | blk-merge.c | 20 struct bvec_iter iter; __blk_recalc_rq_segments() local 41 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment() 127 struct bvec_iter iter; blk_phys_contig_segment() local 139 bio_for_each_segment(end_bv, bio, iter) blk_phys_contig_segment() 140 if (end_bv.bv_len == iter.bi_size) blk_phys_contig_segment() 206 struct bvec_iter iter; __blk_bios_map_sg() local 236 bio_for_each_segment(bvec, bio, iter) __blk_bios_map_sg()
|
H A D | genhd.c | 180 * @piter: iter of interest 740 struct class_dev_iter iter; printk_all_partitions() local 743 class_dev_iter_init(&iter, &block_class, NULL, &disk_type); printk_all_partitions() 744 while ((dev = class_dev_iter_next(&iter))) { printk_all_partitions() 785 class_dev_iter_exit(&iter); printk_all_partitions() 793 struct class_dev_iter *iter; disk_seqf_start() local 796 iter = kmalloc(sizeof(*iter), GFP_KERNEL); disk_seqf_start() 797 if (!iter) disk_seqf_start() 800 seqf->private = iter; disk_seqf_start() 801 class_dev_iter_init(iter, &block_class, NULL, &disk_type); disk_seqf_start() 803 dev = class_dev_iter_next(iter); disk_seqf_start() 825 struct class_dev_iter *iter = seqf->private; disk_seqf_stop() local 828 if (iter) { disk_seqf_stop() 829 class_dev_iter_exit(iter); disk_seqf_stop() 830 kfree(iter); disk_seqf_stop() 1221 struct class_dev_iter iter; blk_lookup_devt() local 1224 class_dev_iter_init(&iter, &block_class, NULL, &disk_type); blk_lookup_devt() 1225 while ((dev = class_dev_iter_next(&iter))) { blk_lookup_devt() 1248 class_dev_iter_exit(&iter); blk_lookup_devt()
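printk_all_partitions() and blk_lookup_devt() above share one idiom: class_dev_iter_init()/class_dev_iter_next()/class_dev_iter_exit() over the block class. A hedged sketch of the same walk; note that disk_type is static to block/genhd.c, so this only illustrates the shape as it appears inside that file:

    #include <linux/device.h>
    #include <linux/genhd.h>

    struct class_dev_iter iter;
    struct device *dev;

    class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
    while ((dev = class_dev_iter_next(&iter))) {
            struct gendisk *disk = dev_to_disk(dev);

            pr_info("disk %s\n", disk->disk_name);
    }
    class_dev_iter_exit(&iter);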
|
H A D | blk-integrity.c | 49 struct bvec_iter iter; blk_rq_count_integrity_sg() local 52 bio_for_each_integrity_vec(iv, bio, iter) { bio_for_each_integrity_vec() 95 struct bvec_iter iter; blk_rq_map_integrity_sg() local 98 bio_for_each_integrity_vec(iv, bio, iter) { bio_for_each_integrity_vec()
|
/linux-4.1.27/net/netlabel/ |
H A D | netlabel_addrlist.h | 96 #define netlbl_af4list_foreach(iter, head) \ 97 for (iter = __af4list_valid((head)->next, head); \ 98 &iter->list != (head); \ 99 iter = __af4list_valid(iter->list.next, head)) 101 #define netlbl_af4list_foreach_rcu(iter, head) \ 102 for (iter = __af4list_valid_rcu((head)->next, head); \ 103 &iter->list != (head); \ 104 iter = __af4list_valid_rcu(iter->list.next, head)) 106 #define netlbl_af4list_foreach_safe(iter, tmp, head) \ 107 for (iter = __af4list_valid((head)->next, head), \ 108 tmp = __af4list_valid(iter->list.next, head); \ 109 &iter->list != (head); \ 110 iter = tmp, tmp = __af4list_valid(iter->list.next, head)) 163 #define netlbl_af6list_foreach(iter, head) \ 164 for (iter = __af6list_valid((head)->next, head); \ 165 &iter->list != (head); \ 166 iter = __af6list_valid(iter->list.next, head)) 168 #define netlbl_af6list_foreach_rcu(iter, head) \ 169 for (iter = __af6list_valid_rcu((head)->next, head); \ 170 &iter->list != (head); \ 171 iter = __af6list_valid_rcu(iter->list.next, head)) 173 #define netlbl_af6list_foreach_safe(iter, tmp, head) \ 174 for (iter = __af6list_valid((head)->next, head), \ 175 tmp = __af6list_valid(iter->list.next, head); \ 176 &iter->list != (head); \ 177 iter = tmp, tmp = __af6list_valid(iter->list.next, head))
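The foreach macros above already skip invalid entries via __af4list_valid()/__af6list_valid(), so a caller only supplies the cursor and the list head. A minimal sketch under RCU (the function is hypothetical, mirroring netlbl_af4list_search() in netlabel_addrlist.c below):

    #include "netlabel_addrlist.h"

    static struct netlbl_af4list *match_addr(__be32 addr,
                                             struct list_head *head)
    {
            struct netlbl_af4list *iter;

            netlbl_af4list_foreach_rcu(iter, head)
                    if ((addr & iter->mask) == iter->addr)
                            return iter;
            return NULL;
    }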
|
H A D | netlabel_addrlist.c | 63 struct netlbl_af4list *iter; netlbl_af4list_search() local 65 list_for_each_entry_rcu(iter, head, list) netlbl_af4list_search() 66 if (iter->valid && (addr & iter->mask) == iter->addr) netlbl_af4list_search() 67 return iter; netlbl_af4list_search() 88 struct netlbl_af4list *iter; netlbl_af4list_search_exact() local 90 list_for_each_entry_rcu(iter, head, list) netlbl_af4list_search_exact() 91 if (iter->valid && iter->addr == addr && iter->mask == mask) netlbl_af4list_search_exact() 92 return iter; netlbl_af4list_search_exact() 113 struct netlbl_af6list *iter; netlbl_af6list_search() local 115 list_for_each_entry_rcu(iter, head, list) netlbl_af6list_search() 116 if (iter->valid && netlbl_af6list_search() 117 ipv6_masked_addr_cmp(&iter->addr, &iter->mask, addr) == 0) netlbl_af6list_search() 118 return iter; netlbl_af6list_search() 139 struct netlbl_af6list *iter; netlbl_af6list_search_exact() local 141 list_for_each_entry_rcu(iter, head, list) netlbl_af6list_search_exact() 142 if (iter->valid && netlbl_af6list_search_exact() 143 ipv6_addr_equal(&iter->addr, addr) && netlbl_af6list_search_exact() 144 ipv6_addr_equal(&iter->mask, mask)) netlbl_af6list_search_exact() 145 return iter; netlbl_af6list_search_exact() 164 struct netlbl_af4list *iter; netlbl_af4list_add() local 166 iter = netlbl_af4list_search(entry->addr, head); netlbl_af4list_add() 167 if (iter != NULL && netlbl_af4list_add() 168 iter->addr == entry->addr && iter->mask == entry->mask) netlbl_af4list_add() 175 list_for_each_entry_rcu(iter, head, list) list_for_each_entry_rcu() 176 if (iter->valid && list_for_each_entry_rcu() 177 ntohl(entry->mask) > ntohl(iter->mask)) { list_for_each_entry_rcu() 179 iter->list.prev, list_for_each_entry_rcu() 180 &iter->list); list_for_each_entry_rcu() 201 struct netlbl_af6list *iter; netlbl_af6list_add() local 203 iter = netlbl_af6list_search(&entry->addr, head); netlbl_af6list_add() 204 if (iter != NULL && netlbl_af6list_add() 205 ipv6_addr_equal(&iter->addr, &entry->addr) && netlbl_af6list_add() 206 ipv6_addr_equal(&iter->mask, &entry->mask)) netlbl_af6list_add() 213 list_for_each_entry_rcu(iter, head, list) list_for_each_entry_rcu() 214 if (iter->valid && list_for_each_entry_rcu() 215 ipv6_addr_cmp(&entry->mask, &iter->mask) > 0) { list_for_each_entry_rcu() 217 iter->list.prev, list_for_each_entry_rcu() 218 &iter->list); list_for_each_entry_rcu() 371 int iter = -1; netlbl_af6list_audit_addr() local 372 while (ntohl(mask->s6_addr32[++iter]) == 0xffffffff) netlbl_af6list_audit_addr() 374 mask_val = ntohl(mask->s6_addr32[iter]); netlbl_af6list_audit_addr()
|
H A D | netlabel_cipso_v4.c | 105 u32 iter = 0; netlbl_cipsov4_add_common() local 116 if (iter >= CIPSO_V4_TAG_MAXCNT) netlbl_cipsov4_add_common() 118 doi_def->tags[iter++] = nla_get_u8(nla); netlbl_cipsov4_add_common() 120 while (iter < CIPSO_V4_TAG_MAXCNT) netlbl_cipsov4_add_common() 121 doi_def->tags[iter++] = CIPSO_V4_TAG_INVALID; netlbl_cipsov4_add_common() 150 u32 iter; netlbl_cipsov4_add_std() local 220 for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++) 221 doi_def->map.std->lvl.local[iter] = CIPSO_V4_INV_LVL; 222 for (iter = 0; iter < doi_def->map.std->lvl.cipso_size; iter++) 223 doi_def->map.std->lvl.cipso[iter] = CIPSO_V4_INV_LVL; 295 for (iter = 0; iter < doi_def->map.std->cat.local_size; iter++) 296 doi_def->map.std->cat.local[iter] = CIPSO_V4_INV_CAT; 297 for (iter = 0; iter < doi_def->map.std->cat.cipso_size; iter++) 298 doi_def->map.std->cat.cipso[iter] = CIPSO_V4_INV_CAT; 474 u32 iter; netlbl_cipsov4_list() local 512 for (iter = 0; netlbl_cipsov4_list() 513 iter < CIPSO_V4_TAG_MAXCNT && netlbl_cipsov4_list() 514 doi_def->tags[iter] != CIPSO_V4_TAG_INVALID; netlbl_cipsov4_list() 515 iter++) { netlbl_cipsov4_list() 518 doi_def->tags[iter]); netlbl_cipsov4_list() 531 for (iter = 0; netlbl_cipsov4_list() 532 iter < doi_def->map.std->lvl.local_size; netlbl_cipsov4_list() 533 iter++) { netlbl_cipsov4_list() 534 if (doi_def->map.std->lvl.local[iter] == netlbl_cipsov4_list() 545 iter); netlbl_cipsov4_list() 550 doi_def->map.std->lvl.local[iter]); netlbl_cipsov4_list() 562 for (iter = 0; netlbl_cipsov4_list() 563 iter < doi_def->map.std->cat.local_size; netlbl_cipsov4_list() 564 iter++) { netlbl_cipsov4_list() 565 if (doi_def->map.std->cat.local[iter] == netlbl_cipsov4_list() 576 iter); netlbl_cipsov4_list() 581 doi_def->map.std->cat.local[iter]); netlbl_cipsov4_list()
|
H A D | netlabel_kapi.c | 431 struct netlbl_lsm_catmap *iter = *catmap; _netlbl_catmap_getnode() local 434 if (iter == NULL) _netlbl_catmap_getnode() 436 if (offset < iter->startbit) _netlbl_catmap_getnode() 438 while (iter && offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) { _netlbl_catmap_getnode() 439 prev = iter; _netlbl_catmap_getnode() 440 iter = iter->next; _netlbl_catmap_getnode() 442 if (iter == NULL || offset < iter->startbit) _netlbl_catmap_getnode() 445 return iter; _netlbl_catmap_getnode() 449 return iter; _netlbl_catmap_getnode() 454 iter = netlbl_catmap_alloc(gfp_flags); _netlbl_catmap_getnode() 455 if (iter == NULL) _netlbl_catmap_getnode() 457 iter->startbit = offset & ~(NETLBL_CATMAP_SIZE - 1); _netlbl_catmap_getnode() 460 iter->next = *catmap; _netlbl_catmap_getnode() 461 *catmap = iter; _netlbl_catmap_getnode() 463 iter->next = prev->next; _netlbl_catmap_getnode() 464 prev->next = iter; _netlbl_catmap_getnode() 467 return iter; _netlbl_catmap_getnode() 482 struct netlbl_lsm_catmap *iter = catmap; netlbl_catmap_walk() local 487 iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0); netlbl_catmap_walk() 488 if (iter == NULL) netlbl_catmap_walk() 490 if (offset > iter->startbit) { netlbl_catmap_walk() 491 offset -= iter->startbit; netlbl_catmap_walk() 498 bitmap = iter->bitmap[idx] >> bit; netlbl_catmap_walk() 506 return iter->startbit + netlbl_catmap_walk() 510 if (iter->next != NULL) { netlbl_catmap_walk() 511 iter = iter->next; netlbl_catmap_walk() 516 bitmap = iter->bitmap[idx]; netlbl_catmap_walk() 536 struct netlbl_lsm_catmap *iter; netlbl_catmap_walkrng() local 543 iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0); netlbl_catmap_walkrng() 544 if (iter == NULL) netlbl_catmap_walkrng() 546 if (offset > iter->startbit) { netlbl_catmap_walkrng() 547 offset -= iter->startbit; netlbl_catmap_walkrng() 557 bitmap = iter->bitmap[idx]; netlbl_catmap_walkrng() 566 return iter->startbit + netlbl_catmap_walkrng() 569 if (iter->next == NULL) netlbl_catmap_walkrng() 570 return iter->startbit + NETLBL_CATMAP_SIZE - 1; netlbl_catmap_walkrng() 571 prev = iter; netlbl_catmap_walkrng() 572 iter = iter->next; netlbl_catmap_walkrng() 600 struct netlbl_lsm_catmap *iter; netlbl_catmap_getlong() local 612 iter = _netlbl_catmap_getnode(&catmap, off, _CM_F_NONE, 0); netlbl_catmap_getlong() 613 if (iter == NULL) { netlbl_catmap_getlong() 618 if (off < iter->startbit) { netlbl_catmap_getlong() 619 off = iter->startbit; netlbl_catmap_getlong() 622 off -= iter->startbit; netlbl_catmap_getlong() 625 *bitmap = iter->bitmap[idx] >> (off % NETLBL_CATMAP_SIZE); netlbl_catmap_getlong() 645 struct netlbl_lsm_catmap *iter; netlbl_catmap_setbit() local 648 iter = _netlbl_catmap_getnode(catmap, bit, _CM_F_ALLOC, flags); netlbl_catmap_setbit() 649 if (iter == NULL) netlbl_catmap_setbit() 652 bit -= iter->startbit; netlbl_catmap_setbit() 654 iter->bitmap[idx] |= NETLBL_CATMAP_BIT << (bit % NETLBL_CATMAP_MAPSIZE); netlbl_catmap_setbit() 712 struct netlbl_lsm_catmap *iter; netlbl_catmap_setlong() local 719 iter = _netlbl_catmap_getnode(catmap, offset, _CM_F_ALLOC, flags); netlbl_catmap_setlong() 720 if (iter == NULL) netlbl_catmap_setlong() 723 offset -= iter->startbit; netlbl_catmap_setlong() 725 iter->bitmap[idx] |= bitmap << (offset % NETLBL_CATMAP_MAPSIZE); netlbl_catmap_setlong()
|
H A D | netlabel_domainhash.h | 56 #define netlbl_domhsh_addr4_entry(iter) \ 57 container_of(iter, struct netlbl_domaddr4_map, list) 63 #define netlbl_domhsh_addr6_entry(iter) \ 64 container_of(iter, struct netlbl_domaddr6_map, list)
|
H A D | netlabel_domainhash.c | 117 u32 iter; netlbl_domhsh_hash() local 124 for (iter = 0, val = 0, len = strlen(key); iter < len; iter++) netlbl_domhsh_hash() 125 val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter]; netlbl_domhsh_hash() 144 struct netlbl_dom_map *iter; netlbl_domhsh_search() local 149 list_for_each_entry_rcu(iter, bkt_list, list) netlbl_domhsh_search() 150 if (iter->valid && strcmp(iter->domain, domain) == 0) netlbl_domhsh_search() 151 return iter; netlbl_domhsh_search() 327 u32 iter; netlbl_domhsh_init() local 344 for (iter = 0; iter < hsh_tbl->size; iter++) netlbl_domhsh_init() 345 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); netlbl_domhsh_init()
|
H A D | netlabel_unlabeled.c | 80 #define netlbl_unlhsh_addr4_entry(iter) \ 81 container_of(iter, struct netlbl_unlhsh_addr4, list) 88 #define netlbl_unlhsh_addr6_entry(iter) \ 89 container_of(iter, struct netlbl_unlhsh_addr6, list) 226 struct netlbl_unlhsh_iface *iter; netlbl_unlhsh_search_iface() local 230 list_for_each_entry_rcu(iter, bkt_list, list) netlbl_unlhsh_search_iface() 231 if (iter->valid && iter->ifindex == ifindex) netlbl_unlhsh_search_iface() 232 return iter; netlbl_unlhsh_search_iface() 1420 u32 iter; netlbl_unlabel_init() local 1437 for (iter = 0; iter < hsh_tbl->size; iter++) netlbl_unlabel_init() 1438 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); netlbl_unlabel_init()
|
/linux-4.1.27/kernel/gcov/ |
H A D | gcc_3_4.c | 346 static struct gcov_fn_info *get_func(struct gcov_iterator *iter) get_func() argument 348 return get_fn_info(iter->info, iter->function); get_func() 351 static struct type_info *get_type(struct gcov_iterator *iter) get_type() argument 353 return &iter->type_info[iter->type]; get_type() 364 struct gcov_iterator *iter; gcov_iter_new() local 366 iter = kzalloc(sizeof(struct gcov_iterator) + gcov_iter_new() 369 if (iter) gcov_iter_new() 370 iter->info = info; gcov_iter_new() 372 return iter; gcov_iter_new() 377 * @iter: file iterator to free 379 void gcov_iter_free(struct gcov_iterator *iter) gcov_iter_free() argument 381 kfree(iter); gcov_iter_free() 386 * @iter: file iterator 388 struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter) gcov_iter_get_info() argument 390 return iter->info; gcov_iter_get_info() 395 * @iter: file iterator 397 void gcov_iter_start(struct gcov_iterator *iter) gcov_iter_start() argument 401 iter->record = 0; gcov_iter_start() 402 iter->function = 0; gcov_iter_start() 403 iter->type = 0; gcov_iter_start() 404 iter->count = 0; gcov_iter_start() 405 iter->num_types = 0; gcov_iter_start() 407 if (counter_active(iter->info, i)) { gcov_iter_start() 408 iter->type_info[iter->num_types].ctr_type = i; gcov_iter_start() 409 iter->type_info[iter->num_types++].offset = 0; gcov_iter_start() 428 * @iter: file iterator 432 int gcov_iter_next(struct gcov_iterator *iter) gcov_iter_next() argument 434 switch (iter->record) { gcov_iter_next() 442 iter->record++; gcov_iter_next() 446 iter->count++; gcov_iter_next() 449 if (iter->count < get_func(iter)->n_ctrs[iter->type]) { gcov_iter_next() 450 iter->record = 9; gcov_iter_next() 454 get_type(iter)->offset += iter->count; gcov_iter_next() 455 iter->count = 0; gcov_iter_next() 456 iter->type++; gcov_iter_next() 459 if (iter->type < iter->num_types) { gcov_iter_next() 460 iter->record = 7; gcov_iter_next() 464 iter->type = 0; gcov_iter_next() 465 iter->function++; gcov_iter_next() 468 if (iter->function < iter->info->n_functions) gcov_iter_next() 469 iter->record = 3; gcov_iter_next() 471 iter->record = -1; gcov_iter_next() 475 if (iter->record == -1) gcov_iter_next() 516 * @iter: file iterator 521 int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq) gcov_iter_write() argument 525 switch (iter->record) { gcov_iter_write() 530 rc = seq_write_gcov_u32(seq, iter->info->version); gcov_iter_write() 533 rc = seq_write_gcov_u32(seq, iter->info->stamp); gcov_iter_write() 542 rc = seq_write_gcov_u32(seq, get_func(iter)->ident); gcov_iter_write() 545 rc = seq_write_gcov_u32(seq, get_func(iter)->checksum); gcov_iter_write() 549 GCOV_TAG_FOR_COUNTER(get_type(iter)->ctr_type)); gcov_iter_write() 553 get_func(iter)->n_ctrs[iter->type] * 2); gcov_iter_write() 557 iter->info->counts[iter->type]. gcov_iter_write() 558 values[iter->count + get_type(iter)->offset]); gcov_iter_write()
|
H A D | gcc_4_7.c | 476 struct gcov_iterator *iter; gcov_iter_new() local 478 iter = kzalloc(sizeof(struct gcov_iterator), GFP_KERNEL); gcov_iter_new() 479 if (!iter) gcov_iter_new() 482 iter->info = info; gcov_iter_new() 484 iter->size = convert_to_gcda(NULL, info); gcov_iter_new() 485 iter->buffer = vmalloc(iter->size); gcov_iter_new() 486 if (!iter->buffer) gcov_iter_new() 489 convert_to_gcda(iter->buffer, info); gcov_iter_new() 491 return iter; gcov_iter_new() 494 kfree(iter); gcov_iter_new() 501 * @iter: file iterator 503 void gcov_iter_free(struct gcov_iterator *iter) gcov_iter_free() argument 505 vfree(iter->buffer); gcov_iter_free() 506 kfree(iter); gcov_iter_free() 511 * @iter: file iterator 513 struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter) gcov_iter_get_info() argument 515 return iter->info; gcov_iter_get_info() 520 * @iter: file iterator 522 void gcov_iter_start(struct gcov_iterator *iter) gcov_iter_start() argument 524 iter->pos = 0; gcov_iter_start() 529 * @iter: file iterator 533 int gcov_iter_next(struct gcov_iterator *iter) gcov_iter_next() argument 535 if (iter->pos < iter->size) gcov_iter_next() 536 iter->pos += ITER_STRIDE; gcov_iter_next() 538 if (iter->pos >= iter->size) gcov_iter_next() 546 * @iter: file iterator 551 int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq) gcov_iter_write() argument 555 if (iter->pos >= iter->size) gcov_iter_write() 559 if (iter->pos + len > iter->size) gcov_iter_write() 560 len = iter->size - iter->pos; gcov_iter_write() 562 seq_write(seq, iter->buffer + iter->pos, len); gcov_iter_write()
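gcov_iter_new() above relies on a two-pass serializer: convert_to_gcda(NULL, info) only counts bytes, and the second call fills the buffer. The idiom in isolation ('serialize' and 'obj' are hypothetical names):

    #include <linux/vmalloc.h>

    size_t len = serialize(NULL, obj);   /* pass 1: measure only */
    void *buf = vmalloc(len);

    if (buf)
            serialize(buf, obj);         /* pass 2: fill the buffer */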
|
H A D | gcov.h | 63 void gcov_iter_free(struct gcov_iterator *iter); 64 void gcov_iter_start(struct gcov_iterator *iter); 65 int gcov_iter_next(struct gcov_iterator *iter); 66 int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq); 67 struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter);
|
H A D | fs.c | 109 struct gcov_iterator *iter = data; gcov_seq_next() local 111 if (gcov_iter_next(iter)) gcov_seq_next() 115 return iter; gcov_seq_next() 121 struct gcov_iterator *iter = data; gcov_seq_show() local 123 if (gcov_iter_write(iter, seq)) gcov_seq_show() 181 struct gcov_iterator *iter; gcov_seq_open() local 195 iter = gcov_iter_new(info); gcov_seq_open() 196 if (!iter) gcov_seq_open() 202 seq->private = iter; gcov_seq_open() 208 gcov_iter_free(iter); gcov_seq_open() 220 struct gcov_iterator *iter; gcov_seq_release() local 225 iter = seq->private; gcov_seq_release() 226 info = gcov_iter_get_info(iter); gcov_seq_release() 227 gcov_iter_free(iter); gcov_seq_release()
|
/linux-4.1.27/drivers/s390/cio/ |
H A D | blacklist.c | 287 struct ccwdev_iter *iter = s->private; cio_ignore_proc_seq_start() local 291 memset(iter, 0, sizeof(*iter)); cio_ignore_proc_seq_start() 292 iter->ssid = *offset / (__MAX_SUBCHANNEL + 1); cio_ignore_proc_seq_start() 293 iter->devno = *offset % (__MAX_SUBCHANNEL + 1); cio_ignore_proc_seq_start() 294 return iter; cio_ignore_proc_seq_start() 305 struct ccwdev_iter *iter; cio_ignore_proc_seq_next() local 309 iter = it; cio_ignore_proc_seq_next() 310 if (iter->devno == __MAX_SUBCHANNEL) { cio_ignore_proc_seq_next() 311 iter->devno = 0; cio_ignore_proc_seq_next() 312 iter->ssid++; cio_ignore_proc_seq_next() 313 if (iter->ssid > __MAX_SSID) cio_ignore_proc_seq_next() 316 iter->devno++; cio_ignore_proc_seq_next() 318 return iter; cio_ignore_proc_seq_next() 324 struct ccwdev_iter *iter; cio_ignore_proc_seq_show() local 326 iter = it; cio_ignore_proc_seq_show() 327 if (!is_blacklisted(iter->ssid, iter->devno)) cio_ignore_proc_seq_show() 330 if (!iter->in_range) { cio_ignore_proc_seq_show() 332 if ((iter->devno == __MAX_SUBCHANNEL) || cio_ignore_proc_seq_show() 333 !is_blacklisted(iter->ssid, iter->devno + 1)) { cio_ignore_proc_seq_show() 335 seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno); cio_ignore_proc_seq_show() 338 iter->in_range = 1; cio_ignore_proc_seq_show() 339 seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno); cio_ignore_proc_seq_show() 342 if ((iter->devno == __MAX_SUBCHANNEL) || cio_ignore_proc_seq_show() 343 !is_blacklisted(iter->ssid, iter->devno + 1)) { cio_ignore_proc_seq_show() 345 iter->in_range = 0; cio_ignore_proc_seq_show() 346 seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno); cio_ignore_proc_seq_show()
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/ |
H A D | icm.h | 88 struct mlx4_icm_iter *iter) mlx4_icm_first() 90 iter->icm = icm; mlx4_icm_first() 91 iter->chunk = list_empty(&icm->chunk_list) ? mlx4_icm_first() 94 iter->page_idx = 0; mlx4_icm_first() 97 static inline int mlx4_icm_last(struct mlx4_icm_iter *iter) mlx4_icm_last() argument 99 return !iter->chunk; mlx4_icm_last() 102 static inline void mlx4_icm_next(struct mlx4_icm_iter *iter) mlx4_icm_next() argument 104 if (++iter->page_idx >= iter->chunk->nsg) { mlx4_icm_next() 105 if (iter->chunk->list.next == &iter->icm->chunk_list) { mlx4_icm_next() 106 iter->chunk = NULL; mlx4_icm_next() 110 iter->chunk = list_entry(iter->chunk->list.next, mlx4_icm_next() 112 iter->page_idx = 0; mlx4_icm_next() 116 static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) mlx4_icm_addr() argument 118 return sg_dma_address(&iter->chunk->mem[iter->page_idx]); mlx4_icm_addr() 121 static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) mlx4_icm_size() argument 123 return sg_dma_len(&iter->chunk->mem[iter->page_idx]); mlx4_icm_size() 87 mlx4_icm_first(struct mlx4_icm *icm, struct mlx4_icm_iter *iter) mlx4_icm_first() argument
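The five inline helpers above form a complete chunk-walking protocol over DMA-mapped ICM pages. A hedged consumer sketch, assuming 'icm' is a valid, mapped mlx4_icm:

    #include "icm.h"

    struct mlx4_icm_iter iter;

    for (mlx4_icm_first(icm, &iter);
         !mlx4_icm_last(&iter);
         mlx4_icm_next(&iter)) {
            dma_addr_t addr = mlx4_icm_addr(&iter);

            pr_debug("ICM page at %pad, %lu bytes\n",
                     &addr, mlx4_icm_size(&iter));
    }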
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
H A D | mthca_memfree.h | 100 struct mthca_icm_iter *iter) mthca_icm_first() 102 iter->icm = icm; mthca_icm_first() 103 iter->chunk = list_empty(&icm->chunk_list) ? mthca_icm_first() 106 iter->page_idx = 0; mthca_icm_first() 109 static inline int mthca_icm_last(struct mthca_icm_iter *iter) mthca_icm_last() argument 111 return !iter->chunk; mthca_icm_last() 114 static inline void mthca_icm_next(struct mthca_icm_iter *iter) mthca_icm_next() argument 116 if (++iter->page_idx >= iter->chunk->nsg) { mthca_icm_next() 117 if (iter->chunk->list.next == &iter->icm->chunk_list) { mthca_icm_next() 118 iter->chunk = NULL; mthca_icm_next() 122 iter->chunk = list_entry(iter->chunk->list.next, mthca_icm_next() 124 iter->page_idx = 0; mthca_icm_next() 128 static inline dma_addr_t mthca_icm_addr(struct mthca_icm_iter *iter) mthca_icm_addr() argument 130 return sg_dma_address(&iter->chunk->mem[iter->page_idx]); mthca_icm_addr() 133 static inline unsigned long mthca_icm_size(struct mthca_icm_iter *iter) mthca_icm_size() argument 135 return sg_dma_len(&iter->chunk->mem[iter->page_idx]); mthca_icm_size() 99 mthca_icm_first(struct mthca_icm *icm, struct mthca_icm_iter *iter) mthca_icm_first() argument
|
/linux-4.1.27/kernel/ |
H A D | jump_label.c | 140 struct jump_entry *iter; __jump_label_text_reserved() local 142 iter = iter_start; __jump_label_text_reserved() 143 while (iter < iter_stop) { __jump_label_text_reserved() 144 if (addr_conflict(iter, start, end)) __jump_label_text_reserved() 146 iter++; __jump_label_text_reserved() 197 struct jump_entry *iter; jump_label_init() local 202 for (iter = iter_start; iter < iter_stop; iter++) { jump_label_init() 205 iterk = (struct static_key *)(unsigned long)iter->key; jump_label_init() 206 arch_jump_label_transform_static(iter, jump_label_type(iterk)); jump_label_init() 212 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH. jump_label_init() 214 *((unsigned long *)&key->entries) += (unsigned long)iter; jump_label_init() 272 struct jump_entry *iter; jump_label_apply_nops() local 278 for (iter = iter_start; iter < iter_stop; iter++) { jump_label_apply_nops() 279 arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE); jump_label_apply_nops() 287 struct jump_entry *iter; jump_label_add_module() local 297 for (iter = iter_start; iter < iter_stop; iter++) { jump_label_add_module() 300 iterk = (struct static_key *)(unsigned long)iter->key; jump_label_add_module() 305 if (__module_address(iter->key) == mod) { jump_label_add_module() 307 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH. jump_label_add_module() 309 *((unsigned long *)&key->entries) += (unsigned long)iter; jump_label_add_module() 317 jlm->entries = iter; jump_label_add_module() 322 __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE); jump_label_add_module() 332 struct jump_entry *iter; jump_label_del_module() local 336 for (iter = iter_start; iter < iter_stop; iter++) { jump_label_del_module() 337 if (iter->key == (jump_label_t)(unsigned long)key) jump_label_del_module() 340 key = (struct static_key *)(unsigned long)iter->key; jump_label_del_module() 342 if (__module_address(iter->key) == mod) jump_label_del_module() 364 struct jump_entry *iter; jump_label_invalidate_module_init() local 366 for (iter = iter_start; iter < iter_stop; iter++) { jump_label_invalidate_module_init() 367 if (within_module_init(iter->code, mod)) jump_label_invalidate_module_init() 368 iter->code = 0; jump_label_invalidate_module_init()
|
H A D | kallsyms.c | 458 static int get_ksymbol_mod(struct kallsym_iter *iter) get_ksymbol_mod() argument 460 if (module_get_kallsym(iter->pos - kallsyms_num_syms, &iter->value, get_ksymbol_mod() 461 &iter->type, iter->name, iter->module_name, get_ksymbol_mod() 462 &iter->exported) < 0) get_ksymbol_mod() 468 static unsigned long get_ksymbol_core(struct kallsym_iter *iter) get_ksymbol_core() argument 470 unsigned off = iter->nameoff; get_ksymbol_core() 472 iter->module_name[0] = '\0'; get_ksymbol_core() 473 iter->value = kallsyms_addresses[iter->pos]; get_ksymbol_core() 475 iter->type = kallsyms_get_symbol_type(off); get_ksymbol_core() 477 off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name)); get_ksymbol_core() 479 return off - iter->nameoff; get_ksymbol_core() 482 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) reset_iter() argument 484 iter->name[0] = '\0'; reset_iter() 485 iter->nameoff = get_symbol_offset(new_pos); reset_iter() 486 iter->pos = new_pos; reset_iter() 490 static int update_iter(struct kallsym_iter *iter, loff_t pos) update_iter() argument 494 iter->pos = pos; update_iter() 495 return get_ksymbol_mod(iter); update_iter() 499 if (pos != iter->pos) update_iter() 500 reset_iter(iter, pos); update_iter() 502 iter->nameoff += get_ksymbol_core(iter); update_iter() 503 iter->pos++; update_iter() 530 struct kallsym_iter *iter = m->private; s_show() local 533 if (!iter->name[0]) s_show() 536 if (iter->module_name[0]) { s_show() 543 type = iter->exported ? toupper(iter->type) : s_show() 544 tolower(iter->type); s_show() 545 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value, s_show() 546 type, iter->name, iter->module_name); s_show() 548 seq_printf(m, "%pK %c %s\n", (void *)iter->value, s_show() 549 iter->type, iter->name); s_show() 567 struct kallsym_iter *iter; kallsyms_open() local 568 iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter)); kallsyms_open() 569 if (!iter) kallsyms_open() 571 reset_iter(iter, 0); kallsyms_open()
|
H A D | tracepoint.c | 353 struct tracepoint * const *iter; tp_module_going_check_quiescent() local 357 for (iter = begin; iter < end; iter++) tp_module_going_check_quiescent() 358 WARN_ON_ONCE((*iter)->funcs); tp_module_going_check_quiescent() 467 struct tracepoint * const *iter; for_each_tracepoint_range() local 471 for (iter = begin; iter < end; iter++) for_each_tracepoint_range() 472 fct(*iter, priv); for_each_tracepoint_range()
|
/linux-4.1.27/arch/sparc/prom/ |
H A D | bootstr_32.c | 18 int iter; prom_getbootargs() local 30 for (iter = 1; iter < 8; iter++) { prom_getbootargs() 31 arg = (*(romvec->pv_v0bootargs))->argv[iter]; prom_getbootargs()
|
/linux-4.1.27/lib/ |
H A D | cordic.c | 59 unsigned iter; cordic_calc_iq() local 80 for (iter = 0; iter < CORDIC_NUM_ITER; iter++) { cordic_calc_iq() 82 valtmp = coord.i - (coord.q >> iter); cordic_calc_iq() 83 coord.q += (coord.i >> iter); cordic_calc_iq() 84 angle += arctan_table[iter]; cordic_calc_iq() 86 valtmp = coord.i + (coord.q >> iter); cordic_calc_iq() 87 coord.q -= (coord.i >> iter); cordic_calc_iq() 88 angle -= arctan_table[iter]; cordic_calc_iq()
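The loop above is the standard CORDIC rotation: each pass shifts by one more bit and adds or subtracts a precomputed arctangent, converging on sin/cos. Callers only see cordic_calc_iq(); a hedged usage sketch (the exact fixed-point scale of the result is defined by lib/cordic.c):

    #include <linux/cordic.h>

    struct cordic_iq coord = cordic_calc_iq(45);   /* angle in degrees */
    /* coord.i approximates cos(45deg) and coord.q sin(45deg),
     * in the library's fixed-point scale. */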
|
H A D | plist.c | 76 struct plist_node *first, *iter, *prev = NULL; plist_add() local 86 first = iter = plist_first(head); plist_add() 89 if (node->prio < iter->prio) { plist_add() 90 node_next = &iter->node_list; plist_add() 94 prev = iter; plist_add() 95 iter = list_entry(iter->prio_list.next, plist_add() 97 } while (iter != first); plist_add() 100 list_add_tail(&node->prio_list, &iter->prio_list); plist_add() 148 struct plist_node *iter; plist_requeue() local 158 iter = plist_next(node); plist_requeue() 160 if (node->prio != iter->prio) plist_requeue() 165 plist_for_each_continue(iter, head) { plist_for_each_continue() 166 if (node->prio != iter->prio) { plist_for_each_continue() 167 node_next = &iter->node_list; plist_for_each_continue()
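plist_add() above maintains two intrusive lists so insertion stays ordered by ascending prio. A minimal sketch of building such a list (names are hypothetical):

    #include <linux/plist.h>

    static void plist_sketch(void)
    {
            static PLIST_HEAD(sketch_head);
            struct plist_node a, b;

            plist_node_init(&a, 1);
            plist_node_init(&b, 0);
            plist_add(&a, &sketch_head);
            plist_add(&b, &sketch_head);   /* lower prio value sorts first */
            /* plist_first(&sketch_head) is now &b */
    }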
|
H A D | rhashtable.c | 480 * @iter: Hash table Iterator 498 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) rhashtable_walk_init() argument 500 iter->ht = ht; rhashtable_walk_init() 501 iter->p = NULL; rhashtable_walk_init() 502 iter->slot = 0; rhashtable_walk_init() 503 iter->skip = 0; rhashtable_walk_init() 505 iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL); rhashtable_walk_init() 506 if (!iter->walker) rhashtable_walk_init() 510 iter->walker->tbl = rhashtable_walk_init() 512 list_add(&iter->walker->list, &iter->walker->tbl->walkers); rhashtable_walk_init() 521 * @iter: Hash table Iterator 525 void rhashtable_walk_exit(struct rhashtable_iter *iter) rhashtable_walk_exit() argument 527 spin_lock(&iter->ht->lock); rhashtable_walk_exit() 528 if (iter->walker->tbl) rhashtable_walk_exit() 529 list_del(&iter->walker->list); rhashtable_walk_exit() 530 spin_unlock(&iter->ht->lock); rhashtable_walk_exit() 531 kfree(iter->walker); rhashtable_walk_exit() 537 * @iter: Hash table iterator 549 int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires() 552 struct rhashtable *ht = iter->ht; __acquires() 557 if (iter->walker->tbl) __acquires() 558 list_del(&iter->walker->list); __acquires() 561 if (!iter->walker->tbl) { __acquires() 562 iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); __acquires() 572 * @iter: Hash table iterator 582 void *rhashtable_walk_next(struct rhashtable_iter *iter) rhashtable_walk_next() argument 584 struct bucket_table *tbl = iter->walker->tbl; rhashtable_walk_next() 585 struct rhashtable *ht = iter->ht; rhashtable_walk_next() 586 struct rhash_head *p = iter->p; rhashtable_walk_next() 590 p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot); rhashtable_walk_next() 594 for (; iter->slot < tbl->size; iter->slot++) { rhashtable_walk_next() 595 int skip = iter->skip; rhashtable_walk_next() 597 rht_for_each_rcu(p, tbl, iter->slot) { rhashtable_walk_next() 605 iter->skip++; rhashtable_walk_next() 606 iter->p = p; rhashtable_walk_next() 611 iter->skip = 0; rhashtable_walk_next() 614 iter->p = NULL; rhashtable_walk_next() 619 iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht); rhashtable_walk_next() 620 if (iter->walker->tbl) { rhashtable_walk_next() 621 iter->slot = 0; rhashtable_walk_next() 622 iter->skip = 0; rhashtable_walk_next() 634 * @iter: Hash table iterator 638 void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases() 642 struct bucket_table *tbl = iter->walker->tbl; __releases() 647 ht = iter->ht; __releases() 651 list_add(&iter->walker->list, &tbl->walkers); __releases() 653 iter->walker->tbl = NULL; __releases() 656 iter->p = NULL; __releases()
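The walker functions above compose into one protocol: init once, then start/next.../stop per pass, and exit when done; next may return ERR_PTR(-EAGAIN) when a resize interleaves, in which case entries can repeat. A hedged sketch over an assumed table 'ht':

    #include <linux/rhashtable.h>
    #include <linux/err.h>

    static void walk_all(struct rhashtable *ht)
    {
            struct rhashtable_iter hti;
            void *obj;

            if (rhashtable_walk_init(ht, &hti))
                    return;

            rhashtable_walk_start(&hti);

            while ((obj = rhashtable_walk_next(&hti))) {
                    if (IS_ERR(obj)) {
                            if (PTR_ERR(obj) == -EAGAIN)
                                    continue;   /* resize mid-walk */
                            break;
                    }
                    /* consume obj */
            }

            rhashtable_walk_stop(&hti);
            rhashtable_walk_exit(&hti);
    }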
|
H A D | dynamic_debug.c | 684 static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter) ddebug_iter_first() argument 687 iter->table = NULL; ddebug_iter_first() 688 iter->idx = 0; ddebug_iter_first() 691 iter->table = list_entry(ddebug_tables.next, ddebug_iter_first() 693 iter->idx = 0; ddebug_iter_first() 694 return &iter->table->ddebugs[iter->idx]; ddebug_iter_first() 703 static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter) ddebug_iter_next() argument 705 if (iter->table == NULL) ddebug_iter_next() 707 if (++iter->idx == iter->table->num_ddebugs) { ddebug_iter_next() 709 iter->idx = 0; ddebug_iter_next() 710 if (list_is_last(&iter->table->link, &ddebug_tables)) { ddebug_iter_next() 711 iter->table = NULL; ddebug_iter_next() 714 iter->table = list_entry(iter->table->link.next, ddebug_iter_next() 717 return &iter->table->ddebugs[iter->idx]; ddebug_iter_next() 727 struct ddebug_iter *iter = m->private; ddebug_proc_start() local 739 dp = ddebug_iter_first(iter); ddebug_proc_start() 741 dp = ddebug_iter_next(iter); ddebug_proc_start() 752 struct ddebug_iter *iter = m->private; ddebug_proc_next() local 759 dp = ddebug_iter_first(iter); ddebug_proc_next() 761 dp = ddebug_iter_next(iter); ddebug_proc_next() 774 struct ddebug_iter *iter = m->private; ddebug_proc_show() local 788 iter->table->mod_name, dp->function, ddebug_proc_show() 972 struct _ddebug *iter, *iter_start; dynamic_debug_init() local 983 iter = __start___verbose; dynamic_debug_init() 984 modname = iter->modname; dynamic_debug_init() 985 iter_start = iter; dynamic_debug_init() 986 for (; iter < __stop___verbose; iter++) { dynamic_debug_init() 988 verbose_bytes += strlen(iter->modname) + strlen(iter->function) dynamic_debug_init() 989 + strlen(iter->filename) + strlen(iter->format); dynamic_debug_init() 991 if (strcmp(modname, iter->modname)) { dynamic_debug_init() 997 modname = iter->modname; dynamic_debug_init() 998 iter_start = iter; dynamic_debug_init()
|
H A D | radix-tree.c | 743 * @iter: iterator state 748 struct radix_tree_iter *iter, unsigned flags) radix_tree_next_chunk() 758 * Catch next_index overflow after ~0UL. iter->index never overflows radix_tree_next_chunk() 760 * And we cannot overflow iter->next_index in a single step, radix_tree_next_chunk() 766 index = iter->next_index; radix_tree_next_chunk() 767 if (!index && iter->index) radix_tree_next_chunk() 775 iter->index = 0; radix_tree_next_chunk() 776 iter->next_index = 1; radix_tree_next_chunk() 777 iter->tags = 1; radix_tree_next_chunk() 831 iter->index = index; radix_tree_next_chunk() 832 iter->next_index = (index | RADIX_TREE_MAP_MASK) + 1; radix_tree_next_chunk() 834 /* Construct iter->tags bit-mask from node->tags[tag] array */ radix_tree_next_chunk() 840 iter->tags = node->tags[tag][tag_long] >> tag_bit; radix_tree_next_chunk() 845 iter->tags |= node->tags[tag][tag_long + 1] << radix_tree_next_chunk() 848 iter->next_index = index + BITS_PER_LONG; radix_tree_next_chunk() 1009 struct radix_tree_iter iter; radix_tree_gang_lookup() local 1016 radix_tree_for_each_slot(slot, root, &iter, first_index) { radix_tree_gang_lookup() 1021 slot = radix_tree_iter_retry(&iter); radix_tree_gang_lookup() 1055 struct radix_tree_iter iter; radix_tree_gang_lookup_slot() local 1062 radix_tree_for_each_slot(slot, root, &iter, first_index) { radix_tree_gang_lookup_slot() 1065 indices[ret] = iter.index; radix_tree_gang_lookup_slot() 1092 struct radix_tree_iter iter; radix_tree_gang_lookup_tag() local 1099 radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { radix_tree_gang_lookup_tag() 1104 slot = radix_tree_iter_retry(&iter); radix_tree_gang_lookup_tag() 1133 struct radix_tree_iter iter; radix_tree_gang_lookup_tag_slot() local 1140 radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { radix_tree_gang_lookup_tag_slot() 747 radix_tree_next_chunk(struct radix_tree_root *root, struct radix_tree_iter *iter, unsigned flags) radix_tree_next_chunk() argument
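radix_tree_next_chunk() above is rarely called directly; the radix_tree_for_each_slot()/radix_tree_for_each_tagged() macros built on it drive the gang lookups shown. A minimal sketch of the same pattern under RCU ('root' is assumed populated):

    #include <linux/radix-tree.h>
    #include <linux/rcupdate.h>

    struct radix_tree_iter iter;
    void **slot;

    rcu_read_lock();
    radix_tree_for_each_slot(slot, root, &iter, 0)
            pr_debug("index %lu -> %p\n", iter.index,
                     radix_tree_deref_slot(slot));
    rcu_read_unlock();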
|
/linux-4.1.27/tools/perf/util/ |
H A D | hist.c | 478 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, iter_next_nop_entry() 485 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, iter_add_next_nop_entry() 492 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) iter_prepare_mem_entry() argument 494 struct perf_sample *sample = iter->sample; iter_prepare_mem_entry() 501 iter->priv = mi; iter_prepare_mem_entry() 506 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) iter_add_single_mem_entry() argument 509 struct mem_info *mi = iter->priv; iter_add_single_mem_entry() 510 struct hists *hists = evsel__hists(iter->evsel); iter_add_single_mem_entry() 516 cost = iter->sample->weight; iter_add_single_mem_entry() 527 he = __hists__add_entry(hists, al, iter->parent, NULL, mi, iter_add_single_mem_entry() 532 iter->he = he; iter_add_single_mem_entry() 537 iter_finish_mem_entry(struct hist_entry_iter *iter, iter_finish_mem_entry() argument 540 struct perf_evsel *evsel = iter->evsel; iter_finish_mem_entry() 542 struct hist_entry *he = iter->he; iter_finish_mem_entry() 550 err = hist_entry__append_callchain(he, iter->sample); iter_finish_mem_entry() 554 * We don't need to free iter->priv (mem_info) here since iter_finish_mem_entry() 558 iter->priv = NULL; iter_finish_mem_entry() 560 iter->he = NULL; iter_finish_mem_entry() 565 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) iter_prepare_branch_entry() argument 568 struct perf_sample *sample = iter->sample; iter_prepare_branch_entry() 574 iter->curr = 0; iter_prepare_branch_entry() 575 iter->total = sample->branch_stack->nr; iter_prepare_branch_entry() 577 iter->priv = bi; iter_prepare_branch_entry() 582 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused, iter_add_single_branch_entry() 586 iter->he = NULL; iter_add_single_branch_entry() 592 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) iter_next_branch_entry() argument 594 struct branch_info *bi = iter->priv; iter_next_branch_entry() 595 int i = iter->curr; iter_next_branch_entry() 600 if (iter->curr >= iter->total) iter_next_branch_entry() 610 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al) iter_add_next_branch_entry() argument 613 struct perf_evsel *evsel = iter->evsel; iter_add_next_branch_entry() 616 int i = iter->curr; iter_add_next_branch_entry() 619 bi = iter->priv; iter_add_next_branch_entry() 621 if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym)) iter_add_next_branch_entry() 628 he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL, iter_add_next_branch_entry() 636 iter->he = he; iter_add_next_branch_entry() 637 iter->curr++; iter_add_next_branch_entry() 642 iter_finish_branch_entry(struct hist_entry_iter *iter, iter_finish_branch_entry() argument 645 zfree(&iter->priv); iter_finish_branch_entry() 646 iter->he = NULL; iter_finish_branch_entry() 648 return iter->curr >= iter->total ? 0 : -1; iter_finish_branch_entry()
652 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused, iter_prepare_normal_entry() 659 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al) iter_add_single_normal_entry() argument 661 struct perf_evsel *evsel = iter->evsel; iter_add_single_normal_entry() 662 struct perf_sample *sample = iter->sample; iter_add_single_normal_entry() 665 he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL, iter_add_single_normal_entry() 671 iter->he = he; iter_add_single_normal_entry() 676 iter_finish_normal_entry(struct hist_entry_iter *iter, iter_finish_normal_entry() argument 679 struct hist_entry *he = iter->he; iter_finish_normal_entry() 680 struct perf_evsel *evsel = iter->evsel; iter_finish_normal_entry() 681 struct perf_sample *sample = iter->sample; iter_finish_normal_entry() 686 iter->he = NULL; iter_finish_normal_entry() 694 iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused, iter_prepare_cumulative_entry() 710 iter->priv = he_cache; iter_prepare_cumulative_entry() 711 iter->curr = 0; iter_prepare_cumulative_entry() 717 iter_add_single_cumulative_entry(struct hist_entry_iter *iter, iter_add_single_cumulative_entry() argument 720 struct perf_evsel *evsel = iter->evsel; iter_add_single_cumulative_entry() 722 struct perf_sample *sample = iter->sample; iter_add_single_cumulative_entry() 723 struct hist_entry **he_cache = iter->priv; iter_add_single_cumulative_entry() 727 he = __hists__add_entry(hists, al, iter->parent, NULL, NULL, iter_add_single_cumulative_entry() 733 iter->he = he; iter_add_single_cumulative_entry() 734 he_cache[iter->curr++] = he; iter_add_single_cumulative_entry() 750 iter_next_cumulative_entry(struct hist_entry_iter *iter, iter_next_cumulative_entry() argument 759 return fill_callchain_info(al, node, iter->hide_unresolved); iter_next_cumulative_entry() 763 iter_add_next_cumulative_entry(struct hist_entry_iter *iter, iter_add_next_cumulative_entry() argument 766 struct perf_evsel *evsel = iter->evsel; iter_add_next_cumulative_entry() 767 struct perf_sample *sample = iter->sample; iter_add_next_cumulative_entry() 768 struct hist_entry **he_cache = iter->priv; iter_add_next_cumulative_entry() 779 .parent = iter->parent, iter_add_next_cumulative_entry() 792 for (i = 0; i < iter->curr; i++) { iter_add_next_cumulative_entry() 795 iter->he = NULL; iter_add_next_cumulative_entry() 800 he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL, iter_add_next_cumulative_entry() 806 iter->he = he; iter_add_next_cumulative_entry() 807 he_cache[iter->curr++] = he; iter_add_next_cumulative_entry() 815 iter_finish_cumulative_entry(struct hist_entry_iter *iter, iter_finish_cumulative_entry() argument 818 zfree(&iter->priv); iter_finish_cumulative_entry() 819 iter->he = NULL; iter_finish_cumulative_entry() 856 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, hist_entry_iter__add() argument 862 err = sample__resolve_callchain(sample, &iter->parent, evsel, al, hist_entry_iter__add() 867 iter->evsel = evsel; hist_entry_iter__add() 868 iter->sample = sample; hist_entry_iter__add() 870 err = iter->ops->prepare_entry(iter, al); hist_entry_iter__add() 874 err = iter->ops->add_single_entry(iter, al); hist_entry_iter__add() 878 if (iter->he && iter->add_entry_cb) { hist_entry_iter__add() 879 err = iter->add_entry_cb(iter, al, true, arg); hist_entry_iter__add() 884 while (iter->ops->next_entry(iter, al)) { hist_entry_iter__add()
885 err = iter->ops->add_next_entry(iter, al); hist_entry_iter__add() 889 if (iter->he && iter->add_entry_cb) { hist_entry_iter__add() 890 err = iter->add_entry_cb(iter, al, false, arg); hist_entry_iter__add() 897 err2 = iter->ops->finish_entry(iter, al); hist_entry_iter__add() 961 struct hist_entry *iter; hists__collapse_insert_entry() local 966 iter = rb_entry(parent, struct hist_entry, rb_node_in); hists__collapse_insert_entry() 968 cmp = hist_entry__collapse(iter, he); hists__collapse_insert_entry() 971 he_stat__add_stat(&iter->stat, &he->stat); hists__collapse_insert_entry() 973 he_stat__add_stat(iter->stat_acc, he->stat_acc); hists__collapse_insert_entry() 978 iter->callchain, hists__collapse_insert_entry() 1106 struct hist_entry *iter; __hists__insert_output_entry() local 1114 iter = rb_entry(parent, struct hist_entry, rb_node); __hists__insert_output_entry() 1116 if (hist_entry__sort(he, iter) > 0) __hists__insert_output_entry() 1350 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); hists__find_entry() local 1351 int64_t cmp = hist_entry__collapse(iter, he); hists__find_entry() 1358 return iter; hists__find_entry()
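hist_entry_iter__add() above drives a fixed callback protocol: prepare_entry, add_single_entry, then next_entry/add_next_entry until exhausted, finish_entry last. A hedged sketch of invoking it with the in-tree ops table for plain samples ('al', 'evsel', 'sample', and 'err' are assumed resolved by the caller; field and symbol names follow this file):

    #include "util/hist.h"

    struct hist_entry_iter iter = {
            .ops = &hist_iter_normal,   /* plain, non-cumulative samples */
            .hide_unresolved = false,
    };

    err = hist_entry_iter__add(&iter, &al, evsel, sample,
                               PERF_MAX_STACK_DEPTH, NULL);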
|
H A D | comm.c | 50 struct comm_str *iter, *new; comm_str__findnew() local 55 iter = rb_entry(parent, struct comm_str, rb_node); comm_str__findnew() 57 cmp = strcmp(str, iter->str); comm_str__findnew() 59 return iter; comm_str__findnew()
|
H A D | ordered-events.c | 188 struct ordered_event *tmp, *iter; __ordered_events__flush() local 201 list_for_each_entry_safe(iter, tmp, head, list) { list_for_each_entry_safe() 205 if (iter->timestamp > limit) list_for_each_entry_safe() 207 ret = oe->deliver(oe, iter); list_for_each_entry_safe() 211 ordered_events__delete(oe, iter); list_for_each_entry_safe() 212 oe->last_flush = iter->timestamp; list_for_each_entry_safe()
|
H A D | hist.h | 97 int (*add_entry_cb)(struct hist_entry_iter *iter, 113 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
|
/linux-4.1.27/drivers/net/wireless/libertas/ |
H A D | firmware.c | 85 const struct lbs_fw_table *iter; load_next_firmware_from_table() local 88 iter = priv->fw_table; load_next_firmware_from_table() 90 iter = ++priv->fw_iter; load_next_firmware_from_table() 98 if (!iter->helper) { load_next_firmware_from_table() 104 if (iter->model != priv->fw_model) { load_next_firmware_from_table() 105 iter++; load_next_firmware_from_table() 109 priv->fw_iter = iter; load_next_firmware_from_table() 110 do_load_firmware(priv, iter->helper, helper_firmware_cb); load_next_firmware_from_table() 176 const struct lbs_fw_table *iter; lbs_get_firmware() local 183 iter = fw_table; lbs_get_firmware() 184 while (iter && iter->helper) { lbs_get_firmware() 185 if (iter->model != card_model) lbs_get_firmware() 189 ret = request_firmware(helper, iter->helper, dev); lbs_get_firmware() 197 if (iter->fwname == NULL) lbs_get_firmware() 202 ret = request_firmware(mainfw, iter->fwname, dev); lbs_get_firmware() 216 iter++; lbs_get_firmware()
|
/linux-4.1.27/drivers/infiniband/ulp/ipoib/ |
H A D | ipoib_fs.c | 60 struct ipoib_mcast_iter *iter; ipoib_mcg_seq_start() local 63 iter = ipoib_mcast_iter_init(file->private); ipoib_mcg_seq_start() 64 if (!iter) ipoib_mcg_seq_start() 68 if (ipoib_mcast_iter_next(iter)) { ipoib_mcg_seq_start() 69 kfree(iter); ipoib_mcg_seq_start() 74 return iter; ipoib_mcg_seq_start() 80 struct ipoib_mcast_iter *iter = iter_ptr; ipoib_mcg_seq_next() local 84 if (ipoib_mcast_iter_next(iter)) { ipoib_mcg_seq_next() 85 kfree(iter); ipoib_mcg_seq_next() 89 return iter; ipoib_mcg_seq_next() 99 struct ipoib_mcast_iter *iter = iter_ptr; ipoib_mcg_seq_show() local 105 if (!iter) ipoib_mcg_seq_show() 108 ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen, ipoib_mcg_seq_show() 159 struct ipoib_path_iter *iter; ipoib_path_seq_start() local 162 iter = ipoib_path_iter_init(file->private); ipoib_path_seq_start() 163 if (!iter) ipoib_path_seq_start() 167 if (ipoib_path_iter_next(iter)) { ipoib_path_seq_start() 168 kfree(iter); ipoib_path_seq_start() 173 return iter; ipoib_path_seq_start() 179 struct ipoib_path_iter *iter = iter_ptr; ipoib_path_seq_next() local 183 if (ipoib_path_iter_next(iter)) { ipoib_path_seq_next() 184 kfree(iter); ipoib_path_seq_next() 188 return iter; ipoib_path_seq_next() 198 struct ipoib_path_iter *iter = iter_ptr; ipoib_path_seq_show() local 203 if (!iter) ipoib_path_seq_show() 206 ipoib_path_iter_read(iter, &path); ipoib_path_seq_show()
|
H A D | ipoib_multicast.c | 935 struct ipoib_mcast_iter *iter; ipoib_mcast_iter_init() local 937 iter = kmalloc(sizeof *iter, GFP_KERNEL); ipoib_mcast_iter_init() 938 if (!iter) ipoib_mcast_iter_init() 941 iter->dev = dev; ipoib_mcast_iter_init() 942 memset(iter->mgid.raw, 0, 16); ipoib_mcast_iter_init() 944 if (ipoib_mcast_iter_next(iter)) { ipoib_mcast_iter_init() 945 kfree(iter); ipoib_mcast_iter_init() 949 return iter; ipoib_mcast_iter_init() 952 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter) ipoib_mcast_iter_next() argument 954 struct ipoib_dev_priv *priv = netdev_priv(iter->dev); ipoib_mcast_iter_next() 966 if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw, ipoib_mcast_iter_next() 968 iter->mgid = mcast->mcmember.mgid; ipoib_mcast_iter_next() 969 iter->created = mcast->created; ipoib_mcast_iter_next() 970 iter->queuelen = skb_queue_len(&mcast->pkt_queue); ipoib_mcast_iter_next() 971 iter->complete = !!mcast->ah; ipoib_mcast_iter_next() 972 iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY)); ipoib_mcast_iter_next() 987 void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter, ipoib_mcast_iter_read() argument 994 *mgid = iter->mgid; ipoib_mcast_iter_read() 995 *created = iter->created; ipoib_mcast_iter_read() 996 *queuelen = iter->queuelen; ipoib_mcast_iter_read() 997 *complete = iter->complete; ipoib_mcast_iter_read() 998 *send_only = iter->send_only; ipoib_mcast_iter_read()
|
H A D | ipoib_main.c | 332 struct ipoib_path_iter *iter; ipoib_path_iter_init() local 334 iter = kmalloc(sizeof *iter, GFP_KERNEL); ipoib_path_iter_init() 335 if (!iter) ipoib_path_iter_init() 338 iter->dev = dev; ipoib_path_iter_init() 339 memset(iter->path.pathrec.dgid.raw, 0, 16); ipoib_path_iter_init() 341 if (ipoib_path_iter_next(iter)) { ipoib_path_iter_init() 342 kfree(iter); ipoib_path_iter_init() 346 return iter; ipoib_path_iter_init() 349 int ipoib_path_iter_next(struct ipoib_path_iter *iter) ipoib_path_iter_next() argument 351 struct ipoib_dev_priv *priv = netdev_priv(iter->dev); ipoib_path_iter_next() 363 if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw, ipoib_path_iter_next() 365 iter->path = *path; ipoib_path_iter_next() 378 void ipoib_path_iter_read(struct ipoib_path_iter *iter, ipoib_path_iter_read() argument 381 *path = iter->path; ipoib_path_iter_read()
|
/linux-4.1.27/kernel/time/ |
H A D | timer_list.c | 293 struct timer_list_iter *iter = v; timer_list_show() local 295 if (iter->cpu == -1 && !iter->second_pass) timer_list_show() 296 timer_list_header(m, iter->now); timer_list_show() 297 else if (!iter->second_pass) timer_list_show() 298 print_cpu(m, iter->cpu, iter->now); timer_list_show() 300 else if (iter->cpu == -1 && iter->second_pass) timer_list_show() 303 print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu); timer_list_show() 326 static void *move_iter(struct timer_list_iter *iter, loff_t offset) move_iter() argument 329 iter->cpu = cpumask_next(iter->cpu, cpu_online_mask); move_iter() 330 if (iter->cpu >= nr_cpu_ids) { move_iter() 332 if (!iter->second_pass) { move_iter() 333 iter->cpu = -1; move_iter() 334 iter->second_pass = true; move_iter() 342 return iter; move_iter() 347 struct timer_list_iter *iter = file->private; timer_list_start() local 350 iter->now = ktime_to_ns(ktime_get()); timer_list_start() 351 iter->cpu = -1; timer_list_start() 352 iter->second_pass = false; timer_list_start() 353 return move_iter(iter, *offset); timer_list_start() 358 struct timer_list_iter *iter = file->private; timer_list_next() local 360 return move_iter(iter, 1); timer_list_next()
|
/linux-4.1.27/net/rds/ |
H A D | info.c | 104 void rds_info_iter_unmap(struct rds_info_iterator *iter) rds_info_iter_unmap() argument 106 if (iter->addr) { rds_info_iter_unmap() 107 kunmap_atomic(iter->addr); rds_info_iter_unmap() 108 iter->addr = NULL; rds_info_iter_unmap() 115 void rds_info_copy(struct rds_info_iterator *iter, void *data, rds_info_copy() argument 121 if (!iter->addr) rds_info_copy() 122 iter->addr = kmap_atomic(*iter->pages); rds_info_copy() 124 this = min(bytes, PAGE_SIZE - iter->offset); rds_info_copy() 127 "bytes %lu\n", *iter->pages, iter->addr, rds_info_copy() 128 iter->offset, this, data, bytes); rds_info_copy() 130 memcpy(iter->addr + iter->offset, data, this); rds_info_copy() 134 iter->offset += this; rds_info_copy() 136 if (iter->offset == PAGE_SIZE) { rds_info_copy() 137 kunmap_atomic(iter->addr); rds_info_copy() 138 iter->addr = NULL; rds_info_copy() 139 iter->offset = 0; rds_info_copy() 140 iter->pages++; rds_info_copy() 161 struct rds_info_iterator iter; rds_info_getsockopt() local 215 iter.pages = pages; rds_info_getsockopt() 216 iter.addr = NULL; rds_info_getsockopt() 217 iter.offset = start & (PAGE_SIZE - 1); rds_info_getsockopt() 219 func(sock, len, &iter, &lens); rds_info_getsockopt() 224 rds_info_iter_unmap(&iter); rds_info_getsockopt()
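rds_info_copy() above streams records into user pages one kmap_atomic() window at a time: it maps the current page lazily, copies min(bytes, PAGE_SIZE - offset), and unmaps and advances when a page fills. A handler plugged into this machinery declares its record geometry in lens and emits records through the iterator. A simplified sketch, with struct my_record as an illustrative payload (real handlers emit lens->nr records of lens->each bytes apiece):

static void my_info_func(struct socket *sock, unsigned int len,
			 struct rds_info_iterator *iter,
			 struct rds_info_lengths *lens)
{
	struct my_record rec = { 0 };	/* hypothetical record type */

	lens->nr = 1;			/* how many records exist */
	lens->each = sizeof(rec);	/* size of one record */

	/* only copy if the caller's buffer (len) has room */
	if (len >= sizeof(rec))
		rds_info_copy(iter, &rec, sizeof(rec));
}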
|
H A D | info.h | 14 * should be copied using @iter. The caller will deduce if it was copied 18 struct rds_info_iterator *iter, 25 void rds_info_copy(struct rds_info_iterator *iter, void *data, 27 void rds_info_iter_unmap(struct rds_info_iterator *iter);
|
H A D | transport.c | 106 * copies them using the iter if there is enough space for them. The 110 unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, rds_trans_stats_info_copy() argument 119 rds_info_iter_unmap(iter); rds_trans_stats_info_copy() 128 part = trans->stats_info_copy(iter, avail); rds_trans_stats_info_copy()
|
H A D | stats.c | 81 void rds_stats_info_copy(struct rds_info_iterator *iter, rds_stats_info_copy() argument 93 rds_info_copy(iter, &ctr, sizeof(ctr)); rds_stats_info_copy() 109 struct rds_info_iterator *iter, rds_stats_info() 133 rds_stats_info_copy(iter, (uint64_t *)&stats, rds_stat_names, 139 lens->nr = rds_trans_stats_info_copy(iter, avail) + 108 rds_stats_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) rds_stats_info() argument
|
H A D | ib_stats.c | 74 unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter, rds_ib_stats_info_copy() argument 93 rds_stats_info_copy(iter, (uint64_t *)&stats, rds_ib_stat_names,
|
H A D | iw_stats.c | 72 unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter, rds_iw_stats_info_copy() argument 91 rds_stats_info_copy(iter, (uint64_t *)&stats, rds_iw_stat_names,
|
H A D | tcp_stats.c | 51 unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter, rds_tcp_stats_info_copy() argument 70 rds_stats_info_copy(iter, (uint64_t *)&stats, rds_tcp_stat_names,
|
H A D | connection.c | 393 struct rds_info_iterator *iter, rds_conn_message_info() 423 rds_inc_info_copy(&rm->m_inc, iter, list_for_each_entry() 438 struct rds_info_iterator *iter, rds_conn_message_info_send() 441 rds_conn_message_info(sock, len, iter, lens, 1); rds_conn_message_info_send() 446 struct rds_info_iterator *iter, rds_conn_message_info_retrans() 449 rds_conn_message_info(sock, len, iter, lens, 0); rds_conn_message_info_retrans() 453 struct rds_info_iterator *iter, rds_for_each_conn_info() 480 rds_info_copy(iter, buffer, item_len); hlist_for_each_entry_rcu() 516 struct rds_info_iterator *iter, rds_conn_info() 519 rds_for_each_conn_info(sock, len, iter, lens, rds_conn_info() 392 rds_conn_message_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens, int want_send) rds_conn_message_info() argument 437 rds_conn_message_info_send(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) rds_conn_message_info_send() argument 444 rds_conn_message_info_retrans(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) rds_conn_message_info_retrans() argument 452 rds_for_each_conn_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens, int (*visitor)(struct rds_connection *, void *), size_t item_len) rds_for_each_conn_info() argument 515 rds_conn_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) rds_conn_info() argument
|
H A D | tcp.h | 84 unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
|
H A D | af_rds.c | 467 struct rds_info_iterator *iter, rds_sock_inc_info() 485 rds_inc_info_copy(inc, iter, inc->i_saddr, rds_sock_inc_info() 499 struct rds_info_iterator *iter, rds_sock_info() 521 rds_info_copy(iter, &sinfo, sizeof(sinfo)); rds_sock_info() 466 rds_sock_inc_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) rds_sock_inc_info() argument 498 rds_sock_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) rds_sock_info() argument
|
H A D | iw.c | 202 struct rds_info_iterator *iter, rds_iw_ic_info() 205 rds_for_each_conn_info(sock, len, iter, lens, rds_iw_ic_info() 201 rds_iw_ic_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) rds_iw_ic_info() argument
|
H A D | tcp.c | 153 struct rds_info_iterator *iter, rds_tcp_tc_info() 182 rds_info_copy(iter, &tsinfo, sizeof(tsinfo)); rds_tcp_tc_info() 152 rds_tcp_tc_info(struct socket *sock, unsigned int len, struct rds_info_iterator *iter, struct rds_info_lengths *lens) rds_tcp_tc_info() argument
|
/linux-4.1.27/arch/unicore32/include/asm/ |
H A D | memblock.h | 34 #define for_each_bank(iter, mi) \ 35 for (iter = 0; iter < (mi)->nr_banks; iter++)
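for_each_bank() above is nothing more than an index loop over mi->nr_banks. A usage sketch, assuming a meminfo-style descriptor (the struct layout and field names below are illustrative, not the real unicore32 definitions):

struct membank { unsigned long start, size; };		/* illustrative */
struct meminfo { int nr_banks; struct membank bank[8]; };

static unsigned long total_bank_bytes(const struct meminfo *mi)
{
	unsigned long total = 0;
	int iter;

	/* expands to: for (iter = 0; iter < (mi)->nr_banks; iter++) */
	for_each_bank(iter, mi)
		total += mi->bank[iter].size;
	return total;
}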
|
/linux-4.1.27/drivers/gpu/drm/ |
H A D | drm_vma_manager.c | 162 struct rb_node *iter; drm_vma_offset_lookup_locked() local 165 iter = mgr->vm_addr_space_rb.rb_node; drm_vma_offset_lookup_locked() 168 while (likely(iter)) { drm_vma_offset_lookup_locked() 169 node = rb_entry(iter, struct drm_vma_offset_node, vm_rb); drm_vma_offset_lookup_locked() 172 iter = iter->rb_right; drm_vma_offset_lookup_locked() 177 iter = iter->rb_left; drm_vma_offset_lookup_locked() 196 struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node; _drm_vma_offset_add_rb() local 200 while (likely(*iter)) { _drm_vma_offset_add_rb() 201 parent = *iter; _drm_vma_offset_add_rb() 202 iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb); _drm_vma_offset_add_rb() 205 iter = &(*iter)->rb_left; _drm_vma_offset_add_rb() 207 iter = &(*iter)->rb_right; _drm_vma_offset_add_rb() 212 rb_link_node(&node->vm_rb, parent, iter); _drm_vma_offset_add_rb() 312 struct rb_node **iter; drm_vma_node_allow() local 325 iter = &node->vm_files.rb_node; drm_vma_node_allow() 327 while (likely(*iter)) { drm_vma_node_allow() 328 parent = *iter; drm_vma_node_allow() 329 entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb); drm_vma_node_allow() 335 iter = &(*iter)->rb_right; drm_vma_node_allow() 337 iter = &(*iter)->rb_left; drm_vma_node_allow() 348 rb_link_node(&new->vm_rb, parent, iter); drm_vma_node_allow() 375 struct rb_node *iter; drm_vma_node_revoke() local 379 iter = node->vm_files.rb_node; drm_vma_node_revoke() 380 while (likely(iter)) { drm_vma_node_revoke() 381 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); drm_vma_node_revoke() 389 iter = iter->rb_right; drm_vma_node_revoke() 391 iter = iter->rb_left; drm_vma_node_revoke() 416 struct rb_node *iter; drm_vma_node_is_allowed() local 420 iter = node->vm_files.rb_node; drm_vma_node_is_allowed() 421 while (likely(iter)) { drm_vma_node_is_allowed() 422 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); drm_vma_node_is_allowed() 426 iter = iter->rb_right; drm_vma_node_is_allowed() 428 iter = iter->rb_left; drm_vma_node_is_allowed() 433 return iter; drm_vma_node_is_allowed()
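The insert paths above use the classic rb-tree "link pointer" idiom: iter walks struct rb_node ** slots, so that when the search falls off the tree, iter already names the empty child slot for rb_link_node() to splice into. The idiom in isolation, with a hypothetical keyed node:

#include <linux/rbtree.h>

struct my_node { struct rb_node rb; unsigned long key; };	/* illustrative */

static void my_rb_insert(struct rb_root *root, struct my_node *new)
{
	struct rb_node **iter = &root->rb_node, *parent = NULL;

	while (*iter) {
		struct my_node *n = rb_entry(*iter, struct my_node, rb);

		parent = *iter;
		iter = new->key < n->key ? &(*iter)->rb_left
					 : &(*iter)->rb_right;
	}
	rb_link_node(&new->rb, parent, iter);	/* splice into the empty slot */
	rb_insert_color(&new->rb, root);	/* rebalance */
}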
|
/linux-4.1.27/include/linux/ |
H A D | bio.h | 64 #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) 66 #define bvec_iter_page(bvec, iter) \ 67 (__bvec_iter_bvec((bvec), (iter))->bv_page) 69 #define bvec_iter_len(bvec, iter) \ 70 min((iter).bi_size, \ 71 __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) 73 #define bvec_iter_offset(bvec, iter) \ 74 (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) 76 #define bvec_iter_bvec(bvec, iter) \ 78 .bv_page = bvec_iter_page((bvec), (iter)), \ 79 .bv_len = bvec_iter_len((bvec), (iter)), \ 80 .bv_offset = bvec_iter_offset((bvec), (iter)), \ 83 #define bio_iter_iovec(bio, iter) \ 84 bvec_iter_bvec((bio)->bi_io_vec, (iter)) 86 #define bio_iter_page(bio, iter) \ 87 bvec_iter_page((bio)->bi_io_vec, (iter)) 88 #define bio_iter_len(bio, iter) \ 89 bvec_iter_len((bio)->bi_io_vec, (iter)) 90 #define bio_iter_offset(bio, iter) \ 91 bvec_iter_offset((bio)->bi_io_vec, (iter)) 162 #define __bio_kmap_atomic(bio, iter) \ 163 (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \ 164 bio_iter_iovec((bio), (iter)).bv_offset) 207 static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter, bvec_iter_advance() argument 210 WARN_ONCE(bytes > iter->bi_size, bvec_iter_advance() 211 "Attempted to advance past end of bvec iter\n"); bvec_iter_advance() 214 unsigned len = min(bytes, bvec_iter_len(bv, *iter)); bvec_iter_advance() 217 iter->bi_size -= len; bvec_iter_advance() 218 iter->bi_bvec_done += len; bvec_iter_advance() 220 if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { bvec_iter_advance() 221 iter->bi_bvec_done = 0; bvec_iter_advance() 222 iter->bi_idx++; bvec_iter_advance() 227 #define for_each_bvec(bvl, bio_vec, iter, start) \ 228 for (iter = (start); \ 229 (iter).bi_size && \ 230 ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ 231 bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) 234 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, bio_advance_iter() argument 237 iter->bi_sector += bytes >> 9; bio_advance_iter() 240 iter->bi_size -= bytes; bio_advance_iter() 242 bvec_iter_advance(bio->bi_io_vec, iter, bytes); bio_advance_iter() 245 #define __bio_for_each_segment(bvl, bio, iter, start) \ 246 for (iter = (start); \ 247 (iter).bi_size && \ 248 ((bvl = bio_iter_iovec((bio), (iter))), 1); \ 249 bio_advance_iter((bio), &(iter), (bvl).bv_len)) 251 #define bio_for_each_segment(bvl, bio, iter) \ 252 __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) 254 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) 260 struct bvec_iter iter; bio_segments() local 273 bio_for_each_segment(bv, bio, iter) bio_segments() 520 static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter, __bio_kmap_irq() argument 523 return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags); __bio_kmap_irq() 692 #define bip_for_each_vec(bvl, bip, iter) \ 693 for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
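bio_for_each_segment() copies bi_iter into a local struct bvec_iter and advances the copy with bio_advance_iter(), so walking a bio never mutates it; bvec_iter_bvec() synthesizes each struct bio_vec on the fly, honoring bi_bvec_done for partially consumed vectors. A minimal sketch of the usual walk:

static unsigned int count_bio_bytes(struct bio *bio)
{
	struct bio_vec bvec;		/* by value: synthesized per step */
	struct bvec_iter iter;
	unsigned int bytes = 0;

	/* iter starts as a copy of bio->bi_iter; the bio is untouched */
	bio_for_each_segment(bvec, bio, iter)
		bytes += bvec.bv_len;

	return bytes;
}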
|
H A D | radix-tree.h | 336 * @iter: pointer to iterator state 341 radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) radix_tree_iter_init() argument 344 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it radix_tree_iter_init() 351 iter->index = 0; radix_tree_iter_init() 352 iter->next_index = start; radix_tree_iter_init() 360 * @iter: iterator state 365 * @iter->next_index. It returns a pointer to the chunk's first slot. 366 * Also it fills @iter with data about chunk: position in the tree (index), 370 struct radix_tree_iter *iter, unsigned flags); 374 * @iter: iterator state 382 void **radix_tree_iter_retry(struct radix_tree_iter *iter) radix_tree_iter_retry() argument 384 iter->next_index = iter->index; radix_tree_iter_retry() 391 * @iter: pointer to radix tree iterator 395 radix_tree_chunk_size(struct radix_tree_iter *iter) radix_tree_chunk_size() argument 397 return iter->next_index - iter->index; radix_tree_chunk_size() 404 * @iter: pointer to interator state 408 * This function updates @iter->index in the case of a successful lookup. 409 * For tagged lookup it also eats @iter->tags. 412 radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) radix_tree_next_slot() argument 415 iter->tags >>= 1; radix_tree_next_slot() 416 if (likely(iter->tags & 1ul)) { radix_tree_next_slot() 417 iter->index++; radix_tree_next_slot() 420 if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) { radix_tree_next_slot() 421 unsigned offset = __ffs(iter->tags); radix_tree_next_slot() 423 iter->tags >>= offset; radix_tree_next_slot() 424 iter->index += offset + 1; radix_tree_next_slot() 428 long size = radix_tree_chunk_size(iter); radix_tree_next_slot() 432 iter->index++; radix_tree_next_slot() 437 iter->next_index = 0; radix_tree_next_slot() 450 * @iter: the struct radix_tree_iter pointer 456 #define radix_tree_for_each_chunk(slot, root, iter, start, flags) \ 457 for (slot = radix_tree_iter_init(iter, start) ; \ 458 (slot = radix_tree_next_chunk(root, iter, flags)) ;) 464 * @iter: the struct radix_tree_iter pointer 468 * @slot points to the radix tree slot, @iter->index contains its index. 470 #define radix_tree_for_each_chunk_slot(slot, iter, flags) \ 471 for (; slot ; slot = radix_tree_next_slot(slot, iter, flags)) 478 * @iter: the struct radix_tree_iter pointer 481 * @slot points to radix tree slot, @iter->index contains its index. 483 #define radix_tree_for_each_slot(slot, root, iter, start) \ 484 for (slot = radix_tree_iter_init(iter, start) ; \ 485 slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \ 486 slot = radix_tree_next_slot(slot, iter, 0)) 493 * @iter: the struct radix_tree_iter pointer 496 * @slot points to radix tree slot, @iter->index contains its index. 498 #define radix_tree_for_each_contig(slot, root, iter, start) \ 499 for (slot = radix_tree_iter_init(iter, start) ; \ 500 slot || (slot = radix_tree_next_chunk(root, iter, \ 502 slot = radix_tree_next_slot(slot, iter, \ 510 * @iter: the struct radix_tree_iter pointer 514 * @slot points to radix tree slot, @iter->index contains its index. 516 #define radix_tree_for_each_tagged(slot, root, iter, start, tag) \ 517 for (slot = radix_tree_iter_init(iter, start) ; \ 518 slot || (slot = radix_tree_next_chunk(root, iter, \ 520 slot = radix_tree_next_slot(slot, iter, \
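All of the radix_tree_for_each_*() macros above hand back void ** slots, so callers dereference the slot to reach the stored item and read the key from iter.index; radix_tree_next_slot() serves out the rest of the current chunk before radix_tree_next_chunk() descends the tree again. A minimal walk over a populated tree:

#include <linux/radix-tree.h>

static void dump_all_slots(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;

	/* visit every populated slot, starting from index 0 */
	radix_tree_for_each_slot(slot, root, &iter, 0)
		pr_info("index %lu -> %p\n", iter.index, *slot);
}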
|
H A D | uio.h | 57 static inline struct iovec iov_iter_iovec(const struct iov_iter *iter) iov_iter_iovec() argument 60 .iov_base = iter->iov->iov_base + iter->iov_offset, iov_iter_iovec() 61 .iov_len = min(iter->count, iov_iter_iovec() 62 iter->iov->iov_len - iter->iov_offset), iov_iter_iovec() 66 #define iov_for_each(iov, iter, start) \ 68 for (iter = (start); \ 69 (iter).count && \ 70 ((iov = iov_iter_iovec(&(iter))), 1); \ 71 iov_iter_advance(&(iter), (iov).iov_len)) 115 * Get one of READ or WRITE out of iter->type without any other flags OR'd in
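iov_iter_iovec() above clips the current iovec by iov_offset and count, so iov_for_each() always presents the not-yet-consumed (base, len) window; each pass then advances the iterator by exactly what it just looked at. A sketch, assuming an iovec-backed iterator (the macro skips ITER_BVEC/ITER_KVEC iterators):

static size_t count_iov_bytes(struct iov_iter start)
{
	struct iov_iter iter;	/* the macro advances its own copy */
	struct iovec iov;
	size_t total = 0;

	iov_for_each(iov, iter, start)
		total += iov.iov_len;

	return total;
}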
|
H A D | pnfs_osd_xdr.h | 259 * of the layout. @iter members need not be initialized. 271 * struct pnfs_osd_xdr_decode_layout_iter iter; 276 * status = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr); 279 * while(pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &status)) { 294 struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr); 297 struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
|
H A D | ring_buffer.h | 130 void ring_buffer_read_start(struct ring_buffer_iter *iter); 131 void ring_buffer_read_finish(struct ring_buffer_iter *iter); 134 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); 136 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); 137 void ring_buffer_iter_reset(struct ring_buffer_iter *iter); 138 int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
|
H A D | cb710.h | 166 * @miter: sg mapping iter 188 * @miter: sg mapping iter
|
/linux-4.1.27/drivers/staging/lustre/lustre/lov/ |
H A D | lov_pool.c | 175 struct pool_iterator *iter = (struct pool_iterator *)s->private; pool_proc_next() local 178 LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic); pool_proc_next() 181 if (*pos >= pool_tgt_count(iter->pool)) pool_proc_next() 185 prev_idx = iter->idx; pool_proc_next() 186 down_read(&pool_tgt_rw_sem(iter->pool)); pool_proc_next() 187 iter->idx++; pool_proc_next() 188 if (iter->idx == pool_tgt_count(iter->pool)) { pool_proc_next() 189 iter->idx = prev_idx; /* we stay on the last entry */ pool_proc_next() 190 up_read(&pool_tgt_rw_sem(iter->pool)); pool_proc_next() 193 up_read(&pool_tgt_rw_sem(iter->pool)); pool_proc_next() 196 return iter; pool_proc_next() 202 struct pool_iterator *iter; pool_proc_start() local 207 /* iter is not created, so stop() has no way to pool_proc_start() 213 OBD_ALLOC_PTR(iter); pool_proc_start() 214 if (!iter) pool_proc_start() 216 iter->magic = POOL_IT_MAGIC; pool_proc_start() 217 iter->pool = pool; pool_proc_start() 218 iter->idx = 0; pool_proc_start() 223 s->private = iter; pool_proc_start() 230 ptr = pool_proc_next(s, &iter, &i); pool_proc_start() 234 return iter; pool_proc_start() 239 struct pool_iterator *iter = (struct pool_iterator *)s->private; pool_proc_stop() local 244 if ((iter) && (iter->magic == POOL_IT_MAGIC)) { pool_proc_stop() 247 s->private = iter->pool; pool_proc_stop() 248 lov_pool_putref(iter->pool); pool_proc_stop() 249 OBD_FREE_PTR(iter); pool_proc_stop() 256 struct pool_iterator *iter = (struct pool_iterator *)v; pool_proc_show() local 259 LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic); pool_proc_show() 260 LASSERT(iter->pool != NULL); pool_proc_show() 261 LASSERT(iter->idx <= pool_tgt_count(iter->pool)); pool_proc_show() 263 down_read(&pool_tgt_rw_sem(iter->pool)); pool_proc_show() 264 tgt = pool_tgt(iter->pool, iter->idx); pool_proc_show() 265 up_read(&pool_tgt_rw_sem(iter->pool)); pool_proc_show()
|
/linux-4.1.27/drivers/dma/ppc4xx/ |
H A D | adma.c | 186 struct ppc440spe_adma_desc_slot *iter) print_cb_list() 188 for (; iter; iter = iter->hw_next) print_cb_list() 189 print_cb(chan, iter->hw_desc); print_cb_list() 334 struct ppc440spe_adma_desc_slot *iter; ppc440spe_desc_init_dma01pq() local 348 list_for_each_entry(iter, &desc->group_list, chain_node) { ppc440spe_desc_init_dma01pq() 349 hw_desc = iter->hw_desc; ppc440spe_desc_init_dma01pq() 350 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); ppc440spe_desc_init_dma01pq() 352 if (likely(!list_is_last(&iter->chain_node, ppc440spe_desc_init_dma01pq() 355 iter->hw_next = list_entry(iter->chain_node.next, ppc440spe_desc_init_dma01pq() 357 clear_bit(PPC440SPE_DESC_INT, &iter->flags); ppc440spe_desc_init_dma01pq() 364 iter->hw_next = NULL; ppc440spe_desc_init_dma01pq() 366 set_bit(PPC440SPE_DESC_INT, &iter->flags); ppc440spe_desc_init_dma01pq() 368 clear_bit(PPC440SPE_DESC_INT, &iter->flags); ppc440spe_desc_init_dma01pq() 379 iter = list_first_entry(&desc->group_list, ppc440spe_desc_init_dma01pq() 384 hw_desc = iter->hw_desc; ppc440spe_desc_init_dma01pq() 386 iter = list_first_entry(&iter->chain_node, ppc440spe_desc_init_dma01pq() 392 hw_desc = iter->hw_desc; ppc440spe_desc_init_dma01pq() 394 iter = list_first_entry(&iter->chain_node, ppc440spe_desc_init_dma01pq() 399 list_for_each_entry_from(iter, &desc->group_list, chain_node) { ppc440spe_desc_init_dma01pq() 400 hw_desc = iter->hw_desc; ppc440spe_desc_init_dma01pq() 410 iter = list_first_entry(&desc->group_list, ppc440spe_desc_init_dma01pq() 413 hw_desc = iter->hw_desc; ppc440spe_desc_init_dma01pq() 417 iter = list_first_entry(&iter->chain_node, ppc440spe_desc_init_dma01pq() 420 hw_desc = iter->hw_desc; ppc440spe_desc_init_dma01pq() 426 iter = list_first_entry(&iter->chain_node, ppc440spe_desc_init_dma01pq() 429 list_for_each_entry_from(iter, &desc->group_list, ppc440spe_desc_init_dma01pq() 431 hw_desc = iter->hw_desc; ppc440spe_desc_init_dma01pq() 447 struct ppc440spe_adma_desc_slot *iter; ppc440spe_desc_init_dma01pqzero_sum() local 457 iter = list_first_entry(&desc->group_list, ppc440spe_desc_init_dma01pqzero_sum() 459 iter = list_entry(iter->chain_node.next, ppc440spe_desc_init_dma01pqzero_sum() 463 iter = list_entry(iter->chain_node.next, ppc440spe_desc_init_dma01pqzero_sum() 467 list_for_each_entry_from(iter, &desc->group_list, chain_node) { ppc440spe_desc_init_dma01pqzero_sum() 468 hw_desc = iter->hw_desc; ppc440spe_desc_init_dma01pqzero_sum() 469 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); ppc440spe_desc_init_dma01pqzero_sum() 470 iter->src_cnt = 0; ppc440spe_desc_init_dma01pqzero_sum() 471 iter->dst_cnt = 0; ppc440spe_desc_init_dma01pqzero_sum() 487 if (likely(!list_is_last(&iter->chain_node, ppc440spe_desc_init_dma01pqzero_sum() 490 iter->hw_next = list_entry(iter->chain_node.next, ppc440spe_desc_init_dma01pqzero_sum() 499 iter->hw_next = NULL; ppc440spe_desc_init_dma01pqzero_sum() 503 set_bit(PPC440SPE_DESC_INT, &iter->flags); ppc440spe_desc_init_dma01pqzero_sum() 952 struct ppc440spe_adma_desc_slot *iter; ppc440spe_adma_device_clear_eot_status() local 959 list_for_each_entry(iter, &chan->chain, ppc440spe_adma_device_clear_eot_status() 961 if (iter->phys == phys) ppc440spe_adma_device_clear_eot_status() 968 BUG_ON(&iter->chain_node == &chan->chain); ppc440spe_adma_device_clear_eot_status() 970 if (iter->xor_check_result) { ppc440spe_adma_device_clear_eot_status() 972 &iter->flags)) { ppc440spe_adma_device_clear_eot_status() 973 *iter->xor_check_result |= ppc440spe_adma_device_clear_eot_status() 977 
&iter->flags)) { ppc440spe_adma_device_clear_eot_status() 978 *iter->xor_check_result |= ppc440spe_adma_device_clear_eot_status() 1111 struct ppc440spe_adma_desc_slot *iter; ppc440spe_chan_append() local 1124 iter = chan_last_sub[chan->device->id]; ppc440spe_chan_append() 1125 BUG_ON(!iter); ppc440spe_chan_append() 1128 iter = chan_first_cdb[chan->device->id]; ppc440spe_chan_append() 1129 BUG_ON(!iter); ppc440spe_chan_append() 1130 ppc440spe_dma_put_desc(chan, iter); ppc440spe_chan_append() 1135 if (!iter->hw_next) ppc440spe_chan_append() 1139 list_for_each_entry_continue(iter, &chan->chain, chain_node) { ppc440spe_chan_append() 1140 ppc440spe_dma_put_desc(chan, iter); ppc440spe_chan_append() 1141 if (!iter->hw_next) ppc440spe_chan_append() 1437 struct ppc440spe_adma_desc_slot *iter = tdesc->group_head; ppc440spe_get_group_entry() local 1446 list_for_each_entry(iter, &tdesc->group_list, chain_node) { ppc440spe_get_group_entry() 1450 return iter; ppc440spe_get_group_entry() 1552 struct ppc440spe_adma_desc_slot *iter, *_iter, *group_start = NULL; __ppc440spe_adma_slot_cleanup() local 1571 list_for_each_entry_safe(iter, _iter, &chan->chain, __ppc440spe_adma_slot_cleanup() 1576 iter->async_tx.cookie, iter->idx, busy, iter->phys, __ppc440spe_adma_slot_cleanup() 1577 ppc440spe_desc_get_link(iter, chan), current_desc, __ppc440spe_adma_slot_cleanup() 1578 async_tx_test_ack(&iter->async_tx)); __ppc440spe_adma_slot_cleanup() 1593 if (iter->phys == current_desc) { __ppc440spe_adma_slot_cleanup() 1595 if (busy || ppc440spe_desc_get_link(iter, chan)) { __ppc440spe_adma_slot_cleanup() 1605 slot_cnt = iter->slot_cnt; __ppc440spe_adma_slot_cleanup() 1606 slots_per_op = iter->slots_per_op; __ppc440spe_adma_slot_cleanup() 1615 group_start = iter; __ppc440spe_adma_slot_cleanup() 1659 cookie = ppc440spe_adma_run_tx_complete_actions(iter, chan, __ppc440spe_adma_slot_cleanup() 1662 if (ppc440spe_adma_clean_slot(iter, chan)) __ppc440spe_adma_slot_cleanup() 1704 struct ppc440spe_adma_desc_slot *iter = NULL, *_iter; ppc440spe_adma_alloc_slots() local 1718 iter = chan->last_used; ppc440spe_adma_alloc_slots() 1720 iter = list_entry(&chan->all_slots, ppc440spe_adma_alloc_slots() 1723 list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots, ppc440spe_adma_alloc_slots() 1727 if (iter->slots_per_op) { ppc440spe_adma_alloc_slots() 1734 alloc_start = iter; ppc440spe_adma_alloc_slots() 1740 iter = alloc_start; ppc440spe_adma_alloc_slots() 1745 async_tx_ack(&iter->async_tx); ppc440spe_adma_alloc_slots() 1747 list_add_tail(&iter->chain_node, &chain); ppc440spe_adma_alloc_slots() 1748 alloc_tail = iter; ppc440spe_adma_alloc_slots() 1749 iter->async_tx.cookie = 0; ppc440spe_adma_alloc_slots() 1750 iter->hw_next = NULL; ppc440spe_adma_alloc_slots() 1751 iter->flags = 0; ppc440spe_adma_alloc_slots() 1752 iter->slot_cnt = num_slots; ppc440spe_adma_alloc_slots() 1753 iter->xor_check_result = NULL; ppc440spe_adma_alloc_slots() 1755 iter->slots_per_op = slots_per_op - i; ppc440spe_adma_alloc_slots() 1756 last_used = iter; ppc440spe_adma_alloc_slots() 1757 iter = list_entry(iter->slot_node.next, ppc440spe_adma_alloc_slots() 2118 struct ppc440spe_adma_desc_slot *iter; ppc440spe_dma01_prep_mult() local 2128 iter = list_first_entry(&sw_desc->group_list, ppc440spe_dma01_prep_mult() 2131 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); ppc440spe_dma01_prep_mult() 2133 iter->hw_next = list_entry(iter->chain_node.next, ppc440spe_dma01_prep_mult() 2136 clear_bit(PPC440SPE_DESC_INT, &iter->flags); ppc440spe_dma01_prep_mult() 2137 
hw_desc = iter->hw_desc; ppc440spe_dma01_prep_mult() 2140 ppc440spe_desc_set_dest_addr(iter, chan, ppc440spe_dma01_prep_mult() 2142 ppc440spe_desc_set_dest_addr(iter, chan, 0, dst[1], 1); ppc440spe_dma01_prep_mult() 2143 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, ppc440spe_dma01_prep_mult() 2145 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); ppc440spe_dma01_prep_mult() 2146 iter->unmap_len = len; ppc440spe_dma01_prep_mult() 2152 iter = list_first_entry(&iter->chain_node, ppc440spe_dma01_prep_mult() 2155 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); ppc440spe_dma01_prep_mult() 2156 iter->hw_next = NULL; ppc440spe_dma01_prep_mult() 2158 set_bit(PPC440SPE_DESC_INT, &iter->flags); ppc440spe_dma01_prep_mult() 2160 clear_bit(PPC440SPE_DESC_INT, &iter->flags); ppc440spe_dma01_prep_mult() 2162 hw_desc = iter->hw_desc; ppc440spe_dma01_prep_mult() 2164 ppc440spe_desc_set_src_addr(iter, chan, 0, ppc440spe_dma01_prep_mult() 2166 ppc440spe_desc_set_dest_addr(iter, chan, ppc440spe_dma01_prep_mult() 2169 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, ppc440spe_dma01_prep_mult() 2171 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); ppc440spe_dma01_prep_mult() 2172 iter->unmap_len = len; ppc440spe_dma01_prep_mult() 2204 struct ppc440spe_adma_desc_slot *iter; ppc440spe_dma01_prep_sum_product() local 2212 iter = list_first_entry(&sw_desc->group_list, ppc440spe_dma01_prep_sum_product() 2215 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); ppc440spe_dma01_prep_sum_product() 2216 iter->hw_next = list_entry(iter->chain_node.next, ppc440spe_dma01_prep_sum_product() 2219 clear_bit(PPC440SPE_DESC_INT, &iter->flags); ppc440spe_dma01_prep_sum_product() 2220 hw_desc = iter->hw_desc; ppc440spe_dma01_prep_sum_product() 2223 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, ppc440spe_dma01_prep_sum_product() 2225 ppc440spe_desc_set_dest_addr(iter, chan, 0, ppc440spe_dma01_prep_sum_product() 2227 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, ppc440spe_dma01_prep_sum_product() 2229 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); ppc440spe_dma01_prep_sum_product() 2230 iter->unmap_len = len; ppc440spe_dma01_prep_sum_product() 2234 iter = list_first_entry(&iter->chain_node, ppc440spe_dma01_prep_sum_product() 2237 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); ppc440spe_dma01_prep_sum_product() 2239 iter->hw_next = list_entry(iter->chain_node.next, ppc440spe_dma01_prep_sum_product() 2243 set_bit(PPC440SPE_DESC_INT, &iter->flags); ppc440spe_dma01_prep_sum_product() 2245 clear_bit(PPC440SPE_DESC_INT, &iter->flags); ppc440spe_dma01_prep_sum_product() 2247 hw_desc = iter->hw_desc; ppc440spe_dma01_prep_sum_product() 2249 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, ppc440spe_dma01_prep_sum_product() 2251 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, ppc440spe_dma01_prep_sum_product() 2253 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, ppc440spe_dma01_prep_sum_product() 2255 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); ppc440spe_dma01_prep_sum_product() 2256 iter->unmap_len = len; ppc440spe_dma01_prep_sum_product() 2262 iter = list_first_entry(&iter->chain_node, ppc440spe_dma01_prep_sum_product() 2265 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); ppc440spe_dma01_prep_sum_product() 2266 iter->hw_next = NULL; ppc440spe_dma01_prep_sum_product() 2268 set_bit(PPC440SPE_DESC_INT, &iter->flags); ppc440spe_dma01_prep_sum_product() 2270 clear_bit(PPC440SPE_DESC_INT, &iter->flags); 
ppc440spe_dma01_prep_sum_product() 2272 hw_desc = iter->hw_desc; ppc440spe_dma01_prep_sum_product() 2274 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, ppc440spe_dma01_prep_sum_product() 2276 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, ppc440spe_dma01_prep_sum_product() 2278 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, ppc440spe_dma01_prep_sum_product() 2280 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, len); ppc440spe_dma01_prep_sum_product() 2281 iter->unmap_len = len; ppc440spe_dma01_prep_sum_product() 2296 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter; ppc440spe_dma01_prep_pq() local 2423 list_for_each_entry(iter, &sw_desc->group_list, ppc440spe_dma01_prep_pq() 2425 ppc440spe_desc_set_byte_count(iter, ppc440spe_dma01_prep_pq() 2427 iter->unmap_len = len; ppc440spe_dma01_prep_pq() 2441 struct ppc440spe_adma_desc_slot *sw_desc = NULL, *iter; ppc440spe_dma2_prep_pq() local 2463 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { ppc440spe_dma2_prep_pq() 2464 ppc440spe_desc_init_dma2pq(iter, dst_cnt, src_cnt, ppc440spe_dma2_prep_pq() 2466 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, ppc440spe_dma2_prep_pq() 2468 iter->unmap_len = len; ppc440spe_dma2_prep_pq() 2470 ppc440spe_init_rxor_cursor(&(iter->rxor_cursor)); ppc440spe_dma2_prep_pq() 2471 iter->rxor_cursor.len = len; ppc440spe_dma2_prep_pq() 2472 iter->descs_per_op = descs_per_op; ppc440spe_dma2_prep_pq() 2475 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { ppc440spe_dma2_prep_pq() 2478 ppc440spe_adma_init_dma2rxor_slot(iter, src, ppc440spe_dma2_prep_pq() 2480 if (likely(!list_is_last(&iter->chain_node, ppc440spe_dma2_prep_pq() 2483 iter->hw_next = ppc440spe_dma2_prep_pq() 2484 list_entry(iter->chain_node.next, ppc440spe_dma2_prep_pq() 2487 ppc440spe_xor_set_link(iter, iter->hw_next); ppc440spe_dma2_prep_pq() 2490 iter->hw_next = NULL; ppc440spe_dma2_prep_pq() 2606 struct ppc440spe_adma_desc_slot *sw_desc, *iter; ppc440spe_adma_prep_dma_pqzero_sum() local 2644 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { ppc440spe_adma_prep_dma_pqzero_sum() 2645 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, ppc440spe_adma_prep_dma_pqzero_sum() 2647 iter->unmap_len = len; ppc440spe_adma_prep_dma_pqzero_sum() 2654 iter = sw_desc->group_head; ppc440spe_adma_prep_dma_pqzero_sum() 2655 chan = to_ppc440spe_adma_chan(iter->async_tx.chan); ppc440spe_adma_prep_dma_pqzero_sum() 2656 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); ppc440spe_adma_prep_dma_pqzero_sum() 2657 iter->hw_next = list_entry(iter->chain_node.next, ppc440spe_adma_prep_dma_pqzero_sum() 2660 hw_desc = iter->hw_desc; ppc440spe_adma_prep_dma_pqzero_sum() 2662 iter->src_cnt = 0; ppc440spe_adma_prep_dma_pqzero_sum() 2663 iter->dst_cnt = 0; ppc440spe_adma_prep_dma_pqzero_sum() 2664 ppc440spe_desc_set_dest_addr(iter, chan, 0, ppc440spe_adma_prep_dma_pqzero_sum() 2666 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, pdest); ppc440spe_adma_prep_dma_pqzero_sum() 2667 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, ppc440spe_adma_prep_dma_pqzero_sum() 2669 iter->unmap_len = 0; ppc440spe_adma_prep_dma_pqzero_sum() 2677 iter = list_first_entry(&sw_desc->group_list, ppc440spe_adma_prep_dma_pqzero_sum() 2680 chan = to_ppc440spe_adma_chan(iter->async_tx.chan); ppc440spe_adma_prep_dma_pqzero_sum() 2683 iter = list_entry(iter->chain_node.next, ppc440spe_adma_prep_dma_pqzero_sum() 2688 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); ppc440spe_adma_prep_dma_pqzero_sum() 2689 iter->hw_next = 
list_entry(iter->chain_node.next, ppc440spe_adma_prep_dma_pqzero_sum() 2692 hw_desc = iter->hw_desc; ppc440spe_adma_prep_dma_pqzero_sum() 2694 iter->src_cnt = 0; ppc440spe_adma_prep_dma_pqzero_sum() 2695 iter->dst_cnt = 0; ppc440spe_adma_prep_dma_pqzero_sum() 2696 ppc440spe_desc_set_dest_addr(iter, chan, 0, ppc440spe_adma_prep_dma_pqzero_sum() 2698 ppc440spe_desc_set_src_addr(iter, chan, 0, 0, qdest); ppc440spe_adma_prep_dma_pqzero_sum() 2699 ppc440spe_desc_set_byte_count(iter, ppc440spe_chan, ppc440spe_adma_prep_dma_pqzero_sum() 2701 iter->unmap_len = 0; ppc440spe_adma_prep_dma_pqzero_sum() 2711 list_for_each_entry_reverse(iter, &sw_desc->group_list, ppc440spe_adma_prep_dma_pqzero_sum() 2721 &iter->flags); ppc440spe_adma_prep_dma_pqzero_sum() 2724 &iter->flags); ppc440spe_adma_prep_dma_pqzero_sum() 2729 &iter->flags); ppc440spe_adma_prep_dma_pqzero_sum() 2732 &iter->flags); ppc440spe_adma_prep_dma_pqzero_sum() 2735 iter->xor_check_result = pqres; ppc440spe_adma_prep_dma_pqzero_sum() 2741 *iter->xor_check_result = 0; ppc440spe_adma_prep_dma_pqzero_sum() 2742 ppc440spe_desc_set_dcheck(iter, ppc440spe_chan, ppc440spe_adma_prep_dma_pqzero_sum() 2750 list_for_each_entry_continue_reverse(iter, &sw_desc->group_list, ppc440spe_adma_prep_dma_pqzero_sum() 2755 chan = to_ppc440spe_adma_chan(iter->async_tx.chan); ppc440spe_adma_prep_dma_pqzero_sum() 2756 ppc440spe_desc_set_src_addr(iter, chan, 0, ppc440spe_adma_prep_dma_pqzero_sum() 2762 ppc440spe_desc_set_src_mult(iter, chan, ppc440spe_adma_prep_dma_pqzero_sum() 2826 static void ppc440spe_adma_pq_zero_op(struct ppc440spe_adma_desc_slot *iter, ppc440spe_adma_pq_zero_op() argument 2833 ppc440spe_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0); ppc440spe_adma_pq_zero_op() 2836 ppc440spe_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr); ppc440spe_adma_pq_zero_op() 2839 ppc440spe_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF, ppc440spe_adma_pq_zero_op() 2850 struct ppc440spe_adma_desc_slot *iter; ppc440spe_adma_pq_set_dest() local 2884 iter = ppc440spe_get_group_entry(sw_desc, index); ppc440spe_adma_pq_set_dest() 2887 list_for_each_entry_from(iter, ppc440spe_adma_pq_set_dest() 2889 ppc440spe_desc_set_dest_addr(iter, chan, ppc440spe_adma_pq_set_dest() 2893 list_for_each_entry_from(iter, ppc440spe_adma_pq_set_dest() 2895 ppc440spe_desc_set_dest_addr(iter, chan, ppc440spe_adma_pq_set_dest() 2897 ppc440spe_desc_set_dest_addr(iter, chan, ppc440spe_adma_pq_set_dest() 2909 iter = ppc440spe_get_group_entry( ppc440spe_adma_pq_set_dest() 2911 ppc440spe_adma_pq_zero_op(iter, chan, ppc440spe_adma_pq_set_dest() 2917 iter = ppc440spe_get_group_entry( ppc440spe_adma_pq_set_dest() 2919 ppc440spe_adma_pq_zero_op(iter, chan, ppc440spe_adma_pq_set_dest() 2941 iter = ppc440spe_get_group_entry(sw_desc, index++); ppc440spe_adma_pq_set_dest() 2942 ppc440spe_desc_set_dest_addr(iter, chan, ppc440spe_adma_pq_set_dest() 2947 iter = ppc440spe_get_group_entry(sw_desc, ppc440spe_adma_pq_set_dest() 2949 ppc440spe_desc_set_dest_addr(iter, chan, ppc440spe_adma_pq_set_dest() 2957 iter = ppc440spe_get_group_entry(sw_desc, ppc440spe_adma_pq_set_dest() 2961 list_for_each_entry_from(iter, ppc440spe_adma_pq_set_dest() 2965 iter, chan, ppc440spe_adma_pq_set_dest() 2971 list_for_each_entry_from(iter, ppc440spe_adma_pq_set_dest() 2975 iter, chan, ppc440spe_adma_pq_set_dest() 2979 iter, chan, ppc440spe_adma_pq_set_dest() 3005 iter = ppc440spe_get_group_entry(sw_desc, 0); ppc440spe_adma_pq_set_dest() 3007 ppc440spe_desc_set_dest_addr(iter, chan, ppc440spe_adma_pq_set_dest() 
3010 iter = list_entry(iter->chain_node.next, ppc440spe_adma_pq_set_dest() 3017 iter = ppc440spe_get_group_entry(sw_desc, ppc440spe_adma_pq_set_dest() 3020 ppc440spe_desc_set_dest_addr(iter, ppc440spe_adma_pq_set_dest() 3022 iter = list_entry(iter->chain_node.next, ppc440spe_adma_pq_set_dest() 3040 struct ppc440spe_adma_desc_slot *iter, *end; ppc440spe_adma_pqzero_sum_set_dest() local 3059 iter = ppc440spe_get_group_entry(sw_desc, idx); ppc440spe_adma_pqzero_sum_set_dest() 3063 list_for_each_entry_from(iter, &sw_desc->group_list, ppc440spe_adma_pqzero_sum_set_dest() 3065 if (unlikely(iter == end)) ppc440spe_adma_pqzero_sum_set_dest() 3067 ppc440spe_desc_set_dest_addr(iter, chan, ppc440spe_adma_pqzero_sum_set_dest() 3069 ppc440spe_desc_set_dest_addr(iter, chan, ppc440spe_adma_pqzero_sum_set_dest() 3075 list_for_each_entry_from(iter, &sw_desc->group_list, ppc440spe_adma_pqzero_sum_set_dest() 3077 if (unlikely(iter == end)) ppc440spe_adma_pqzero_sum_set_dest() 3079 ppc440spe_desc_set_dest_addr(iter, chan, ppc440spe_adma_pqzero_sum_set_dest() 3118 struct ppc440spe_adma_desc_slot *iter = NULL; ppc440spe_adma_pq_set_src() local 3156 iter = ppc440spe_get_group_entry(sw_desc, 0); ppc440spe_adma_pq_set_src() 3162 iter = NULL; ppc440spe_adma_pq_set_src() 3168 iter = ppc440spe_get_group_entry(sw_desc, ppc440spe_adma_pq_set_src() 3183 iter = ppc440spe_get_group_entry(sw_desc, ppc440spe_adma_pq_set_src() 3187 if (likely(iter)) { ppc440spe_adma_pq_set_src() 3188 ppc440spe_desc_set_src_addr(iter, chan, 0, haddr, addr); ppc440spe_adma_pq_set_src() 3196 iter = ppc440spe_get_group_entry(sw_desc, 1); ppc440spe_adma_pq_set_src() 3197 ppc440spe_desc_set_src_addr(iter, chan, 0, ppc440spe_adma_pq_set_src() 3205 iter = sw_desc->group_head; ppc440spe_adma_pq_set_src() 3206 if (iter->dst_cnt == 2) { ppc440spe_adma_pq_set_src() 3208 ppc440spe_adma_dma2rxor_set_src(iter, index, addr); ppc440spe_adma_pq_set_src() 3211 iter = ppc440spe_get_group_entry(sw_desc, ppc440spe_adma_pq_set_src() 3214 ppc440spe_adma_dma2rxor_set_src(iter, index, addr); ppc440spe_adma_pq_set_src() 3470 struct ppc440spe_adma_desc_slot *iter = NULL, *iter1 = NULL; ppc440spe_adma_pq_set_src_mult() local 3483 iter = ppc440spe_get_group_entry(sw_desc, ppc440spe_adma_pq_set_src_mult() 3493 iter = ppc440spe_get_group_entry(sw_desc, ppc440spe_adma_pq_set_src_mult() 3512 iter = ppc440spe_get_group_entry(sw_desc, index + znum); ppc440spe_adma_pq_set_src_mult() 3517 if (likely(iter)) { ppc440spe_adma_pq_set_src_mult() 3518 ppc440spe_desc_set_src_mult(iter, chan, ppc440spe_adma_pq_set_src_mult() 3533 iter = sw_desc->group_head; ppc440spe_adma_pq_set_src_mult() 3536 ppc440spe_adma_dma2rxor_set_mult(iter, index, 1); ppc440spe_adma_pq_set_src_mult() 3539 iter = ppc440spe_get_group_entry(sw_desc, ppc440spe_adma_pq_set_src_mult() 3542 ppc440spe_adma_dma2rxor_set_mult(iter, index, mult); ppc440spe_adma_pq_set_src_mult() 3553 struct ppc440spe_adma_desc_slot *iter, *_iter; ppc440spe_adma_free_chan_resources() local 3560 list_for_each_entry_safe(iter, _iter, &ppc440spe_chan->chain, ppc440spe_adma_free_chan_resources() 3563 list_del(&iter->chain_node); ppc440spe_adma_free_chan_resources() 3565 list_for_each_entry_safe_reverse(iter, _iter, ppc440spe_adma_free_chan_resources() 3567 list_del(&iter->slot_node); ppc440spe_adma_free_chan_resources() 3568 kfree(iter); ppc440spe_adma_free_chan_resources() 3719 struct ppc440spe_adma_desc_slot *sw_desc, *iter; ppc440spe_test_raid6() local 3737 list_for_each_entry(iter, &sw_desc->group_list, chain_node) { ppc440spe_test_raid6() 
3738 ppc440spe_desc_set_byte_count(iter, chan, PAGE_SIZE); ppc440spe_test_raid6() 3739 iter->unmap_len = PAGE_SIZE; ppc440spe_test_raid6() 185 print_cb_list(struct ppc440spe_adma_chan *chan, struct ppc440spe_adma_desc_slot *iter) print_cb_list() argument
|
/linux-4.1.27/mm/ |
H A D | process_vm_access.c | 29 * @iter: where to copy to/from locally 36 struct iov_iter *iter, process_vm_rw_pages() 40 while (len && iov_iter_count(iter)) { process_vm_rw_pages() 49 copied = copy_page_from_iter(page, offset, copy, iter); process_vm_rw_pages() 52 copied = copy_page_to_iter(page, offset, copy, iter); process_vm_rw_pages() 55 if (copied < copy && iov_iter_count(iter)) process_vm_rw_pages() 69 * @iter: where to copy to/from locally 79 struct iov_iter *iter, process_vm_rw_single_vec() 97 while (!rc && nr_pages && iov_iter_count(iter)) { process_vm_rw_single_vec() 112 start_offset, bytes, iter, process_vm_rw_single_vec() 132 * @iter: where to copy to/from locally 141 static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter, process_vm_rw_core() argument 155 size_t total_len = iov_iter_count(iter); process_vm_rw_core() 209 for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++) process_vm_rw_core() 212 iter, process_pages, mm, task, vm_write); process_vm_rw_core() 215 total_len -= iov_iter_count(iter); process_vm_rw_core() 258 struct iov_iter iter; process_vm_rw() local 266 rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter); process_vm_rw() 269 if (!iov_iter_count(&iter)) process_vm_rw() 277 rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write); process_vm_rw() 316 struct iov_iter iter; compat_process_vm_rw() local 323 rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter); compat_process_vm_rw() 326 if (!iov_iter_count(&iter)) compat_process_vm_rw() 334 rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write); compat_process_vm_rw() 33 process_vm_rw_pages(struct page **pages, unsigned offset, size_t len, struct iov_iter *iter, int vm_write) process_vm_rw_pages() argument 77 process_vm_rw_single_vec(unsigned long addr, unsigned long len, struct iov_iter *iter, struct page **process_pages, struct mm_struct *mm, struct task_struct *task, int vm_write) process_vm_rw_single_vec() argument
|
H A D | slab.h | 170 #define for_each_memcg_cache(iter, root) \ 171 list_for_each_entry(iter, &(root)->memcg_params.list, \ 174 #define for_each_memcg_cache_safe(iter, tmp, root) \ 175 list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \ 256 #define for_each_memcg_cache(iter, root) \ 257 for ((void)(iter), (void)(root); 0; ) 258 #define for_each_memcg_cache_safe(iter, tmp, root) \ 259 for ((void)(iter), (void)(tmp), (void)(root); 0; )
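Note the fallback stubs: for ((void)(iter), (void)(root); 0; ) never executes, but it still evaluates and type-checks both arguments, so callers compile identically whether or not memcg kmem accounting is configured in, with no #ifdefs at the call site. A caller that works either way:

static int count_memcg_caches(struct kmem_cache *root)
{
	struct kmem_cache *iter;
	int n = 0;

	/* dead code when memcg is compiled out, but 'iter' and
	 * 'root' are still referenced, so no unused warnings */
	for_each_memcg_cache(iter, root)
		n++;

	return n;
}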
|
H A D | memcontrol.c | 148 struct reclaim_iter iter[DEF_PRIORITY + 1]; member in struct:mem_cgroup_per_zone 1013 struct reclaim_iter *uninitialized_var(iter); mem_cgroup_iter() 1039 iter = &mz->iter[reclaim->priority]; mem_cgroup_iter() 1041 if (prev && reclaim->generation != iter->generation) mem_cgroup_iter() 1045 pos = READ_ONCE(iter->position); mem_cgroup_iter() 1097 if (cmpxchg(&iter->position, pos, memcg) == pos) { mem_cgroup_iter() 1105 * pairs with css_tryget when dereferencing iter->position mem_cgroup_iter() 1112 iter->generation++; mem_cgroup_iter() 1114 reclaim->generation = iter->generation; mem_cgroup_iter() 1145 #define for_each_mem_cgroup_tree(iter, root) \ 1146 for (iter = mem_cgroup_iter(root, NULL, NULL); \ 1147 iter != NULL; \ 1148 iter = mem_cgroup_iter(root, iter, NULL)) 1150 #define for_each_mem_cgroup(iter) \ 1151 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \ 1152 iter != NULL; \ 1153 iter = mem_cgroup_iter(NULL, iter, NULL)) 1443 struct mem_cgroup *iter; mem_cgroup_print_oom_info() local 1472 for_each_mem_cgroup_tree(iter, memcg) { for_each_mem_cgroup_tree() 1474 pr_cont_cgroup_path(iter->css.cgroup); for_each_mem_cgroup_tree() 1481 K(mem_cgroup_read_stat(iter, i))); for_each_mem_cgroup_tree() 1486 K(mem_cgroup_nr_lru_pages(iter, BIT(i)))); for_each_mem_cgroup_tree() 1500 struct mem_cgroup *iter; mem_cgroup_count_children() local 1502 for_each_mem_cgroup_tree(iter, memcg) mem_cgroup_count_children() 1527 struct mem_cgroup *iter; mem_cgroup_out_of_memory() local 1545 for_each_mem_cgroup_tree(iter, memcg) { for_each_mem_cgroup_tree() 1549 css_task_iter_start(&iter->css, &it); for_each_mem_cgroup_tree() 1564 mem_cgroup_iter_break(memcg, iter); for_each_mem_cgroup_tree() 1757 struct mem_cgroup *iter, *failed = NULL; mem_cgroup_oom_trylock() local 1761 for_each_mem_cgroup_tree(iter, memcg) { for_each_mem_cgroup_tree() 1762 if (iter->oom_lock) { for_each_mem_cgroup_tree() 1767 failed = iter; for_each_mem_cgroup_tree() 1768 mem_cgroup_iter_break(memcg, iter); for_each_mem_cgroup_tree() 1771 iter->oom_lock = true; for_each_mem_cgroup_tree() 1779 for_each_mem_cgroup_tree(iter, memcg) { for_each_mem_cgroup_tree() 1780 if (iter == failed) { for_each_mem_cgroup_tree() 1781 mem_cgroup_iter_break(memcg, iter); for_each_mem_cgroup_tree() 1784 iter->oom_lock = false; for_each_mem_cgroup_tree() 1796 struct mem_cgroup *iter; mem_cgroup_oom_unlock() local 1800 for_each_mem_cgroup_tree(iter, memcg) mem_cgroup_oom_unlock() 1801 iter->oom_lock = false; mem_cgroup_oom_unlock() 1807 struct mem_cgroup *iter; mem_cgroup_mark_under_oom() local 1809 for_each_mem_cgroup_tree(iter, memcg) mem_cgroup_mark_under_oom() 1810 atomic_inc(&iter->under_oom); mem_cgroup_mark_under_oom() 1815 struct mem_cgroup *iter; mem_cgroup_unmark_under_oom() local 1822 for_each_mem_cgroup_tree(iter, memcg) mem_cgroup_unmark_under_oom() 1823 atomic_add_unless(&iter->under_oom, -1, 0); mem_cgroup_unmark_under_oom() 2202 struct mem_cgroup *iter; memcg_cpu_hotplug_callback() local 2210 for_each_mem_cgroup(iter) memcg_cpu_hotplug_callback() 2211 mem_cgroup_drain_pcp_counter(iter, cpu); memcg_cpu_hotplug_callback() 3117 struct mem_cgroup *iter; tree_stat() local 3121 for_each_mem_cgroup_tree(iter, memcg) tree_stat() 3122 val += mem_cgroup_read_stat(iter, idx); tree_stat() 3432 struct mem_cgroup *iter; local 3435 for_each_mem_cgroup_tree(iter, memcg) 3436 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask); 3440 for_each_mem_cgroup_tree(iter, memcg) for_each_node_state() 3442 iter, nid, stat->lru_mask); 
for_each_node_state() 3662 struct mem_cgroup *iter; mem_cgroup_oom_notify() local 3664 for_each_mem_cgroup_tree(iter, memcg) mem_cgroup_oom_notify() 3665 mem_cgroup_oom_notify_cb(iter); mem_cgroup_oom_notify()
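for_each_mem_cgroup_tree() drives a css-reference-holding hierarchy walk via mem_cgroup_iter(), and the excerpts above (mem_cgroup_oom_trylock, mem_cgroup_out_of_memory) show the one rule callers must follow: leaving the loop early requires mem_cgroup_iter_break() to drop the reference the iterator still holds. The shape of a correct early exit, with should_stop() as a hypothetical predicate:

static void stop_at_first_match(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg) {
		if (should_stop(iter)) {	/* hypothetical predicate */
			/* drop the css reference still held on 'iter' */
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
}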
|
H A D | filemap.c | 1161 struct radix_tree_iter iter; find_get_entries() local 1168 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { find_get_entries() 1193 indices[ret] = iter.index; find_get_entries() 1221 struct radix_tree_iter iter; find_get_pages() local 1230 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { find_get_pages() 1244 WARN_ON(iter.index); find_get_pages() 1288 struct radix_tree_iter iter; find_get_pages_contig() local 1297 radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) { find_get_pages_contig() 1336 if (page->mapping == NULL || page->index != iter.index) { find_get_pages_contig() 1364 struct radix_tree_iter iter; find_get_pages_tag() local 1374 &iter, *index, tag) { find_get_pages_tag() 1452 * @iter: data destination 1462 struct iov_iter *iter, ssize_t written) do_generic_file_read() 1477 last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; do_generic_file_read() 1512 offset, iter->count)) do_generic_file_read() 1564 ret = copy_page_to_iter(page, offset, nr, iter); do_generic_file_read() 1572 if (!iov_iter_count(iter)) do_generic_file_read() 1683 * @iter: destination for the data read 1689 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) generic_file_read_iter() argument 1699 size_t count = iov_iter_count(iter); generic_file_read_iter() 1708 struct iov_iter data = *iter; generic_file_read_iter() 1714 iov_iter_advance(iter, retval); generic_file_read_iter() 1726 if (retval < 0 || !iov_iter_count(iter) || *ppos >= size || generic_file_read_iter() 1733 retval = do_generic_file_read(file, ppos, iter, retval); generic_file_read_iter() 1988 struct radix_tree_iter iter; filemap_map_pages() local 1999 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) { filemap_map_pages() 2000 if (iter.index > vmf->max_pgoff) filemap_map_pages() 2051 if (iter.index == vmf->max_pgoff) filemap_map_pages() 1461 do_generic_file_read(struct file *filp, loff_t *ppos, struct iov_iter *iter, ssize_t written) do_generic_file_read() argument
|
/linux-4.1.27/net/ipv6/ |
H A D | ip6_fib.c | 694 struct rt6_info *iter = NULL; fib6_add_rt2node() local 707 for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) { fib6_add_rt2node() 712 if (iter->rt6i_metric == rt->rt6i_metric) { fib6_add_rt2node() 720 if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) { fib6_add_rt2node() 729 if (iter->dst.dev == rt->dst.dev && fib6_add_rt2node() 730 iter->rt6i_idev == rt->rt6i_idev && fib6_add_rt2node() 731 ipv6_addr_equal(&iter->rt6i_gateway, fib6_add_rt2node() 735 if (!(iter->rt6i_flags & RTF_EXPIRES)) fib6_add_rt2node() 738 rt6_clean_expires(iter); fib6_add_rt2node() 740 rt6_set_expires(iter, rt->dst.expires); fib6_add_rt2node() 755 rt6_qualify_for_ecmp(iter)) fib6_add_rt2node() 759 if (iter->rt6i_metric > rt->rt6i_metric) fib6_add_rt2node() 763 ins = &iter->dst.rt6_next; fib6_add_rt2node() 769 iter = *ins; fib6_add_rt2node() 819 rt->dst.rt6_next = iter; fib6_add_rt2node() 847 rt->dst.rt6_next = iter->dst.rt6_next; fib6_add_rt2node() 854 nsiblings = iter->rt6i_nsiblings; fib6_add_rt2node() 855 fib6_purge_rt(iter, fn, info->nl_net); fib6_add_rt2node() 856 rt6_release(iter); fib6_add_rt2node() 861 iter = *ins; fib6_add_rt2node() 862 while (iter) { fib6_add_rt2node() 863 if (rt6_qualify_for_ecmp(iter)) { fib6_add_rt2node() 864 *ins = iter->dst.rt6_next; fib6_add_rt2node() 865 fib6_purge_rt(iter, fn, info->nl_net); fib6_add_rt2node() 866 rt6_release(iter); fib6_add_rt2node() 869 ins = &iter->dst.rt6_next; fib6_add_rt2node() 871 iter = *ins; fib6_add_rt2node() 1236 int iter = 0; fib6_repair_tree() local 1239 RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter); fib6_repair_tree() 1240 iter++; fib6_repair_tree() 1889 struct ipv6_route_iter *iter = seq->private; ipv6_route_seq_show() local 1907 iter->w.leaf = NULL; ipv6_route_seq_show() 1913 struct ipv6_route_iter *iter = w->args; ipv6_route_yield() local 1915 if (!iter->skip) ipv6_route_yield() 1919 iter->w.leaf = iter->w.leaf->dst.rt6_next; ipv6_route_yield() 1920 iter->skip--; ipv6_route_yield() 1921 if (!iter->skip && iter->w.leaf) ipv6_route_yield() 1923 } while (iter->w.leaf); ipv6_route_yield() 1928 static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter) ipv6_route_seq_setup_walk() argument 1930 memset(&iter->w, 0, sizeof(iter->w)); ipv6_route_seq_setup_walk() 1931 iter->w.func = ipv6_route_yield; ipv6_route_seq_setup_walk() 1932 iter->w.root = &iter->tbl->tb6_root; ipv6_route_seq_setup_walk() 1933 iter->w.state = FWS_INIT; ipv6_route_seq_setup_walk() 1934 iter->w.node = iter->w.root; ipv6_route_seq_setup_walk() 1935 iter->w.args = iter; ipv6_route_seq_setup_walk() 1936 iter->sernum = iter->w.root->fn_sernum; ipv6_route_seq_setup_walk() 1937 INIT_LIST_HEAD(&iter->w.lh); ipv6_route_seq_setup_walk() 1938 fib6_walker_link(&iter->w); ipv6_route_seq_setup_walk() 1962 static void ipv6_route_check_sernum(struct ipv6_route_iter *iter) ipv6_route_check_sernum() argument 1964 if (iter->sernum != iter->w.root->fn_sernum) { ipv6_route_check_sernum() 1965 iter->sernum = iter->w.root->fn_sernum; ipv6_route_check_sernum() 1966 iter->w.state = FWS_INIT; ipv6_route_check_sernum() 1967 iter->w.node = iter->w.root; ipv6_route_check_sernum() 1968 WARN_ON(iter->w.skip); ipv6_route_check_sernum() 1969 iter->w.skip = iter->w.count; ipv6_route_check_sernum() 1978 struct ipv6_route_iter *iter = seq->private; ipv6_route_seq_next() local 1990 ipv6_route_check_sernum(iter); ipv6_route_seq_next() 1991 read_lock(&iter->tbl->tb6_lock); ipv6_route_seq_next() 1992 r = fib6_walk_continue(&iter->w); ipv6_route_seq_next() 1993 
read_unlock(&iter->tbl->tb6_lock); ipv6_route_seq_next() 1997 return iter->w.leaf; ipv6_route_seq_next() 1999 fib6_walker_unlink(&iter->w); ipv6_route_seq_next() 2002 fib6_walker_unlink(&iter->w); ipv6_route_seq_next() 2004 iter->tbl = ipv6_route_seq_next_table(iter->tbl, net); ipv6_route_seq_next() 2005 if (!iter->tbl) ipv6_route_seq_next() 2008 ipv6_route_seq_setup_walk(iter); ipv6_route_seq_next() 2016 struct ipv6_route_iter *iter = seq->private; __acquires() local 2019 iter->tbl = ipv6_route_seq_next_table(NULL, net); __acquires() 2020 iter->skip = *pos; __acquires() 2022 if (iter->tbl) { __acquires() 2023 ipv6_route_seq_setup_walk(iter); __acquires() 2030 static bool ipv6_route_iter_active(struct ipv6_route_iter *iter) ipv6_route_iter_active() argument 2032 struct fib6_walker *w = &iter->w; ipv6_route_iter_active() 2039 struct ipv6_route_iter *iter = seq->private; __releases() local 2041 if (ipv6_route_iter_active(iter)) __releases() 2042 fib6_walker_unlink(&iter->w); __releases()
|
H A D | ip6mr.c | 389 struct ipmr_vif_iter *iter, ip6mr_vif_seq_idx() 392 struct mr6_table *mrt = iter->mrt; ip6mr_vif_seq_idx() 394 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { ip6mr_vif_seq_idx() 395 if (!MIF_EXISTS(mrt, iter->ct)) ip6mr_vif_seq_idx() 398 return &mrt->vif6_table[iter->ct]; ip6mr_vif_seq_idx() 406 struct ipmr_vif_iter *iter = seq->private; __acquires() local 414 iter->mrt = mrt; __acquires() 423 struct ipmr_vif_iter *iter = seq->private; ip6mr_vif_seq_next() local 425 struct mr6_table *mrt = iter->mrt; ip6mr_vif_seq_next() 429 return ip6mr_vif_seq_idx(net, iter, 0); ip6mr_vif_seq_next() 431 while (++iter->ct < mrt->maxvif) { ip6mr_vif_seq_next() 432 if (!MIF_EXISTS(mrt, iter->ct)) ip6mr_vif_seq_next() 434 return &mrt->vif6_table[iter->ct]; ip6mr_vif_seq_next() 447 struct ipmr_vif_iter *iter = seq->private; ip6mr_vif_seq_show() local 448 struct mr6_table *mrt = iter->mrt; ip6mr_vif_seq_show() 388 ip6mr_vif_seq_idx(struct net *net, struct ipmr_vif_iter *iter, loff_t pos) ip6mr_vif_seq_idx() argument
|
/linux-4.1.27/drivers/base/ |
H A D | class.c | 284 * @iter: class iterator to initialize 289 * Initialize class iterator @iter such that it iterates over devices 294 void class_dev_iter_init(struct class_dev_iter *iter, struct class *class, class_dev_iter_init() argument 301 klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode); class_dev_iter_init() 302 iter->type = type; class_dev_iter_init() 308 * @iter: class iterator to proceed 310 * Proceed @iter to the next device and return it. Returns NULL if 318 struct device *class_dev_iter_next(struct class_dev_iter *iter) class_dev_iter_next() argument 324 knode = klist_next(&iter->ki); class_dev_iter_next() 328 if (!iter->type || iter->type == dev->type) class_dev_iter_next() 336 * @iter: class iterator to finish 341 void class_dev_iter_exit(struct class_dev_iter *iter) class_dev_iter_exit() argument 343 klist_iter_exit(&iter->ki); class_dev_iter_exit() 368 struct class_dev_iter iter; class_for_each_device() local 380 class_dev_iter_init(&iter, class, start, NULL); class_for_each_device() 381 while ((dev = class_dev_iter_next(&iter))) { class_for_each_device() 386 class_dev_iter_exit(&iter); class_for_each_device() 416 struct class_dev_iter iter; class_find_device() local 427 class_dev_iter_init(&iter, class, start, NULL); class_find_device() 428 while ((dev = class_dev_iter_next(&iter))) { class_find_device() 434 class_dev_iter_exit(&iter); class_find_device() 443 struct class_dev_iter iter; class_interface_register() local 456 class_dev_iter_init(&iter, parent, NULL, NULL); class_interface_register() 457 while ((dev = class_dev_iter_next(&iter))) class_interface_register() 459 class_dev_iter_exit(&iter); class_interface_register() 469 struct class_dev_iter iter; class_interface_unregister() local 478 class_dev_iter_init(&iter, parent, NULL, NULL); class_interface_unregister() 479 while ((dev = class_dev_iter_next(&iter))) class_interface_unregister() 481 class_dev_iter_exit(&iter); class_interface_unregister()
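The class_dev_iter_* trio above is a thin veneer over klist_iter, which is what makes the walk safe against concurrent device unplug; class_for_each_device() and class_find_device() are just this loop plus a callback. Open-coded, the pattern is (my_match() is a hypothetical predicate; a real caller would get_device() before dropping the iterator, as class_find_device() does):

static struct device *first_matching_dev(struct class *cls,
					 const struct device_type *type)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, cls, NULL, type);	/* start=NULL: whole list */
	while ((dev = class_dev_iter_next(&iter)))
		if (my_match(dev))			/* hypothetical predicate */
			break;
	class_dev_iter_exit(&iter);			/* always pair with init */
	return dev;	/* NULL if the list was exhausted */
}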
|
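class.c above is the canonical klist-backed device iterator: class_dev_iter_init pins a starting point, class_dev_iter_next hands back one device at a time with a reference held, and class_dev_iter_exit drops the iterator's reference, exactly as class_for_each_device brackets its loop. A usage sketch; count_class_devices is a hypothetical caller, the three iter calls are the real driver-core API.

```c
/* Sketch of the class_dev_iter usage pattern shown in class.c above. */
#include <linux/device.h>

static int count_class_devices(struct class *my_class)	/* hypothetical */
{
	struct class_dev_iter iter;
	struct device *dev;
	int n = 0;

	/* NULL start and NULL type: iterate every device in the class */
	class_dev_iter_init(&iter, my_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter)))
		n++;			/* a reference is held across the body */
	class_dev_iter_exit(&iter);	/* releases the iterator's reference */
	return n;
}
```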
H A D | attribute_container.c | 183 #define klist_for_each_entry(pos, head, member, iter) \ 184 for (klist_iter_init(head, iter); (pos = ({ \ 185 struct klist_node *n = klist_next(iter); \ 187 ({ klist_iter_exit(iter) ; NULL; }); \ 217 struct klist_iter iter; attribute_container_remove_device() local 225 klist_for_each_entry(ic, &cont->containers, node, &iter) { attribute_container_remove_device() 261 struct klist_iter iter; attribute_container_device_trigger() local 271 klist_for_each_entry(ic, &cont->containers, node, &iter) { attribute_container_device_trigger() 429 struct klist_iter iter; attribute_container_find_class_device() local 431 klist_for_each_entry(ic, &cont->containers, node, &iter) { attribute_container_find_class_device() 435 klist_iter_exit(&iter); attribute_container_find_class_device()
|
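The local klist_for_each_entry macro defined in attribute_container.c above packs klist_iter_init, klist_next, and klist_iter_exit into a single for statement. Unrolled, the pattern it implements looks roughly like the sketch below (not the literal expansion); note that a caller which breaks out early, as attribute_container_find_class_device does, must call klist_iter_exit itself.

```c
/* Rough unrolling of the klist_for_each_entry macro above; 'pos' is the
 * containing object and 'member' its struct klist_node field. */
struct klist_node *n;

klist_iter_init(head, iter);
while ((n = klist_next(iter)) != NULL) {
	pos = container_of(n, typeof(*pos), member);
	/* ... loop body ... */
}
klist_iter_exit(iter);	/* reached only when the list is exhausted; an
			 * early break must invoke klist_iter_exit() by hand */
```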
H A D | bus.c | 757 /* Helper for bus_rescan_devices's iter */ bus_rescan_devices_helper() 1052 * @iter: subsys iterator to initialize 1057 * Initialize subsys iterator @iter such that it iterates over devices 1062 void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys, subsys_dev_iter_init() argument 1069 klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode); subsys_dev_iter_init() 1070 iter->type = type; subsys_dev_iter_init() 1076 * @iter: subsys iterator to proceed 1078 * Proceed @iter to the next device and return it. Returns NULL if 1086 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter) subsys_dev_iter_next() argument 1092 knode = klist_next(&iter->ki); subsys_dev_iter_next() 1096 if (!iter->type || iter->type == dev->type) subsys_dev_iter_next() 1104 * @iter: subsys iterator to finish 1109 void subsys_dev_iter_exit(struct subsys_dev_iter *iter) subsys_dev_iter_exit() argument 1111 klist_iter_exit(&iter->ki); subsys_dev_iter_exit() 1118 struct subsys_dev_iter iter; subsys_interface_register() local 1131 subsys_dev_iter_init(&iter, subsys, NULL, NULL); subsys_interface_register() 1132 while ((dev = subsys_dev_iter_next(&iter))) subsys_interface_register() 1134 subsys_dev_iter_exit(&iter); subsys_interface_register() 1145 struct subsys_dev_iter iter; subsys_interface_unregister() local 1156 subsys_dev_iter_init(&iter, subsys, NULL, NULL); subsys_interface_unregister() 1157 while ((dev = subsys_dev_iter_next(&iter))) subsys_interface_unregister() 1159 subsys_dev_iter_exit(&iter); subsys_interface_unregister()
|
/linux-4.1.27/drivers/md/bcache/ |
H A D | bset.c | 55 struct btree_iter iter; __bch_count_data() local 59 for_each_key(b, k, &iter) __bch_count_data() 68 struct btree_iter iter; __bch_check_keys() local 71 for_each_key(b, k, &iter) { __bch_check_keys() 109 static void bch_btree_iter_next_check(struct btree_iter *iter) bch_btree_iter_next_check() argument 111 struct bkey *k = iter->data->k, *next = bkey_next(k); bch_btree_iter_next_check() 113 if (next < iter->data->end && bch_btree_iter_next_check() 114 bkey_cmp(k, iter->b->ops->is_extents ? bch_btree_iter_next_check() 116 bch_dump_bucket(iter->b); bch_btree_iter_next_check() 123 static inline void bch_btree_iter_next_check(struct btree_iter *iter) {} bch_btree_iter_next_check() argument 825 struct btree_iter iter; bch_btree_insert_key() local 829 m = bch_btree_iter_init(b, &iter, b->ops->is_extents bch_btree_insert_key() 833 if (b->ops->insert_fixup(b, k, &iter, replace_key)) bch_btree_insert_key() 1027 static inline bool btree_iter_end(struct btree_iter *iter) btree_iter_end() argument 1029 return !iter->used; btree_iter_end() 1032 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, bch_btree_iter_push() argument 1036 BUG_ON(!heap_add(iter, bch_btree_iter_push() 1042 struct btree_iter *iter, __bch_btree_iter_init() 1047 iter->size = ARRAY_SIZE(iter->data); __bch_btree_iter_init() 1048 iter->used = 0; __bch_btree_iter_init() 1051 iter->b = b; __bch_btree_iter_init() 1056 bch_btree_iter_push(iter, ret, bset_bkey_last(start->data)); __bch_btree_iter_init() 1063 struct btree_iter *iter, bch_btree_iter_init() 1066 return __bch_btree_iter_init(b, iter, search, b->set); bch_btree_iter_init() 1070 static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter, __bch_btree_iter_next() argument 1076 if (!btree_iter_end(iter)) { __bch_btree_iter_next() 1077 bch_btree_iter_next_check(iter); __bch_btree_iter_next() 1079 ret = iter->data->k; __bch_btree_iter_next() 1080 iter->data->k = bkey_next(iter->data->k); __bch_btree_iter_next() 1082 if (iter->data->k > iter->data->end) { __bch_btree_iter_next() 1084 iter->data->k = iter->data->end; __bch_btree_iter_next() 1087 if (iter->data->k == iter->data->end) __bch_btree_iter_next() 1088 heap_pop(iter, unused, cmp); __bch_btree_iter_next() 1090 heap_sift(iter, 0, cmp); __bch_btree_iter_next() 1096 struct bkey *bch_btree_iter_next(struct btree_iter *iter) bch_btree_iter_next() argument 1098 return __bch_btree_iter_next(iter, btree_iter_cmp); bch_btree_iter_next() 1103 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter, bch_btree_iter_next_filter() argument 1109 ret = bch_btree_iter_next(iter); bch_btree_iter_next_filter() 1139 struct btree_iter *iter, btree_mergesort() 1150 for (i = iter->used / 2 - 1; i >= 0; --i) btree_mergesort() 1151 heap_sift(iter, i, b->ops->sort_cmp); btree_mergesort() 1153 while (!btree_iter_end(iter)) { btree_mergesort() 1155 k = b->ops->sort_fixup(iter, &tmp.k); btree_mergesort() 1160 k = __bch_btree_iter_next(iter, b->ops->sort_cmp); btree_mergesort() 1179 static void __btree_sort(struct btree_keys *b, struct btree_iter *iter, __btree_sort() argument 1200 btree_mergesort(b, out, iter, fixup, false); __btree_sort() 1235 struct btree_iter iter; bch_btree_sort_partial() local 1238 __bch_btree_iter_init(b, &iter, NULL, &b->set[start]); bch_btree_sort_partial() 1249 __btree_sort(b, &iter, start, order, false, state); bch_btree_sort_partial() 1256 struct btree_iter *iter, bch_btree_sort_and_fix_extents() 1259 __btree_sort(b, iter, 0, b->page_order, true, state); bch_btree_sort_and_fix_extents() 1267 struct btree_iter iter; bch_btree_sort_into() local 1268 bch_btree_iter_init(b, &iter, NULL); bch_btree_sort_into() 1270 btree_mergesort(b, new->set->data, &iter, false, true); bch_btree_sort_into() 1041 __bch_btree_iter_init(struct btree_keys *b, struct btree_iter *iter, struct bkey *search, struct bset_tree *start) __bch_btree_iter_init() argument 1062 bch_btree_iter_init(struct btree_keys *b, struct btree_iter *iter, struct bkey *search) bch_btree_iter_init() argument 1138 btree_mergesort(struct btree_keys *b, struct bset *out, struct btree_iter *iter, bool fixup, bool remove_stale) btree_mergesort() argument 1255 bch_btree_sort_and_fix_extents(struct btree_keys *b, struct btree_iter *iter, struct bset_sort_state *state) bch_btree_sort_and_fix_extents() argument
|

H A D | extents.c | 29 static void sort_key_next(struct btree_iter *iter, sort_key_next() argument 35 *i = iter->data[--iter->used]; sort_key_next() 227 struct btree_iter *iter, bch_btree_ptr_insert_fixup() 264 static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter, bch_extent_sort_fixup() argument 267 while (iter->used > 1) { bch_extent_sort_fixup() 268 struct btree_iter_set *top = iter->data, *i = top + 1; bch_extent_sort_fixup() 270 if (iter->used > 2 && bch_extent_sort_fixup() 278 sort_key_next(iter, i); bch_extent_sort_fixup() 279 heap_sift(iter, i - top, bch_extent_sort_cmp); bch_extent_sort_fixup() 285 sort_key_next(iter, i); bch_extent_sort_fixup() 289 heap_sift(iter, i - top, bch_extent_sort_cmp); bch_extent_sort_fixup() 299 heap_sift(iter, 0, bch_extent_sort_cmp); bch_extent_sort_fixup() 323 struct btree_iter *iter, bch_extent_insert_fixup() 335 struct bkey *k = bch_btree_iter_next(iter); bch_extent_insert_fixup() 225 bch_btree_ptr_insert_fixup(struct btree_keys *bk, struct bkey *insert, struct btree_iter *iter, struct bkey *replace_key) bch_btree_ptr_insert_fixup() argument 321 bch_extent_insert_fixup(struct btree_keys *b, struct bkey *insert, struct btree_iter *iter, struct bkey *replace_key) bch_extent_insert_fixup() argument
|
H A D | debug.c | 110 struct bvec_iter iter; bch_data_verify() local 122 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment() 124 void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page); bio_for_each_segment()
|
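bch_data_verify above walks a bio with bio_for_each_segment, which iterates over a private copy of the iterator, so the bio's own bi_iter is left untouched. A small sketch of the same walk; bio_total_bytes is a hypothetical helper.

```c
/* Sketch of the bio_for_each_segment pattern used in debug.c above
 * (kernel context; 'bio' comes from the caller). */
#include <linux/bio.h>

static unsigned int bio_total_bytes(struct bio *bio)	/* hypothetical */
{
	struct bio_vec bv;
	struct bvec_iter iter;	/* local copy: iterating does not consume the bio */
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;	/* bv is handed over by value here */
	return bytes;
}
```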
H A D | btree.c | 202 struct btree_iter *iter; bch_btree_node_read_done() local 204 iter = mempool_alloc(b->c->fill_iter, GFP_NOIO); bch_btree_node_read_done() 205 iter->size = b->c->sb.bucket_size / b->c->sb.block_size; bch_btree_node_read_done() 206 iter->used = 0; bch_btree_node_read_done() 209 iter->b = &b->keys; bch_btree_node_read_done() 247 bch_btree_iter_push(iter, i->start, bset_bkey_last(i)); bch_btree_node_read_done() 259 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); bch_btree_node_read_done() 271 mempool_free(iter, b->c->fill_iter); bch_btree_node_read_done() 1250 struct btree_iter iter; btree_gc_mark_node() local 1255 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { btree_gc_mark_node() 1503 struct btree_iter iter; btree_gc_count_keys() local 1506 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) btree_gc_count_keys() 1518 struct btree_iter iter; btree_gc_recurse() local 1522 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); btree_gc_recurse() 1528 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); btree_gc_recurse() 1813 struct btree_iter iter; bch_btree_check_recurse() local 1815 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) bch_btree_check_recurse() 1821 bch_btree_iter_init(&b->keys, &iter, NULL); bch_btree_check_recurse() 1824 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_btree_check_recurse() 2276 struct btree_iter iter; bch_btree_map_nodes_recurse() local 2278 bch_btree_iter_init(&b->keys, &iter, from); bch_btree_map_nodes_recurse() 2280 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_btree_map_nodes_recurse() 2309 struct btree_iter iter; bch_btree_map_keys_recurse() local 2311 bch_btree_iter_init(&b->keys, &iter, from); bch_btree_map_keys_recurse() 2313 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) { bch_btree_map_keys_recurse()
|
H A D | util.h | 126 #define fifo_for_each(c, fifo, iter) \ 127 for (iter = (fifo)->front; \ 128 c = (fifo)->data[iter], iter != (fifo)->back; \ 129 iter = (iter + 1) & (fifo)->mask)
|
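fifo_for_each above reads each element into c by value and advances iter under a power-of-two mask, so the cursor wraps around the ring without branching; bch_bucket_alloc in alloc.c further down uses it to assert a bucket sits on no free list. A fragment in that style; 'ca' and 'r' are context from the surrounding bcache code.

```c
/* Fragment in the style of bch_bucket_alloc's sanity checks: scan a
 * bcache fifo without consuming it. fifo_for_each is the macro from
 * util.h above; 'ca' and 'r' come from the caller. */
size_t iter;
long i;

fifo_for_each(i, &ca->free_inc, iter)
	BUG_ON(i == (long) r);	/* the bucket must not be on free_inc */
```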
H A D | io.c | 18 struct bvec_iter iter; bch_bio_max_sectors() local 24 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment()
|
H A D | btree.h | 200 #define for_each_cached_btree(b, c, iter) \ 201 for (iter = 0; \ 202 iter < ARRAY_SIZE((c)->bucket_hash); \ 203 iter++) \ 204 hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
|
H A D | alloc.c | 414 size_t iter; bch_bucket_alloc() local 418 for (iter = 0; iter < prio_buckets(ca) * 2; iter++) bch_bucket_alloc() 419 BUG_ON(ca->prio_buckets[iter] == (uint64_t) r); bch_bucket_alloc() 422 fifo_for_each(i, &ca->free[j], iter) bch_bucket_alloc() 424 fifo_for_each(i, &ca->free_inc, iter) bch_bucket_alloc()
|
H A D | bset.h | 338 #define for_each_key_filter(b, k, iter, filter) \ 339 for (bch_btree_iter_init((b), (iter), NULL); \ 340 ((k) = bch_btree_iter_next_filter((iter), (b), filter));) 342 #define for_each_key(b, k, iter) \ 343 for (bch_btree_iter_init((b), (iter), NULL); \ 344 ((k) = bch_btree_iter_next(iter));)
|
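These macros pair bch_btree_iter_init with the plain and filtered next functions from bset.c above, so callers never touch the underlying heap machinery directly. A fragment in the style of btree_gc_count_keys from btree.c above.

```c
/* Fragment in the style of btree_gc_count_keys above: walk the keys of
 * a btree node, letting the filter drop bad/stale pointers. 'b' is a
 * struct btree from the surrounding bcache context. */
struct btree_iter iter;
struct bkey *k;
unsigned keys = 0;

for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
	keys++;		/* keys rejected by bch_ptr_bad are skipped */
```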
H A D | journal.c | 156 unsigned iter; bch_journal_read() local 158 for_each_cache(ca, c, iter) { for_each_cache() 472 unsigned iter, n = 0; journal_reclaim() local 482 for_each_cache(ca, c, iter) { for_each_cache() 491 for_each_cache(ca, c, iter) for_each_cache() 502 for_each_cache(ca, c, iter) { for_each_cache()
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
H A D | qib_debugfs.c | 193 struct qib_qp_iter *iter; _qp_stats_seq_start() local 197 iter = qib_qp_iter_init(s->private); _qp_stats_seq_start() 198 if (!iter) _qp_stats_seq_start() 202 if (qib_qp_iter_next(iter)) { _qp_stats_seq_start() 203 kfree(iter); _qp_stats_seq_start() 208 return iter; _qp_stats_seq_start() 214 struct qib_qp_iter *iter = iter_ptr; _qp_stats_seq_next() local 218 if (qib_qp_iter_next(iter)) { _qp_stats_seq_next() 219 kfree(iter); _qp_stats_seq_next() 223 return iter; _qp_stats_seq_next() 233 struct qib_qp_iter *iter = iter_ptr; _qp_stats_seq_show() local 235 if (!iter) _qp_stats_seq_show() 238 qib_qp_iter_print(s, iter); _qp_stats_seq_show()
|
H A D | qib_qp.c | 1323 struct qib_qp_iter *iter; qib_qp_iter_init() local 1325 iter = kzalloc(sizeof(*iter), GFP_KERNEL); qib_qp_iter_init() 1326 if (!iter) qib_qp_iter_init() 1329 iter->dev = dev; qib_qp_iter_init() 1330 if (qib_qp_iter_next(iter)) { qib_qp_iter_init() 1331 kfree(iter); qib_qp_iter_init() 1335 return iter; qib_qp_iter_init() 1338 int qib_qp_iter_next(struct qib_qp_iter *iter) qib_qp_iter_next() argument 1340 struct qib_ibdev *dev = iter->dev; qib_qp_iter_next() 1341 int n = iter->n; qib_qp_iter_next() 1343 struct qib_qp *pqp = iter->qp; qib_qp_iter_next() 1353 iter->qp = qp; qib_qp_iter_next() 1354 iter->n = n; qib_qp_iter_next() 1365 void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter) qib_qp_iter_print() argument 1368 struct qib_qp *qp = iter->qp; qib_qp_iter_print() 1373 iter->n, qib_qp_iter_print()
|
/linux-4.1.27/net/sunrpc/ |
H A D | debugfs.c | 47 struct rpc_clnt_iter *iter = f->private; variable in typeref:struct:rpc_clnt_iter 49 struct rpc_clnt *clnt = iter->clnt; 52 iter->pos = pos + 1; 63 struct rpc_clnt_iter *iter = f->private; tasks_next() local 64 struct rpc_clnt *clnt = iter->clnt; tasks_next() 68 ++iter->pos; tasks_next() 81 struct rpc_clnt_iter *iter = f->private; variable in typeref:struct:rpc_clnt_iter 82 struct rpc_clnt *clnt = iter->clnt; 101 struct rpc_clnt_iter *iter = seq->private; tasks_open() local 103 iter->clnt = inode->i_private; tasks_open() 105 if (!atomic_inc_not_zero(&iter->clnt->cl_count)) { tasks_open() 118 struct rpc_clnt_iter *iter = seq->private; tasks_release() local 120 rpc_release_client(iter->clnt); tasks_release()
|
/linux-4.1.27/drivers/s390/block/ |
H A D | scm_blk_cluster.c | 79 struct scm_request *iter; scm_reserve_cluster() local 86 list_for_each_entry(iter, &bdev->cluster_list, cluster.list) { scm_reserve_cluster() 87 if (iter == scmrq) { scm_reserve_cluster() 95 for (pos = 0; pos < iter->aob->request.msb_count; pos++) { scm_reserve_cluster() 96 if (clusters_intersect(req, iter->request[pos]) && scm_reserve_cluster() 98 rq_data_dir(iter->request[pos]) == WRITE)) { scm_reserve_cluster() 136 struct req_iterator iter; scm_prepare_cluster_request() local 183 rq_for_each_segment(bv, req, iter) { rq_for_each_segment()
|
H A D | scm_blk.c | 52 struct list_head *iter, *safe; scm_free_rqs() local 56 list_for_each_safe(iter, safe, &inactive_requests) { scm_free_rqs() 57 scmrq = list_entry(iter, struct scm_request, list); scm_free_rqs() 188 struct req_iterator iter; scm_request_prepare() local 203 rq_for_each_segment(bv, req, iter) { rq_for_each_segment()
|
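scm_free_rqs above deletes nodes while walking, which is exactly what list_for_each_safe permits: the safe cursor is read ahead before the body runs, so list_del on iter cannot derail the traversal. A sketch of the idiom; free_all is a hypothetical helper over the same scm_request list.

```c
/* The delete-while-iterating idiom from scm_free_rqs above:
 * list_for_each_safe keeps 'safe' one step ahead, so list_del on
 * 'iter' cannot break the traversal. */
#include <linux/list.h>
#include <linux/slab.h>

static void free_all(struct list_head *head)	/* hypothetical */
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;	/* element type from scm_blk.c */

	list_for_each_safe(iter, safe, head) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(iter);
		kfree(scmrq);
	}
}
```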
/linux-4.1.27/fs/nfs/objlayout/ |
H A D | pnfs_osd_xdr_cli.c | 160 struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr) pnfs_osd_xdr_decode_layout_map() 164 memset(iter, 0, sizeof(*iter)); pnfs_osd_xdr_decode_layout_map() 176 iter->total_comps = layout->olo_num_comps; pnfs_osd_xdr_decode_layout_map() 181 struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr, pnfs_osd_xdr_decode_layout_comp() 184 BUG_ON(iter->decoded_comps > iter->total_comps); pnfs_osd_xdr_decode_layout_comp() 185 if (iter->decoded_comps == iter->total_comps) pnfs_osd_xdr_decode_layout_comp() 192 iter->decoded_comps, iter->total_comps); pnfs_osd_xdr_decode_layout_comp() 204 iter->decoded_comps++; pnfs_osd_xdr_decode_layout_comp() 159 pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout, struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr) pnfs_osd_xdr_decode_layout_map() argument 180 pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp, struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr, int *err) pnfs_osd_xdr_decode_layout_comp() argument
|
/linux-4.1.27/drivers/dma/ |
H A D | iop-adma.c | 116 struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL; __iop_adma_slot_cleanup() local 126 list_for_each_entry_safe(iter, _iter, &iop_chan->chain, __iop_adma_slot_cleanup() 130 iter->async_tx.cookie, iter->idx, busy, __iop_adma_slot_cleanup() 131 iter->async_tx.phys, iop_desc_get_next_desc(iter), __iop_adma_slot_cleanup() 132 async_tx_test_ack(&iter->async_tx)); __iop_adma_slot_cleanup() 147 if (iter->async_tx.phys == current_desc) { __iop_adma_slot_cleanup() 149 if (busy || iop_desc_get_next_desc(iter)) __iop_adma_slot_cleanup() 155 slot_cnt = iter->slot_cnt; __iop_adma_slot_cleanup() 156 slots_per_op = iter->slots_per_op; __iop_adma_slot_cleanup() 166 grp_start = iter; __iop_adma_slot_cleanup() 226 if (iter->xor_check_result && iter->async_tx.cookie) __iop_adma_slot_cleanup() 227 *iter->xor_check_result = __iop_adma_slot_cleanup() 228 iop_desc_get_zero_result(iter); __iop_adma_slot_cleanup() 231 iter, iop_chan, cookie); __iop_adma_slot_cleanup() 233 if (iop_adma_clean_slot(iter, iop_chan)) __iop_adma_slot_cleanup() 269 struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL; iop_adma_alloc_slots() local 280 iter = iop_chan->last_used; iop_adma_alloc_slots() 282 iter = list_entry(&iop_chan->all_slots, iop_adma_alloc_slots() 287 iter, _iter, &iop_chan->all_slots, slot_node) { iop_adma_alloc_slots() 290 if (iter->slots_per_op) { iop_adma_alloc_slots() 303 if (iop_desc_is_aligned(iter, slots_per_op)) iop_adma_alloc_slots() 304 alloc_start = iter; iop_adma_alloc_slots() 314 iter = alloc_start; iop_adma_alloc_slots() 320 iter->idx, iter->hw_desc, iop_adma_alloc_slots() 321 iter->async_tx.phys, slots_per_op); iop_adma_alloc_slots() 325 async_tx_ack(&iter->async_tx); iop_adma_alloc_slots() 327 list_add_tail(&iter->chain_node, &chain); iop_adma_alloc_slots() 328 alloc_tail = iter; iop_adma_alloc_slots() 329 iter->async_tx.cookie = 0; iop_adma_alloc_slots() 330 iter->slot_cnt = num_slots; iop_adma_alloc_slots() 331 iter->xor_check_result = NULL; iop_adma_alloc_slots() 333 iter->slots_per_op = slots_per_op - i; iop_adma_alloc_slots() 334 last_used = iter; iop_adma_alloc_slots() 335 iter = list_entry(iter->slot_node.next, iop_adma_alloc_slots() 729 struct iop_adma_desc_slot *iter, *_iter; iop_adma_free_chan_resources() local 735 list_for_each_entry_safe(iter, _iter, &iop_chan->chain, iop_adma_free_chan_resources() 738 list_del(&iter->chain_node); iop_adma_free_chan_resources() 741 iter, _iter, &iop_chan->all_slots, slot_node) { iop_adma_free_chan_resources() 742 list_del(&iter->slot_node); iop_adma_free_chan_resources() 743 kfree(iter); iop_adma_free_chan_resources()
|
H A D | mv_xor.c | 235 struct mv_xor_desc_slot *iter, *_iter; mv_xor_clean_completed_slots() local 238 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, mv_xor_clean_completed_slots() 241 if (async_tx_test_ack(&iter->async_tx)) { mv_xor_clean_completed_slots() 242 list_del(&iter->completed_node); mv_xor_clean_completed_slots() 243 mv_xor_free_slots(mv_chan, iter); mv_xor_clean_completed_slots() 272 struct mv_xor_desc_slot *iter, *_iter; mv_xor_slot_cleanup() local 287 list_for_each_entry_safe(iter, _iter, &mv_chan->chain, mv_xor_slot_cleanup() 291 hw_desc = iter->hw_desc; mv_xor_slot_cleanup() 293 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, mv_xor_slot_cleanup() 297 mv_xor_clean_slot(iter, mv_chan); mv_xor_slot_cleanup() 300 if (iter->async_tx.phys == current_desc) { mv_xor_slot_cleanup() 305 if (iter->async_tx.phys == current_desc) { mv_xor_slot_cleanup() 318 iter = list_entry(mv_chan->chain.next, mv_xor_slot_cleanup() 321 mv_xor_start_new_chain(mv_chan, iter); mv_xor_slot_cleanup() 323 if (!list_is_last(&iter->chain_node, &mv_chan->chain)) { mv_xor_slot_cleanup() 328 iter = list_entry(iter->chain_node.next, mv_xor_slot_cleanup() 331 mv_xor_start_new_chain(mv_chan, iter); mv_xor_slot_cleanup() 358 struct mv_xor_desc_slot *iter, *_iter; mv_xor_alloc_slot() local 367 iter = mv_chan->last_used; mv_xor_alloc_slot() 369 iter = list_entry(&mv_chan->all_slots, mv_xor_alloc_slot() 374 iter, _iter, &mv_chan->all_slots, slot_node) { mv_xor_alloc_slot() 378 if (iter->slot_used) { mv_xor_alloc_slot() 388 async_tx_ack(&iter->async_tx); mv_xor_alloc_slot() 390 iter->slot_used = 1; mv_xor_alloc_slot() 391 INIT_LIST_HEAD(&iter->chain_node); mv_xor_alloc_slot() 392 iter->async_tx.cookie = -EBUSY; mv_xor_alloc_slot() 393 mv_chan->last_used = iter; mv_xor_alloc_slot() 394 mv_desc_clear_next_desc(iter); mv_xor_alloc_slot() 396 return iter; mv_xor_alloc_slot() 574 struct mv_xor_desc_slot *iter, *_iter; mv_xor_free_chan_resources() local 581 list_for_each_entry_safe(iter, _iter, &mv_chan->chain, mv_xor_free_chan_resources() 584 list_del(&iter->chain_node); mv_xor_free_chan_resources() 586 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, mv_xor_free_chan_resources() 589 list_del(&iter->completed_node); mv_xor_free_chan_resources() 592 iter, _iter, &mv_chan->all_slots, slot_node) { mv_xor_free_chan_resources() 593 list_del(&iter->slot_node); mv_xor_free_chan_resources() 594 kfree(iter); mv_xor_free_chan_resources()
|
H A D | fsl-edma.c | 541 u16 soff, doff, iter; fsl_edma_prep_dma_cyclic() local 554 iter = period_len / nbytes; fsl_edma_prep_dma_cyclic() 576 fsl_chan->fsc.attr, soff, nbytes, 0, iter, fsl_edma_prep_dma_cyclic() 577 iter, doff, last_sg, true, false, true); fsl_edma_prep_dma_cyclic() 593 u16 soff, doff, iter; fsl_edma_prep_slave_sg() local 621 iter = sg_dma_len(sg) / nbytes; for_each_sg() 626 nbytes, 0, iter, iter, doff, last_sg, for_each_sg() 632 nbytes, 0, iter, iter, doff, last_sg, for_each_sg()
|
/linux-4.1.27/drivers/scsi/qla4xxx/ |
H A D | ql4_attr.c | 129 struct sysfs_entry *iter; qla4_8xxx_alloc_sysfs_attr() local 132 for (iter = bin_file_entries; iter->name; iter++) { qla4_8xxx_alloc_sysfs_attr() 134 iter->attr); qla4_8xxx_alloc_sysfs_attr() 138 iter->name, ret); qla4_8xxx_alloc_sysfs_attr() 145 struct sysfs_entry *iter; qla4_8xxx_free_sysfs_attr() local 147 for (iter = bin_file_entries; iter->name; iter++) qla4_8xxx_free_sysfs_attr() 149 iter->attr); qla4_8xxx_free_sysfs_attr()
|
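qla4_8xxx_alloc_sysfs_attr and its free counterpart above walk a table terminated by a NULL .name rather than carrying a length. The pattern is plain C; below is a standalone demo with a hypothetical struct entry standing in for the driver's struct sysfs_entry.

```c
/* Standalone demo of the NULL-name sentinel walk used in ql4_attr.c
 * above; struct entry is a stand-in for the driver's struct sysfs_entry. */
#include <stdio.h>

struct entry { const char *name; int id; };

static const struct entry entries[] = {
	{ "fw_dump", 1 },
	{ "ddb",     2 },
	{ NULL,      0 },	/* sentinel terminates the walk */
};

int main(void)
{
	const struct entry *iter;

	for (iter = entries; iter->name; iter++)
		printf("%s -> %d\n", iter->name, iter->id);
	return 0;
}
```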
/linux-4.1.27/net/netfilter/ |
H A D | nft_hash.c | 186 struct nft_set_iter *iter) nft_hash_walk() 196 iter->err = err; nft_hash_walk() 202 iter->err = err; nft_hash_walk() 210 iter->err = err; nft_hash_walk() 217 if (iter->count < iter->skip) nft_hash_walk() 226 iter->err = iter->fn(ctx, set, iter, &elem); nft_hash_walk() 227 if (iter->err < 0) nft_hash_walk() 231 iter->count++; nft_hash_walk() 185 nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set, struct nft_set_iter *iter) nft_hash_walk() argument
|
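nft_hash_walk above drives a callback over every live element: entries below iter->skip are passed over, iter->count records progress so dumps can resume, and a negative return from iter->fn is latched into iter->err and aborts the walk. A hedged sketch of a callback honoring that contract; my_walk_cb is hypothetical, the signature matches the callbacks in nf_tables_api.c below.

```c
/* Hedged sketch of a set-walk callback matching the contract used by
 * nft_hash_walk above: return 0 to keep walking, a negative errno to
 * abort (the walker stores it in iter->err and stops). */
static int my_walk_cb(const struct nft_ctx *ctx, const struct nft_set *set,
		      const struct nft_set_iter *iter,
		      const struct nft_set_elem *elem)
{
	/* elem is only handed over once iter->count has cleared iter->skip,
	 * so the skip bookkeeping stays inside the walker itself */
	return 0;
}
```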
H A D | nft_rbtree.c | 175 struct nft_set_iter *iter) nft_rbtree_walk() 187 if (iter->count < iter->skip) nft_rbtree_walk() 194 iter->err = iter->fn(ctx, set, iter, &elem); nft_rbtree_walk() 195 if (iter->err < 0) { nft_rbtree_walk() 200 iter->count++; nft_rbtree_walk() 173 nft_rbtree_walk(const struct nft_ctx *ctx, const struct nft_set *set, struct nft_set_iter *iter) nft_rbtree_walk() argument
|
H A D | nf_tables_api.c | 2842 const struct nft_set_iter *iter, nf_tables_bind_check_setelem() 2859 struct nft_set_iter iter; nf_tables_bind_set() local 2874 iter.skip = 0; nf_tables_bind_set() 2875 iter.count = 0; nf_tables_bind_set() 2876 iter.err = 0; nf_tables_bind_set() 2877 iter.fn = nf_tables_bind_check_setelem; nf_tables_bind_set() 2879 set->ops->walk(ctx, set, &iter); nf_tables_bind_set() 2880 if (iter.err < 0) { nf_tables_bind_set() 2885 return iter.err; nf_tables_bind_set() 3047 struct nft_set_iter iter; member in struct:nft_set_dump_args 3053 const struct nft_set_iter *iter, nf_tables_dump_setelem() 3058 args = container_of(iter, struct nft_set_dump_args, iter); nf_tables_dump_setelem() 3116 args.iter.skip = cb->args[0]; nf_tables_dump_set() 3117 args.iter.count = 0; nf_tables_dump_set() 3118 args.iter.err = 0; nf_tables_dump_set() 3119 args.iter.fn = nf_tables_dump_setelem; nf_tables_dump_set() 3120 set->ops->walk(&ctx, set, &args.iter); nf_tables_dump_set() 3125 if (args.iter.err && args.iter.err != -EMSGSIZE) nf_tables_dump_set() 3126 return args.iter.err; nf_tables_dump_set() 3127 if (args.iter.count == cb->args[0]) nf_tables_dump_set() 3130 cb->args[0] = args.iter.count; nf_tables_dump_set() 4099 const struct nft_set_iter *iter, nf_tables_loop_check_setelem() 4126 struct nft_set_iter iter; nf_tables_check_loops() local 4169 iter.skip = 0; 4170 iter.count = 0; 4171 iter.err = 0; 4172 iter.fn = nf_tables_loop_check_setelem; 4174 set->ops->walk(ctx, set, &iter); 4175 if (iter.err < 0) 4176 return iter.err; 2840 nf_tables_bind_check_setelem(const struct nft_ctx *ctx, const struct nft_set *set, const struct nft_set_iter *iter, const struct nft_set_elem *elem) nf_tables_bind_check_setelem() argument 3051 nf_tables_dump_setelem(const struct nft_ctx *ctx, const struct nft_set *set, const struct nft_set_iter *iter, const struct nft_set_elem *elem) nf_tables_dump_setelem() argument 4097 nf_tables_loop_check_setelem(const struct nft_ctx *ctx, const struct nft_set *set, const struct nft_set_iter *iter, const struct nft_set_elem *elem) nf_tables_loop_check_setelem() argument
|
/linux-4.1.27/tools/perf/ui/gtk/ |
H A D | hists.c | 101 GtkTreeIter iter, new_parent; perf_gtk__add_callchain() local 117 gtk_tree_store_append(store, &iter, &new_parent); perf_gtk__add_callchain() 120 gtk_tree_store_set(store, &iter, 0, buf, -1); perf_gtk__add_callchain() 123 gtk_tree_store_set(store, &iter, col, buf, -1); perf_gtk__add_callchain() 130 new_parent = iter; perf_gtk__add_callchain() 140 /* Now 'iter' contains info of the last callchain_list */ perf_gtk__add_callchain() 141 perf_gtk__add_callchain(&node->rb_root, store, &iter, col, perf_gtk__add_callchain() 225 GtkTreeIter iter; local 236 gtk_tree_store_append(store, &iter, NULL); 249 gtk_tree_store_set(store, &iter, col_idx++, s, -1); perf_hpp__for_each_format() 257 perf_gtk__add_callchain(&h->sorted_chain, store, &iter,
|
H A D | annotate.c | 121 GtkTreeIter iter; perf_gtk__annotate_symbol() local 124 gtk_list_store_append(store, &iter); perf_gtk__annotate_symbol() 140 gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1); perf_gtk__annotate_symbol() 142 gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1); perf_gtk__annotate_symbol() 144 gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1); perf_gtk__annotate_symbol()
|
/linux-4.1.27/net/ipv4/ |
H A D | cipso_ipv4.c | 253 u32 iter; cipso_v4_cache_init() local 261 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { cipso_v4_cache_init() 262 spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache_init() 263 cipso_v4_cache[iter].size = 0; cipso_v4_cache_init() 264 INIT_LIST_HEAD(&cipso_v4_cache[iter].list); cipso_v4_cache_init() 281 u32 iter; cipso_v4_cache_invalidate() local 283 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { cipso_v4_cache_invalidate() 284 spin_lock_bh(&cipso_v4_cache[iter].lock); cipso_v4_cache_invalidate() 287 &cipso_v4_cache[iter].list, list) { cipso_v4_cache_invalidate() 291 cipso_v4_cache[iter].size = 0; cipso_v4_cache_invalidate() 292 spin_unlock_bh(&cipso_v4_cache[iter].lock); cipso_v4_cache_invalidate() 445 struct cipso_v4_doi *iter; cipso_v4_doi_search() local 447 list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) cipso_v4_doi_search() 448 if (iter->doi == doi && atomic_read(&iter->refcount)) cipso_v4_doi_search() 449 return iter; cipso_v4_doi_search() 470 u32 iter; cipso_v4_doi_add() local 480 for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { cipso_v4_doi_add() 481 switch (doi_def->tags[iter]) { cipso_v4_doi_add() 494 if (iter == 0) cipso_v4_doi_add() 1004 u32 iter; cipso_v4_map_cat_enum_valid() local 1009 for (iter = 0; iter < enumcat_len; iter += 2) { cipso_v4_map_cat_enum_valid() 1010 cat = get_unaligned_be16(&enumcat[iter]); cipso_v4_map_cat_enum_valid() 1074 u32 iter; cipso_v4_map_cat_enum_ntoh() local 1076 for (iter = 0; iter < net_cat_len; iter += 2) { cipso_v4_map_cat_enum_ntoh() 1078 get_unaligned_be16(&net_cat[iter]), cipso_v4_map_cat_enum_ntoh() 1106 u32 iter; cipso_v4_map_cat_rng_valid() local 1111 for (iter = 0; iter < rngcat_len; iter += 4) { cipso_v4_map_cat_rng_valid() 1112 cat_high = get_unaligned_be16(&rngcat[iter]); cipso_v4_map_cat_rng_valid() 1113 if ((iter + 4) <= rngcat_len) cipso_v4_map_cat_rng_valid() 1114 cat_low = get_unaligned_be16(&rngcat[iter + 2]); cipso_v4_map_cat_rng_valid() 1146 int iter = -1; cipso_v4_map_cat_rng_hton() local 1157 iter = netlbl_catmap_walk(secattr->attr.mls.cat, iter + 1); cipso_v4_map_cat_rng_hton() 1158 if (iter < 0) cipso_v4_map_cat_rng_hton() 1160 cat_size += (iter == 0 ? 0 : sizeof(u16)); cipso_v4_map_cat_rng_hton() 1163 array[array_cnt++] = iter; cipso_v4_map_cat_rng_hton() 1165 iter = netlbl_catmap_walkrng(secattr->attr.mls.cat, iter); cipso_v4_map_cat_rng_hton() 1166 if (iter < 0) cipso_v4_map_cat_rng_hton() 1171 array[array_cnt++] = iter; cipso_v4_map_cat_rng_hton() 1174 for (iter = 0; array_cnt > 0;) { cipso_v4_map_cat_rng_hton() 1175 *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); cipso_v4_map_cat_rng_hton() 1176 iter += 2; cipso_v4_map_cat_rng_hton() 1179 *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); cipso_v4_map_cat_rng_hton() 1180 iter += 2; cipso_v4_map_cat_rng_hton() 1820 u32 iter; cipso_v4_genopt() local 1828 iter = 0; cipso_v4_genopt() 1831 switch (doi_def->tags[iter]) { cipso_v4_genopt() 1860 iter++; cipso_v4_genopt() 1862 iter < CIPSO_V4_TAG_MAXCNT && cipso_v4_genopt() 1863 doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); cipso_v4_genopt() 2042 int iter; cipso_v4_delopt() local 2067 iter = 0; cipso_v4_delopt() 2069 while (iter < opt->opt.optlen) cipso_v4_delopt() 2070 if (opt->opt.__data[iter] != IPOPT_NOP) { cipso_v4_delopt() 2071 iter += opt->opt.__data[iter + 1]; cipso_v4_delopt() 2072 optlen_new = iter; cipso_v4_delopt() 2074 iter++; cipso_v4_delopt()
|
H A D | fib_trie.c | 2010 static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter) fib_trie_get_next() argument 2012 unsigned long cindex = iter->index; fib_trie_get_next() 2013 struct key_vector *pn = iter->tnode; fib_trie_get_next() 2016 pr_debug("get_next iter={node=%p index=%d depth=%d}\n", fib_trie_get_next() 2017 iter->tnode, iter->index, iter->depth); fib_trie_get_next() 2027 iter->tnode = pn; fib_trie_get_next() 2028 iter->index = cindex; fib_trie_get_next() 2031 iter->tnode = n; fib_trie_get_next() 2032 iter->index = 0; fib_trie_get_next() 2033 ++iter->depth; fib_trie_get_next() 2043 --iter->depth; fib_trie_get_next() 2047 iter->tnode = pn; fib_trie_get_next() 2048 iter->index = 0; fib_trie_get_next() 2053 static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter, fib_trie_get_first() argument 2066 iter->tnode = n; fib_trie_get_first() 2067 iter->index = 0; fib_trie_get_first() 2068 iter->depth = 1; fib_trie_get_first() 2070 iter->tnode = pn; fib_trie_get_first() 2071 iter->index = 0; fib_trie_get_first() 2072 iter->depth = 0; fib_trie_get_first() 2081 struct fib_trie_iter iter; trie_collect_stats() local 2086 for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) { trie_collect_stats() 2091 s->totdepth += iter.depth; trie_collect_stats() 2092 if (iter.depth > s->maxdepth) trie_collect_stats() 2093 s->maxdepth = iter.depth; trie_collect_stats() 2240 struct fib_trie_iter *iter = seq->private; fib_trie_get_idx() local 2252 for (n = fib_trie_get_first(iter, hlist_for_each_entry_rcu() 2254 n; n = fib_trie_get_next(iter)) hlist_for_each_entry_rcu() 2256 iter->tb = tb; hlist_for_each_entry_rcu() 2274 struct fib_trie_iter *iter = seq->private; fib_trie_seq_next() local 2276 struct fib_table *tb = iter->tb; fib_trie_seq_next() 2283 n = fib_trie_get_next(iter); fib_trie_seq_next() 2291 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); fib_trie_seq_next() 2300 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); hlist_for_each_entry_rcu() 2308 iter->tb = tb; 2364 const struct fib_trie_iter *iter = seq->private; fib_trie_seq_show() local 2368 fib_table_print(seq, iter->tb); fib_trie_seq_show() 2373 seq_indent(seq, iter->depth-1); fib_trie_seq_show() 2382 seq_indent(seq, iter->depth); fib_trie_seq_show() 2388 seq_indent(seq, iter->depth + 1); fib_trie_seq_show() 2433 static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, fib_route_get_idx() argument 2436 struct fib_table *tb = iter->main_tb; fib_route_get_idx() 2437 struct key_vector *l, **tp = &iter->tnode; fib_route_get_idx() 2442 if (iter->pos > 0 && pos >= iter->pos) { fib_route_get_idx() 2443 pos -= iter->pos; fib_route_get_idx() 2444 key = iter->key; fib_route_get_idx() 2447 iter->tnode = t->kv; fib_route_get_idx() 2448 iter->pos = 0; fib_route_get_idx() 2454 iter->pos++; fib_route_get_idx() 2467 iter->key = key; /* remember it */ fib_route_get_idx() 2469 iter->pos = 0; /* forget it */ fib_route_get_idx() 2477 struct fib_route_iter *iter = seq->private; __acquires() local 2487 iter->main_tb = tb; __acquires() 2490 return fib_route_get_idx(iter, *pos); __acquires() 2493 iter->tnode = t->kv; __acquires() 2494 iter->pos = 0; __acquires() 2495 iter->key = 0; __acquires() 2502 struct fib_route_iter *iter = seq->private; fib_route_seq_next() local 2504 t_key key = iter->key; fib_route_seq_next() 2510 l = leaf_walk_rcu(&iter->tnode, key); fib_route_seq_next() 2513 iter->key = l->key + 1; fib_route_seq_next() 2514 iter->pos++; fib_route_seq_next() 2516 iter->pos = 0; fib_route_seq_next() 2550 struct fib_route_iter *iter = seq->private; fib_route_seq_show() local 2551 struct fib_table *tb = iter->main_tb; fib_route_seq_show()
|
H A D | ipmr.c | 2435 struct ipmr_vif_iter *iter, ipmr_vif_seq_idx() 2438 struct mr_table *mrt = iter->mrt; ipmr_vif_seq_idx() 2440 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) { ipmr_vif_seq_idx() 2441 if (!VIF_EXISTS(mrt, iter->ct)) ipmr_vif_seq_idx() 2444 return &mrt->vif_table[iter->ct]; ipmr_vif_seq_idx() 2452 struct ipmr_vif_iter *iter = seq->private; __acquires() local 2460 iter->mrt = mrt; __acquires() 2469 struct ipmr_vif_iter *iter = seq->private; ipmr_vif_seq_next() local 2471 struct mr_table *mrt = iter->mrt; ipmr_vif_seq_next() 2475 return ipmr_vif_seq_idx(net, iter, 0); ipmr_vif_seq_next() 2477 while (++iter->ct < mrt->maxvif) { ipmr_vif_seq_next() 2478 if (!VIF_EXISTS(mrt, iter->ct)) ipmr_vif_seq_next() 2480 return &mrt->vif_table[iter->ct]; ipmr_vif_seq_next() 2493 struct ipmr_vif_iter *iter = seq->private; ipmr_vif_seq_show() local 2494 struct mr_table *mrt = iter->mrt; ipmr_vif_seq_show() 2434 ipmr_vif_seq_idx(struct net *net, struct ipmr_vif_iter *iter, loff_t pos) ipmr_vif_seq_idx() argument
|
/linux-4.1.27/drivers/block/aoe/ |
H A D | aoecmd.c | 199 memset(&f->iter, 0, sizeof(f->iter)); aoe_freetframe() 297 skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter) skb_fillup() argument 302 __bio_for_each_segment(bv, bio, iter, iter) skb_fillup() 343 ah->scnt = f->iter.bi_size >> 9; ata_rw_frameinit() 344 put_lba(ah, f->iter.bi_sector); ata_rw_frameinit() 353 skb_fillup(skb, f->buf->bio, f->iter); ata_rw_frameinit() 355 skb->len += f->iter.bi_size; ata_rw_frameinit() 356 skb->data_len = f->iter.bi_size; ata_rw_frameinit() 357 skb->truesize += f->iter.bi_size; ata_rw_frameinit() 385 f->iter = buf->iter; aoecmd_ata_rw() 386 f->iter.bi_size = min_t(unsigned long, aoecmd_ata_rw() 388 f->iter.bi_size); aoecmd_ata_rw() 389 bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size); aoecmd_ata_rw() 391 if (!buf->iter.bi_size) aoecmd_ata_rw() 588 nf->iter = f->iter; reassign_frame() 620 f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT; probe() 623 for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) { probe() 630 skb->len += f->iter.bi_size; probe() 631 skb->data_len = f->iter.bi_size; probe() 632 skb->truesize += f->iter.bi_size; probe() 871 struct bvec_iter iter; bio_pageinc() local 873 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment() 887 struct bvec_iter iter; bio_pagedec() local 889 bio_for_each_segment(bv, bio, iter) { bio_for_each_segment() 901 buf->iter = bio->bi_iter; bufinit() 1087 bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt) bvcpy() argument 1092 iter.bi_size = cnt; bvcpy() 1094 __bio_for_each_segment(bv, bio, iter, iter) { __bio_for_each_segment() 1191 if (n > f->iter.bi_size) { ktiocomplete() 1195 n, f->iter.bi_size); ktiocomplete() 1199 bvcpy(skb, f->buf->bio, f->iter, n); ktiocomplete() 1242 if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0) ktiocomplete() 1697 buf->iter.bi_size = 0; aoe_failbuf()
|
H A D | aoe.h | 104 struct bvec_iter iter; member in struct:buf 123 struct bvec_iter iter; member in struct:frame
|
/linux-4.1.27/drivers/gpu/drm/radeon/ |
H A D | drm_buffer.h | 130 int iter = buffer->iterator + offset * 4; drm_buffer_pointer_to_dword() local 131 return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)]; drm_buffer_pointer_to_dword() 144 int iter = buffer->iterator + offset; drm_buffer_pointer_to_byte() local 145 return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)]; drm_buffer_pointer_to_byte()
|
/linux-4.1.27/net/nfc/ |
H A D | nfc.h | 113 static inline void nfc_device_iter_init(struct class_dev_iter *iter) nfc_device_iter_init() argument 115 class_dev_iter_init(iter, &nfc_class, NULL, NULL); nfc_device_iter_init() 118 static inline struct nfc_dev *nfc_device_iter_next(struct class_dev_iter *iter) nfc_device_iter_next() argument 120 struct device *d = class_dev_iter_next(iter); nfc_device_iter_next() 127 static inline void nfc_device_iter_exit(struct class_dev_iter *iter) nfc_device_iter_exit() argument 129 class_dev_iter_exit(iter); nfc_device_iter_exit()
|
H A D | netlink.c | 580 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; nfc_genl_dump_devices() local 584 if (!iter) { nfc_genl_dump_devices() 586 iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL); nfc_genl_dump_devices() 587 if (!iter) nfc_genl_dump_devices() 589 cb->args[0] = (long) iter; nfc_genl_dump_devices() 597 nfc_device_iter_init(iter); nfc_genl_dump_devices() 598 dev = nfc_device_iter_next(iter); nfc_genl_dump_devices() 609 dev = nfc_device_iter_next(iter); nfc_genl_dump_devices() 621 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; nfc_genl_dump_devices_done() local 623 nfc_device_iter_exit(iter); nfc_genl_dump_devices_done() 624 kfree(iter); nfc_genl_dump_devices_done() 1313 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; nfc_genl_dump_ses() local 1317 if (!iter) { nfc_genl_dump_ses() 1319 iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL); nfc_genl_dump_ses() 1320 if (!iter) nfc_genl_dump_ses() 1322 cb->args[0] = (long) iter; nfc_genl_dump_ses() 1330 nfc_device_iter_init(iter); nfc_genl_dump_ses() 1331 dev = nfc_device_iter_next(iter); nfc_genl_dump_ses() 1342 dev = nfc_device_iter_next(iter); nfc_genl_dump_ses() 1354 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; nfc_genl_dump_ses_done() local 1356 nfc_device_iter_exit(iter); nfc_genl_dump_ses_done() 1357 kfree(iter); nfc_genl_dump_ses_done() 1593 struct class_dev_iter iter; nfc_urelease_event_work() local 1600 nfc_device_iter_init(&iter); nfc_urelease_event_work() 1601 dev = nfc_device_iter_next(&iter); nfc_urelease_event_work() 1613 dev = nfc_device_iter_next(&iter); nfc_urelease_event_work() 1616 nfc_device_iter_exit(&iter); nfc_urelease_event_work()
|
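nfc_genl_dump_devices above shows the standard trick for resumable netlink dumps: allocate the iterator on the first pass, park the pointer in cb->args[0] so later passes pick up where the socket buffer filled, and free it in the done callback. A condensed sketch with hypothetical my_dump/my_dump_done handlers; nfc_device_iter_init/exit are the real wrappers from nfc.h above.

```c
/* Sketch of the resumable-dump idiom from netlink.c above: stash the
 * iterator in cb->args[0] across calls, free it in the done callback. */
#include <net/netlink.h>
#include <linux/slab.h>

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];

	if (!iter) {			/* first pass: create and park it */
		iter = kmalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter)
			return -ENOMEM;
		cb->args[0] = (long) iter;
		nfc_device_iter_init(iter);
	}
	/* emit as many entries as fit in skb, then return skb->len so
	 * netlink calls us again with the same cb->args */
	return skb->len;
}

static int my_dump_done(struct netlink_callback *cb)
{
	struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];

	nfc_device_iter_exit(iter);
	kfree(iter);
	return 0;
}
```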
/linux-4.1.27/drivers/net/wireless/ath/carl9170/ |
H A D | debug.c | 301 struct carl9170_sta_tid *iter; carl9170_debugfs_ampdu_state_read() local 307 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) { carl9170_debugfs_ampdu_state_read() 309 spin_lock_bh(&iter->lock); carl9170_debugfs_ampdu_state_read() 312 cnt, iter->tid, iter->bsn, iter->snx, iter->hsn, carl9170_debugfs_ampdu_state_read() 313 iter->max, iter->state, iter->counter); carl9170_debugfs_ampdu_state_read() 316 CARL9170_BAW_BITS, iter->bitmap); carl9170_debugfs_ampdu_state_read() 325 offset = BM_STR_OFF(SEQ_DIFF(iter->snx, iter->bsn)); carl9170_debugfs_ampdu_state_read() 328 offset = BM_STR_OFF(((int)iter->hsn - (int)iter->bsn) % carl9170_debugfs_ampdu_state_read() 333 " currently queued:%d\n", skb_queue_len(&iter->queue)); carl9170_debugfs_ampdu_state_read() 336 skb_queue_walk(&iter->queue, skb) { carl9170_debugfs_ampdu_state_read() 345 spin_unlock_bh(&iter->lock); carl9170_debugfs_ampdu_state_read() 437 struct carl9170_vif_info *iter; carl9170_debugfs_vif_dump_read() local 447 list_for_each_entry_rcu(iter, &ar->vif_list, list) { carl9170_debugfs_vif_dump_read() 448 struct ieee80211_vif *vif = carl9170_get_vif(iter); carl9170_debugfs_vif_dump_read() 451 "Master" : " Slave"), iter->id, vif->type, vif->addr, carl9170_debugfs_vif_dump_read() 452 iter->enable_beacon ? "beaconing " : ""); carl9170_debugfs_vif_dump_read()
|
H A D | fw.c | 36 const struct carl9170fw_desc_head *iter; carl9170_fw_find_desc() local 38 carl9170fw_for_each_hdr(iter, ar->fw.desc) { carl9170_fw_find_desc() 39 if (carl9170fw_desc_cmp(iter, descid, len, carl9170_fw_find_desc() 41 return (void *)iter; carl9170_fw_find_desc() 45 if (carl9170fw_desc_cmp(iter, descid, len, carl9170_fw_find_desc() 47 return (void *)iter; carl9170_fw_find_desc()
|
H A D | tx.c | 605 struct carl9170_sta_tid *iter; carl9170_tx_ampdu_timeout() local 612 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) { carl9170_tx_ampdu_timeout() 613 if (iter->state < CARL9170_TID_STATE_IDLE) carl9170_tx_ampdu_timeout() 616 spin_lock_bh(&iter->lock); carl9170_tx_ampdu_timeout() 617 skb = skb_peek(&iter->queue); carl9170_tx_ampdu_timeout() 627 sta = iter->sta; carl9170_tx_ampdu_timeout() 631 ieee80211_stop_tx_ba_session(sta, iter->tid); carl9170_tx_ampdu_timeout() 633 spin_unlock_bh(&iter->lock); carl9170_tx_ampdu_timeout() 1397 struct sk_buff *iter; carl9170_tx_ampdu_queue() local 1432 skb_queue_reverse_walk(&agg->queue, iter) { carl9170_tx_ampdu_queue() 1433 qseq = carl9170_get_seq(iter); carl9170_tx_ampdu_queue() 1436 __skb_queue_after(&agg->queue, iter, skb); carl9170_tx_ampdu_queue()
|
/linux-4.1.27/include/net/ |
H A D | bonding.h | 78 * @iter: list_head * iterator 82 #define bond_for_each_slave(bond, pos, iter) \ 83 netdev_for_each_lower_private((bond)->dev, pos, iter) 86 #define bond_for_each_slave_rcu(bond, pos, iter) \ 87 netdev_for_each_lower_private_rcu((bond)->dev, pos, iter) 354 struct list_head *iter; bond_slave_state_change() local 357 bond_for_each_slave(bond, tmp, iter) { bond_for_each_slave() 367 struct list_head *iter; bond_slave_state_notify() local 370 bond_for_each_slave(bond, tmp, iter) { bond_for_each_slave() 585 struct list_head *iter; bond_slave_has_mac() local 588 bond_for_each_slave(bond, tmp, iter) bond_slave_has_mac() 599 struct list_head *iter; bond_slave_has_mac_rcu() local 602 bond_for_each_slave_rcu(bond, tmp, iter) bond_slave_has_mac_rcu() 612 struct list_head *iter; bond_slave_has_mac_rx() local 616 bond_for_each_slave_rcu(bond, tmp, iter) bond_slave_has_mac_rx()
|
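bond_for_each_slave above delegates to netdev_for_each_lower_private, with a plain list_head pointer as the cursor; bond_slave_state_change and bond_slave_has_mac show the usual shape. A usage fragment in that style; kernel context, and bond_slave_is_up is assumed to be the helper from the same header.

```c
/* Fragment in the style of bond_slave_state_change above; 'bond' comes
 * from the surrounding context, bond_slave_is_up is assumed from
 * bonding.h. */
struct list_head *iter;
struct slave *slave;
int up = 0;

bond_for_each_slave(bond, slave, iter)
	if (bond_slave_is_up(slave))
		up++;
```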
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
H A D | rw26.c | 362 static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, ll_direct_IO_26() argument 370 ssize_t count = iov_iter_count(iter); ll_direct_IO_26() 390 if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK) ll_direct_IO_26() 402 if (iov_iter_rw(iter) == READ) ll_direct_IO_26() 406 while (iov_iter_count(iter)) { ll_direct_IO_26() 410 count = min_t(size_t, iov_iter_count(iter), size); ll_direct_IO_26() 411 if (iov_iter_rw(iter) == READ) { ll_direct_IO_26() 418 result = iov_iter_get_pages_alloc(iter, &pages, count, &offs); ll_direct_IO_26() 421 result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter), ll_direct_IO_26() 425 ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ); ll_direct_IO_26() 446 iov_iter_advance(iter, result); ll_direct_IO_26() 452 if (iov_iter_rw(iter) == READ) ll_direct_IO_26() 456 if (iov_iter_rw(iter) == WRITE) { ll_direct_IO_26()
|
/linux-4.1.27/drivers/hid/ |
H A D | hid-wiimote-core.c | 619 const __u8 *mods, *iter; wiimote_modules_load() local 625 for (iter = mods; *iter != WIIMOD_NULL; ++iter) { wiimote_modules_load() 626 if (wiimod_table[*iter]->flags & WIIMOD_FLAG_INPUT) { wiimote_modules_load() 646 for (iter = mods; *iter != WIIMOD_NULL; ++iter) { wiimote_modules_load() 647 ops = wiimod_table[*iter]; wiimote_modules_load() 668 for ( ; iter-- != mods; ) { wiimote_modules_load() 669 ops = wiimod_table[*iter]; wiimote_modules_load() 682 const __u8 *mods, *iter; wiimote_modules_unload() local 693 for (iter = mods; *iter != WIIMOD_NULL; ++iter) wiimote_modules_unload() 701 for ( ; iter-- != mods; ) { wiimote_modules_unload() 702 ops = wiimod_table[*iter]; wiimote_modules_unload() 1240 const __u8 *iter, *mods; handler_keys() local 1250 for (iter = mods; *iter != WIIMOD_NULL; ++iter) { handler_keys() 1251 ops = wiimod_table[*iter]; handler_keys() 1261 const __u8 *iter, *mods; handler_accel() local 1271 for (iter = mods; *iter != WIIMOD_NULL; ++iter) { handler_accel() 1272 ops = wiimod_table[*iter]; handler_accel() 1301 const __u8 *iter, *mods; handler_ext() local 1363 for (iter = mods; *iter != WIIMOD_NULL; ++iter) { handler_ext() 1364 ops = wiimod_table[*iter]; handler_ext() 1383 const __u8 *iter, *mods; handler_ir() local 1393 for (iter = mods; *iter != WIIMOD_NULL; ++iter) { handler_ir() 1394 ops = wiimod_table[*iter]; handler_ir()
|
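wiimote_modules_load above walks a WIIMOD_NULL-terminated table forward and, on failure, unwinds with for ( ; iter-- != mods; ), which revisits exactly the entries already initialized, newest first. A standalone demo of that unwind shape with hypothetical load/unload stand-ins.

```c
/* Standalone demo of the forward-walk/reverse-unwind shape used in
 * hid-wiimote-core.c above; load()/unload() are hypothetical stand-ins. */
#include <stdio.h>

enum { MOD_A, MOD_B, MOD_C, MOD_NULL };

static int load(int m)    { printf("load %d\n", m); return m == MOD_C ? -1 : 0; }
static void unload(int m) { printf("unload %d\n", m); }

int main(void)
{
	static const int mods[] = { MOD_A, MOD_B, MOD_C, MOD_NULL };
	const int *iter;

	for (iter = mods; *iter != MOD_NULL; ++iter)
		if (load(*iter))
			goto unwind;
	return 0;

unwind:
	/* iter points at the failed entry; iter-- first steps back to the
	 * last successful one, and the loop stops after handling mods[0] */
	for ( ; iter-- != mods; )
		unload(*iter);
	return 1;
}
```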
/linux-4.1.27/fs/ |
H A D | read_write.c | 332 ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos) vfs_iter_read() argument 343 iter->type |= READ; vfs_iter_read() 344 ret = file->f_op->read_iter(&kiocb, iter); vfs_iter_read() 352 ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos) vfs_iter_write() argument 363 iter->type |= WRITE; vfs_iter_write() 364 ret = file->f_op->write_iter(&kiocb, iter); vfs_iter_write() 415 struct iov_iter iter; new_sync_read() local 420 iov_iter_init(&iter, READ, &iov, 1, len); new_sync_read() 422 ret = filp->f_op->read_iter(&kiocb, &iter); new_sync_read() 471 struct iov_iter iter; new_sync_write() local 476 iov_iter_init(&iter, WRITE, &iov, 1, len); new_sync_write() 478 ret = filp->f_op->write_iter(&kiocb, &iter); new_sync_write() 655 static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, do_iter_readv_writev() argument 664 ret = fn(&kiocb, iter); do_iter_readv_writev() 671 static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter, do_loop_readv_writev() argument 676 while (iov_iter_count(iter)) { do_loop_readv_writev() 677 struct iovec iovec = iov_iter_iovec(iter); do_loop_readv_writev() 690 iov_iter_advance(iter, nr); do_loop_readv_writev() 781 struct iov_iter iter; do_readv_writev() local 787 ARRAY_SIZE(iovstack), &iov, &iter); do_readv_writev() 791 tot_len = iov_iter_count(&iter); do_readv_writev() 808 ret = do_iter_readv_writev(file, &iter, pos, iter_fn); do_readv_writev() 810 ret = do_loop_readv_writev(file, &iter, pos, fn); do_readv_writev() 955 struct iov_iter iter; compat_do_readv_writev() local 961 UIO_FASTIOV, &iov, &iter); compat_do_readv_writev() 965 tot_len = iov_iter_count(&iter); compat_do_readv_writev() 982 ret = do_iter_readv_writev(file, &iter, pos, iter_fn); compat_do_readv_writev() 984 ret = do_loop_readv_writev(file, &iter, pos, fn); compat_do_readv_writev()
|
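new_sync_read above adapts a classic read(2)-style call to the iterator world: wrap the single user buffer in an iovec, point an iov_iter at it, and hand the kiocb/iter pair to ->read_iter. A sketch of the same adaptation; my_sync_read is a hypothetical wrapper, the helper calls mirror the listed code.

```c
/* Sketch of the new_sync_read pattern above (kernel context): wrap one
 * user buffer in an iov_iter and hand it to ->read_iter. */
#include <linux/uio.h>
#include <linux/fs.h>

static ssize_t my_sync_read(struct file *filp, char __user *buf,
			    size_t len, loff_t *ppos)	/* hypothetical */
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	struct iov_iter iter;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	iov_iter_init(&iter, READ, &iov, 1, len);

	ret = filp->f_op->read_iter(&kiocb, &iter);
	*ppos = kiocb.ki_pos;	/* ->read_iter advanced the file position */
	return ret;
}
```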
H A D | dax.c | 101 static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, dax_io() argument 112 if (iov_iter_rw(iter) != WRITE) dax_io() 127 iov_iter_rw(iter) == WRITE); dax_io() 140 hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh); dax_io() 157 if (iov_iter_rw(iter) == WRITE) dax_io() 158 len = copy_from_iter(addr, max - pos, iter); dax_io() 160 len = copy_to_iter(addr, max - pos, iter); dax_io() 162 len = iov_iter_zero(max - pos, iter); dax_io() 178 * @iter: The addresses to do I/O from or to 192 struct iov_iter *iter, loff_t pos, get_block_t get_block, dax_do_io() 197 loff_t end = pos + iov_iter_count(iter); dax_do_io() 201 if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) { dax_do_io() 214 retval = dax_io(inode, iter, pos, end, get_block, &bh); dax_do_io() 216 if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) dax_do_io() 191 dax_do_io(struct kiocb *iocb, struct inode *inode, struct iov_iter *iter, loff_t pos, get_block_t get_block, dio_iodone_t end_io, int flags) dax_do_io() argument
|
H A D | direct-io.c | 98 struct iov_iter *iter; member in struct:dio_submit 160 ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, dio_refill_pages() 182 iov_iter_advance(sdio->iter, ret); dio_refill_pages() 1099 struct block_device *bdev, struct iov_iter *iter, do_blockdev_direct_IO() 1107 size_t count = iov_iter_count(iter); do_blockdev_direct_IO() 1113 unsigned long align = offset | iov_iter_alignment(iter); do_blockdev_direct_IO() 1129 if (iov_iter_rw(iter) == READ && !iov_iter_count(iter)) do_blockdev_direct_IO() 1145 if (iov_iter_rw(iter) == READ) { do_blockdev_direct_IO() 1164 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { do_blockdev_direct_IO() 1181 iov_iter_rw(iter) == WRITE && end > i_size_read(inode)) do_blockdev_direct_IO() 1187 dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ; do_blockdev_direct_IO() 1193 if (dio->is_async && iov_iter_rw(iter) == WRITE && do_blockdev_direct_IO() 1229 sdio.iter = iter; do_blockdev_direct_IO() 1231 (offset + iov_iter_count(iter)) >> blkbits; do_blockdev_direct_IO() 1240 sdio.pages_in_io += iov_iter_npages(iter, INT_MAX); do_blockdev_direct_IO() 1286 if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING)) do_blockdev_direct_IO() 1298 (iov_iter_rw(iter) == READ || dio->result == count)) do_blockdev_direct_IO() 1313 struct block_device *bdev, struct iov_iter *iter, __blockdev_direct_IO() 1330 return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block, __blockdev_direct_IO() 1098 do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, loff_t offset, get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io, int flags) do_blockdev_direct_IO() argument 1312 __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, loff_t offset, get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io, int flags) __blockdev_direct_IO() argument
|
/linux-4.1.27/net/sctp/ |
H A D | tsnmap.c | 158 struct sctp_tsnmap_iter *iter) sctp_tsnmap_iter_init() 161 iter->start = map->cumulative_tsn_ack_point + 1; sctp_tsnmap_iter_init() 168 struct sctp_tsnmap_iter *iter, sctp_tsnmap_next_gap_ack() 175 if (TSN_lte(map->max_tsn_seen, iter->start)) sctp_tsnmap_next_gap_ack() 178 offset = iter->start - map->base_tsn; sctp_tsnmap_next_gap_ack() 197 iter->start = map->cumulative_tsn_ack_point + *end + 1; sctp_tsnmap_next_gap_ack() 335 struct sctp_tsnmap_iter iter; sctp_tsnmap_num_gabs() local 341 sctp_tsnmap_iter_init(map, &iter); sctp_tsnmap_num_gabs() 342 while (sctp_tsnmap_next_gap_ack(map, &iter, sctp_tsnmap_num_gabs() 157 sctp_tsnmap_iter_init(const struct sctp_tsnmap *map, struct sctp_tsnmap_iter *iter) sctp_tsnmap_iter_init() argument 167 sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map, struct sctp_tsnmap_iter *iter, __u16 *start, __u16 *end) sctp_tsnmap_next_gap_ack() argument
|
/linux-4.1.27/arch/arm/mach-iop13xx/include/mach/ |
H A D | adma.h | 372 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; iop_desc_set_zero_sum_byte_count() local 379 iter = iop_hw_desc_slot_idx(hw_desc, i); iop_desc_set_zero_sum_byte_count() 380 iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; iop_desc_set_zero_sum_byte_count() 386 iter = iop_hw_desc_slot_idx(hw_desc, i); iop_desc_set_zero_sum_byte_count() 387 iter->byte_count = len; iop_desc_set_zero_sum_byte_count() 425 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; iop_desc_set_xor_src_addr() local 429 iter = iop_hw_desc_slot_idx(hw_desc, i); iop_desc_set_xor_src_addr() 430 iter->src[src_idx].src_addr = addr; iop_desc_set_xor_src_addr() 431 iter->src[src_idx].upper_src_addr = 0; iop_desc_set_xor_src_addr() 445 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; iop_desc_set_pq_src_addr() local 450 iter = iop_hw_desc_slot_idx(hw_desc, i); iop_desc_set_pq_src_addr() 451 src = &iter->src[src_idx]; iop_desc_set_pq_src_addr()
|
/linux-4.1.27/fs/f2fs/ |
H A D | trace.c | 129 struct radix_tree_iter iter; gang_lookup_pids() local 136 radix_tree_for_each_slot(slot, &pids, &iter, first_index) { gang_lookup_pids() 137 results[ret] = iter.index; gang_lookup_pids()
|
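gang_lookup_pids above scans a radix tree with radix_tree_for_each_slot, where iter.index exposes the key of each populated slot; that is how f2fs harvests the stored pids. A minimal sketch; dump_tree is a hypothetical helper.

```c
/* Sketch of the radix_tree_for_each_slot walk from f2fs/trace.c above
 * (kernel context; 'tree' is a populated struct radix_tree_root). */
#include <linux/radix-tree.h>

static void dump_tree(struct radix_tree_root *tree)	/* hypothetical */
{
	struct radix_tree_iter iter;
	void **slot;

	radix_tree_for_each_slot(slot, tree, &iter, 0)	/* start at index 0 */
		pr_info("index %lu -> %p\n", iter.index, *slot);
}
```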
H A D | data.c | 774 struct radix_tree_iter iter; f2fs_shrink_extent_tree() local 812 radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter, f2fs_shrink_extent_tree() 1703 static int check_direct_IO(struct inode *inode, struct iov_iter *iter, check_direct_IO() argument 1708 if (iov_iter_rw(iter) == READ) check_direct_IO() 1714 if (iov_iter_alignment(iter) & blocksize_mask) check_direct_IO() 1720 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, f2fs_direct_IO() argument 1726 size_t count = iov_iter_count(iter); f2fs_direct_IO() 1736 if (check_direct_IO(inode, iter, offset)) f2fs_direct_IO() 1739 trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); f2fs_direct_IO() 1741 if (iov_iter_rw(iter) == WRITE) f2fs_direct_IO() 1744 err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block); f2fs_direct_IO() 1745 if (err < 0 && iov_iter_rw(iter) == WRITE) f2fs_direct_IO() 1748 trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err); f2fs_direct_IO()
|
/linux-4.1.27/fs/nfs/ |
H A D | direct.c | 256 ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) nfs_direct_IO() argument 264 VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE); nfs_direct_IO() 266 if (iov_iter_rw(iter) == READ) nfs_direct_IO() 267 return nfs_file_direct_read(iocb, iter, pos); nfs_direct_IO() 268 return nfs_file_direct_write(iocb, iter); nfs_direct_IO() 472 struct iov_iter *iter, nfs_direct_read_schedule_iovec() 487 while (iov_iter_count(iter)) { nfs_direct_read_schedule_iovec() 493 result = iov_iter_get_pages_alloc(iter, &pagevec, nfs_direct_read_schedule_iovec() 499 iov_iter_advance(iter, bytes); nfs_direct_read_schedule_iovec() 550 * @iter: vector of user buffers into which to read data 567 ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, nfs_file_direct_read() argument 576 size_t count = iov_iter_count(iter); nfs_file_direct_read() 612 result = nfs_direct_read_schedule_iovec(dreq, iter, pos); nfs_file_direct_read() 858 struct iov_iter *iter, nfs_direct_write_schedule_iovec() 873 NFS_I(inode)->write_io += iov_iter_count(iter); nfs_direct_write_schedule_iovec() 874 while (iov_iter_count(iter)) { nfs_direct_write_schedule_iovec() 880 result = iov_iter_get_pages_alloc(iter, &pagevec, nfs_direct_write_schedule_iovec() 886 iov_iter_advance(iter, bytes); nfs_direct_write_schedule_iovec() 940 * @iter: vector of user buffers from which to write data 958 ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) nfs_file_direct_write() argument 969 file, iov_iter_count(iter), (long long) iocb->ki_pos); nfs_file_direct_write() 972 iov_iter_count(iter)); nfs_file_direct_write() 975 end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT; nfs_file_direct_write() 990 task_io_account_write(iov_iter_count(iter)); nfs_file_direct_write() 998 dreq->bytes_left = iov_iter_count(iter); nfs_file_direct_write() 1010 result = nfs_direct_write_schedule_iovec(dreq, iter, pos); nfs_file_direct_write() 471 nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, struct iov_iter *iter, loff_t pos) nfs_direct_read_schedule_iovec() argument 857 nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, struct iov_iter *iter, loff_t pos) nfs_direct_write_schedule_iovec() argument
|
/linux-4.1.27/net/ipv4/netfilter/ |
H A D | arp_tables.c | 625 struct arpt_entry *iter; translate_table() local 642 xt_entry_foreach(iter, entry0, newinfo->size) { translate_table() 643 ret = check_entry_size_and_hooks(iter, newinfo, entry0, translate_table() 651 if (strcmp(arpt_get_target(iter)->u.user.name, translate_table() 689 xt_entry_foreach(iter, entry0, newinfo->size) { translate_table() 690 ret = find_check_entry(iter, repl->name, repl->size); translate_table() 697 xt_entry_foreach(iter, entry0, newinfo->size) { translate_table() 700 cleanup_entry(iter); translate_table() 717 struct arpt_entry *iter; get_counters() local 725 xt_entry_foreach(iter, t->entries[cpu], t->size) { for_each_possible_cpu() 731 bcnt = iter->counters.bcnt; for_each_possible_cpu() 732 pcnt = iter->counters.pcnt; for_each_possible_cpu() 865 struct arpt_entry *iter; compat_table_info() local 877 xt_entry_foreach(iter, loc_cpu_entry, info->size) { compat_table_info() 878 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); compat_table_info() 998 struct arpt_entry *iter; __do_replace() local 1041 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) __do_replace() 1042 cleanup_entry(iter); __do_replace() 1070 struct arpt_entry *iter; do_replace() local 1108 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) do_replace() 1109 cleanup_entry(iter); do_replace() 1129 struct arpt_entry *iter; do_add_counters() local 1188 xt_entry_foreach(iter, loc_cpu_entry, private->size) { do_add_counters() 1189 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); do_add_counters() 1495 struct arpt_entry *iter; compat_do_replace() local 1537 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) compat_do_replace() 1538 cleanup_entry(iter); compat_do_replace() 1614 struct arpt_entry *iter; compat_copy_entries_to_user() local 1624 xt_entry_foreach(iter, loc_cpu_entry, total_size) { xt_entry_foreach() 1625 ret = compat_copy_entry_to_user(iter, &pos, xt_entry_foreach() 1820 struct arpt_entry *iter; arpt_unregister_table() local 1826 xt_entry_foreach(iter, loc_cpu_entry, private->size) arpt_unregister_table() 1827 cleanup_entry(iter); arpt_unregister_table()
|
H A D | ip_tables.c | 260 const struct ipt_entry *iter; trace_packet() local 270 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) trace_packet() 271 if (get_chainname_rulenum(iter, e, hookname, trace_packet() 795 struct ipt_entry *iter; translate_table() local 811 xt_entry_foreach(iter, entry0, newinfo->size) { translate_table() 812 ret = check_entry_size_and_hooks(iter, newinfo, entry0, translate_table() 820 if (strcmp(ipt_get_target(iter)->u.user.name, translate_table() 853 xt_entry_foreach(iter, entry0, newinfo->size) { translate_table() 854 ret = find_check_entry(iter, net, repl->name, repl->size); translate_table() 861 xt_entry_foreach(iter, entry0, newinfo->size) { translate_table() 864 cleanup_entry(iter, net); translate_table() 882 struct ipt_entry *iter; get_counters() local 890 xt_entry_foreach(iter, t->entries[cpu], t->size) { for_each_possible_cpu() 896 bcnt = iter->counters.bcnt; for_each_possible_cpu() 897 pcnt = iter->counters.pcnt; for_each_possible_cpu() 1053 struct ipt_entry *iter; compat_table_info() local 1065 xt_entry_foreach(iter, loc_cpu_entry, info->size) { compat_table_info() 1066 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); compat_table_info() 1185 struct ipt_entry *iter; __do_replace() local 1228 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) __do_replace() 1229 cleanup_entry(iter, net); __do_replace() 1257 struct ipt_entry *iter; do_replace() local 1295 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) do_replace() 1296 cleanup_entry(iter, net); do_replace() 1317 struct ipt_entry *iter; do_add_counters() local 1376 xt_entry_foreach(iter, loc_cpu_entry, private->size) { do_add_counters() 1377 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); do_add_counters() 1805 struct ipt_entry *iter; compat_do_replace() local 1848 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) compat_do_replace() 1849 cleanup_entry(iter, net); compat_do_replace() 1898 struct ipt_entry *iter; compat_copy_entries_to_user() local 1911 xt_entry_foreach(iter, loc_cpu_entry, total_size) { xt_entry_foreach() 1912 ret = compat_copy_entry_to_user(iter, &pos, xt_entry_foreach() 2113 struct ipt_entry *iter; ipt_unregister_table() local 2119 xt_entry_foreach(iter, loc_cpu_entry, private->size) ipt_unregister_table() 2120 cleanup_entry(iter, net); ipt_unregister_table()
|
/linux-4.1.27/drivers/scsi/ |
H A D | sd_dif.c | 127 struct bvec_iter iter; sd_dif_prepare() local 136 bip_for_each_vec(iv, bip, iter) { bip_for_each_vec() 179 struct bvec_iter iter; sd_dif_complete() local 183 bip_for_each_vec(iv, bip, iter) { bip_for_each_vec()
|
/linux-4.1.27/drivers/gpu/drm/vmwgfx/ |
H A D | vmwgfx_gmr.c | 39 struct vmw_piter *iter, vmw_gmr2_bind() 86 *cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT; vmw_gmr2_bind() 88 *((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >> vmw_gmr2_bind() 92 vmw_piter_next(iter); vmw_gmr2_bind() 38 vmw_gmr2_bind(struct vmw_private *dev_priv, struct vmw_piter *iter, unsigned long num_pages, int gmr_id) vmw_gmr2_bind() argument
|
H A D | vmwgfx_buffer.c | 233 return __sg_page_iter_next(&viter->iter); __vmw_piter_sg_next() 253 return sg_page_iter_page(&viter->iter); __vmw_piter_sg_page() 278 return sg_page_iter_dma_address(&viter->iter); __vmw_piter_sg_addr() 316 __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl, vmw_piter_start() 384 struct vmw_piter iter; vmw_ttm_map_dma() local 440 for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) { vmw_ttm_map_dma() 441 dma_addr_t cur = vmw_piter_dma_addr(&iter); vmw_ttm_map_dma()
|
H A D | vmwgfx_mob.c | 104 struct vmw_piter iter; vmw_setup_otable_base() local 110 vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); vmw_setup_otable_base() 111 WARN_ON(!vmw_piter_next(&iter)); vmw_setup_otable_base() 121 mob->pt_root_page = vmw_piter_dma_addr(&iter); vmw_setup_otable_base() 124 mob->pt_root_page = vmw_piter_dma_addr(&iter); vmw_setup_otable_base() 130 vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT); vmw_setup_otable_base()
|
/linux-4.1.27/net/switchdev/ |
H A D | switchdev.c | 52 struct list_head *iter; netdev_switch_port_stp_update() local 58 netdev_for_each_lower_dev(dev, lower_dev, iter) { netdev_for_each_lower_dev() 195 struct list_head *iter; ndo_dflt_netdev_switch_port_bridge_setlink() local 201 netdev_for_each_lower_dev(dev, lower_dev, iter) { netdev_for_each_lower_dev() 225 struct list_head *iter; ndo_dflt_netdev_switch_port_bridge_dellink() local 231 netdev_for_each_lower_dev(dev, lower_dev, iter) { netdev_for_each_lower_dev() 246 struct list_head *iter; netdev_switch_get_lowest_dev() local 256 netdev_for_each_lower_dev(dev, lower_dev, iter) { netdev_for_each_lower_dev()
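Each switchdev helper above fans its operation out to the port's lower devices with netdev_for_each_lower_dev(), where iter is an opaque list cursor owned by the macro. A hedged sketch of that fan-out (caller holds RTNL; do_port_op() is a hypothetical per-port operation):

	static int fan_out_to_lowers(struct net_device *dev)
	{
		struct net_device *lower_dev;
		struct list_head *iter;
		int err = 0;

		netdev_for_each_lower_dev(dev, lower_dev, iter) {
			err = do_port_op(lower_dev);	/* hypothetical */
			if (err)
				break;
		}
		return err;
	}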
|
/linux-4.1.27/drivers/scsi/qla2xxx/ |
H A D | qla_inline.h | 75 uint32_t iter = bsize >> 2; host_to_fcp_swap() local 77 for (; iter ; iter--) host_to_fcp_swap() 88 uint32_t iter = bsize >> 2; host_to_adap() local 90 for (; iter ; iter--) host_to_adap()
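Here iter is nothing more than a countdown of 32-bit words (bsize >> 2). A self-contained userspace analogue of the same loop shape, using the compiler byte-swap builtin in place of the kernel swab helpers:

	#include <stdint.h>
	#include <stdio.h>

	static void swab32_buf(uint32_t *buf, uint32_t bsize)
	{
		uint32_t iter = bsize >> 2;	/* number of 32-bit words */

		for (; iter; iter--, buf++)
			*buf = __builtin_bswap32(*buf);
	}

	int main(void)
	{
		uint32_t b[2] = { 0x11223344, 0xaabbccdd };

		swab32_buf(b, sizeof(b));
		printf("%08x %08x\n", b[0], b[1]);	/* 44332211 ddccbbaa */
		return 0;
	}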
|
H A D | qla_attr.c | 270 uint32_t *iter; qla2x00_sysfs_write_nvram() local 273 iter = (uint32_t *)buf; qla2x00_sysfs_write_nvram() 276 chksum += le32_to_cpu(*iter++); qla2x00_sysfs_write_nvram() 278 *iter = cpu_to_le32(chksum); qla2x00_sysfs_write_nvram() 280 uint8_t *iter; qla2x00_sysfs_write_nvram() local 283 iter = (uint8_t *)buf; qla2x00_sysfs_write_nvram() 286 chksum += *iter++; qla2x00_sysfs_write_nvram() 288 *iter = chksum; qla2x00_sysfs_write_nvram() 639 uint16_t iter, addr, offset; qla2x00_sysfs_read_sfp() local 659 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE; qla2x00_sysfs_read_sfp() 660 iter++, offset += SFP_BLOCK_SIZE) { qla2x00_sysfs_read_sfp() 661 if (iter == 4) { qla2x00_sysfs_read_sfp() 951 struct sysfs_entry *iter; qla2x00_alloc_sysfs_attr() local 954 for (iter = bin_file_entries; iter->name; iter++) { qla2x00_alloc_sysfs_attr() 955 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw)) qla2x00_alloc_sysfs_attr() 957 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) qla2x00_alloc_sysfs_attr() 959 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) qla2x00_alloc_sysfs_attr() 961 if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw)) qla2x00_alloc_sysfs_attr() 965 iter->attr); qla2x00_alloc_sysfs_attr() 969 iter->name, ret); qla2x00_alloc_sysfs_attr() 973 iter->name); qla2x00_alloc_sysfs_attr() 981 struct sysfs_entry *iter; qla2x00_free_sysfs_attr() local 984 for (iter = bin_file_entries; iter->name; iter++) { qla2x00_free_sysfs_attr() 985 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha)) qla2x00_free_sysfs_attr() 987 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) qla2x00_free_sysfs_attr() 989 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) qla2x00_free_sysfs_attr() 991 if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw)) qla2x00_free_sysfs_attr() 995 iter->attr); qla2x00_free_sysfs_attr()
|
/linux-4.1.27/security/selinux/ |
H A D | netnode.c | 305 int iter; sel_netnode_init() local 310 for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) { sel_netnode_init() 311 INIT_LIST_HEAD(&sel_netnode_hash[iter].list); sel_netnode_init() 312 sel_netnode_hash[iter].size = 0; sel_netnode_init()
|
H A D | netport.c | 239 int iter; sel_netport_init() local 244 for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) { sel_netport_init() 245 INIT_LIST_HEAD(&sel_netport_hash[iter].list); sel_netport_init() 246 sel_netport_hash[iter].size = 0; sel_netport_init()
|
H A D | xfrm.c | 456 struct dst_entry *iter; selinux_xfrm_postroute_last() local 458 for (iter = dst; iter != NULL; iter = iter->child) { selinux_xfrm_postroute_last() 459 struct xfrm_state *x = iter->xfrm; selinux_xfrm_postroute_last()
|
/linux-4.1.27/arch/arm/include/asm/hardware/ |
H A D | iop3xx-adma.h | 576 struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter; iop_desc_init_zero_sum() local 587 iter = iop_hw_desc_slot_idx(hw_desc, i); iop_desc_init_zero_sum() 588 u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags); iop_desc_init_zero_sum() 592 iter->desc_ctrl = u_desc_ctrl.value; iop_desc_init_zero_sum() 695 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; iop_desc_set_zero_sum_byte_count() local 702 iter = iop_hw_desc_slot_idx(hw_desc, i); iop_desc_set_zero_sum_byte_count() 703 iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; iop_desc_set_zero_sum_byte_count() 708 iter = iop_hw_desc_slot_idx(hw_desc, i); iop_desc_set_zero_sum_byte_count() 709 iter->byte_count = len; iop_desc_set_zero_sum_byte_count() 744 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; iop_desc_set_zero_sum_src_addr() local 750 iter = iop_hw_desc_slot_idx(hw_desc, i); iop_desc_set_zero_sum_src_addr() 751 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr); iop_desc_set_zero_sum_src_addr() 759 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; iop_desc_set_xor_src_addr() local 765 iter = iop_hw_desc_slot_idx(hw_desc, i); iop_desc_set_xor_src_addr() 766 iop3xx_aau_desc_set_src_addr(iter, src_idx, addr); iop_desc_set_xor_src_addr()
|
/linux-4.1.27/kernel/locking/ |
H A D | lockdep_proc.c | 569 struct lock_stat_data *iter; ls_start() local 574 iter = data->stats + (*pos - 1); ls_start() 575 if (iter >= data->iter_end) ls_start() 576 iter = NULL; ls_start() 578 return iter; ls_start() 619 struct lock_stat_data *iter = data->stats; lock_stat_open() local 623 iter->class = class; lock_stat_open() 624 iter->stats = lock_stats(class); lock_stat_open() 625 iter++; lock_stat_open() 627 data->iter_end = iter; lock_stat_open()
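ls_start()/lock_stat_open() above implement the seq_file iterator contract: start() maps *pos to an element (position 0 reserved for a header row), next() bumps *pos and re-resolves, stop() releases, show() prints one element. A minimal sketch of that contract over a fixed array, assuming a hypothetical items[] table:

	struct item { const char *name; int val; };	/* hypothetical payload */

	static struct item items[3];			/* hypothetical table */
	static const int nr_items = 3;

	static void *demo_start(struct seq_file *m, loff_t *pos)
	{
		if (*pos == 0)
			return SEQ_START_TOKEN;		/* header row */
		if (*pos - 1 >= nr_items)
			return NULL;			/* past the end: stop */
		return &items[*pos - 1];
	}

	static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
	{
		++*pos;
		return demo_start(m, pos);
	}

	static void demo_stop(struct seq_file *m, void *v)
	{
	}

	static int demo_show(struct seq_file *m, void *v)
	{
		const struct item *it = v;

		if (v == SEQ_START_TOKEN)
			seq_puts(m, "name value\n");
		else
			seq_printf(m, "%s %d\n", it->name, it->val);
		return 0;
	}

	static const struct seq_operations demo_seq_ops = {
		.start	= demo_start,
		.next	= demo_next,
		.stop	= demo_stop,
		.show	= demo_show,
	};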
|
/linux-4.1.27/arch/powerpc/kernel/ |
H A D | cacheinfo.c | 166 struct cache *iter; release_cache_debugcheck() local 168 list_for_each_entry(iter, &cache_list, list) release_cache_debugcheck() 169 WARN_ONCE(iter->next_local == cache, release_cache_debugcheck() 171 iter->ofnode->full_name, release_cache_debugcheck() 172 cache_type_string(iter), release_cache_debugcheck() 304 struct cache *iter; cache_find_first_sibling() local 310 list_for_each_entry(iter, &cache_list, list) cache_find_first_sibling() 311 if (iter->ofnode == cache->ofnode && iter->next_local == cache) cache_find_first_sibling() 312 return iter; cache_find_first_sibling() 321 struct cache *iter; cache_lookup_by_node() local 323 list_for_each_entry(iter, &cache_list, list) { cache_lookup_by_node() 324 if (iter->ofnode != node) cache_lookup_by_node() 326 cache = cache_find_first_sibling(iter); cache_lookup_by_node()
|
H A D | ftrace.c | 443 struct ftrace_rec_iter *iter; ftrace_replace_code() local 447 for (iter = ftrace_rec_iter_start(); iter; ftrace_replace_code() 448 iter = ftrace_rec_iter_next(iter)) { ftrace_replace_code() 449 rec = ftrace_rec_iter_record(iter); ftrace_replace_code()
|
/linux-4.1.27/drivers/infiniband/hw/ehca/ |
H A D | ehca_cq.c | 75 struct hlist_node *iter; ehca_cq_unassign_qp() local 80 hlist_for_each(iter, &cq->qp_hashtab[key]) { ehca_cq_unassign_qp() 81 qp = hlist_entry(iter, struct ehca_qp, list_entries); ehca_cq_unassign_qp() 83 hlist_del(iter); ehca_cq_unassign_qp() 104 struct hlist_node *iter; ehca_cq_get_qp() local 106 hlist_for_each(iter, &cq->qp_hashtab[key]) { ehca_cq_get_qp() 107 qp = hlist_entry(iter, struct ehca_qp, list_entries); ehca_cq_get_qp()
|
/linux-4.1.27/include/trace/ |
H A D | ftrace.h | 206 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags) 208 * struct trace_seq *s = &iter->seq; 211 * struct trace_seq *p = &iter->tmp_seq; 214 * entry = iter->ent; 307 ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ 310 struct trace_seq *s = &iter->seq; \ 311 struct trace_seq __maybe_unused *p = &iter->tmp_seq; \ 315 field = (typeof(field))iter->ent; \ 317 ret = ftrace_raw_output_prep(iter, trace_event); \ 332 ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ 337 struct trace_seq *p = &iter->tmp_seq; \ 339 entry = iter->ent; \ 349 return ftrace_output_call(iter, #call, print); \
|
/linux-4.1.27/net/ipv6/netfilter/ |
H A D | ip6_tables.c | 289 const struct ip6t_entry *iter; trace_packet() local 299 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) trace_packet() 300 if (get_chainname_rulenum(iter, e, hookname, trace_packet() 808 struct ip6t_entry *iter; translate_table() local 824 xt_entry_foreach(iter, entry0, newinfo->size) { translate_table() 825 ret = check_entry_size_and_hooks(iter, newinfo, entry0, translate_table() 833 if (strcmp(ip6t_get_target(iter)->u.user.name, translate_table() 866 xt_entry_foreach(iter, entry0, newinfo->size) { translate_table() 867 ret = find_check_entry(iter, net, repl->name, repl->size); translate_table() 874 xt_entry_foreach(iter, entry0, newinfo->size) { translate_table() 877 cleanup_entry(iter, net); translate_table() 895 struct ip6t_entry *iter; get_counters() local 903 xt_entry_foreach(iter, t->entries[cpu], t->size) { for_each_possible_cpu() 909 bcnt = iter->counters.bcnt; for_each_possible_cpu() 910 pcnt = iter->counters.pcnt; for_each_possible_cpu() 1066 struct ip6t_entry *iter; compat_table_info() local 1078 xt_entry_foreach(iter, loc_cpu_entry, info->size) { compat_table_info() 1079 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); compat_table_info() 1198 struct ip6t_entry *iter; __do_replace() local 1241 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size) __do_replace() 1242 cleanup_entry(iter, net); __do_replace() 1270 struct ip6t_entry *iter; do_replace() local 1308 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) do_replace() 1309 cleanup_entry(iter, net); do_replace() 1330 struct ip6t_entry *iter; do_add_counters() local 1390 xt_entry_foreach(iter, loc_cpu_entry, private->size) { do_add_counters() 1391 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); do_add_counters() 1818 struct ip6t_entry *iter; compat_do_replace() local 1861 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) compat_do_replace() 1862 cleanup_entry(iter, net); compat_do_replace() 1911 struct ip6t_entry *iter; compat_copy_entries_to_user() local 1924 xt_entry_foreach(iter, loc_cpu_entry, total_size) { xt_entry_foreach() 1925 ret = compat_copy_entry_to_user(iter, &pos, xt_entry_foreach() 2125 struct ip6t_entry *iter; ip6t_unregister_table() local 2131 xt_entry_foreach(iter, loc_cpu_entry, private->size) ip6t_unregister_table() 2132 cleanup_entry(iter, net); ip6t_unregister_table()
|
/linux-4.1.27/scripts/kconfig/ |
H A D | gconf.c | 784 GtkTreeIter iter; renderer_edited() local 789 if (!gtk_tree_model_get_iter(model2, &iter, path)) renderer_edited() 792 gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); renderer_edited() 795 gtk_tree_model_get(model2, &iter, COL_VALUE, &old_def, -1); renderer_edited() 886 GtkTreeIter iter; on_treeview2_button_press_event() local 903 if (!gtk_tree_model_get_iter(model2, &iter, path)) on_treeview2_button_press_event() 905 gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); on_treeview2_button_press_event() 943 GtkTreeIter iter; on_treeview2_key_press_event() local 963 gtk_tree_model_get_iter(model2, &iter, path); on_treeview2_key_press_event() 964 gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); on_treeview2_key_press_event() 985 GtkTreeIter iter; on_treeview2_cursor_changed() local 989 if (gtk_tree_selection_get_selected(selection, &model2, &iter)) { on_treeview2_cursor_changed() 990 gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); on_treeview2_cursor_changed() 1004 GtkTreeIter iter; on_treeview1_button_press_event() local 1016 gtk_tree_model_get_iter(model1, &iter, path); on_treeview1_button_press_event() 1017 gtk_tree_model_get(model1, &iter, COL_MENU, &menu, -1); on_treeview1_button_press_event() 1217 GtkTreeIter iter; gtktree_iter_find_node() local 1218 GtkTreeIter *child = &iter; gtktree_iter_find_node() 1251 GtkTreeIter iter, tmp; update_tree() local 1252 GtkTreeIter *child2 = &iter; update_tree()
|
/linux-4.1.27/ipc/ |
H A D | util.c | 778 struct ipc_proc_iter *iter = s->private; sysvipc_proc_next() local 779 struct ipc_proc_iface *iface = iter->iface; sysvipc_proc_next() 786 return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos); sysvipc_proc_next() 795 struct ipc_proc_iter *iter = s->private; sysvipc_proc_start() local 796 struct ipc_proc_iface *iface = iter->iface; sysvipc_proc_start() 799 ids = &iter->ns->ids[iface->ids]; sysvipc_proc_start() 822 struct ipc_proc_iter *iter = s->private; sysvipc_proc_stop() local 823 struct ipc_proc_iface *iface = iter->iface; sysvipc_proc_stop() 830 ids = &iter->ns->ids[iface->ids]; sysvipc_proc_stop() 837 struct ipc_proc_iter *iter = s->private; sysvipc_proc_show() local 838 struct ipc_proc_iface *iface = iter->iface; sysvipc_proc_show() 857 struct ipc_proc_iter *iter; sysvipc_proc_open() local 859 iter = __seq_open_private(file, &sysvipc_proc_seqops, sizeof(*iter)); sysvipc_proc_open() 860 if (!iter) sysvipc_proc_open() 863 iter->iface = PDE_DATA(inode); sysvipc_proc_open() 864 iter->ns = get_ipc_ns(current->nsproxy->ipc_ns); sysvipc_proc_open() 872 struct ipc_proc_iter *iter = seq->private; sysvipc_proc_release() local 873 put_ipc_ns(iter->ns); sysvipc_proc_release()
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/pm/ |
H A D | base.c | 122 nv_ioctl(object, "perfctr query vers %d iter %08x\n", nvkm_perfctr_query() 123 args->v0.version, args->v0.iter); nvkm_perfctr_query() 124 di = (args->v0.iter & 0xff000000) >> 24; nvkm_perfctr_query() 125 si = (args->v0.iter & 0x00ffffff) - 1; nvkm_perfctr_query() 151 args->v0.iter = (di << 24) | ++si; nvkm_perfctr_query() 160 args->v0.iter = 0xffffffff; nvkm_perfctr_query()
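nvkm_perfctr_query() above keeps its whole cursor in one u32 handed back to userspace: domain index in the top byte, (source index + 1) in the low 24 bits, and 0xffffffff meaning done. A self-contained userspace analogue of that packed-cookie iterator (the per-domain table contents are invented):

	#include <stdint.h>
	#include <stdio.h>

	static const int nsrc[3] = { 2, 1, 3 };	/* sources per domain (made up) */

	static uint32_t query_next(uint32_t iter)
	{
		uint32_t di = iter >> 24;
		uint32_t si = iter & 0x00ffffff;  /* (last index + 1) == next index */

		while (di < 3 && si >= (uint32_t)nsrc[di]) {
			di++;			/* domain exhausted, move on */
			si = 0;
		}
		if (di >= 3)
			return 0xffffffff;	/* everything visited */
		printf("domain %u source %u\n", di, si);
		return (di << 24) | (si + 1);	/* resume cookie */
	}

	int main(void)
	{
		uint32_t iter = 0;

		while ((iter = query_next(iter)) != 0xffffffff)
			;
		return 0;
	}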
|
/linux-4.1.27/net/appletalk/ |
H A D | aarp.c | 921 static struct aarp_entry *iter_next(struct aarp_iter_state *iter, loff_t *pos) iter_next() argument 923 int ct = iter->bucket; iter_next() 924 struct aarp_entry **table = iter->table; iter_next() 932 iter->table = table; iter_next() 933 iter->bucket = ct; iter_next() 956 struct aarp_iter_state *iter = seq->private; __acquires() local 959 iter->table = resolved; __acquires() 960 iter->bucket = 0; __acquires() 962 return *pos ? iter_next(iter, pos) : SEQ_START_TOKEN; __acquires() 968 struct aarp_iter_state *iter = seq->private; aarp_seq_next() local 974 entry = iter_next(iter, NULL); aarp_seq_next() 982 ++iter->bucket; aarp_seq_next() 983 entry = iter_next(iter, NULL); aarp_seq_next() 1005 struct aarp_iter_state *iter = seq->private; aarp_seq_show() local 1021 if (iter->table == unresolved) aarp_seq_show() 1028 (iter->table == resolved) ? "resolved" aarp_seq_show() 1029 : (iter->table == unresolved) ? "unresolved" aarp_seq_show() 1030 : (iter->table == proxies) ? "proxies" aarp_seq_show()
|
/linux-4.1.27/fs/btrfs/tests/ |
H A D | btrfs-tests.c | 130 struct radix_tree_iter iter; btrfs_free_dummy_fs_info() local 135 radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) { btrfs_free_dummy_fs_info()
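btrfs_free_dummy_fs_info() above drains the buffer radix tree with radix_tree_for_each_slot(), where iter tracks the current index. A hedged read-only sketch of the same walk; the caller is assumed to serialize against modification (the real btrfs code additionally deletes entries under a spinlock and restarts the scan):

	static unsigned long count_populated(struct radix_tree_root *root)
	{
		struct radix_tree_iter iter;
		void **slot;
		unsigned long n = 0;

		radix_tree_for_each_slot(slot, root, &iter, 0) {
			if (radix_tree_deref_slot(slot))
				n++;		/* iter.index is this slot's index */
		}
		return n;
	}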
|
/linux-4.1.27/kernel/sched/ |
H A D | rt.c | 459 #define for_each_rt_rq(rt_rq, iter, rq) \ 460 for (iter = container_of(&task_groups, typeof(*iter), list); \ 461 (iter = next_task_group(iter)) && \ 462 (rt_rq = iter->rt_rq[cpu_of(rq)]);) 563 #define for_each_rt_rq(rt_rq, iter, rq) \ 564 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) 637 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); do_balance_runtime() local 640 if (iter == rt_rq) do_balance_runtime() 643 raw_spin_lock(&iter->rt_runtime_lock); do_balance_runtime() 649 if (iter->rt_runtime == RUNTIME_INF) do_balance_runtime() 656 diff = iter->rt_runtime - iter->rt_time; do_balance_runtime() 661 iter->rt_runtime -= diff; do_balance_runtime() 665 raw_spin_unlock(&iter->rt_runtime_lock); do_balance_runtime() 670 raw_spin_unlock(&iter->rt_runtime_lock); do_balance_runtime() 683 rt_rq_iter_t iter; __disable_runtime() local 689 for_each_rt_rq(rt_rq, iter, rq) { for_each_rt_rq() 717 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); for_each_rt_rq() local 723 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) for_each_rt_rq() 726 raw_spin_lock(&iter->rt_runtime_lock); for_each_rt_rq() 728 diff = min_t(s64, iter->rt_runtime, want); for_each_rt_rq() 729 iter->rt_runtime -= diff; for_each_rt_rq() 732 iter->rt_runtime -= want; for_each_rt_rq() 735 raw_spin_unlock(&iter->rt_runtime_lock); for_each_rt_rq() 764 rt_rq_iter_t iter; __enable_runtime() local 773 for_each_rt_rq(rt_rq, iter, rq) { for_each_rt_rq() 2338 rt_rq_iter_t iter; print_rt_stats() local 2342 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) print_rt_stats()
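The two for_each_rt_rq() definitions above are the same trick under both configurations: with group scheduling the macro walks the list of task groups, without it the macro degenerates to a loop that yields exactly one element, so every caller is written once. A self-contained userspace illustration of that dual-definition idiom (names invented; build with -DGROUPS for the list flavour):

	#include <stdio.h>

	struct rq { int id; struct rq *next; };

	#ifdef GROUPS
	#define for_each_item(it, start)  for (it = (start); it; it = it->next)
	#else
	/* single-element flavour: run the body once, then terminate */
	#define for_each_item(it, start)  for (it = (start); it; it = NULL)
	#endif

	int main(void)
	{
		struct rq b = { 2, NULL }, a = { 1, &b }, *it;

		for_each_item(it, &a)	/* prints "1 2" with -DGROUPS, else "1" */
			printf("rq %d\n", it->id);
		return 0;
	}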
|
/linux-4.1.27/drivers/net/bonding/ |
H A D | bond_main.c | 290 struct list_head *iter; bond_vlan_rx_add_vid() local 293 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 303 bond_for_each_slave(bond, rollback_slave, iter) { bond_for_each_slave() 322 struct list_head *iter; bond_vlan_rx_kill_vid() local 325 bond_for_each_slave(bond, slave, iter) bond_vlan_rx_kill_vid() 344 struct list_head *iter; bond_set_carrier() local 353 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 496 struct list_head *iter; bond_set_promiscuity() local 507 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 519 struct list_head *iter; bond_set_allmulti() local 530 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 634 struct list_head *iter; bond_get_old_active() local 636 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 741 struct list_head *iter; bond_find_best_slave() local 749 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 957 struct list_head *iter; bond_poll_controller() local 967 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 993 struct list_head *iter; bond_netpoll_cleanup() local 996 bond_for_each_slave(bond, slave, iter) bond_netpoll_cleanup() 1004 struct list_head *iter; bond_netpoll_setup() local 1008 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 1036 struct list_head *iter; bond_fix_features() local 1048 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 1072 struct list_head *iter; bond_compute_features() local 1082 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 1946 struct list_head *iter; bond_slave_info_query() local 1950 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 1967 struct list_head *iter; bond_miimon_inspect() local 1973 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 2063 struct list_head *iter; bond_miimon_commit() local 2066 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 2209 struct list_head *iter; bond_has_this_ip() local 2216 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { bond_has_this_ip() 2296 struct list_head *iter; bond_verify_device_path() local 2306 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) { netdev_for_each_upper_dev_rcu() 2516 struct list_head *iter; bond_loadbalance_arp_mon() local 2533 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 2627 struct list_head *iter; bond_ab_arp_inspect() local 2631 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 2694 struct list_head *iter; bond_ab_arp_commit() local 2697 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 2771 struct list_head *iter; bond_ab_arp_probe() local 2798 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 2839 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 3186 struct list_head *iter; bond_open() local 3191 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 3254 struct list_head *iter; bond_get_stats() local 3259 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 3421 struct list_head *iter; bond_set_rx_mode() local 3432 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 3497 struct list_head *iter; bond_change_mtu() local 3502 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 3529 bond_for_each_slave(bond, rollback_slave, iter) { bond_for_each_slave() 3556 struct list_head *iter; bond_set_mac_address() local 3575 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 
3599 bond_for_each_slave(bond, rollback_slave, iter) { bond_for_each_slave() 3627 struct list_head *iter; bond_xmit_slave_id() local 3632 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 3643 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 3780 struct list_head *iter; bond_update_slave_arr() local 3816 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 3888 struct list_head *iter; bond_xmit_broadcast() local 3890 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 3919 struct list_head *iter; bond_slave_override() local 3925 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 4019 struct list_head *iter; bond_ethtool_get_settings() local 4030 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 4154 struct list_head *iter; bond_uninit() local 4161 bond_for_each_slave(bond, slave, iter) bond_uninit()
|
H A D | bond_procfs.c | 13 struct list_head *iter; __acquires() local 22 bond_for_each_slave_rcu(bond, slave, iter) __acquires() 32 struct list_head *iter; bond_info_seq_next() local 40 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu()
|
H A D | bond_alb.c | 198 struct list_head *iter; tlb_get_least_loaded_slave() local 205 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 338 struct list_head *iter; __rlb_next_rx_slave() local 341 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 961 struct list_head *iter; alb_send_learning_packets() local 971 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { alb_send_learning_packets() 1143 struct list_head *iter; alb_handle_addr_collision_on_attach() local 1167 bond_for_each_slave(bond, tmp_slave1, iter) { bond_for_each_slave() 1217 struct list_head *iter; alb_set_mac_address() local 1225 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 1245 bond_for_each_slave(bond, rollback_slave, iter) { bond_for_each_slave() 1465 struct list_head *iter; bond_alb_monitor() local 1483 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu() 1499 bond_for_each_slave_rcu(bond, slave, iter) { bond_for_each_slave_rcu()
|
H A D | bond_options.c | 911 struct list_head *iter; _bond_options_arp_ip_target_set() local 915 bond_for_each_slave(bond, slave, iter) _bond_options_arp_ip_target_set() 959 struct list_head *iter; bond_option_arp_ip_target_rem() local 982 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 1060 struct list_head *iter; bond_option_primary_set() local 1077 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 1160 struct list_head *iter; bond_option_all_slaves_active_set() local 1166 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave() 1241 struct list_head *iter; bond_option_queue_id_set() local 1270 bond_for_each_slave(bond, slave, iter) { bond_for_each_slave()
|
/linux-4.1.27/drivers/net/ethernet/qlogic/qlge/ |
H A D | qlge_ethtool.c | 247 u64 *iter = &qdev->nic_stats.tx_pkts; ql_update_stats() local 265 *iter = data; ql_update_stats() 266 iter++; ql_update_stats() 279 *iter = data; ql_update_stats() 280 iter++; ql_update_stats() 284 iter += QLGE_RCV_MAC_ERR_STATS; ql_update_stats() 296 *iter = data; ql_update_stats() 297 iter++; ql_update_stats() 310 *iter = data; ql_update_stats() 311 iter++; ql_update_stats() 322 *iter = data; ql_update_stats()
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb/ |
H A D | common.h | 314 #define for_each_port(adapter, iter) \ 315 for (iter = 0; iter < (adapter)->params.nports; ++iter)
|
/linux-4.1.27/arch/sh/mm/ |
H A D | asids-debugfs.c | 27 static int asids_seq_show(struct seq_file *file, void *iter) asids_seq_show() argument
|
H A D | cache-debugfs.c | 25 static int cache_seq_show(struct seq_file *file, void *iter) cache_seq_show() argument
|
H A D | pmb.c | 145 struct pmb_entry *pmbe, *iter; pmb_mapping_exists() local 175 for (iter = pmbe->link; iter; iter = iter->link) pmb_mapping_exists() 176 span += iter->size; pmb_mapping_exists() 815 static int pmb_seq_show(struct seq_file *file, void *iter) pmb_seq_show() argument
|
H A D | tlb-debugfs.c | 39 static int tlb_seq_show(struct seq_file *file, void *iter) tlb_seq_show() argument
|
/linux-4.1.27/fs/9p/ |
H A D | vfs_addr.c | 248 v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) v9fs_direct_IO() argument 253 if (iov_iter_rw(iter) == WRITE) { v9fs_direct_IO() 254 n = p9_client_write(file->private_data, pos, iter, &err); v9fs_direct_IO() 262 n = p9_client_read(file->private_data, pos, iter, &err); v9fs_direct_IO()
|
/linux-4.1.27/kernel/events/ |
H A D | hw_breakpoint.c | 120 struct perf_event *iter; task_bp_pinned() local 123 list_for_each_entry(iter, &bp_task_head, hw.bp_list) { task_bp_pinned() 124 if (iter->hw.target == tsk && task_bp_pinned() 125 find_slot_idx(iter) == type && task_bp_pinned() 126 (iter->cpu < 0 || cpu == iter->cpu)) task_bp_pinned() 127 count += hw_breakpoint_weight(iter); task_bp_pinned()
|
/linux-4.1.27/security/selinux/ss/ |
H A D | ebitmap.c | 94 unsigned int iter; ebitmap_netlbl_export() local 108 for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) { ebitmap_netlbl_export() 109 e_map = e_iter->maps[iter]; ebitmap_netlbl_export()
|
/linux-4.1.27/net/irda/ |
H A D | irlmp.c | 1834 static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off) irlmp_seq_hb_idx() argument 1838 spin_lock_irq(&iter->hashbin->hb_spinlock); irlmp_seq_hb_idx() 1839 for (element = hashbin_get_first(iter->hashbin); irlmp_seq_hb_idx() 1841 element = hashbin_get_next(iter->hashbin)) { irlmp_seq_hb_idx() 1847 spin_unlock_irq(&iter->hashbin->hb_spinlock); irlmp_seq_hb_idx() 1848 iter->hashbin = NULL; irlmp_seq_hb_idx() 1855 struct irlmp_iter_state *iter = seq->private; irlmp_seq_start() local 1859 iter->hashbin = NULL; irlmp_seq_start() 1863 iter->hashbin = irlmp->unconnected_lsaps; irlmp_seq_start() 1864 v = irlmp_seq_hb_idx(iter, &off); irlmp_seq_start() 1871 iter->hashbin = irlmp->links; irlmp_seq_start() 1872 return irlmp_seq_hb_idx(iter, &off); irlmp_seq_start() 1877 struct irlmp_iter_state *iter = seq->private; irlmp_seq_next() local 1882 iter->hashbin = irlmp->unconnected_lsaps; irlmp_seq_next() 1883 v = irlmp_seq_hb_idx(iter, NULL); irlmp_seq_next() 1888 iter->hashbin = irlmp->links; irlmp_seq_next() 1889 return irlmp_seq_hb_idx(iter, NULL); irlmp_seq_next() 1892 v = hashbin_get_next(iter->hashbin); irlmp_seq_next() 1895 spin_unlock_irq(&iter->hashbin->hb_spinlock); irlmp_seq_next() 1897 if (iter->hashbin == irlmp->unconnected_lsaps) irlmp_seq_next() 1900 iter->hashbin = NULL; irlmp_seq_next() 1907 struct irlmp_iter_state *iter = seq->private; irlmp_seq_stop() local 1909 if (iter->hashbin) irlmp_seq_stop() 1910 spin_unlock_irq(&iter->hashbin->hb_spinlock); irlmp_seq_stop() 1915 const struct irlmp_iter_state *iter = seq->private; irlmp_seq_show() local 1922 else if (iter->hashbin == irlmp->unconnected_lsaps) { irlmp_seq_show() 1932 } else if (iter->hashbin == irlmp->links) { irlmp_seq_show()
|
H A D | irlap.c | 1075 struct irlap_iter_state *iter = seq->private; irlap_seq_start() local 1080 iter->id = 0; irlap_seq_start() 1084 if (iter->id == *pos) irlap_seq_start() 1086 ++iter->id; irlap_seq_start() 1094 struct irlap_iter_state *iter = seq->private; irlap_seq_next() local 1097 ++iter->id; irlap_seq_next() 1108 const struct irlap_iter_state *iter = seq->private; irlap_seq_show() local 1113 seq_printf(seq, "irlap%d ", iter->id); irlap_seq_show()
|
/linux-4.1.27/tools/perf/ |
H A D | builtin-report.c | 89 static int hist_iter__report_callback(struct hist_entry_iter *iter, hist_iter__report_callback() argument 95 struct hist_entry *he = iter->he; hist_iter__report_callback() 96 struct perf_evsel *evsel = iter->evsel; hist_iter__report_callback() 139 struct hist_entry_iter iter = { process_sample_event() local 158 iter.ops = &hist_iter_branch; process_sample_event() 160 iter.ops = &hist_iter_mem; process_sample_event() 162 iter.ops = &hist_iter_cumulative; process_sample_event() 164 iter.ops = &hist_iter_normal; process_sample_event() 169 ret = hist_entry_iter__add(&iter, &al, evsel, sample, rep->max_stack, process_sample_event()
|
/linux-4.1.27/drivers/block/ |
H A D | ps3disk.c | 96 struct req_iterator iter; ps3disk_scatter_gather() local 102 rq_for_each_segment(bvec, req, iter) { rq_for_each_segment() 105 __func__, __LINE__, i, bio_sectors(iter.bio), rq_for_each_segment() 106 iter.bio->bi_iter.bi_sector); rq_for_each_segment() 133 struct req_iterator iter; ps3disk_submit_request_sg() local 135 rq_for_each_segment(bv, req, iter) ps3disk_submit_request_sg()
|
H A D | pmem.c | 67 struct bvec_iter iter; pmem_make_request() local 79 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
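pmem_make_request() above uses the canonical 4.1 bio walk: bio_for_each_segment() hands back each bio_vec by value while a bvec_iter tracks progress through the bio. A hedged sketch of that loop shape, with do_one_vec() as a hypothetical per-segment transfer:

	static void walk_bio(struct bio *bio)
	{
		struct bio_vec bvec;
		struct bvec_iter iter;
		sector_t sector = bio->bi_iter.bi_sector;

		bio_for_each_segment(bvec, bio, iter) {
			/* hypothetical: copy/transfer one segment */
			do_one_vec(bvec.bv_page, bvec.bv_offset,
				   bvec.bv_len, sector);
			sector += bvec.bv_len >> 9;	/* 512-byte sectors */
		}
	}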
|
/linux-4.1.27/arch/m68k/emu/ |
H A D | nfblock.c | 66 struct bvec_iter iter; nfhd_make_request() local 72 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment()
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4vf/ |
H A D | t4vf_common.h | 228 #define for_each_port(adapter, iter) \ 229 for (iter = 0; iter < (adapter)->params.nports; iter++)
|
/linux-4.1.27/fs/jfs/ |
H A D | inode.c | 333 static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, jfs_direct_IO() argument 339 size_t count = iov_iter_count(iter); jfs_direct_IO() 342 ret = blockdev_direct_IO(iocb, inode, iter, offset, jfs_get_block); jfs_direct_IO() 348 if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { jfs_direct_IO()
|
/linux-4.1.27/net/core/ |
H A D | dev.c | 1461 struct list_head *iter; dev_disable_lro() local 1469 netdev_for_each_lower_dev(dev, lower_dev, iter) dev_disable_lro() 4823 * @iter: list_head ** of the current position 4825 * Gets the next device from the dev's upper list, starting from iter 4829 struct list_head **iter) netdev_upper_get_next_dev_rcu() 4835 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); netdev_upper_get_next_dev_rcu() 4840 *iter = &upper->list; netdev_upper_get_next_dev_rcu() 4849 * @iter: list_head ** of the current position 4851 * Gets the next device from the dev's upper list, starting from iter 4855 struct list_head **iter) netdev_all_upper_get_next_dev_rcu() 4861 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); netdev_all_upper_get_next_dev_rcu() 4866 *iter = &upper->list; netdev_all_upper_get_next_dev_rcu() 4876 * @iter: list_head ** of the current position 4879 * list, starting from iter position. The caller must hold either hold the 4884 struct list_head **iter) netdev_lower_get_next_private() 4888 lower = list_entry(*iter, struct netdev_adjacent, list); netdev_lower_get_next_private() 4893 *iter = lower->list.next; netdev_lower_get_next_private() 4904 * @iter: list_head ** of the current position 4907 * list, starting from iter position. The caller must hold RCU read lock. 4910 struct list_head **iter) netdev_lower_get_next_private_rcu() 4916 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); netdev_lower_get_next_private_rcu() 4921 *iter = &lower->list; netdev_lower_get_next_private_rcu() 4931 * @iter: list_head ** of the current position 4934 * list, starting from iter position. The caller must hold RTNL lock or 4938 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) netdev_lower_get_next() argument 4942 lower = list_entry((*iter)->next, struct netdev_adjacent, list); netdev_lower_get_next() 4947 *iter = &lower->list; netdev_lower_get_next() 5396 struct netdev_adjacent *iter; netdev_adjacent_add_links() local 5400 list_for_each_entry(iter, &dev->adj_list.upper, list) { netdev_adjacent_add_links() 5401 if (!net_eq(net,dev_net(iter->dev))) netdev_adjacent_add_links() 5403 netdev_adjacent_sysfs_add(iter->dev, dev, netdev_adjacent_add_links() 5404 &iter->dev->adj_list.lower); netdev_adjacent_add_links() 5405 netdev_adjacent_sysfs_add(dev, iter->dev, netdev_adjacent_add_links() 5409 list_for_each_entry(iter, &dev->adj_list.lower, list) { netdev_adjacent_add_links() 5410 if (!net_eq(net,dev_net(iter->dev))) netdev_adjacent_add_links() 5412 netdev_adjacent_sysfs_add(iter->dev, dev, netdev_adjacent_add_links() 5413 &iter->dev->adj_list.upper); netdev_adjacent_add_links() 5414 netdev_adjacent_sysfs_add(dev, iter->dev, netdev_adjacent_add_links() 5421 struct netdev_adjacent *iter; netdev_adjacent_del_links() local 5425 list_for_each_entry(iter, &dev->adj_list.upper, list) { netdev_adjacent_del_links() 5426 if (!net_eq(net,dev_net(iter->dev))) netdev_adjacent_del_links() 5428 netdev_adjacent_sysfs_del(iter->dev, dev->name, netdev_adjacent_del_links() 5429 &iter->dev->adj_list.lower); netdev_adjacent_del_links() 5430 netdev_adjacent_sysfs_del(dev, iter->dev->name, netdev_adjacent_del_links() 5434 list_for_each_entry(iter, &dev->adj_list.lower, list) { netdev_adjacent_del_links() 5435 if (!net_eq(net,dev_net(iter->dev))) netdev_adjacent_del_links() 5437 netdev_adjacent_sysfs_del(iter->dev, dev->name, netdev_adjacent_del_links() 5438 &iter->dev->adj_list.upper); netdev_adjacent_del_links() 5439 
netdev_adjacent_sysfs_del(dev, iter->dev->name, netdev_adjacent_del_links() 5446 struct netdev_adjacent *iter; netdev_adjacent_rename_links() local 5450 list_for_each_entry(iter, &dev->adj_list.upper, list) { netdev_adjacent_rename_links() 5451 if (!net_eq(net,dev_net(iter->dev))) netdev_adjacent_rename_links() 5453 netdev_adjacent_sysfs_del(iter->dev, oldname, netdev_adjacent_rename_links() 5454 &iter->dev->adj_list.lower); netdev_adjacent_rename_links() 5455 netdev_adjacent_sysfs_add(iter->dev, dev, netdev_adjacent_rename_links() 5456 &iter->dev->adj_list.lower); netdev_adjacent_rename_links() 5459 list_for_each_entry(iter, &dev->adj_list.lower, list) { netdev_adjacent_rename_links() 5460 if (!net_eq(net,dev_net(iter->dev))) netdev_adjacent_rename_links() 5462 netdev_adjacent_sysfs_del(iter->dev, oldname, netdev_adjacent_rename_links() 5463 &iter->dev->adj_list.upper); netdev_adjacent_rename_links() 5464 netdev_adjacent_sysfs_add(iter->dev, dev, netdev_adjacent_rename_links() 5465 &iter->dev->adj_list.upper); netdev_adjacent_rename_links() 5489 struct list_head *iter; dev_get_nest_level() local 5495 netdev_for_each_lower_dev(dev, lower, iter) { netdev_for_each_lower_dev() 4828 netdev_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter) netdev_upper_get_next_dev_rcu() argument 4854 netdev_all_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter) netdev_all_upper_get_next_dev_rcu() argument 4883 netdev_lower_get_next_private(struct net_device *dev, struct list_head **iter) netdev_lower_get_next_private() argument 4909 netdev_lower_get_next_private_rcu(struct net_device *dev, struct list_head **iter) netdev_lower_get_next_private_rcu() argument
|
/linux-4.1.27/include/crypto/ |
H A D | if_alg.h | 78 int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
|
/linux-4.1.27/arch/x86/kvm/ |
H A D | mmu_audit.c | 193 struct rmap_iterator iter; audit_write_protection() local 200 for (sptep = rmap_get_first(*rmapp, &iter); sptep; audit_write_protection() 201 sptep = rmap_get_next(&iter)) { audit_write_protection()
|
H A D | mmu.c | 1078 static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter) rmap_get_first() argument 1084 iter->desc = NULL; rmap_get_first() 1088 iter->desc = (struct pte_list_desc *)(rmap & ~1ul); rmap_get_first() 1089 iter->pos = 0; rmap_get_first() 1090 return iter->desc->sptes[iter->pos]; rmap_get_first() 1098 static u64 *rmap_get_next(struct rmap_iterator *iter) rmap_get_next() argument 1100 if (iter->desc) { rmap_get_next() 1101 if (iter->pos < PTE_LIST_EXT - 1) { rmap_get_next() 1104 ++iter->pos; rmap_get_next() 1105 sptep = iter->desc->sptes[iter->pos]; rmap_get_next() 1110 iter->desc = iter->desc->more; rmap_get_next() 1112 if (iter->desc) { rmap_get_next() 1113 iter->pos = 0; rmap_get_next() 1115 return iter->desc->sptes[iter->pos]; rmap_get_next() 1182 struct rmap_iterator iter; __rmap_write_protect() local 1185 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { __rmap_write_protect() 1189 sptep = rmap_get_next(&iter); __rmap_write_protect() 1209 struct rmap_iterator iter; __rmap_clear_dirty() local 1212 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { __rmap_clear_dirty() 1216 sptep = rmap_get_next(&iter); __rmap_clear_dirty() 1236 struct rmap_iterator iter; __rmap_set_dirty() local 1239 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { __rmap_set_dirty() 1243 sptep = rmap_get_next(&iter); __rmap_set_dirty() 1345 struct rmap_iterator iter; kvm_unmap_rmapp() local 1348 while ((sptep = rmap_get_first(*rmapp, &iter))) { kvm_unmap_rmapp() 1365 struct rmap_iterator iter; kvm_set_pte_rmapp() local 1374 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { kvm_set_pte_rmapp() 1383 sptep = rmap_get_first(*rmapp, &iter); kvm_set_pte_rmapp() 1394 sptep = rmap_get_next(&iter); kvm_set_pte_rmapp() 1493 struct rmap_iterator uninitialized_var(iter); kvm_age_rmapp() 1498 for (sptep = rmap_get_first(*rmapp, &iter); sptep; kvm_age_rmapp() 1499 sptep = rmap_get_next(&iter)) { kvm_age_rmapp() 1517 struct rmap_iterator iter; kvm_test_age_rmapp() local 1528 for (sptep = rmap_get_first(*rmapp, &iter); sptep; kvm_test_age_rmapp() 1529 sptep = rmap_get_next(&iter)) { kvm_test_age_rmapp() 2210 struct rmap_iterator iter; kvm_mmu_unlink_parents() local 2212 while ((sptep = rmap_get_first(sp->parent_ptes, &iter))) kvm_mmu_unlink_parents() 4435 struct rmap_iterator iter; kvm_mmu_zap_collapsible_spte() local 4440 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { kvm_mmu_zap_collapsible_spte() 4457 sptep = rmap_get_first(*rmapp, &iter); kvm_mmu_zap_collapsible_spte() 4460 sptep = rmap_get_next(&iter); kvm_mmu_zap_collapsible_spte()
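rmap_get_first()/rmap_get_next() above encode a small-vs-chained rmap: the low bit of the word selects between a single spte pointer and a chain of pte_list_desc blocks, and the iterator hides the difference from callers. The caller shape used throughout mmu.c looks like this (visit_spte() is a hypothetical read-only visitor; the helpers are static to mmu.c, so this shape only applies there):

	static void walk_rmap(unsigned long *rmapp)
	{
		struct rmap_iterator iter;
		u64 *sptep;

		for (sptep = rmap_get_first(*rmapp, &iter); sptep;
		     sptep = rmap_get_next(&iter))
			visit_spte(sptep);	/* hypothetical */
	}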
|
/linux-4.1.27/arch/xtensa/platforms/iss/ |
H A D | simdisk.c | 107 struct bvec_iter iter; simdisk_xfer_bio() local 110 bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment() 111 char *buffer = __bio_kmap_atomic(bio, iter); bio_for_each_segment()
|
/linux-4.1.27/drivers/iommu/ |
H A D | dmar.c | 547 struct acpi_dmar_header *iter, *next; dmar_walk_remapping_entries() local 550 for (iter = start; iter < end && ret == 0; iter = next) { dmar_walk_remapping_entries() 551 next = (void *)iter + iter->length; dmar_walk_remapping_entries() 552 if (iter->length == 0) { dmar_walk_remapping_entries() 564 dmar_table_print_dmar_entry(iter); dmar_walk_remapping_entries() 566 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) { dmar_walk_remapping_entries() 569 iter->type); dmar_walk_remapping_entries() 570 } else if (cb->cb[iter->type]) { dmar_walk_remapping_entries() 571 ret = cb->cb[iter->type](iter, cb->arg[iter->type]); dmar_walk_remapping_entries() 574 iter->type); dmar_walk_remapping_entries()
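dmar_walk_remapping_entries() above walks variable-length ACPI subtables by each header's own length field, bailing out on a zero length so a corrupt table cannot loop forever. A self-contained userspace analogue of that length-delimited walk (the byte literals in main() assume a little-endian host):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct hdr { uint16_t type; uint16_t length; };

	static int walk_records(const uint8_t *start, const uint8_t *end)
	{
		const uint8_t *p = start;

		while (p + sizeof(struct hdr) <= end) {
			struct hdr h;

			memcpy(&h, p, sizeof(h));
			/* reject zero/short lengths and records past the end */
			if (h.length < sizeof(struct hdr) ||
			    h.length > (size_t)(end - p)) {
				fprintf(stderr, "corrupt record\n");
				return -1;
			}
			printf("type %u len %u\n", h.type, h.length);
			p += h.length;		/* advance by the record's own size */
		}
		return 0;
	}

	int main(void)
	{
		/* two records: type 1 len 8, type 2 len 4 (payload arbitrary) */
		uint8_t buf[12] = { 1, 0, 8, 0, 0xaa, 0xbb, 0xcc, 0xdd,
				    2, 0, 4, 0 };

		return walk_records(buf, buf + sizeof(buf));
	}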
|
/linux-4.1.27/drivers/gpu/drm/i915/ |
H A D | i915_gem_gtt.h | 344 #define gen6_for_each_pde(pt, pd, start, length, temp, iter) \ 345 for (iter = gen6_pde_index(start); \ 346 pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \ 347 iter++, \
|
/linux-4.1.27/fs/proc/ |
H A D | base.c | 2816 static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter) next_tgid() argument 2820 if (iter.task) next_tgid() 2821 put_task_struct(iter.task); next_tgid() 2824 iter.task = NULL; next_tgid() 2825 pid = find_ge_pid(iter.tgid, ns); next_tgid() 2827 iter.tgid = pid_nr_ns(pid, ns); next_tgid() 2828 iter.task = pid_task(pid, PIDTYPE_PID); next_tgid() 2841 if (!iter.task || !has_group_leader_pid(iter.task)) { next_tgid() 2842 iter.tgid += 1; next_tgid() 2845 get_task_struct(iter.task); next_tgid() 2848 return iter; next_tgid() 2856 struct tgid_iter iter; proc_pid_readdir() local 2875 iter.tgid = pos - TGID_OFFSET; proc_pid_readdir() 2876 iter.task = NULL; proc_pid_readdir() 2877 for (iter = next_tgid(ns, iter); proc_pid_readdir() 2878 iter.task; proc_pid_readdir() 2879 iter.tgid += 1, iter = next_tgid(ns, iter)) { proc_pid_readdir() 2882 if (!has_pid_permissions(ns, iter.task, 2)) proc_pid_readdir() 2885 len = snprintf(name, sizeof(name), "%d", iter.tgid); proc_pid_readdir() 2886 ctx->pos = iter.tgid + TGID_OFFSET; proc_pid_readdir() 2888 proc_pid_instantiate, iter.task, NULL)) { proc_pid_readdir() 2889 put_task_struct(iter.task); proc_pid_readdir()
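next_tgid() above is a resumable cursor: it restarts from "first pid >= iter.tgid", so readdir can drop state between calls even though pids are sparse. A self-contained userspace analogue of that resume-at-or-after rule over a sparse id table:

	#include <stdio.h>

	static const int ids[] = { 3, 7, 19, 20, 42 };	/* sparse, sorted */
	#define NIDS (sizeof(ids) / sizeof(ids[0]))

	struct id_iter { int next_id; };

	static int next_ge(struct id_iter *it)
	{
		for (unsigned int i = 0; i < NIDS; i++) {
			if (ids[i] >= it->next_id) {
				it->next_id = ids[i] + 1;	/* resume point */
				return ids[i];
			}
		}
		return -1;	/* no id at or after the cursor */
	}

	int main(void)
	{
		struct id_iter it = { .next_id = 0 };
		int id;

		while ((id = next_ge(&it)) >= 0)
			printf("id %d (resume at %d)\n", id, it.next_id);
		return 0;
	}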
|
/linux-4.1.27/drivers/cpufreq/ |
H A D | acpi-cpufreq.c | 956 struct freq_attr **iter; acpi_cpufreq_init() local 960 for (iter = acpi_cpufreq_attr; *iter != NULL; iter++) acpi_cpufreq_init() 964 if (iter[1] == NULL) acpi_cpufreq_init() 965 *iter = &cpb; acpi_cpufreq_init()
|
/linux-4.1.27/fs/ocfs2/dlm/ |
H A D | dlmmaster.c | 743 struct dlm_node_iter iter; dlm_get_lock_resource() local 956 dlm_node_iter_init(mle->vote_map, &iter); dlm_get_lock_resource() 957 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { dlm_get_lock_resource() 1191 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter, dlm_bitmap_diff_iter_init() argument 1198 iter->curnode = -1; dlm_bitmap_diff_iter_init() 1199 iter->orig_bm = orig_bm; dlm_bitmap_diff_iter_init() 1200 iter->cur_bm = cur_bm; dlm_bitmap_diff_iter_init() 1203 p1 = *(iter->orig_bm + i); dlm_bitmap_diff_iter_init() 1204 p2 = *(iter->cur_bm + i); dlm_bitmap_diff_iter_init() 1205 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); dlm_bitmap_diff_iter_init() 1209 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter, dlm_bitmap_diff_iter_next() argument 1214 if (iter->curnode >= O2NM_MAX_NODES) dlm_bitmap_diff_iter_next() 1217 bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, dlm_bitmap_diff_iter_next() 1218 iter->curnode+1); dlm_bitmap_diff_iter_next() 1220 iter->curnode = O2NM_MAX_NODES; dlm_bitmap_diff_iter_next() 1225 if (test_bit(bit, iter->orig_bm)) dlm_bitmap_diff_iter_next() 1230 iter->curnode = bit; dlm_bitmap_diff_iter_next() 1693 struct dlm_node_iter iter; dlm_do_assert_master() local 1709 dlm_node_iter_init(nodemap, &iter); dlm_do_assert_master() 1710 while ((to = dlm_node_iter_next(&iter)) >= 0) { dlm_do_assert_master() 2205 struct dlm_node_iter iter; dlm_pre_master_reco_lockres() local 2211 dlm_node_iter_init(dlm->domain_map, &iter); dlm_pre_master_reco_lockres() 2214 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { dlm_pre_master_reco_lockres() 2959 struct dlm_node_iter *iter) dlm_do_migrate_request() 2974 while ((nodenum = dlm_node_iter_next(iter)) >= 0) { dlm_do_migrate_request() 2984 clear_bit(nodenum, iter->node_map); dlm_do_migrate_request() 2999 clear_bit(nodenum, iter->node_map); dlm_do_migrate_request() 3344 struct dlm_node_iter iter; dlm_finish_migration() local 3348 dlm_node_iter_init(dlm->domain_map, &iter); dlm_finish_migration() 3349 clear_bit(old_master, iter.node_map); dlm_finish_migration() 3350 clear_bit(dlm->node_num, iter.node_map); dlm_finish_migration() 3362 dlm->node_num, &iter); dlm_finish_migration() 3372 ret = dlm_do_assert_master(dlm, res, iter.node_map, dlm_finish_migration() 3380 memset(iter.node_map, 0, sizeof(iter.node_map)); dlm_finish_migration() 3381 set_bit(old_master, iter.node_map); dlm_finish_migration() 3384 ret = dlm_do_assert_master(dlm, res, iter.node_map, dlm_finish_migration() 2956 dlm_do_migrate_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, u8 master, u8 new_master, struct dlm_node_iter *iter) dlm_do_migrate_request() argument
|
H A D | dlmcommon.h | 1113 struct dlm_node_iter *iter) dlm_node_iter_init() 1115 memcpy(iter->node_map, map, sizeof(iter->node_map)); dlm_node_iter_init() 1116 iter->curnode = -1; dlm_node_iter_init() 1119 static inline int dlm_node_iter_next(struct dlm_node_iter *iter) dlm_node_iter_next() argument 1122 bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1); dlm_node_iter_next() 1124 iter->curnode = O2NM_MAX_NODES; dlm_node_iter_next() 1127 iter->curnode = bit; dlm_node_iter_next() 1112 dlm_node_iter_init(unsigned long *map, struct dlm_node_iter *iter) dlm_node_iter_init() argument
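dlm_node_iter above is a cursor into a node bitmap: next() is just find_next_bit() from curnode + 1, with O2NM_MAX_NODES as the terminal value. A self-contained userspace analogue (find_next_set() is a trivial stand-in for find_next_bit(), and -1 stands in for -ENOENT):

	#include <stdio.h>

	#define MAX_NODES 64

	struct node_iter {
		unsigned long long map;
		int curnode;
	};

	static int find_next_set(unsigned long long map, int from)
	{
		for (int bit = from; bit < MAX_NODES; bit++)
			if (map & (1ULL << bit))
				return bit;
		return MAX_NODES;
	}

	static int node_iter_next(struct node_iter *iter)
	{
		int bit = find_next_set(iter->map, iter->curnode + 1);

		if (bit >= MAX_NODES) {
			iter->curnode = MAX_NODES;	/* park the cursor */
			return -1;
		}
		iter->curnode = bit;
		return bit;
	}

	int main(void)
	{
		struct node_iter it = {
			.map = (1ULL << 3) | (1ULL << 17),
			.curnode = -1,
		};
		int n;

		while ((n = node_iter_next(&it)) >= 0)
			printf("node %d\n", n);
		return 0;
	}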
|
/linux-4.1.27/drivers/iio/ |
H A D | industrialio-trigger.c | 104 struct iio_trigger *trig = NULL, *iter; iio_trigger_find_by_name() local 107 list_for_each_entry(iter, &iio_trigger_list, list) iio_trigger_find_by_name() 108 if (sysfs_streq(iter->name, name)) { iio_trigger_find_by_name() 109 trig = iter; iio_trigger_find_by_name()
|
/linux-4.1.27/drivers/net/ |
H A D | macvtap.c | 800 struct iov_iter *iter) macvtap_put_user() 810 if (iov_iter_count(iter) < vnet_hdr_len) macvtap_put_user() 815 if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != macvtap_put_user() 819 iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr)); macvtap_put_user() 835 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); macvtap_put_user() 836 if (ret || !iov_iter_count(iter)) macvtap_put_user() 839 ret = copy_to_iter(&veth, sizeof(veth), iter); macvtap_put_user() 840 if (ret != sizeof(veth) || !iov_iter_count(iter)) macvtap_put_user() 844 ret = skb_copy_datagram_iter(skb, vlan_offset, iter, macvtap_put_user() 798 macvtap_put_user(struct macvtap_queue *q, const struct sk_buff *skb, struct iov_iter *iter) macvtap_put_user() argument
|
/linux-4.1.27/arch/x86/kernel/ |
H A D | ftrace.c | 541 struct ftrace_rec_iter *iter; ftrace_replace_code() local 547 for_ftrace_rec_iter(iter) { for_ftrace_rec_iter() 548 rec = ftrace_rec_iter_record(iter); for_ftrace_rec_iter() 560 for_ftrace_rec_iter(iter) { for_ftrace_rec_iter() 561 rec = ftrace_rec_iter_record(iter); for_ftrace_rec_iter() 572 for_ftrace_rec_iter(iter) { for_ftrace_rec_iter() 573 rec = ftrace_rec_iter_record(iter); for_ftrace_rec_iter() 587 for_ftrace_rec_iter(iter) { for_ftrace_rec_iter() 588 rec = ftrace_rec_iter_record(iter); for_ftrace_rec_iter()
|
/linux-4.1.27/fs/hfsplus/ |
H A D | inode.c | 125 static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter, hfsplus_direct_IO() argument 131 size_t count = iov_iter_count(iter); hfsplus_direct_IO() 134 ret = blockdev_direct_IO(iocb, inode, iter, offset, hfsplus_get_block); hfsplus_direct_IO() 140 if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { hfsplus_direct_IO()
|
/linux-4.1.27/tools/perf/tests/ |
H A D | hists_cumulate.c | 89 struct hist_entry_iter iter = { add_hist_entries() local 94 iter.ops = &hist_iter_cumulative; add_hist_entries() 96 iter.ops = &hist_iter_normal; add_hist_entries() 107 if (hist_entry_iter__add(&iter, &al, evsel, &sample, add_hist_entries()
|
H A D | hists_filter.c | 65 struct hist_entry_iter iter = { evlist__for_each() local 84 if (hist_entry_iter__add(&iter, &al, evsel, &sample, evlist__for_each()
|
/linux-4.1.27/fs/fuse/ |
H A D | file.c | 1306 ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, fuse_direct_io() argument 1317 size_t count = iov_iter_count(iter); fuse_direct_io() 1324 req = fuse_get_req_for_background(fc, fuse_iter_npages(iter)); fuse_direct_io() 1326 req = fuse_get_req(fc, fuse_iter_npages(iter)); fuse_direct_io() 1342 int err = fuse_get_user_pages(req, iter, &nbytes, write); fuse_direct_io() 1372 fuse_iter_npages(iter)); fuse_direct_io() 1374 req = fuse_get_req(fc, fuse_iter_npages(iter)); fuse_direct_io() 1389 struct iov_iter *iter, __fuse_direct_read() 1399 res = fuse_direct_io(io, iter, ppos, 0); __fuse_direct_read() 2784 fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) fuse_direct_IO() argument 2794 size_t count = iov_iter_count(iter); fuse_direct_IO() 2802 if ((iov_iter_rw(iter) == READ) && (offset > i_size)) fuse_direct_IO() 2806 if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) { fuse_direct_IO() 2809 iov_iter_truncate(iter, fuse_round_up(i_size - offset)); fuse_direct_IO() 2810 count = iov_iter_count(iter); fuse_direct_IO() 2822 io->write = (iov_iter_rw(iter) == WRITE); fuse_direct_IO() 2838 iov_iter_rw(iter) == WRITE) fuse_direct_IO() 2850 if (iov_iter_rw(iter) == WRITE) { fuse_direct_IO() 2851 ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE); fuse_direct_IO() 2854 ret = __fuse_direct_read(io, iter, &pos); fuse_direct_IO() 2870 if (iov_iter_rw(iter) == WRITE) { fuse_direct_IO() 1388 __fuse_direct_read(struct fuse_io_priv *io, struct iov_iter *iter, loff_t *ppos) __fuse_direct_read() argument
|
/linux-4.1.27/include/net/netfilter/ |
H A D | nf_conntrack.h | 249 /* Iterate over all conntracks: if iter returns true, it's deleted. */ 251 int (*iter)(struct nf_conn *i, void *data),
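The comment above states the whole contract: the callback decides, and entries for which iter() returns true are removed. A self-contained userspace analogue of that iterate-and-conditionally-delete shape over a singly linked list, using the pointer-to-pointer unlink:

	#include <stdio.h>
	#include <stdlib.h>

	struct ct { int mark; struct ct *next; };

	static void iterate_cleanup(struct ct **head,
				    int (*iter)(struct ct *, void *), void *data)
	{
		struct ct **pp = head;

		while (*pp) {
			if (iter(*pp, data)) {	/* true => delete this entry */
				struct ct *dead = *pp;

				*pp = dead->next;
				free(dead);
			} else {
				pp = &(*pp)->next;
			}
		}
	}

	static int kill_marked(struct ct *c, void *data)
	{
		return c->mark == *(int *)data;
	}

	int main(void)
	{
		struct ct *head = NULL;
		int mark = 1;

		for (int i = 0; i < 4; i++) {	/* marks 0 1 0 1 */
			struct ct *c = malloc(sizeof(*c));

			c->mark = i & 1;
			c->next = head;
			head = c;
		}
		iterate_cleanup(&head, kill_marked, &mark);
		for (struct ct *c = head; c; c = c->next)
			printf("kept mark %d\n", c->mark);
		return 0;
	}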
|
/linux-4.1.27/arch/powerpc/sysdev/ |
H A D | axonram.c | 114 struct bvec_iter iter; axon_ram_make_request() local 120 bio_for_each_segment(vec, bio, iter) { bio_for_each_segment()
|
/linux-4.1.27/net/netfilter/ipvs/ |
H A D | ip_vs_conn.c | 988 struct ip_vs_iter_state *iter = seq->private; ip_vs_conn_array() local 996 iter->l = &ip_vs_conn_tab[idx]; ip_vs_conn_array() 1009 struct ip_vs_iter_state *iter = seq->private; __acquires() local 1011 iter->l = NULL; __acquires() 1019 struct ip_vs_iter_state *iter = seq->private; ip_vs_conn_seq_next() local 1021 struct hlist_head *l = iter->l; ip_vs_conn_seq_next() 1036 iter->l = &ip_vs_conn_tab[idx]; ip_vs_conn_seq_next() 1041 iter->l = NULL; ip_vs_conn_seq_next()
|