/linux-4.4.14/block/ |
D | t10-pi.c | 49 static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn, in t10_pi_generate() argument 54 for (i = 0 ; i < iter->data_size ; i += iter->interval) { in t10_pi_generate() 55 struct t10_pi_tuple *pi = iter->prot_buf; in t10_pi_generate() 57 pi->guard_tag = fn(iter->data_buf, iter->interval); in t10_pi_generate() 61 pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed)); in t10_pi_generate() 65 iter->data_buf += iter->interval; in t10_pi_generate() 66 iter->prot_buf += sizeof(struct t10_pi_tuple); in t10_pi_generate() 67 iter->seed++; in t10_pi_generate() 73 static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn, in t10_pi_verify() argument 78 for (i = 0 ; i < iter->data_size ; i += iter->interval) { in t10_pi_verify() [all …]
|
D | blk-map.c | 83 const struct iov_iter *iter, gfp_t gfp_mask) in blk_rq_map_user_iov() argument 90 if (!iter || !iter->count) in blk_rq_map_user_iov() 93 iov_for_each(iov, i, *iter) { in blk_rq_map_user_iov() 110 if (unaligned || (q->dma_pad_mask & iter->count) || map_data) in blk_rq_map_user_iov() 111 bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); in blk_rq_map_user_iov() 113 bio = bio_map_user_iov(q, iter, gfp_mask); in blk_rq_map_user_iov() 121 if (bio->bi_iter.bi_size != iter->count) { in blk_rq_map_user_iov()
|
D | bio.c | 519 struct bvec_iter iter; in zero_fill_bio() local 521 bio_for_each_segment(bv, bio, iter) { in zero_fill_bio() 635 struct bvec_iter iter; in bio_clone_bioset() local 678 bio_for_each_segment(bv, bio_src, iter) in bio_clone_bioset() 993 struct iov_iter iter; member 1015 static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter) in bio_copy_from_iter() argument 1026 &iter); in bio_copy_from_iter() 1028 if (!iov_iter_count(&iter)) in bio_copy_from_iter() 1046 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) in bio_copy_to_iter() argument 1057 &iter); in bio_copy_to_iter() [all …]
|
D | bio-integrity.c | 228 struct blk_integrity_iter iter; in bio_integrity_process() local 236 iter.disk_name = bio->bi_bdev->bd_disk->disk_name; in bio_integrity_process() 237 iter.interval = 1 << bi->interval_exp; in bio_integrity_process() 238 iter.seed = bip_get_seed(bip); in bio_integrity_process() 239 iter.prot_buf = prot_buf; in bio_integrity_process() 244 iter.data_buf = kaddr + bv.bv_offset; in bio_integrity_process() 245 iter.data_size = bv.bv_len; in bio_integrity_process() 247 ret = proc_fn(&iter); in bio_integrity_process()
|
D | bounce.c | 106 struct bvec_iter iter; in copy_to_high_bio_irq() local 108 bio_for_each_segment(tovec, to, iter) { in copy_to_high_bio_irq() 188 struct bvec_iter iter; in __blk_queue_bounce() local 191 bio_for_each_segment(from, *bio_orig, iter) in __blk_queue_bounce()
|
D | genhd.c | 743 struct class_dev_iter iter; in printk_all_partitions() local 746 class_dev_iter_init(&iter, &block_class, NULL, &disk_type); in printk_all_partitions() 747 while ((dev = class_dev_iter_next(&iter))) { in printk_all_partitions() 788 class_dev_iter_exit(&iter); in printk_all_partitions() 796 struct class_dev_iter *iter; in disk_seqf_start() local 799 iter = kmalloc(sizeof(*iter), GFP_KERNEL); in disk_seqf_start() 800 if (!iter) in disk_seqf_start() 803 seqf->private = iter; in disk_seqf_start() 804 class_dev_iter_init(iter, &block_class, NULL, &disk_type); in disk_seqf_start() 806 dev = class_dev_iter_next(iter); in disk_seqf_start() [all …]
|
D | blk-merge.c | 89 struct bvec_iter iter; in blk_bio_segment_split() local 96 bio_for_each_segment(bv, bio, iter) { in blk_bio_segment_split() 204 struct bvec_iter iter; in __blk_recalc_rq_segments() local 224 bio_for_each_segment(bv, bio, iter) { in __blk_recalc_rq_segments() 304 struct bvec_iter iter; in blk_phys_contig_segment() local 316 bio_for_each_segment(end_bv, bio, iter) in blk_phys_contig_segment() 317 if (end_bv.bv_len == iter.bi_size) in blk_phys_contig_segment() 383 struct bvec_iter iter; in __blk_bios_map_sg() local 413 bio_for_each_segment(bvec, bio, iter) in __blk_bios_map_sg()
|
D | blk-integrity.c | 46 struct bvec_iter iter; in blk_rq_count_integrity_sg() local 49 bio_for_each_integrity_vec(iv, bio, iter) { in blk_rq_count_integrity_sg() 92 struct bvec_iter iter; in blk_rq_map_integrity_sg() local 95 bio_for_each_integrity_vec(iv, bio, iter) { in blk_rq_map_integrity_sg() 387 static int blk_integrity_nop_fn(struct blk_integrity_iter *iter) in blk_integrity_nop_fn() argument
|
/linux-4.4.14/net/netlabel/ |
D | netlabel_addrlist.c | 63 struct netlbl_af4list *iter; in netlbl_af4list_search() local 65 list_for_each_entry_rcu(iter, head, list) in netlbl_af4list_search() 66 if (iter->valid && (addr & iter->mask) == iter->addr) in netlbl_af4list_search() 67 return iter; in netlbl_af4list_search() 88 struct netlbl_af4list *iter; in netlbl_af4list_search_exact() local 90 list_for_each_entry_rcu(iter, head, list) in netlbl_af4list_search_exact() 91 if (iter->valid && iter->addr == addr && iter->mask == mask) in netlbl_af4list_search_exact() 92 return iter; in netlbl_af4list_search_exact() 113 struct netlbl_af6list *iter; in netlbl_af6list_search() local 115 list_for_each_entry_rcu(iter, head, list) in netlbl_af6list_search() [all …]
|
D | netlabel_addrlist.h | 96 #define netlbl_af4list_foreach(iter, head) \ argument 97 for (iter = __af4list_valid((head)->next, head); \ 98 &iter->list != (head); \ 99 iter = __af4list_valid(iter->list.next, head)) 101 #define netlbl_af4list_foreach_rcu(iter, head) \ argument 102 for (iter = __af4list_valid_rcu((head)->next, head); \ 103 &iter->list != (head); \ 104 iter = __af4list_valid_rcu(iter->list.next, head)) 106 #define netlbl_af4list_foreach_safe(iter, tmp, head) \ argument 107 for (iter = __af4list_valid((head)->next, head), \ [all …]
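The foreach macros above skip entries whose valid flag is cleared. A minimal usage sketch of the RCU variant, assuming a caller that already owns the list head (the pr_info() reporting is illustrative only):

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include "netlabel_addrlist.h"   /* local header shown above */

    static void example_dump_af4list(struct list_head *head)
    {
            struct netlbl_af4list *iter;

            rcu_read_lock();
            /* only entries with iter->valid set are visited */
            netlbl_af4list_foreach_rcu(iter, head)
                    pr_info("addr=%pI4 mask=%pI4\n", &iter->addr, &iter->mask);
            rcu_read_unlock();
    }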
|
D | netlabel_kapi.c | 431 struct netlbl_lsm_catmap *iter = *catmap; in _netlbl_catmap_getnode() local 434 if (iter == NULL) in _netlbl_catmap_getnode() 436 if (offset < iter->startbit) in _netlbl_catmap_getnode() 438 while (iter && offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) { in _netlbl_catmap_getnode() 439 prev = iter; in _netlbl_catmap_getnode() 440 iter = iter->next; in _netlbl_catmap_getnode() 442 if (iter == NULL || offset < iter->startbit) in _netlbl_catmap_getnode() 445 return iter; in _netlbl_catmap_getnode() 449 return iter; in _netlbl_catmap_getnode() 454 iter = netlbl_catmap_alloc(gfp_flags); in _netlbl_catmap_getnode() [all …]
|
D | netlabel_cipso_v4.c | 105 u32 iter = 0; in netlbl_cipsov4_add_common() local 116 if (iter >= CIPSO_V4_TAG_MAXCNT) in netlbl_cipsov4_add_common() 118 doi_def->tags[iter++] = nla_get_u8(nla); in netlbl_cipsov4_add_common() 120 while (iter < CIPSO_V4_TAG_MAXCNT) in netlbl_cipsov4_add_common() 121 doi_def->tags[iter++] = CIPSO_V4_TAG_INVALID; in netlbl_cipsov4_add_common() 150 u32 iter; in netlbl_cipsov4_add_std() local 220 for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++) in netlbl_cipsov4_add_std() 221 doi_def->map.std->lvl.local[iter] = CIPSO_V4_INV_LVL; in netlbl_cipsov4_add_std() 222 for (iter = 0; iter < doi_def->map.std->lvl.cipso_size; iter++) in netlbl_cipsov4_add_std() 223 doi_def->map.std->lvl.cipso[iter] = CIPSO_V4_INV_LVL; in netlbl_cipsov4_add_std() [all …]
|
D | netlabel_domainhash.c | 117 u32 iter; in netlbl_domhsh_hash() local 124 for (iter = 0, val = 0, len = strlen(key); iter < len; iter++) in netlbl_domhsh_hash() 125 val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter]; in netlbl_domhsh_hash() 144 struct netlbl_dom_map *iter; in netlbl_domhsh_search() local 149 list_for_each_entry_rcu(iter, bkt_list, list) in netlbl_domhsh_search() 150 if (iter->valid && strcmp(iter->domain, domain) == 0) in netlbl_domhsh_search() 151 return iter; in netlbl_domhsh_search() 327 u32 iter; in netlbl_domhsh_init() local 344 for (iter = 0; iter < hsh_tbl->size; iter++) in netlbl_domhsh_init() 345 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); in netlbl_domhsh_init()
|
D | netlabel_domainhash.h | 56 #define netlbl_domhsh_addr4_entry(iter) \ argument 57 container_of(iter, struct netlbl_domaddr4_map, list) 63 #define netlbl_domhsh_addr6_entry(iter) \ argument 64 container_of(iter, struct netlbl_domaddr6_map, list)
|
D | netlabel_unlabeled.c | 80 #define netlbl_unlhsh_addr4_entry(iter) \ argument 81 container_of(iter, struct netlbl_unlhsh_addr4, list) 88 #define netlbl_unlhsh_addr6_entry(iter) \ argument 89 container_of(iter, struct netlbl_unlhsh_addr6, list) 226 struct netlbl_unlhsh_iface *iter; in netlbl_unlhsh_search_iface() local 230 list_for_each_entry_rcu(iter, bkt_list, list) in netlbl_unlhsh_search_iface() 231 if (iter->valid && iter->ifindex == ifindex) in netlbl_unlhsh_search_iface() 232 return iter; in netlbl_unlhsh_search_iface() 1420 u32 iter; in netlbl_unlabel_init() local 1437 for (iter = 0; iter < hsh_tbl->size; iter++) in netlbl_unlabel_init() [all …]
|
/linux-4.4.14/kernel/gcov/ |
D | gcc_3_4.c | 346 static struct gcov_fn_info *get_func(struct gcov_iterator *iter) in get_func() argument 348 return get_fn_info(iter->info, iter->function); in get_func() 351 static struct type_info *get_type(struct gcov_iterator *iter) in get_type() argument 353 return &iter->type_info[iter->type]; in get_type() 364 struct gcov_iterator *iter; in gcov_iter_new() local 366 iter = kzalloc(sizeof(struct gcov_iterator) + in gcov_iter_new() 369 if (iter) in gcov_iter_new() 370 iter->info = info; in gcov_iter_new() 372 return iter; in gcov_iter_new() 379 void gcov_iter_free(struct gcov_iterator *iter) in gcov_iter_free() argument [all …]
|
D | gcc_4_7.c | 478 struct gcov_iterator *iter; in gcov_iter_new() local 480 iter = kzalloc(sizeof(struct gcov_iterator), GFP_KERNEL); in gcov_iter_new() 481 if (!iter) in gcov_iter_new() 484 iter->info = info; in gcov_iter_new() 486 iter->size = convert_to_gcda(NULL, info); in gcov_iter_new() 487 iter->buffer = vmalloc(iter->size); in gcov_iter_new() 488 if (!iter->buffer) in gcov_iter_new() 491 convert_to_gcda(iter->buffer, info); in gcov_iter_new() 493 return iter; in gcov_iter_new() 496 kfree(iter); in gcov_iter_new() [all …]
|
D | gcov.h | 63 void gcov_iter_free(struct gcov_iterator *iter); 64 void gcov_iter_start(struct gcov_iterator *iter); 65 int gcov_iter_next(struct gcov_iterator *iter); 66 int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq); 67 struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter);
|
D | fs.c | 109 struct gcov_iterator *iter = data; in gcov_seq_next() local 111 if (gcov_iter_next(iter)) in gcov_seq_next() 115 return iter; in gcov_seq_next() 121 struct gcov_iterator *iter = data; in gcov_seq_show() local 123 if (gcov_iter_write(iter, seq)) in gcov_seq_show() 181 struct gcov_iterator *iter; in gcov_seq_open() local 195 iter = gcov_iter_new(info); in gcov_seq_open() 196 if (!iter) in gcov_seq_open() 202 seq->private = iter; in gcov_seq_open() 208 gcov_iter_free(iter); in gcov_seq_open() [all …]
|
/linux-4.4.14/drivers/gpu/drm/ |
D | drm_vma_manager.c | 146 struct rb_node *iter; in drm_vma_offset_lookup_locked() local 149 iter = mgr->vm_addr_space_rb.rb_node; in drm_vma_offset_lookup_locked() 152 while (likely(iter)) { in drm_vma_offset_lookup_locked() 153 node = rb_entry(iter, struct drm_vma_offset_node, vm_rb); in drm_vma_offset_lookup_locked() 156 iter = iter->rb_right; in drm_vma_offset_lookup_locked() 161 iter = iter->rb_left; in drm_vma_offset_lookup_locked() 180 struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node; in _drm_vma_offset_add_rb() local 184 while (likely(*iter)) { in _drm_vma_offset_add_rb() 185 parent = *iter; in _drm_vma_offset_add_rb() 186 iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb); in _drm_vma_offset_add_rb() [all …]
|
/linux-4.4.14/arch/x86/kvm/ |
D | mtrr.c | 482 static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter) in mtrr_lookup_fixed_start() argument 486 if (!fixed_mtrr_is_enabled(iter->mtrr_state)) in mtrr_lookup_fixed_start() 489 seg = fixed_mtrr_addr_to_seg(iter->start); in mtrr_lookup_fixed_start() 493 iter->fixed = true; in mtrr_lookup_fixed_start() 494 index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg); in mtrr_lookup_fixed_start() 495 iter->index = index; in mtrr_lookup_fixed_start() 496 iter->seg = seg; in mtrr_lookup_fixed_start() 500 static bool match_var_range(struct mtrr_iter *iter, in match_var_range() argument 506 if (!(start >= iter->end || end <= iter->start)) { in match_var_range() 507 iter->range = range; in match_var_range() [all …]
|
D | mmu.c | 1105 static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter) in rmap_get_first() argument 1111 iter->desc = NULL; in rmap_get_first() 1115 iter->desc = (struct pte_list_desc *)(rmap & ~1ul); in rmap_get_first() 1116 iter->pos = 0; in rmap_get_first() 1117 return iter->desc->sptes[iter->pos]; in rmap_get_first() 1125 static u64 *rmap_get_next(struct rmap_iterator *iter) in rmap_get_next() argument 1127 if (iter->desc) { in rmap_get_next() 1128 if (iter->pos < PTE_LIST_EXT - 1) { in rmap_get_next() 1131 ++iter->pos; in rmap_get_next() 1132 sptep = iter->desc->sptes[iter->pos]; in rmap_get_next() [all …]
|
/linux-4.4.14/include/linux/ |
D | radix-tree.h | 341 radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) in radix_tree_iter_init() argument 351 iter->index = 0; in radix_tree_iter_init() 352 iter->next_index = start; in radix_tree_iter_init() 370 struct radix_tree_iter *iter, unsigned flags); 382 void **radix_tree_iter_retry(struct radix_tree_iter *iter) in radix_tree_iter_retry() argument 384 iter->next_index = iter->index; in radix_tree_iter_retry() 395 radix_tree_chunk_size(struct radix_tree_iter *iter) in radix_tree_chunk_size() argument 397 return iter->next_index - iter->index; in radix_tree_chunk_size() 412 radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) in radix_tree_next_slot() argument 415 iter->tags >>= 1; in radix_tree_next_slot() [all …]
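These helpers back the radix_tree_for_each_slot() family of macros defined in the same header. A minimal sketch of a read-side walk over every populated slot (the tree and the logging are hypothetical; lookups are RCU-protected as in the mm/filemap.c callers below):

    #include <linux/kernel.h>
    #include <linux/radix-tree.h>
    #include <linux/rcupdate.h>

    static void example_dump_tree(struct radix_tree_root *root)
    {
            struct radix_tree_iter iter;
            void **slot;

            rcu_read_lock();
            /* iter.index tracks the key of the slot currently visited */
            radix_tree_for_each_slot(slot, root, &iter, 0)
                    pr_info("index %lu -> %p\n",
                            iter.index, radix_tree_deref_slot(slot));
            rcu_read_unlock();
    }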
|
D | bio.h | 64 #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) argument 66 #define bvec_iter_page(bvec, iter) \ argument 67 (__bvec_iter_bvec((bvec), (iter))->bv_page) 69 #define bvec_iter_len(bvec, iter) \ argument 70 min((iter).bi_size, \ 71 __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) 73 #define bvec_iter_offset(bvec, iter) \ argument 74 (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) 76 #define bvec_iter_bvec(bvec, iter) \ argument 78 .bv_page = bvec_iter_page((bvec), (iter)), \ [all …]
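A minimal sketch of consuming these bvec_iter accessors through bio_for_each_segment(), mirroring what zero_fill_bio() in bio.c above does (the kmap_atomic() mapping is one possible way to touch each segment):

    #include <linux/bio.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    static void example_zero_bio(struct bio *bio)
    {
            struct bio_vec bv;
            struct bvec_iter iter;

            /* bio_for_each_segment() advances iter one bio_vec at a time */
            bio_for_each_segment(bv, bio, iter) {
                    char *p = kmap_atomic(bv.bv_page);

                    memset(p + bv.bv_offset, 0, bv.bv_len);
                    kunmap_atomic(p);
            }
    }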
|
D | uio.h | 57 static inline struct iovec iov_iter_iovec(const struct iov_iter *iter) in iov_iter_iovec() argument 60 .iov_base = iter->iov->iov_base + iter->iov_offset, in iov_iter_iovec() 61 .iov_len = min(iter->count, in iov_iter_iovec() 62 iter->iov->iov_len - iter->iov_offset), in iov_iter_iovec() 66 #define iov_for_each(iov, iter, start) \ argument 68 for (iter = (start); \ 69 (iter).count && \ 70 ((iov = iov_iter_iovec(&(iter))), 1); \ 71 iov_iter_advance(&(iter), (iov).iov_len))
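A minimal sketch of iov_for_each(): it copies the starting iov_iter into a local one and advances that copy, so the caller's iterator is left untouched (the byte counting itself is illustrative):

    #include <linux/uio.h>

    static size_t example_count_bytes(const struct iov_iter *start)
    {
            struct iov_iter iter;
            struct iovec iov;
            size_t total = 0;

            /* walks user iovecs only; bvec/kvec iterators are skipped */
            iov_for_each(iov, iter, *start)
                    total += iov.iov_len;

            return total;
    }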
|
D | ring_buffer.h | 130 void ring_buffer_read_start(struct ring_buffer_iter *iter); 131 void ring_buffer_read_finish(struct ring_buffer_iter *iter); 134 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); 136 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); 137 void ring_buffer_iter_reset(struct ring_buffer_iter *iter); 138 int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
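A minimal sketch of the read-iterator sequence these declarations imply, following the pattern of ftrace_dump_buf() in trace_kdb.c below (ring_buffer_read_prepare() and ring_buffer_read_prepare_sync() come from the same header; event decoding is elided):

    #include <linux/ring_buffer.h>

    static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
    {
            struct ring_buffer_iter *iter;
            struct ring_buffer_event *event;
            u64 ts;

            iter = ring_buffer_read_prepare(buffer, cpu);
            if (!iter)
                    return;
            ring_buffer_read_prepare_sync();
            ring_buffer_read_start(iter);

            /* ring_buffer_read() returns the next event and advances the iterator */
            while ((event = ring_buffer_read(iter, &ts)))
                    ;       /* decode the event here */

            ring_buffer_read_finish(iter);
    }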
|
D | ftrace.h | 428 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter); 429 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter); 431 #define for_ftrace_rec_iter(iter) \ argument 432 for (iter = ftrace_rec_iter_start(); \ 433 iter; \ 434 iter = ftrace_rec_iter_next(iter))
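A minimal sketch of the record iterator declared above; it is normally used by arch code while ftrace holds the relevant locks during code patching, so the locking is elided here and only the traversal is shown:

    #include <linux/ftrace.h>

    static unsigned long example_count_records(void)
    {
            struct ftrace_rec_iter *iter;
            unsigned long count = 0;

            /* expands to ftrace_rec_iter_start()/ftrace_rec_iter_next() */
            for_ftrace_rec_iter(iter)
                    if (ftrace_rec_iter_record(iter))
                            count++;

            return count;
    }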
|
D | pnfs_osd_xdr.h | 294 struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr); 297 struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
|
D | netdevice.h | 3567 struct list_head **iter); 3569 struct list_head **iter); 3572 #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ argument 3573 for (iter = &(dev)->adj_list.upper, \ 3574 updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ 3576 updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) 3579 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ argument 3580 for (iter = &(dev)->all_adj_list.upper, \ 3581 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ 3583 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter))) [all …]
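A minimal sketch of the upper-device walk these macros provide, as used by ipoib_get_net_dev_match_addr() in ipoib_main.c below; iter is just a list_head cursor and the walk must run under rcu_read_lock():

    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>

    static void example_dump_uppers(struct net_device *dev)
    {
            struct net_device *upper;
            struct list_head *iter;

            rcu_read_lock();
            /* visits every device stacked above dev, direct or indirect */
            netdev_for_each_all_upper_dev_rcu(dev, upper, iter)
                    netdev_info(dev, "upper device: %s\n", upper->name);
            rcu_read_unlock();
    }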
|
D | rhashtable.h | 349 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); 350 void rhashtable_walk_exit(struct rhashtable_iter *iter); 351 int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); 352 void *rhashtable_walk_next(struct rhashtable_iter *iter); 353 void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
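A minimal sketch of the walker API declared above (the object type and error policy belong to the caller; rhashtable_walk_next() can hand back ERR_PTR(-EAGAIN) when the table resizes mid-walk, which a walker is expected to tolerate):

    #include <linux/err.h>
    #include <linux/rhashtable.h>

    static int example_count_objects(struct rhashtable *ht)
    {
            struct rhashtable_iter iter;
            void *obj;
            int err, count = 0;

            err = rhashtable_walk_init(ht, &iter);
            if (err)
                    return err;

            err = rhashtable_walk_start(&iter);
            if (err && err != -EAGAIN) {
                    rhashtable_walk_exit(&iter);
                    return err;
            }

            while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                    if (IS_ERR(obj))        /* a resize raced with us; keep going */
                            continue;
                    count++;
            }

            rhashtable_walk_stop(&iter);
            rhashtable_walk_exit(&iter);
            return count;
    }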
|
/linux-4.4.14/kernel/trace/ |
D | trace_kdb.c | 22 static struct trace_iterator iter; in ftrace_dump_buf() local 28 trace_init_global_iter(&iter); in ftrace_dump_buf() 29 iter.buffer_iter = buffer_iter; in ftrace_dump_buf() 30 tr = iter.tr; in ftrace_dump_buf() 33 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump_buf() 44 memset(&iter.seq, 0, in ftrace_dump_buf() 47 iter.iter_flags |= TRACE_FILE_LAT_FMT; in ftrace_dump_buf() 48 iter.pos = -1; in ftrace_dump_buf() 52 iter.buffer_iter[cpu] = in ftrace_dump_buf() 53 ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu); in ftrace_dump_buf() [all …]
|
D | trace_output.c | 23 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter) in trace_print_bputs_msg_only() argument 25 struct trace_seq *s = &iter->seq; in trace_print_bputs_msg_only() 26 struct trace_entry *entry = iter->ent; in trace_print_bputs_msg_only() 36 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) in trace_print_bprintk_msg_only() argument 38 struct trace_seq *s = &iter->seq; in trace_print_bprintk_msg_only() 39 struct trace_entry *entry = iter->ent; in trace_print_bprintk_msg_only() 49 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) in trace_print_printk_msg_only() argument 51 struct trace_seq *s = &iter->seq; in trace_print_printk_msg_only() 52 struct trace_entry *entry = iter->ent; in trace_print_printk_msg_only() 225 int trace_raw_output_prep(struct trace_iterator *iter, in trace_raw_output_prep() argument [all …]
|
D | trace.c | 1144 static int wait_on_pipe(struct trace_iterator *iter, bool full) in wait_on_pipe() argument 1147 if (trace_buffer_iter(iter, iter->cpu_file)) in wait_on_pipe() 1150 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, in wait_on_pipe() 2275 static void trace_iterator_increment(struct trace_iterator *iter) in trace_iterator_increment() argument 2277 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); in trace_iterator_increment() 2279 iter->idx++; in trace_iterator_increment() 2285 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, in peek_next_entry() argument 2289 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); in peek_next_entry() 2294 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts, in peek_next_entry() 2298 iter->ent_size = ring_buffer_event_length(event); in peek_next_entry() [all …]
|
D | trace_functions_graph.c | 570 get_return_for_leaf(struct trace_iterator *iter, in get_return_for_leaf() argument 573 struct fgraph_data *data = iter->private; in get_return_for_leaf() 587 ring_iter = trace_buffer_iter(iter, iter->cpu); in get_return_for_leaf() 597 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, in get_return_for_leaf() 599 event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu, in get_return_for_leaf() 652 print_graph_irq(struct trace_iterator *iter, unsigned long addr, in print_graph_irq() argument 655 struct trace_array *tr = iter->tr; in print_graph_irq() 656 struct trace_seq *s = &iter->seq; in print_graph_irq() 657 struct trace_entry *ent = iter->ent; in print_graph_irq() 666 print_graph_abs_time(iter->ts, s); in print_graph_irq() [all …]
|
D | trace_mmiotrace.c | 102 static void mmio_pipe_open(struct trace_iterator *iter) in mmio_pipe_open() argument 105 struct trace_seq *s = &iter->seq; in mmio_pipe_open() 114 iter->private = hiter; in mmio_pipe_open() 118 static void mmio_close(struct trace_iterator *iter) in mmio_close() argument 120 struct header_iter *hiter = iter->private; in mmio_close() 122 iter->private = NULL; in mmio_close() 125 static unsigned long count_overruns(struct trace_iterator *iter) in count_overruns() argument 128 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer); in count_overruns() 136 static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp, in mmio_read() argument 140 struct header_iter *hiter = iter->private; in mmio_read() [all …]
|
D | trace_output.h | 8 trace_print_bputs_msg_only(struct trace_iterator *iter); 10 trace_print_bprintk_msg_only(struct trace_iterator *iter); 12 trace_print_printk_msg_only(struct trace_iterator *iter); 18 extern int trace_print_context(struct trace_iterator *iter); 19 extern int trace_print_lat_context(struct trace_iterator *iter); 25 extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
|
D | ftrace.c | 2367 struct ftrace_rec_iter *iter = &ftrace_rec_iter; in ftrace_rec_iter_start() local 2369 iter->pg = ftrace_pages_start; in ftrace_rec_iter_start() 2370 iter->index = 0; in ftrace_rec_iter_start() 2373 while (iter->pg && !iter->pg->index) in ftrace_rec_iter_start() 2374 iter->pg = iter->pg->next; in ftrace_rec_iter_start() 2376 if (!iter->pg) in ftrace_rec_iter_start() 2379 return iter; in ftrace_rec_iter_start() 2388 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) in ftrace_rec_iter_next() argument 2390 iter->index++; in ftrace_rec_iter_next() 2392 if (iter->index >= iter->pg->index) { in ftrace_rec_iter_next() [all …]
|
D | trace_printk.c | 49 const char **iter; in hold_module_trace_bprintk_format() local 57 for (iter = start; iter < end; iter++) { in hold_module_trace_bprintk_format() 58 struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter); in hold_module_trace_bprintk_format() 60 *iter = tb_fmt->fmt; in hold_module_trace_bprintk_format() 67 fmt = kmalloc(strlen(*iter) + 1, GFP_KERNEL); in hold_module_trace_bprintk_format() 70 strcpy(fmt, *iter); in hold_module_trace_bprintk_format() 75 *iter = fmt; in hold_module_trace_bprintk_format()
|
D | ring_buffer.c | 1860 rb_iter_head_event(struct ring_buffer_iter *iter) in rb_iter_head_event() argument 1862 return __rb_page_index(iter->head_page, iter->head); in rb_iter_head_event() 1890 static void rb_inc_iter(struct ring_buffer_iter *iter) in rb_inc_iter() argument 1892 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() 1900 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter() 1901 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter() 1903 rb_inc_page(cpu_buffer, &iter->head_page); in rb_inc_iter() 1905 iter->read_stamp = iter->head_page->page->time_stamp; in rb_inc_iter() 1906 iter->head = 0; in rb_inc_iter() 3396 static void rb_iter_reset(struct ring_buffer_iter *iter) in rb_iter_reset() argument [all …]
|
D | trace.h | 388 void (*open)(struct trace_iterator *iter); 389 void (*pipe_open)(struct trace_iterator *iter); 390 void (*close)(struct trace_iterator *iter); 391 void (*pipe_close)(struct trace_iterator *iter); 392 ssize_t (*read)(struct trace_iterator *iter, 395 ssize_t (*splice_read)(struct trace_iterator *iter, 406 enum print_line_t (*print_line)(struct trace_iterator *iter); 550 trace_buffer_iter(struct trace_iterator *iter, int cpu) in trace_buffer_iter() argument 552 if (iter->buffer_iter && iter->buffer_iter[cpu]) in trace_buffer_iter() 553 return iter->buffer_iter[cpu]; in trace_buffer_iter() [all …]
|
D | trace_irqsoff.c | 203 static void irqsoff_trace_open(struct trace_iterator *iter) in irqsoff_trace_open() argument 205 if (is_graph(iter->tr)) in irqsoff_trace_open() 206 graph_trace_open(iter); in irqsoff_trace_open() 210 static void irqsoff_trace_close(struct trace_iterator *iter) in irqsoff_trace_close() argument 212 if (iter->private) in irqsoff_trace_close() 213 graph_trace_close(iter); in irqsoff_trace_close() 221 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) in irqsoff_print_line() argument 227 if (is_graph(iter->tr)) in irqsoff_print_line() 228 return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); in irqsoff_print_line() 264 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter) in irqsoff_print_line() argument [all …]
|
D | blktrace.c | 1136 typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act); 1138 static void blk_log_action_classic(struct trace_iterator *iter, const char *act) in blk_log_action_classic() argument 1141 unsigned long long ts = iter->ts; in blk_log_action_classic() 1144 const struct blk_io_trace *t = te_blk_io_trace(iter->ent); in blk_log_action_classic() 1148 trace_seq_printf(&iter->seq, in blk_log_action_classic() 1150 MAJOR(t->device), MINOR(t->device), iter->cpu, in blk_log_action_classic() 1151 secs, nsec_rem, iter->ent->pid, act, rwbs); in blk_log_action_classic() 1154 static void blk_log_action(struct trace_iterator *iter, const char *act) in blk_log_action() argument 1157 const struct blk_io_trace *t = te_blk_io_trace(iter->ent); in blk_log_action() 1160 trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", in blk_log_action() [all …]
|
D | trace_sched_wakeup.c | 271 static void wakeup_trace_open(struct trace_iterator *iter) in wakeup_trace_open() argument 273 if (is_graph(iter->tr)) in wakeup_trace_open() 274 graph_trace_open(iter); in wakeup_trace_open() 277 static void wakeup_trace_close(struct trace_iterator *iter) in wakeup_trace_close() argument 279 if (iter->private) in wakeup_trace_close() 280 graph_trace_close(iter); in wakeup_trace_close() 287 static enum print_line_t wakeup_print_line(struct trace_iterator *iter) in wakeup_print_line() argument 293 if (is_graph(iter->tr)) in wakeup_print_line() 294 return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); in wakeup_print_line() 320 static enum print_line_t wakeup_print_line(struct trace_iterator *iter) in wakeup_print_line() argument [all …]
|
D | trace_branch.c | 138 static enum print_line_t trace_branch_print(struct trace_iterator *iter, in trace_branch_print() argument 143 trace_assign_type(field, iter->ent); in trace_branch_print() 145 trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n", in trace_branch_print() 151 return trace_handle_return(&iter->seq); in trace_branch_print()
|
D | trace_syscalls.c | 110 print_syscall_enter(struct trace_iterator *iter, int flags, in print_syscall_enter() argument 113 struct trace_array *tr = iter->tr; in print_syscall_enter() 114 struct trace_seq *s = &iter->seq; in print_syscall_enter() 115 struct trace_entry *ent = iter->ent; in print_syscall_enter() 157 print_syscall_exit(struct trace_iterator *iter, int flags, in print_syscall_exit() argument 160 struct trace_seq *s = &iter->seq; in print_syscall_exit() 161 struct trace_entry *ent = iter->ent; in print_syscall_exit()
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
D | icm.h | 88 struct mlx4_icm_iter *iter) in mlx4_icm_first() argument 90 iter->icm = icm; in mlx4_icm_first() 91 iter->chunk = list_empty(&icm->chunk_list) ? in mlx4_icm_first() 94 iter->page_idx = 0; in mlx4_icm_first() 97 static inline int mlx4_icm_last(struct mlx4_icm_iter *iter) in mlx4_icm_last() argument 99 return !iter->chunk; in mlx4_icm_last() 102 static inline void mlx4_icm_next(struct mlx4_icm_iter *iter) in mlx4_icm_next() argument 104 if (++iter->page_idx >= iter->chunk->nsg) { in mlx4_icm_next() 105 if (iter->chunk->list.next == &iter->icm->chunk_list) { in mlx4_icm_next() 106 iter->chunk = NULL; in mlx4_icm_next() [all …]
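A minimal sketch of the chunk iterator defined above; mlx4_icm_addr()/mlx4_icm_size() are assumed to be the accessors from the same header that return the DMA address and length of the current chunk page:

    #include "icm.h"        /* driver-local header shown above */

    static u64 example_icm_total(struct mlx4_icm *icm)
    {
            struct mlx4_icm_iter iter;
            u64 total = 0;

            /* first/last/next step through every DMA-mapped chunk page */
            for (mlx4_icm_first(icm, &iter);
                 !mlx4_icm_last(&iter);
                 mlx4_icm_next(&iter))
                    total += mlx4_icm_size(&iter);

            return total;
    }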
|
/linux-4.4.14/kernel/ |
D | kallsyms.c | 458 static int get_ksymbol_mod(struct kallsym_iter *iter) in get_ksymbol_mod() argument 460 if (module_get_kallsym(iter->pos - kallsyms_num_syms, &iter->value, in get_ksymbol_mod() 461 &iter->type, iter->name, iter->module_name, in get_ksymbol_mod() 462 &iter->exported) < 0) in get_ksymbol_mod() 468 static unsigned long get_ksymbol_core(struct kallsym_iter *iter) in get_ksymbol_core() argument 470 unsigned off = iter->nameoff; in get_ksymbol_core() 472 iter->module_name[0] = '\0'; in get_ksymbol_core() 473 iter->value = kallsyms_addresses[iter->pos]; in get_ksymbol_core() 475 iter->type = kallsyms_get_symbol_type(off); in get_ksymbol_core() 477 off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name)); in get_ksymbol_core() [all …]
|
D | jump_label.c | 132 struct jump_entry *iter; in __jump_label_text_reserved() local 134 iter = iter_start; in __jump_label_text_reserved() 135 while (iter < iter_stop) { in __jump_label_text_reserved() 136 if (addr_conflict(iter, start, end)) in __jump_label_text_reserved() 138 iter++; in __jump_label_text_reserved() 206 struct jump_entry *iter; in jump_label_init() local 211 for (iter = iter_start; iter < iter_stop; iter++) { in jump_label_init() 215 if (jump_label_type(iter) == JUMP_LABEL_NOP) in jump_label_init() 216 arch_jump_label_transform_static(iter, JUMP_LABEL_NOP); in jump_label_init() 218 iterk = jump_entry_key(iter); in jump_label_init() [all …]
|
D | tracepoint.c | 392 struct tracepoint * const *iter; in tp_module_going_check_quiescent() local 396 for (iter = begin; iter < end; iter++) in tp_module_going_check_quiescent() 397 WARN_ON_ONCE((*iter)->funcs); in tp_module_going_check_quiescent() 506 struct tracepoint * const *iter; in for_each_tracepoint_range() local 510 for (iter = begin; iter < end; iter++) in for_each_tracepoint_range() 511 fct(*iter, priv); in for_each_tracepoint_range()
|
/linux-4.4.14/net/rds/ |
D | info.c | 104 void rds_info_iter_unmap(struct rds_info_iterator *iter) in rds_info_iter_unmap() argument 106 if (iter->addr) { in rds_info_iter_unmap() 107 kunmap_atomic(iter->addr); in rds_info_iter_unmap() 108 iter->addr = NULL; in rds_info_iter_unmap() 115 void rds_info_copy(struct rds_info_iterator *iter, void *data, in rds_info_copy() argument 121 if (!iter->addr) in rds_info_copy() 122 iter->addr = kmap_atomic(*iter->pages); in rds_info_copy() 124 this = min(bytes, PAGE_SIZE - iter->offset); in rds_info_copy() 127 "bytes %lu\n", *iter->pages, iter->addr, in rds_info_copy() 128 iter->offset, this, data, bytes); in rds_info_copy() [all …]
|
D | stats.c | 81 void rds_stats_info_copy(struct rds_info_iterator *iter, in rds_stats_info_copy() argument 93 rds_info_copy(iter, &ctr, sizeof(ctr)); in rds_stats_info_copy() 109 struct rds_info_iterator *iter, in rds_stats_info() argument 133 rds_stats_info_copy(iter, (uint64_t *)&stats, rds_stat_names, in rds_stats_info() 139 lens->nr = rds_trans_stats_info_copy(iter, avail) + in rds_stats_info()
|
D | info.h | 18 struct rds_info_iterator *iter, 25 void rds_info_copy(struct rds_info_iterator *iter, void *data, 27 void rds_info_iter_unmap(struct rds_info_iterator *iter);
|
D | connection.c | 390 struct rds_info_iterator *iter, in rds_conn_message_info() argument 420 rds_inc_info_copy(&rm->m_inc, iter, in rds_conn_message_info() 435 struct rds_info_iterator *iter, in rds_conn_message_info_send() argument 438 rds_conn_message_info(sock, len, iter, lens, 1); in rds_conn_message_info_send() 443 struct rds_info_iterator *iter, in rds_conn_message_info_retrans() argument 446 rds_conn_message_info(sock, len, iter, lens, 0); in rds_conn_message_info_retrans() 450 struct rds_info_iterator *iter, in rds_for_each_conn_info() argument 477 rds_info_copy(iter, buffer, item_len); in rds_for_each_conn_info() 513 struct rds_info_iterator *iter, in rds_conn_info() argument 516 rds_for_each_conn_info(sock, len, iter, lens, in rds_conn_info()
|
D | transport.c | 131 unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, in rds_trans_stats_info_copy() argument 140 rds_info_iter_unmap(iter); in rds_trans_stats_info_copy() 149 part = trans->stats_info_copy(iter, avail); in rds_trans_stats_info_copy()
|
D | iw_stats.c | 72 unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter, in rds_iw_stats_info_copy() argument 91 rds_stats_info_copy(iter, (uint64_t *)&stats, rds_iw_stat_names, in rds_iw_stats_info_copy()
|
D | ib_stats.c | 80 unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter, in rds_ib_stats_info_copy() argument 99 rds_stats_info_copy(iter, (uint64_t *)&stats, rds_ib_stat_names, in rds_ib_stats_info_copy()
|
D | tcp_stats.c | 51 unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter, in rds_tcp_stats_info_copy() argument 70 rds_stats_info_copy(iter, (uint64_t *)&stats, rds_tcp_stat_names, in rds_tcp_stats_info_copy()
|
/linux-4.4.14/drivers/infiniband/ulp/ipoib/ |
D | ipoib_fs.c | 60 struct ipoib_mcast_iter *iter; in ipoib_mcg_seq_start() local 63 iter = ipoib_mcast_iter_init(file->private); in ipoib_mcg_seq_start() 64 if (!iter) in ipoib_mcg_seq_start() 68 if (ipoib_mcast_iter_next(iter)) { in ipoib_mcg_seq_start() 69 kfree(iter); in ipoib_mcg_seq_start() 74 return iter; in ipoib_mcg_seq_start() 80 struct ipoib_mcast_iter *iter = iter_ptr; in ipoib_mcg_seq_next() local 84 if (ipoib_mcast_iter_next(iter)) { in ipoib_mcg_seq_next() 85 kfree(iter); in ipoib_mcg_seq_next() 89 return iter; in ipoib_mcg_seq_next() [all …]
|
D | ipoib_multicast.c | 971 struct ipoib_mcast_iter *iter; in ipoib_mcast_iter_init() local 973 iter = kmalloc(sizeof *iter, GFP_KERNEL); in ipoib_mcast_iter_init() 974 if (!iter) in ipoib_mcast_iter_init() 977 iter->dev = dev; in ipoib_mcast_iter_init() 978 memset(iter->mgid.raw, 0, 16); in ipoib_mcast_iter_init() 980 if (ipoib_mcast_iter_next(iter)) { in ipoib_mcast_iter_init() 981 kfree(iter); in ipoib_mcast_iter_init() 985 return iter; in ipoib_mcast_iter_init() 988 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter) in ipoib_mcast_iter_next() argument 990 struct ipoib_dev_priv *priv = netdev_priv(iter->dev); in ipoib_mcast_iter_next() [all …]
|
D | ipoib_main.c | 305 struct list_head *iter; in ipoib_get_net_dev_match_addr() local 314 netdev_for_each_all_upper_dev_rcu(dev, upper, iter) { in ipoib_get_net_dev_match_addr() 560 struct ipoib_path_iter *iter; in ipoib_path_iter_init() local 562 iter = kmalloc(sizeof *iter, GFP_KERNEL); in ipoib_path_iter_init() 563 if (!iter) in ipoib_path_iter_init() 566 iter->dev = dev; in ipoib_path_iter_init() 567 memset(iter->path.pathrec.dgid.raw, 0, 16); in ipoib_path_iter_init() 569 if (ipoib_path_iter_next(iter)) { in ipoib_path_iter_init() 570 kfree(iter); in ipoib_path_iter_init() 574 return iter; in ipoib_path_iter_init() [all …]
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_memfree.h | 100 struct mthca_icm_iter *iter) in mthca_icm_first() argument 102 iter->icm = icm; in mthca_icm_first() 103 iter->chunk = list_empty(&icm->chunk_list) ? in mthca_icm_first() 106 iter->page_idx = 0; in mthca_icm_first() 109 static inline int mthca_icm_last(struct mthca_icm_iter *iter) in mthca_icm_last() argument 111 return !iter->chunk; in mthca_icm_last() 114 static inline void mthca_icm_next(struct mthca_icm_iter *iter) in mthca_icm_next() argument 116 if (++iter->page_idx >= iter->chunk->nsg) { in mthca_icm_next() 117 if (iter->chunk->list.next == &iter->icm->chunk_list) { in mthca_icm_next() 118 iter->chunk = NULL; in mthca_icm_next() [all …]
|
/linux-4.4.14/drivers/s390/cio/ |
D | blacklist.c | 287 struct ccwdev_iter *iter = s->private; in cio_ignore_proc_seq_start() local 291 memset(iter, 0, sizeof(*iter)); in cio_ignore_proc_seq_start() 292 iter->ssid = *offset / (__MAX_SUBCHANNEL + 1); in cio_ignore_proc_seq_start() 293 iter->devno = *offset % (__MAX_SUBCHANNEL + 1); in cio_ignore_proc_seq_start() 294 return iter; in cio_ignore_proc_seq_start() 305 struct ccwdev_iter *iter; in cio_ignore_proc_seq_next() local 309 iter = it; in cio_ignore_proc_seq_next() 310 if (iter->devno == __MAX_SUBCHANNEL) { in cio_ignore_proc_seq_next() 311 iter->devno = 0; in cio_ignore_proc_seq_next() 312 iter->ssid++; in cio_ignore_proc_seq_next() [all …]
|
/linux-4.4.14/drivers/hwtracing/intel_th/ |
D | msu.c | 242 static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter) in msc_iter_bdesc() argument 244 return iter->win->block[iter->block].bdesc; in msc_iter_bdesc() 247 static void msc_iter_init(struct msc_iter *iter) in msc_iter_init() argument 249 memset(iter, 0, sizeof(*iter)); in msc_iter_init() 250 iter->start_block = -1; in msc_iter_init() 251 iter->block = -1; in msc_iter_init() 256 struct msc_iter *iter; in msc_iter_install() local 258 iter = kzalloc(sizeof(*iter), GFP_KERNEL); in msc_iter_install() 259 if (!iter) in msc_iter_install() 262 msc_iter_init(iter); in msc_iter_install() [all …]
|
/linux-4.4.14/drivers/dma/ppc4xx/ |
D | adma.c | 186 struct ppc440spe_adma_desc_slot *iter) in print_cb_list() argument 188 for (; iter; iter = iter->hw_next) in print_cb_list() 189 print_cb(chan, iter->hw_desc); in print_cb_list() 334 struct ppc440spe_adma_desc_slot *iter; in ppc440spe_desc_init_dma01pq() local 348 list_for_each_entry(iter, &desc->group_list, chain_node) { in ppc440spe_desc_init_dma01pq() 349 hw_desc = iter->hw_desc; in ppc440spe_desc_init_dma01pq() 350 memset(iter->hw_desc, 0, sizeof(struct dma_cdb)); in ppc440spe_desc_init_dma01pq() 352 if (likely(!list_is_last(&iter->chain_node, in ppc440spe_desc_init_dma01pq() 355 iter->hw_next = list_entry(iter->chain_node.next, in ppc440spe_desc_init_dma01pq() 357 clear_bit(PPC440SPE_DESC_INT, &iter->flags); in ppc440spe_desc_init_dma01pq() [all …]
|
/linux-4.4.14/tools/perf/util/ |
D | hist.c | 494 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, in iter_next_nop_entry() 501 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused, in iter_add_next_nop_entry() 508 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) in iter_prepare_mem_entry() argument 510 struct perf_sample *sample = iter->sample; in iter_prepare_mem_entry() 517 iter->priv = mi; in iter_prepare_mem_entry() 522 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al) in iter_add_single_mem_entry() argument 525 struct mem_info *mi = iter->priv; in iter_add_single_mem_entry() 526 struct hists *hists = evsel__hists(iter->evsel); in iter_add_single_mem_entry() 532 cost = iter->sample->weight; in iter_add_single_mem_entry() 543 he = __hists__add_entry(hists, al, iter->parent, NULL, mi, in iter_add_single_mem_entry() [all …]
|
D | comm.c | 55 struct comm_str *iter, *new; in comm_str__findnew() local 60 iter = rb_entry(parent, struct comm_str, rb_node); in comm_str__findnew() 62 cmp = strcmp(str, iter->str); in comm_str__findnew() 64 return iter; in comm_str__findnew()
|
D | ordered-events.c | 188 struct ordered_event *tmp, *iter; in __ordered_events__flush() local 201 list_for_each_entry_safe(iter, tmp, head, list) { in __ordered_events__flush() 205 if (iter->timestamp > limit) in __ordered_events__flush() 207 ret = oe->deliver(oe, iter); in __ordered_events__flush() 211 ordered_events__delete(oe, iter); in __ordered_events__flush() 212 oe->last_flush = iter->timestamp; in __ordered_events__flush()
|
/linux-4.4.14/lib/ |
D | rhashtable.c | 510 int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) in rhashtable_walk_init() argument 512 iter->ht = ht; in rhashtable_walk_init() 513 iter->p = NULL; in rhashtable_walk_init() 514 iter->slot = 0; in rhashtable_walk_init() 515 iter->skip = 0; in rhashtable_walk_init() 517 iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL); in rhashtable_walk_init() 518 if (!iter->walker) in rhashtable_walk_init() 522 iter->walker->tbl = in rhashtable_walk_init() 524 list_add(&iter->walker->list, &iter->walker->tbl->walkers); in rhashtable_walk_init() 537 void rhashtable_walk_exit(struct rhashtable_iter *iter) in rhashtable_walk_exit() argument [all …]
|
D | cordic.c | 59 unsigned iter; in cordic_calc_iq() local 80 for (iter = 0; iter < CORDIC_NUM_ITER; iter++) { in cordic_calc_iq() 82 valtmp = coord.i - (coord.q >> iter); in cordic_calc_iq() 83 coord.q += (coord.i >> iter); in cordic_calc_iq() 84 angle += arctan_table[iter]; in cordic_calc_iq() 86 valtmp = coord.i + (coord.q >> iter); in cordic_calc_iq() 87 coord.q -= (coord.i >> iter); in cordic_calc_iq() 88 angle -= arctan_table[iter]; in cordic_calc_iq()
|
D | plist.c | 76 struct plist_node *first, *iter, *prev = NULL; in plist_add() local 86 first = iter = plist_first(head); in plist_add() 89 if (node->prio < iter->prio) { in plist_add() 90 node_next = &iter->node_list; in plist_add() 94 prev = iter; in plist_add() 95 iter = list_entry(iter->prio_list.next, in plist_add() 97 } while (iter != first); in plist_add() 100 list_add_tail(&node->prio_list, &iter->prio_list); in plist_add() 148 struct plist_node *iter; in plist_requeue() local 158 iter = plist_next(node); in plist_requeue() [all …]
|
D | dynamic_debug.c | 684 static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter) in ddebug_iter_first() argument 687 iter->table = NULL; in ddebug_iter_first() 688 iter->idx = 0; in ddebug_iter_first() 691 iter->table = list_entry(ddebug_tables.next, in ddebug_iter_first() 693 iter->idx = 0; in ddebug_iter_first() 694 return &iter->table->ddebugs[iter->idx]; in ddebug_iter_first() 703 static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter) in ddebug_iter_next() argument 705 if (iter->table == NULL) in ddebug_iter_next() 707 if (++iter->idx == iter->table->num_ddebugs) { in ddebug_iter_next() 709 iter->idx = 0; in ddebug_iter_next() [all …]
|
D | radix-tree.c | 753 struct radix_tree_iter *iter, unsigned flags) in radix_tree_next_chunk() argument 771 index = iter->next_index; in radix_tree_next_chunk() 772 if (!index && iter->index) in radix_tree_next_chunk() 780 iter->index = 0; in radix_tree_next_chunk() 781 iter->next_index = 1; in radix_tree_next_chunk() 782 iter->tags = 1; in radix_tree_next_chunk() 836 iter->index = index; in radix_tree_next_chunk() 837 iter->next_index = (index | RADIX_TREE_MAP_MASK) + 1; in radix_tree_next_chunk() 845 iter->tags = node->tags[tag][tag_long] >> tag_bit; in radix_tree_next_chunk() 850 iter->tags |= node->tags[tag][tag_long + 1] << in radix_tree_next_chunk() [all …]
|
/linux-4.4.14/drivers/net/wireless/libertas/ |
D | firmware.c | 85 const struct lbs_fw_table *iter; in load_next_firmware_from_table() local 88 iter = priv->fw_table; in load_next_firmware_from_table() 90 iter = ++priv->fw_iter; in load_next_firmware_from_table() 98 if (!iter->helper) { in load_next_firmware_from_table() 104 if (iter->model != priv->fw_model) { in load_next_firmware_from_table() 105 iter++; in load_next_firmware_from_table() 109 priv->fw_iter = iter; in load_next_firmware_from_table() 110 do_load_firmware(priv, iter->helper, helper_firmware_cb); in load_next_firmware_from_table() 176 const struct lbs_fw_table *iter; in lbs_get_firmware() local 183 iter = fw_table; in lbs_get_firmware() [all …]
|
/linux-4.4.14/drivers/staging/lustre/lustre/lov/ |
D | lov_pool.c | 174 struct pool_iterator *iter = (struct pool_iterator *)s->private; in pool_proc_next() local 177 LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic); in pool_proc_next() 180 if (*pos >= pool_tgt_count(iter->pool)) in pool_proc_next() 184 prev_idx = iter->idx; in pool_proc_next() 185 down_read(&pool_tgt_rw_sem(iter->pool)); in pool_proc_next() 186 iter->idx++; in pool_proc_next() 187 if (iter->idx == pool_tgt_count(iter->pool)) { in pool_proc_next() 188 iter->idx = prev_idx; /* we stay on the last entry */ in pool_proc_next() 189 up_read(&pool_tgt_rw_sem(iter->pool)); in pool_proc_next() 192 up_read(&pool_tgt_rw_sem(iter->pool)); in pool_proc_next() [all …]
|
/linux-4.4.14/kernel/time/ |
D | timer_list.c | 298 struct timer_list_iter *iter = v; in timer_list_show() local 300 if (iter->cpu == -1 && !iter->second_pass) in timer_list_show() 301 timer_list_header(m, iter->now); in timer_list_show() 302 else if (!iter->second_pass) in timer_list_show() 303 print_cpu(m, iter->cpu, iter->now); in timer_list_show() 305 else if (iter->cpu == -1 && iter->second_pass) in timer_list_show() 308 print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu); in timer_list_show() 331 static void *move_iter(struct timer_list_iter *iter, loff_t offset) in move_iter() argument 334 iter->cpu = cpumask_next(iter->cpu, cpu_online_mask); in move_iter() 335 if (iter->cpu >= nr_cpu_ids) { in move_iter() [all …]
|
/linux-4.4.14/mm/ |
D | process_vm_access.c | 36 struct iov_iter *iter, in process_vm_rw_pages() argument 40 while (len && iov_iter_count(iter)) { in process_vm_rw_pages() 49 copied = copy_page_from_iter(page, offset, copy, iter); in process_vm_rw_pages() 52 copied = copy_page_to_iter(page, offset, copy, iter); in process_vm_rw_pages() 55 if (copied < copy && iov_iter_count(iter)) in process_vm_rw_pages() 79 struct iov_iter *iter, in process_vm_rw_single_vec() argument 97 while (!rc && nr_pages && iov_iter_count(iter)) { in process_vm_rw_single_vec() 112 start_offset, bytes, iter, in process_vm_rw_single_vec() 141 static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter, in process_vm_rw_core() argument 155 size_t total_len = iov_iter_count(iter); in process_vm_rw_core() [all …]
|
D | memcontrol.c | 876 struct mem_cgroup_reclaim_iter *uninitialized_var(iter); in mem_cgroup_iter() 902 iter = &mz->iter[reclaim->priority]; in mem_cgroup_iter() 904 if (prev && reclaim->generation != iter->generation) in mem_cgroup_iter() 908 pos = READ_ONCE(iter->position); in mem_cgroup_iter() 919 (void)cmpxchg(&iter->position, pos, NULL); in mem_cgroup_iter() 971 (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter() 977 iter->generation++; in mem_cgroup_iter() 979 reclaim->generation = iter->generation; in mem_cgroup_iter() 1008 struct mem_cgroup_reclaim_iter *iter; in invalidate_reclaim_iterators() local 1018 iter = &mz->iter[i]; in invalidate_reclaim_iterators() [all …]
|
D | filemap.c | 1228 struct radix_tree_iter iter; in find_get_entries() local 1235 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { in find_get_entries() 1260 indices[ret] = iter.index; in find_get_entries() 1288 struct radix_tree_iter iter; in find_get_pages() local 1297 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { in find_get_pages() 1311 WARN_ON(iter.index); in find_get_pages() 1355 struct radix_tree_iter iter; in find_get_pages_contig() local 1364 radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) { in find_get_pages_contig() 1403 if (page->mapping == NULL || page->index != iter.index) { in find_get_pages_contig() 1431 struct radix_tree_iter iter; in find_get_pages_tag() local [all …]
|
D | slab.h | 180 #define for_each_memcg_cache(iter, root) \ argument 181 list_for_each_entry(iter, &(root)->memcg_params.list, \ 255 #define for_each_memcg_cache(iter, root) \ argument 256 for ((void)(iter), (void)(root); 0; )
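A minimal sketch of for_each_memcg_cache() from this mm-internal header; it walks the per-memcg children hanging off a root kmem_cache, and real callers in mm/ hold slab_mutex around the walk (elided here):

    #include <linux/slab.h>
    #include "slab.h"       /* mm-internal header shown above */

    static int example_count_children(struct kmem_cache *root)
    {
            struct kmem_cache *child;
            int n = 0;

            for_each_memcg_cache(child, root)
                    n++;

            return n;
    }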
|
/linux-4.4.14/net/ipv4/ |
D | cipso_ipv4.c | 253 u32 iter; in cipso_v4_cache_init() local 261 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { in cipso_v4_cache_init() 262 spin_lock_init(&cipso_v4_cache[iter].lock); in cipso_v4_cache_init() 263 cipso_v4_cache[iter].size = 0; in cipso_v4_cache_init() 264 INIT_LIST_HEAD(&cipso_v4_cache[iter].list); in cipso_v4_cache_init() 281 u32 iter; in cipso_v4_cache_invalidate() local 283 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { in cipso_v4_cache_invalidate() 284 spin_lock_bh(&cipso_v4_cache[iter].lock); in cipso_v4_cache_invalidate() 287 &cipso_v4_cache[iter].list, list) { in cipso_v4_cache_invalidate() 291 cipso_v4_cache[iter].size = 0; in cipso_v4_cache_invalidate() [all …]
|
D | fib_trie.c | 2029 static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter) in fib_trie_get_next() argument 2031 unsigned long cindex = iter->index; in fib_trie_get_next() 2032 struct key_vector *pn = iter->tnode; in fib_trie_get_next() 2036 iter->tnode, iter->index, iter->depth); in fib_trie_get_next() 2046 iter->tnode = pn; in fib_trie_get_next() 2047 iter->index = cindex; in fib_trie_get_next() 2050 iter->tnode = n; in fib_trie_get_next() 2051 iter->index = 0; in fib_trie_get_next() 2052 ++iter->depth; in fib_trie_get_next() 2062 --iter->depth; in fib_trie_get_next() [all …]
|
/linux-4.4.14/net/ipv6/ |
D | ip6_fib.c | 735 struct rt6_info *iter = NULL; in fib6_add_rt2node() local 748 for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) { in fib6_add_rt2node() 753 if (iter->rt6i_metric == rt->rt6i_metric) { in fib6_add_rt2node() 761 if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) { in fib6_add_rt2node() 770 if (iter->dst.dev == rt->dst.dev && in fib6_add_rt2node() 771 iter->rt6i_idev == rt->rt6i_idev && in fib6_add_rt2node() 772 ipv6_addr_equal(&iter->rt6i_gateway, in fib6_add_rt2node() 776 if (!(iter->rt6i_flags & RTF_EXPIRES)) in fib6_add_rt2node() 779 rt6_clean_expires(iter); in fib6_add_rt2node() 781 rt6_set_expires(iter, rt->dst.expires); in fib6_add_rt2node() [all …]
|
/linux-4.4.14/drivers/base/ |
D | class.c | 294 void class_dev_iter_init(struct class_dev_iter *iter, struct class *class, in class_dev_iter_init() argument 301 klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode); in class_dev_iter_init() 302 iter->type = type; in class_dev_iter_init() 318 struct device *class_dev_iter_next(struct class_dev_iter *iter) in class_dev_iter_next() argument 324 knode = klist_next(&iter->ki); in class_dev_iter_next() 328 if (!iter->type || iter->type == dev->type) in class_dev_iter_next() 341 void class_dev_iter_exit(struct class_dev_iter *iter) in class_dev_iter_exit() argument 343 klist_iter_exit(&iter->ki); in class_dev_iter_exit() 368 struct class_dev_iter iter; in class_for_each_device() local 380 class_dev_iter_init(&iter, class, start, NULL); in class_for_each_device() [all …]
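A minimal sketch of the class_dev_iter API implemented here, mirroring how printk_all_partitions() in genhd.c above walks block_class (the class pointer and the logging are the caller's):

    #include <linux/device.h>

    static void example_walk_class(struct class *cls)
    {
            struct class_dev_iter iter;
            struct device *dev;

            /* NULL start and NULL type: iterate every device in the class */
            class_dev_iter_init(&iter, cls, NULL, NULL);
            while ((dev = class_dev_iter_next(&iter)))
                    dev_info(dev, "member: %s\n", dev_name(dev));
            class_dev_iter_exit(&iter);
    }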
|
D | attribute_container.c | 183 #define klist_for_each_entry(pos, head, member, iter) \ argument 184 for (klist_iter_init(head, iter); (pos = ({ \ 185 struct klist_node *n = klist_next(iter); \ 187 ({ klist_iter_exit(iter) ; NULL; }); \ 217 struct klist_iter iter; in attribute_container_remove_device() local 225 klist_for_each_entry(ic, &cont->containers, node, &iter) { in attribute_container_remove_device() 261 struct klist_iter iter; in attribute_container_device_trigger() local 271 klist_for_each_entry(ic, &cont->containers, node, &iter) { in attribute_container_device_trigger() 429 struct klist_iter iter; in attribute_container_find_class_device() local 431 klist_for_each_entry(ic, &cont->containers, node, &iter) { in attribute_container_find_class_device() [all …]
|
D | bus.c | 1077 void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys, in subsys_dev_iter_init() argument 1084 klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode); in subsys_dev_iter_init() 1085 iter->type = type; in subsys_dev_iter_init() 1101 struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter) in subsys_dev_iter_next() argument 1107 knode = klist_next(&iter->ki); in subsys_dev_iter_next() 1111 if (!iter->type || iter->type == dev->type) in subsys_dev_iter_next() 1124 void subsys_dev_iter_exit(struct subsys_dev_iter *iter) in subsys_dev_iter_exit() argument 1126 klist_iter_exit(&iter->ki); in subsys_dev_iter_exit() 1133 struct subsys_dev_iter iter; in subsys_interface_register() local 1146 subsys_dev_iter_init(&iter, subsys, NULL, NULL); in subsys_interface_register() [all …]
|
/linux-4.4.14/drivers/gpu/drm/i915/ |
D | i915_gem_gtt.h | 395 #define gen6_for_each_pde(pt, pd, start, length, temp, iter) \ argument 396 for (iter = gen6_pde_index(start); \ 397 length > 0 && iter < I915_PDES ? \ 398 (pt = (pd)->page_table[iter]), 1 : 0; \ 399 iter++, \ 404 #define gen6_for_all_pdes(pt, ppgtt, iter) \ argument 405 for (iter = 0; \ 406 pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \ 407 iter++) 461 #define gen8_for_each_pde(pt, pd, start, length, temp, iter) \ argument [all …]
|
/linux-4.4.14/drivers/md/bcache/ |
D | bset.c | 55 struct btree_iter iter; in __bch_count_data() local 59 for_each_key(b, k, &iter) in __bch_count_data() 68 struct btree_iter iter; in __bch_check_keys() local 71 for_each_key(b, k, &iter) { in __bch_check_keys() 109 static void bch_btree_iter_next_check(struct btree_iter *iter) in bch_btree_iter_next_check() argument 111 struct bkey *k = iter->data->k, *next = bkey_next(k); in bch_btree_iter_next_check() 113 if (next < iter->data->end && in bch_btree_iter_next_check() 114 bkey_cmp(k, iter->b->ops->is_extents ? in bch_btree_iter_next_check() 116 bch_dump_bucket(iter->b); in bch_btree_iter_next_check() 123 static inline void bch_btree_iter_next_check(struct btree_iter *iter) {} in bch_btree_iter_next_check() argument [all …]
|
D | extents.c | 29 static void sort_key_next(struct btree_iter *iter, in sort_key_next() argument 35 *i = iter->data[--iter->used]; in sort_key_next() 227 struct btree_iter *iter, in bch_btree_ptr_insert_fixup() argument 264 static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter, in bch_extent_sort_fixup() argument 267 while (iter->used > 1) { in bch_extent_sort_fixup() 268 struct btree_iter_set *top = iter->data, *i = top + 1; in bch_extent_sort_fixup() 270 if (iter->used > 2 && in bch_extent_sort_fixup() 278 sort_key_next(iter, i); in bch_extent_sort_fixup() 279 heap_sift(iter, i - top, bch_extent_sort_cmp); in bch_extent_sort_fixup() 285 sort_key_next(iter, i); in bch_extent_sort_fixup() [all …]
|
D | btree.h | 200 #define for_each_cached_btree(b, c, iter) \ argument 201 for (iter = 0; \ 202 iter < ARRAY_SIZE((c)->bucket_hash); \ 203 iter++) \ 204 hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
|
D | bset.h | 338 #define for_each_key_filter(b, k, iter, filter) \ argument 339 for (bch_btree_iter_init((b), (iter), NULL); \ 340 ((k) = bch_btree_iter_next_filter((iter), (b), filter));) 342 #define for_each_key(b, k, iter) \ argument 343 for (bch_btree_iter_init((b), (iter), NULL); \ 344 ((k) = bch_btree_iter_next(iter));)
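A minimal sketch of for_each_key(), mirroring __bch_count_data() in bset.c above (bcache-internal headers; the on-stack iterator is re-initialised by the macro itself):

    #include "bcache.h"     /* bcache-internal headers shown in this listing */
    #include "bset.h"

    static unsigned example_count_data(struct btree_keys *b)
    {
            struct btree_iter iter;
            struct bkey *k;
            unsigned ret = 0;

            /* visits every key in every bset of the node, in sorted order */
            for_each_key(b, k, &iter)
                    ret += KEY_SIZE(k);

            return ret;
    }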
|
D | alloc.c | 414 size_t iter; in bch_bucket_alloc() local 418 for (iter = 0; iter < prio_buckets(ca) * 2; iter++) in bch_bucket_alloc() 419 BUG_ON(ca->prio_buckets[iter] == (uint64_t) r); in bch_bucket_alloc() 422 fifo_for_each(i, &ca->free[j], iter) in bch_bucket_alloc() 424 fifo_for_each(i, &ca->free_inc, iter) in bch_bucket_alloc()
|
D | btree.c | 202 struct btree_iter *iter; in bch_btree_node_read_done() local 204 iter = mempool_alloc(b->c->fill_iter, GFP_NOIO); in bch_btree_node_read_done() 205 iter->size = b->c->sb.bucket_size / b->c->sb.block_size; in bch_btree_node_read_done() 206 iter->used = 0; in bch_btree_node_read_done() 209 iter->b = &b->keys; in bch_btree_node_read_done() 247 bch_btree_iter_push(iter, i->start, bset_bkey_last(i)); in bch_btree_node_read_done() 259 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done() 271 mempool_free(iter, b->c->fill_iter); in bch_btree_node_read_done() 1250 struct btree_iter iter; in btree_gc_mark_node() local 1255 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { in btree_gc_mark_node() [all …]
|
D | bcache.h | 799 #define for_each_cache(ca, cs, iter) \ argument 800 for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
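A minimal sketch of for_each_cache(): ca is the per-device cursor and iter is just the array index into the cache set (summing sb.nbuckets is illustrative):

    #include "bcache.h"     /* bcache-internal header shown above */

    static u64 example_total_buckets(struct cache_set *c)
    {
            struct cache *ca;
            unsigned i;
            u64 total = 0;

            for_each_cache(ca, c, i)
                    total += ca->sb.nbuckets;

            return total;
    }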
|
D | util.h | 124 #define fifo_for_each(c, fifo, iter) \ argument 125 for (iter = (fifo)->front; \ 126 c = (fifo)->data[iter], iter != (fifo)->back; \ 127 iter = (iter + 1) & (fifo)->mask)
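A minimal sketch of fifo_for_each(), following the debug checks in alloc.c above: the element is copied into c on each step and iter is the masked ring index, so the fifo is scanned without being consumed:

    #include "bcache.h"     /* bcache-internal headers shown in this listing */
    #include "util.h"

    static bool example_in_free_inc(struct cache *ca, long bucket)
    {
            size_t iter;
            long i;

            fifo_for_each(i, &ca->free_inc, iter)
                    if (i == bucket)
                            return true;

            return false;
    }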
|
D | debug.c | 110 struct bvec_iter iter; in bch_data_verify() local 122 bio_for_each_segment(bv, bio, iter) { in bch_data_verify() 124 void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page); in bch_data_verify()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_debugfs.c | 193 struct qib_qp_iter *iter; in DEBUGFS_FILE() local 197 iter = qib_qp_iter_init(s->private); in DEBUGFS_FILE() 198 if (!iter) in DEBUGFS_FILE() 202 if (qib_qp_iter_next(iter)) { in DEBUGFS_FILE() 203 kfree(iter); in DEBUGFS_FILE() 208 return iter; in DEBUGFS_FILE() 214 struct qib_qp_iter *iter = iter_ptr; in _qp_stats_seq_next() local 218 if (qib_qp_iter_next(iter)) { in _qp_stats_seq_next() 219 kfree(iter); in _qp_stats_seq_next() 223 return iter; in _qp_stats_seq_next() [all …]
|
D | qib_qp.c | 1323 struct qib_qp_iter *iter; in qib_qp_iter_init() local 1325 iter = kzalloc(sizeof(*iter), GFP_KERNEL); in qib_qp_iter_init() 1326 if (!iter) in qib_qp_iter_init() 1329 iter->dev = dev; in qib_qp_iter_init() 1330 if (qib_qp_iter_next(iter)) { in qib_qp_iter_init() 1331 kfree(iter); in qib_qp_iter_init() 1335 return iter; in qib_qp_iter_init() 1338 int qib_qp_iter_next(struct qib_qp_iter *iter) in qib_qp_iter_next() argument 1340 struct qib_ibdev *dev = iter->dev; in qib_qp_iter_next() 1341 int n = iter->n; in qib_qp_iter_next() [all …]
|
/linux-4.4.14/drivers/gpu/drm/radeon/ |
D | drm_buffer.h | 130 int iter = buffer->iterator + offset * 4; in drm_buffer_pointer_to_dword() local 131 return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)]; in drm_buffer_pointer_to_dword() 144 int iter = buffer->iterator + offset; in drm_buffer_pointer_to_byte() local 145 return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)]; in drm_buffer_pointer_to_byte()
|
/linux-4.4.14/drivers/crypto/marvell/ |
D | cesa.h | 535 struct sg_mapping_iter iter; member 728 static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter, in mv_cesa_req_dma_iter_init() argument 731 iter->len = len; in mv_cesa_req_dma_iter_init() 732 iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE); in mv_cesa_req_dma_iter_init() 733 iter->offset = 0; in mv_cesa_req_dma_iter_init() 736 static inline void mv_cesa_sg_dma_iter_init(struct mv_cesa_sg_dma_iter *iter, in mv_cesa_sg_dma_iter_init() argument 740 iter->op_offset = 0; in mv_cesa_sg_dma_iter_init() 741 iter->offset = 0; in mv_cesa_sg_dma_iter_init() 742 iter->sg = sg; in mv_cesa_sg_dma_iter_init() 743 iter->dir = dir; in mv_cesa_sg_dma_iter_init() [all …]
|
D | cipher.c | 42 mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter, in mv_cesa_ablkcipher_req_iter_init() argument 45 mv_cesa_req_dma_iter_init(&iter->base, req->nbytes); in mv_cesa_ablkcipher_req_iter_init() 46 mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); in mv_cesa_ablkcipher_req_iter_init() 47 mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE); in mv_cesa_ablkcipher_req_iter_init() 51 mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter) in mv_cesa_ablkcipher_req_iter_next_op() argument 53 iter->src.op_offset = 0; in mv_cesa_ablkcipher_req_iter_next_op() 54 iter->dst.op_offset = 0; in mv_cesa_ablkcipher_req_iter_next_op() 56 return mv_cesa_req_dma_iter_next_op(&iter->base); in mv_cesa_ablkcipher_req_iter_next_op() 299 struct mv_cesa_ablkcipher_dma_iter iter; in mv_cesa_ablkcipher_dma_req_init() local 328 mv_cesa_ablkcipher_req_iter_init(&iter, req); in mv_cesa_ablkcipher_dma_req_init() [all …]
|
D | hash.c | 26 mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter, in mv_cesa_ahash_req_iter_init() argument 35 mv_cesa_req_dma_iter_init(&iter->base, len); in mv_cesa_ahash_req_iter_init() 36 mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); in mv_cesa_ahash_req_iter_init() 37 iter->src.op_offset = creq->cache_ptr; in mv_cesa_ahash_req_iter_init() 41 mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter) in mv_cesa_ahash_req_iter_next_op() argument 43 iter->src.op_offset = 0; in mv_cesa_ahash_req_iter_next_op() 45 return mv_cesa_req_dma_iter_next_op(&iter->base); in mv_cesa_ahash_req_iter_next_op() 604 struct mv_cesa_ahash_dma_iter iter; in mv_cesa_ahash_dma_req_init() local 622 mv_cesa_ahash_req_iter_init(&iter, req); in mv_cesa_ahash_dma_req_init() 628 ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags); in mv_cesa_ahash_dma_req_init() [all …]
|
/linux-4.4.14/arch/sparc/prom/ |
D | bootstr_32.c | 18 int iter; in prom_getbootargs() local 30 for (iter = 1; iter < 8; iter++) { in prom_getbootargs() 31 arg = (*(romvec->pv_v0bootargs))->argv[iter]; in prom_getbootargs()
|
/linux-4.4.14/fs/nfs/objlayout/ |
D | pnfs_osd_xdr_cli.c | 160 struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr) in pnfs_osd_xdr_decode_layout_map() argument 164 memset(iter, 0, sizeof(*iter)); in pnfs_osd_xdr_decode_layout_map() 176 iter->total_comps = layout->olo_num_comps; in pnfs_osd_xdr_decode_layout_map() 181 struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr, in pnfs_osd_xdr_decode_layout_comp() argument 184 BUG_ON(iter->decoded_comps > iter->total_comps); in pnfs_osd_xdr_decode_layout_comp() 185 if (iter->decoded_comps == iter->total_comps) in pnfs_osd_xdr_decode_layout_comp() 192 iter->decoded_comps, iter->total_comps); in pnfs_osd_xdr_decode_layout_comp() 204 iter->decoded_comps++; in pnfs_osd_xdr_decode_layout_comp()
|
/linux-4.4.14/net/sunrpc/ |
D | debugfs.c | 50 struct rpc_clnt_iter *iter = f->private; in tasks_start() local 52 struct rpc_clnt *clnt = iter->clnt; in tasks_start() 55 iter->pos = pos + 1; in tasks_start() 66 struct rpc_clnt_iter *iter = f->private; in tasks_next() local 67 struct rpc_clnt *clnt = iter->clnt; in tasks_next() 71 ++iter->pos; in tasks_next() 84 struct rpc_clnt_iter *iter = f->private; in tasks_stop() local 85 struct rpc_clnt *clnt = iter->clnt; in tasks_stop() 104 struct rpc_clnt_iter *iter = seq->private; in tasks_open() local 106 iter->clnt = inode->i_private; in tasks_open() [all …]
|
/linux-4.4.14/ipc/ |
D | util.c | 778 struct ipc_proc_iter *iter = s->private; in sysvipc_proc_next() local 779 struct ipc_proc_iface *iface = iter->iface; in sysvipc_proc_next() 786 return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos); in sysvipc_proc_next() 795 struct ipc_proc_iter *iter = s->private; in sysvipc_proc_start() local 796 struct ipc_proc_iface *iface = iter->iface; in sysvipc_proc_start() 799 ids = &iter->ns->ids[iface->ids]; in sysvipc_proc_start() 822 struct ipc_proc_iter *iter = s->private; in sysvipc_proc_stop() local 823 struct ipc_proc_iface *iface = iter->iface; in sysvipc_proc_stop() 830 ids = &iter->ns->ids[iface->ids]; in sysvipc_proc_stop() 837 struct ipc_proc_iter *iter = s->private; in sysvipc_proc_show() local [all …]
|
/linux-4.4.14/drivers/dma/ |
D | iop-adma.c | 116 struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL; in __iop_adma_slot_cleanup() local 126 list_for_each_entry_safe(iter, _iter, &iop_chan->chain, in __iop_adma_slot_cleanup() 130 iter->async_tx.cookie, iter->idx, busy, in __iop_adma_slot_cleanup() 131 iter->async_tx.phys, iop_desc_get_next_desc(iter), in __iop_adma_slot_cleanup() 132 async_tx_test_ack(&iter->async_tx)); in __iop_adma_slot_cleanup() 147 if (iter->async_tx.phys == current_desc) { in __iop_adma_slot_cleanup() 149 if (busy || iop_desc_get_next_desc(iter)) in __iop_adma_slot_cleanup() 155 slot_cnt = iter->slot_cnt; in __iop_adma_slot_cleanup() 156 slots_per_op = iter->slots_per_op; in __iop_adma_slot_cleanup() 166 grp_start = iter; in __iop_adma_slot_cleanup() [all …]
|
D | mv_xor.c | 258 struct mv_xor_desc_slot *iter, *_iter; in mv_chan_clean_completed_slots() local 261 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, in mv_chan_clean_completed_slots() 264 if (async_tx_test_ack(&iter->async_tx)) in mv_chan_clean_completed_slots() 265 list_move_tail(&iter->node, &mv_chan->free_slots); in mv_chan_clean_completed_slots() 292 struct mv_xor_desc_slot *iter, *_iter; in mv_chan_slot_cleanup() local 307 list_for_each_entry_safe(iter, _iter, &mv_chan->chain, in mv_chan_slot_cleanup() 311 hw_desc = iter->hw_desc; in mv_chan_slot_cleanup() 313 cookie = mv_desc_run_tx_complete_actions(iter, mv_chan, in mv_chan_slot_cleanup() 317 mv_desc_clean_slot(iter, mv_chan); in mv_chan_slot_cleanup() 320 if (iter->async_tx.phys == current_desc) { in mv_chan_slot_cleanup() [all …]
|
D | fsl-edma.c | 541 u16 soff, doff, iter; in fsl_edma_prep_dma_cyclic() local 554 iter = period_len / nbytes; in fsl_edma_prep_dma_cyclic() 576 fsl_chan->fsc.attr, soff, nbytes, 0, iter, in fsl_edma_prep_dma_cyclic() 577 iter, doff, last_sg, true, false, true); in fsl_edma_prep_dma_cyclic() 593 u16 soff, doff, iter; in fsl_edma_prep_slave_sg() local 621 iter = sg_dma_len(sg) / nbytes; in fsl_edma_prep_slave_sg() 626 nbytes, 0, iter, iter, doff, last_sg, in fsl_edma_prep_slave_sg() 632 nbytes, 0, iter, iter, doff, last_sg, in fsl_edma_prep_slave_sg()
|
/linux-4.4.14/arch/unicore32/include/asm/ |
D | memblock.h | 34 #define for_each_bank(iter, mi) \ argument 35 for (iter = 0; iter < (mi)->nr_banks; iter++)
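A small sketch of the for_each_bank() macro above; mi is an assumed, already-populated struct meminfo * and the body only counts the banks:

        int i, nr = 0;

        for_each_bank(i, mi)
                nr++;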
|
/linux-4.4.14/net/sctp/ |
D | tsnmap.c | 158 struct sctp_tsnmap_iter *iter) in sctp_tsnmap_iter_init() argument 161 iter->start = map->cumulative_tsn_ack_point + 1; in sctp_tsnmap_iter_init() 168 struct sctp_tsnmap_iter *iter, in sctp_tsnmap_next_gap_ack() argument 175 if (TSN_lte(map->max_tsn_seen, iter->start)) in sctp_tsnmap_next_gap_ack() 178 offset = iter->start - map->base_tsn; in sctp_tsnmap_next_gap_ack() 197 iter->start = map->cumulative_tsn_ack_point + *end + 1; in sctp_tsnmap_next_gap_ack() 335 struct sctp_tsnmap_iter iter; in sctp_tsnmap_num_gabs() local 341 sctp_tsnmap_iter_init(map, &iter); in sctp_tsnmap_num_gabs() 342 while (sctp_tsnmap_next_gap_ack(map, &iter, in sctp_tsnmap_num_gabs()
|
/linux-4.4.14/drivers/staging/lustre/lustre/llite/ |
D | rw26.c | 362 static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, in ll_direct_IO_26() argument 370 ssize_t count = iov_iter_count(iter); in ll_direct_IO_26() 390 if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK) in ll_direct_IO_26() 402 if (iov_iter_rw(iter) == READ) in ll_direct_IO_26() 406 while (iov_iter_count(iter)) { in ll_direct_IO_26() 410 count = min_t(size_t, iov_iter_count(iter), size); in ll_direct_IO_26() 411 if (iov_iter_rw(iter) == READ) { in ll_direct_IO_26() 418 result = iov_iter_get_pages_alloc(iter, &pages, count, &offs); in ll_direct_IO_26() 422 result = ll_direct_IO_26_seg(env, io, iov_iter_rw(iter), in ll_direct_IO_26() 426 ll_free_user_pages(pages, n, iov_iter_rw(iter) == READ); in ll_direct_IO_26() [all …]
|
/linux-4.4.14/security/selinux/ |
D | netport.c | 239 int iter; in sel_netport_init() local 244 for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) { in sel_netport_init() 245 INIT_LIST_HEAD(&sel_netport_hash[iter].list); in sel_netport_init() 246 sel_netport_hash[iter].size = 0; in sel_netport_init()
|
D | netnode.c | 305 int iter; in sel_netnode_init() local 310 for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) { in sel_netnode_init() 311 INIT_LIST_HEAD(&sel_netnode_hash[iter].list); in sel_netnode_init() 312 sel_netnode_hash[iter].size = 0; in sel_netnode_init()
|
D | xfrm.c | 453 struct dst_entry *iter; in selinux_xfrm_postroute_last() local 455 for (iter = dst; iter != NULL; iter = iter->child) { in selinux_xfrm_postroute_last() 456 struct xfrm_state *x = iter->xfrm; in selinux_xfrm_postroute_last()
|
/linux-4.4.14/net/netfilter/ |
D | nft_hash.c | 186 struct nft_set_iter *iter) in nft_hash_walk() argument 196 iter->err = err; in nft_hash_walk() 202 iter->err = err; in nft_hash_walk() 210 iter->err = err; in nft_hash_walk() 217 if (iter->count < iter->skip) in nft_hash_walk() 226 iter->err = iter->fn(ctx, set, iter, &elem); in nft_hash_walk() 227 if (iter->err < 0) in nft_hash_walk() 231 iter->count++; in nft_hash_walk()
|
D | nft_rbtree.c | 175 struct nft_set_iter *iter) in nft_rbtree_walk() argument 187 if (iter->count < iter->skip) in nft_rbtree_walk() 194 iter->err = iter->fn(ctx, set, iter, &elem); in nft_rbtree_walk() 195 if (iter->err < 0) { in nft_rbtree_walk() 200 iter->count++; in nft_rbtree_walk()
|
/linux-4.4.14/drivers/scsi/qla4xxx/ |
D | ql4_attr.c | 129 struct sysfs_entry *iter; in qla4_8xxx_alloc_sysfs_attr() local 132 for (iter = bin_file_entries; iter->name; iter++) { in qla4_8xxx_alloc_sysfs_attr() 134 iter->attr); in qla4_8xxx_alloc_sysfs_attr() 138 iter->name, ret); in qla4_8xxx_alloc_sysfs_attr() 145 struct sysfs_entry *iter; in qla4_8xxx_free_sysfs_attr() local 147 for (iter = bin_file_entries; iter->name; iter++) in qla4_8xxx_free_sysfs_attr() 149 iter->attr); in qla4_8xxx_free_sysfs_attr()
|
/linux-4.4.14/fs/nfs/ |
D | direct.c | 259 ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) in nfs_direct_IO() argument 267 VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE); in nfs_direct_IO() 269 if (iov_iter_rw(iter) == READ) in nfs_direct_IO() 270 return nfs_file_direct_read(iocb, iter, pos); in nfs_direct_IO() 271 return nfs_file_direct_write(iocb, iter); in nfs_direct_IO() 475 struct iov_iter *iter, in nfs_direct_read_schedule_iovec() argument 490 while (iov_iter_count(iter)) { in nfs_direct_read_schedule_iovec() 496 result = iov_iter_get_pages_alloc(iter, &pagevec, in nfs_direct_read_schedule_iovec() 502 iov_iter_advance(iter, bytes); in nfs_direct_read_schedule_iovec() 570 ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, in nfs_file_direct_read() argument [all …]
|
/linux-4.4.14/tools/perf/ui/gtk/ |
D | hists.c | 101 GtkTreeIter iter, new_parent; in perf_gtk__add_callchain() local 117 gtk_tree_store_append(store, &iter, &new_parent); in perf_gtk__add_callchain() 120 gtk_tree_store_set(store, &iter, 0, buf, -1); in perf_gtk__add_callchain() 123 gtk_tree_store_set(store, &iter, col, buf, -1); in perf_gtk__add_callchain() 130 new_parent = iter; in perf_gtk__add_callchain() 141 perf_gtk__add_callchain(&node->rb_root, store, &iter, col, in perf_gtk__add_callchain() 225 GtkTreeIter iter; in perf_gtk__show_hists() local 236 gtk_tree_store_append(store, &iter, NULL); in perf_gtk__show_hists() 249 gtk_tree_store_set(store, &iter, col_idx++, s, -1); in perf_gtk__show_hists() 257 perf_gtk__add_callchain(&h->sorted_chain, store, &iter, in perf_gtk__show_hists()
|
D | annotate.c | 121 GtkTreeIter iter; in perf_gtk__annotate_symbol() local 124 gtk_list_store_append(store, &iter); in perf_gtk__annotate_symbol() 140 gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1); in perf_gtk__annotate_symbol() 142 gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1); in perf_gtk__annotate_symbol() 144 gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1); in perf_gtk__annotate_symbol()
|
/linux-4.4.14/drivers/s390/block/ |
D | scm_blk_cluster.c | 79 struct scm_request *iter; in scm_reserve_cluster() local 86 list_for_each_entry(iter, &bdev->cluster_list, cluster.list) { in scm_reserve_cluster() 87 if (iter == scmrq) { in scm_reserve_cluster() 95 for (pos = 0; pos < iter->aob->request.msb_count; pos++) { in scm_reserve_cluster() 96 if (clusters_intersect(req, iter->request[pos]) && in scm_reserve_cluster() 98 rq_data_dir(iter->request[pos]) == WRITE)) { in scm_reserve_cluster() 136 struct req_iterator iter; in scm_prepare_cluster_request() local 183 rq_for_each_segment(bv, req, iter) { in scm_prepare_cluster_request()
|
D | scm_blk.c | 52 struct list_head *iter, *safe; in scm_free_rqs() local 56 list_for_each_safe(iter, safe, &inactive_requests) { in scm_free_rqs() 57 scmrq = list_entry(iter, struct scm_request, list); in scm_free_rqs() 188 struct req_iterator iter; in scm_request_prepare() local 203 rq_for_each_segment(bv, req, iter) { in scm_request_prepare()
|
/linux-4.4.14/drivers/net/wireless/ath/carl9170/ |
D | debug.c | 301 struct carl9170_sta_tid *iter; in carl9170_debugfs_ampdu_state_read() local 307 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) { in carl9170_debugfs_ampdu_state_read() 309 spin_lock_bh(&iter->lock); in carl9170_debugfs_ampdu_state_read() 312 cnt, iter->tid, iter->bsn, iter->snx, iter->hsn, in carl9170_debugfs_ampdu_state_read() 313 iter->max, iter->state, iter->counter); in carl9170_debugfs_ampdu_state_read() 316 CARL9170_BAW_BITS, iter->bitmap); in carl9170_debugfs_ampdu_state_read() 325 offset = BM_STR_OFF(SEQ_DIFF(iter->snx, iter->bsn)); in carl9170_debugfs_ampdu_state_read() 328 offset = BM_STR_OFF(((int)iter->hsn - (int)iter->bsn) % in carl9170_debugfs_ampdu_state_read() 333 " currently queued:%d\n", skb_queue_len(&iter->queue)); in carl9170_debugfs_ampdu_state_read() 336 skb_queue_walk(&iter->queue, skb) { in carl9170_debugfs_ampdu_state_read() [all …]
|
D | fw.c | 36 const struct carl9170fw_desc_head *iter; in carl9170_fw_find_desc() local 38 carl9170fw_for_each_hdr(iter, ar->fw.desc) { in carl9170_fw_find_desc() 39 if (carl9170fw_desc_cmp(iter, descid, len, in carl9170_fw_find_desc() 41 return (void *)iter; in carl9170_fw_find_desc() 45 if (carl9170fw_desc_cmp(iter, descid, len, in carl9170_fw_find_desc() 47 return (void *)iter; in carl9170_fw_find_desc()
|
D | tx.c | 605 struct carl9170_sta_tid *iter; in carl9170_tx_ampdu_timeout() local 612 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) { in carl9170_tx_ampdu_timeout() 613 if (iter->state < CARL9170_TID_STATE_IDLE) in carl9170_tx_ampdu_timeout() 616 spin_lock_bh(&iter->lock); in carl9170_tx_ampdu_timeout() 617 skb = skb_peek(&iter->queue); in carl9170_tx_ampdu_timeout() 627 sta = iter->sta; in carl9170_tx_ampdu_timeout() 631 ieee80211_stop_tx_ba_session(sta, iter->tid); in carl9170_tx_ampdu_timeout() 633 spin_unlock_bh(&iter->lock); in carl9170_tx_ampdu_timeout() 1397 struct sk_buff *iter; in carl9170_tx_ampdu_queue() local 1432 skb_queue_reverse_walk(&agg->queue, iter) { in carl9170_tx_ampdu_queue() [all …]
|
/linux-4.4.14/net/nfc/ |
D | nfc.h | 116 static inline void nfc_device_iter_init(struct class_dev_iter *iter) in nfc_device_iter_init() argument 118 class_dev_iter_init(iter, &nfc_class, NULL, NULL); in nfc_device_iter_init() 121 static inline struct nfc_dev *nfc_device_iter_next(struct class_dev_iter *iter) in nfc_device_iter_next() argument 123 struct device *d = class_dev_iter_next(iter); in nfc_device_iter_next() 130 static inline void nfc_device_iter_exit(struct class_dev_iter *iter) in nfc_device_iter_exit() argument 132 class_dev_iter_exit(iter); in nfc_device_iter_exit()
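A hedged sketch of the class-device iterator helpers above, enumerating the registered NFC devices; only the count is kept, so no fields of struct nfc_dev are assumed:

        struct class_dev_iter iter;
        struct nfc_dev *dev;
        int count = 0;

        nfc_device_iter_init(&iter);
        while ((dev = nfc_device_iter_next(&iter)) != NULL)
                count++;
        nfc_device_iter_exit(&iter);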
|
D | netlink.c | 588 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; in nfc_genl_dump_devices() local 592 if (!iter) { in nfc_genl_dump_devices() 594 iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL); in nfc_genl_dump_devices() 595 if (!iter) in nfc_genl_dump_devices() 597 cb->args[0] = (long) iter; in nfc_genl_dump_devices() 605 nfc_device_iter_init(iter); in nfc_genl_dump_devices() 606 dev = nfc_device_iter_next(iter); in nfc_genl_dump_devices() 617 dev = nfc_device_iter_next(iter); in nfc_genl_dump_devices() 629 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0]; in nfc_genl_dump_devices_done() local 631 nfc_device_iter_exit(iter); in nfc_genl_dump_devices_done() [all …]
|
/linux-4.4.14/include/net/ |
D | bonding.h | 82 #define bond_for_each_slave(bond, pos, iter) \ argument 83 netdev_for_each_lower_private((bond)->dev, pos, iter) 86 #define bond_for_each_slave_rcu(bond, pos, iter) \ argument 87 netdev_for_each_lower_private_rcu((bond)->dev, pos, iter) 365 struct list_head *iter; in bond_slave_state_change() local 368 bond_for_each_slave(bond, tmp, iter) { in bond_slave_state_change() 378 struct list_head *iter; in bond_slave_state_notify() local 381 bond_for_each_slave(bond, tmp, iter) { in bond_slave_state_notify() 596 struct list_head *iter; in bond_slave_has_mac() local 599 bond_for_each_slave(bond, tmp, iter) in bond_slave_has_mac() [all …]
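A sketch of bond_for_each_slave() as declared above; RTNL is assumed to be held (the non-RCU variant requires it) and bond is an assumed struct bonding *:

        struct slave *slave;
        struct list_head *iter;
        int slaves = 0;

        bond_for_each_slave(bond, slave, iter)
                slaves++;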
|
D | netlabel.h | 284 struct netlbl_lsm_catmap *iter; in netlbl_catmap_free() local 287 iter = catmap; in netlbl_catmap_free() 289 kfree(iter); in netlbl_catmap_free()
|
/linux-4.4.14/fs/ |
D | direct-io.c | 98 struct iov_iter *iter; member 163 ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES, in dio_refill_pages() 185 iov_iter_advance(sdio->iter, ret); in dio_refill_pages() 1109 struct block_device *bdev, struct iov_iter *iter, in do_blockdev_direct_IO() argument 1117 size_t count = iov_iter_count(iter); in do_blockdev_direct_IO() 1123 unsigned long align = offset | iov_iter_alignment(iter); in do_blockdev_direct_IO() 1139 if (iov_iter_rw(iter) == READ && !iov_iter_count(iter)) in do_blockdev_direct_IO() 1155 if (iov_iter_rw(iter) == READ) { in do_blockdev_direct_IO() 1174 if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { in do_blockdev_direct_IO() 1191 iov_iter_rw(iter) == WRITE && end > i_size_read(inode)) in do_blockdev_direct_IO() [all …]
|
D | read_write.c | 332 ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos) in vfs_iter_read() argument 343 iter->type |= READ; in vfs_iter_read() 344 ret = file->f_op->read_iter(&kiocb, iter); in vfs_iter_read() 352 ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos) in vfs_iter_write() argument 363 iter->type |= WRITE; in vfs_iter_write() 364 ret = file->f_op->write_iter(&kiocb, iter); in vfs_iter_write() 415 struct iov_iter iter; in new_sync_read() local 420 iov_iter_init(&iter, READ, &iov, 1, len); in new_sync_read() 422 ret = filp->f_op->read_iter(&kiocb, &iter); in new_sync_read() 471 struct iov_iter iter; in new_sync_write() local [all …]
|
D | dax.c | 108 static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, in dax_io() argument 120 if (iov_iter_rw(iter) != WRITE) in dax_io() 136 iov_iter_rw(iter) == WRITE); in dax_io() 149 hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh); in dax_io() 168 if (iov_iter_rw(iter) == WRITE) { in dax_io() 169 len = copy_from_iter_pmem(addr, max - pos, iter); in dax_io() 173 iter); in dax_io() 175 len = iov_iter_zero(max - pos, iter); in dax_io() 210 struct iov_iter *iter, loff_t pos, get_block_t get_block, in dax_do_io() argument 215 loff_t end = pos + iov_iter_count(iter); in dax_do_io() [all …]
|
/linux-4.4.14/drivers/hid/ |
D | hid-wiimote-core.c | 619 const __u8 *mods, *iter; in wiimote_modules_load() local 625 for (iter = mods; *iter != WIIMOD_NULL; ++iter) { in wiimote_modules_load() 626 if (wiimod_table[*iter]->flags & WIIMOD_FLAG_INPUT) { in wiimote_modules_load() 646 for (iter = mods; *iter != WIIMOD_NULL; ++iter) { in wiimote_modules_load() 647 ops = wiimod_table[*iter]; in wiimote_modules_load() 668 for ( ; iter-- != mods; ) { in wiimote_modules_load() 669 ops = wiimod_table[*iter]; in wiimote_modules_load() 682 const __u8 *mods, *iter; in wiimote_modules_unload() local 693 for (iter = mods; *iter != WIIMOD_NULL; ++iter) in wiimote_modules_unload() 701 for ( ; iter-- != mods; ) { in wiimote_modules_unload() [all …]
|
/linux-4.4.14/drivers/block/aoe/ |
D | aoecmd.c | 199 memset(&f->iter, 0, sizeof(f->iter)); in aoe_freetframe() 297 skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter) in skb_fillup() argument 302 __bio_for_each_segment(bv, bio, iter, iter) in skb_fillup() 343 ah->scnt = f->iter.bi_size >> 9; in ata_rw_frameinit() 344 put_lba(ah, f->iter.bi_sector); in ata_rw_frameinit() 353 skb_fillup(skb, f->buf->bio, f->iter); in ata_rw_frameinit() 355 skb->len += f->iter.bi_size; in ata_rw_frameinit() 356 skb->data_len = f->iter.bi_size; in ata_rw_frameinit() 357 skb->truesize += f->iter.bi_size; in ata_rw_frameinit() 385 f->iter = buf->iter; in aoecmd_ata_rw() [all …]
|
D | aoe.h | 104 struct bvec_iter iter; member 123 struct bvec_iter iter; member
|
/linux-4.4.14/net/ipv4/netfilter/ |
D | arp_tables.c | 373 struct arpt_entry *iter; in find_jump_target() local 375 xt_entry_foreach(iter, t->entries, t->size) { in find_jump_target() 376 if (iter == target) in find_jump_target() 644 struct arpt_entry *iter; in translate_table() local 661 xt_entry_foreach(iter, entry0, newinfo->size) { in translate_table() 662 ret = check_entry_size_and_hooks(iter, newinfo, entry0, in translate_table() 670 if (strcmp(arpt_get_target(iter)->u.user.name, in translate_table() 706 xt_entry_foreach(iter, entry0, newinfo->size) { in translate_table() 707 ret = find_check_entry(iter, repl->name, repl->size); in translate_table() 714 xt_entry_foreach(iter, entry0, newinfo->size) { in translate_table() [all …]
|
D | ip_tables.c | 260 const struct ipt_entry *iter; in trace_packet() local 268 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) in trace_packet() 269 if (get_chainname_rulenum(iter, e, hookname, in trace_packet() 449 struct ipt_entry *iter; in find_jump_target() local 451 xt_entry_foreach(iter, t->entries, t->size) { in find_jump_target() 452 if (iter == target) in find_jump_target() 813 struct ipt_entry *iter; in translate_table() local 829 xt_entry_foreach(iter, entry0, newinfo->size) { in translate_table() 830 ret = check_entry_size_and_hooks(iter, newinfo, entry0, in translate_table() 838 if (strcmp(ipt_get_target(iter)->u.user.name, in translate_table() [all …]
|
/linux-4.4.14/arch/powerpc/kernel/ |
D | cacheinfo.c | 166 struct cache *iter; in release_cache_debugcheck() local 168 list_for_each_entry(iter, &cache_list, list) in release_cache_debugcheck() 169 WARN_ONCE(iter->next_local == cache, in release_cache_debugcheck() 171 iter->ofnode->full_name, in release_cache_debugcheck() 172 cache_type_string(iter), in release_cache_debugcheck() 304 struct cache *iter; in cache_find_first_sibling() local 310 list_for_each_entry(iter, &cache_list, list) in cache_find_first_sibling() 311 if (iter->ofnode == cache->ofnode && iter->next_local == cache) in cache_find_first_sibling() 312 return iter; in cache_find_first_sibling() 321 struct cache *iter; in cache_lookup_by_node() local [all …]
|
D | ftrace.c | 443 struct ftrace_rec_iter *iter; in ftrace_replace_code() local 447 for (iter = ftrace_rec_iter_start(); iter; in ftrace_replace_code() 448 iter = ftrace_rec_iter_next(iter)) { in ftrace_replace_code() 449 rec = ftrace_rec_iter_record(iter); in ftrace_replace_code()
|
/linux-4.4.14/net/appletalk/ |
D | aarp.c | 921 static struct aarp_entry *iter_next(struct aarp_iter_state *iter, loff_t *pos) in iter_next() argument 923 int ct = iter->bucket; in iter_next() 924 struct aarp_entry **table = iter->table; in iter_next() 932 iter->table = table; in iter_next() 933 iter->bucket = ct; in iter_next() 956 struct aarp_iter_state *iter = seq->private; in aarp_seq_start() local 959 iter->table = resolved; in aarp_seq_start() 960 iter->bucket = 0; in aarp_seq_start() 962 return *pos ? iter_next(iter, pos) : SEQ_START_TOKEN; in aarp_seq_start() 968 struct aarp_iter_state *iter = seq->private; in aarp_seq_next() local [all …]
|
/linux-4.4.14/arch/arm/include/asm/hardware/ |
D | iop3xx-adma.h | 576 struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter; in iop_desc_init_zero_sum() local 587 iter = iop_hw_desc_slot_idx(hw_desc, i); in iop_desc_init_zero_sum() 588 u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags); in iop_desc_init_zero_sum() 592 iter->desc_ctrl = u_desc_ctrl.value; in iop_desc_init_zero_sum() 695 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; in iop_desc_set_zero_sum_byte_count() local 702 iter = iop_hw_desc_slot_idx(hw_desc, i); in iop_desc_set_zero_sum_byte_count() 703 iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; in iop_desc_set_zero_sum_byte_count() 708 iter = iop_hw_desc_slot_idx(hw_desc, i); in iop_desc_set_zero_sum_byte_count() 709 iter->byte_count = len; in iop_desc_set_zero_sum_byte_count() 744 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; in iop_desc_set_zero_sum_src_addr() local [all …]
|
/linux-4.4.14/drivers/net/bonding/ |
D | bond_main.c | 290 struct list_head *iter; in bond_vlan_rx_add_vid() local 293 bond_for_each_slave(bond, slave, iter) { in bond_vlan_rx_add_vid() 303 bond_for_each_slave(bond, rollback_slave, iter) { in bond_vlan_rx_add_vid() 322 struct list_head *iter; in bond_vlan_rx_kill_vid() local 325 bond_for_each_slave(bond, slave, iter) in bond_vlan_rx_kill_vid() 344 struct list_head *iter; in bond_set_carrier() local 353 bond_for_each_slave(bond, slave, iter) { in bond_set_carrier() 496 struct list_head *iter; in bond_set_promiscuity() local 507 bond_for_each_slave(bond, slave, iter) { in bond_set_promiscuity() 519 struct list_head *iter; in bond_set_allmulti() local [all …]
|
D | bond_procfs.c | 13 struct list_head *iter; in bond_info_seq_start() local 22 bond_for_each_slave_rcu(bond, slave, iter) in bond_info_seq_start() 32 struct list_head *iter; in bond_info_seq_next() local 40 bond_for_each_slave_rcu(bond, slave, iter) { in bond_info_seq_next()
|
D | bond_3ad.c | 700 struct list_head *iter; in __get_active_agg() local 703 bond_for_each_slave_rcu(bond, slave, iter) in __get_active_agg() 1310 struct list_head *iter; in ad_port_selection_logic() local 1374 bond_for_each_slave(bond, slave, iter) { in ad_port_selection_logic() 1582 struct list_head *iter; in ad_agg_selection_logic() local 1591 bond_for_each_slave_rcu(bond, slave, iter) { in ad_agg_selection_logic() 1636 bond_for_each_slave_rcu(bond, slave, iter) { in ad_agg_selection_logic() 1995 struct list_head *iter; in bond_3ad_unbind_slave() local 2028 bond_for_each_slave(bond, slave_iter, iter) { in bond_3ad_unbind_slave() 2105 bond_for_each_slave(bond, slave_iter, iter) { in bond_3ad_unbind_slave() [all …]
|
D | bond_alb.c | 198 struct list_head *iter; in tlb_get_least_loaded_slave() local 205 bond_for_each_slave_rcu(bond, slave, iter) { in tlb_get_least_loaded_slave() 338 struct list_head *iter; in __rlb_next_rx_slave() local 341 bond_for_each_slave_rcu(bond, slave, iter) { in __rlb_next_rx_slave() 961 struct list_head *iter; in alb_send_learning_packets() local 971 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { in alb_send_learning_packets() 1143 struct list_head *iter; in alb_handle_addr_collision_on_attach() local 1167 bond_for_each_slave(bond, tmp_slave1, iter) { in alb_handle_addr_collision_on_attach() 1217 struct list_head *iter; in alb_set_mac_address() local 1225 bond_for_each_slave(bond, slave, iter) { in alb_set_mac_address() [all …]
|
D | bond_options.c | 946 struct list_head *iter; in _bond_options_arp_ip_target_set() local 950 bond_for_each_slave(bond, slave, iter) in _bond_options_arp_ip_target_set() 994 struct list_head *iter; in bond_option_arp_ip_target_rem() local 1017 bond_for_each_slave(bond, slave, iter) { in bond_option_arp_ip_target_rem() 1095 struct list_head *iter; in bond_option_primary_set() local 1112 bond_for_each_slave(bond, slave, iter) { in bond_option_primary_set() 1195 struct list_head *iter; in bond_option_all_slaves_active_set() local 1201 bond_for_each_slave(bond, slave, iter) { in bond_option_all_slaves_active_set() 1276 struct list_head *iter; in bond_option_queue_id_set() local 1305 bond_for_each_slave(bond, slave, iter) { in bond_option_queue_id_set()
|
/linux-4.4.14/arch/arm/mach-iop13xx/include/mach/ |
D | adma.h | 372 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; in iop_desc_set_zero_sum_byte_count() local 379 iter = iop_hw_desc_slot_idx(hw_desc, i); in iop_desc_set_zero_sum_byte_count() 380 iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT; in iop_desc_set_zero_sum_byte_count() 386 iter = iop_hw_desc_slot_idx(hw_desc, i); in iop_desc_set_zero_sum_byte_count() 387 iter->byte_count = len; in iop_desc_set_zero_sum_byte_count() 425 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; in iop_desc_set_xor_src_addr() local 429 iter = iop_hw_desc_slot_idx(hw_desc, i); in iop_desc_set_xor_src_addr() 430 iter->src[src_idx].src_addr = addr; in iop_desc_set_xor_src_addr() 431 iter->src[src_idx].upper_src_addr = 0; in iop_desc_set_xor_src_addr() 445 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; in iop_desc_set_pq_src_addr() local [all …]
|
/linux-4.4.14/net/ipv6/netfilter/ |
D | ip6_tables.c | 289 const struct ip6t_entry *iter; in trace_packet() local 297 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) in trace_packet() 298 if (get_chainname_rulenum(iter, e, hookname, in trace_packet() 461 struct ip6t_entry *iter; in find_jump_target() local 463 xt_entry_foreach(iter, t->entries, t->size) { in find_jump_target() 464 if (iter == target) in find_jump_target() 825 struct ip6t_entry *iter; in translate_table() local 841 xt_entry_foreach(iter, entry0, newinfo->size) { in translate_table() 842 ret = check_entry_size_and_hooks(iter, newinfo, entry0, in translate_table() 850 if (strcmp(ip6t_get_target(iter)->u.user.name, in translate_table() [all …]
|
/linux-4.4.14/net/irda/ |
D | irlmp.c | 1834 static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off) in irlmp_seq_hb_idx() argument 1838 spin_lock_irq(&iter->hashbin->hb_spinlock); in irlmp_seq_hb_idx() 1839 for (element = hashbin_get_first(iter->hashbin); in irlmp_seq_hb_idx() 1841 element = hashbin_get_next(iter->hashbin)) { in irlmp_seq_hb_idx() 1847 spin_unlock_irq(&iter->hashbin->hb_spinlock); in irlmp_seq_hb_idx() 1848 iter->hashbin = NULL; in irlmp_seq_hb_idx() 1855 struct irlmp_iter_state *iter = seq->private; in irlmp_seq_start() local 1859 iter->hashbin = NULL; in irlmp_seq_start() 1863 iter->hashbin = irlmp->unconnected_lsaps; in irlmp_seq_start() 1864 v = irlmp_seq_hb_idx(iter, &off); in irlmp_seq_start() [all …]
|
D | irlap.c | 1075 struct irlap_iter_state *iter = seq->private; in irlap_seq_start() local 1080 iter->id = 0; in irlap_seq_start() 1084 if (iter->id == *pos) in irlap_seq_start() 1086 ++iter->id; in irlap_seq_start() 1094 struct irlap_iter_state *iter = seq->private; in irlap_seq_next() local 1097 ++iter->id; in irlap_seq_next() 1108 const struct irlap_iter_state *iter = seq->private; in irlap_seq_show() local 1113 seq_printf(seq, "irlap%d ", iter->id); in irlap_seq_show()
|
/linux-4.4.14/kernel/sched/ |
D | rt.c | 472 #define for_each_rt_rq(rt_rq, iter, rq) \ argument 473 for (iter = container_of(&task_groups, typeof(*iter), list); \ 474 (iter = next_task_group(iter)) && \ 475 (rt_rq = iter->rt_rq[cpu_of(rq)]);) 576 #define for_each_rt_rq(rt_rq, iter, rq) \ argument 577 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) 650 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in do_balance_runtime() local 653 if (iter == rt_rq) in do_balance_runtime() 656 raw_spin_lock(&iter->rt_runtime_lock); in do_balance_runtime() 662 if (iter->rt_runtime == RUNTIME_INF) in do_balance_runtime() [all …]
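A sketch of the for_each_rt_rq() walk defined above; rq is an assumed struct rq *, rt_rq_iter_t is the iterator typedef rt.c pairs with this macro, and the summing body is illustrative:

        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;
        unsigned int queued = 0;

        for_each_rt_rq(rt_rq, iter, rq)
                queued += rt_rq->rt_nr_running;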
|
/linux-4.4.14/drivers/gpu/drm/vmwgfx/ |
D | vmwgfx_gmr.c | 39 struct vmw_piter *iter, in vmw_gmr2_bind() argument 86 *cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT; in vmw_gmr2_bind() 88 *((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >> in vmw_gmr2_bind() 92 vmw_piter_next(iter); in vmw_gmr2_bind()
|
D | vmwgfx_buffer.c | 246 return __sg_page_iter_next(&viter->iter); in __vmw_piter_sg_next() 266 return sg_page_iter_page(&viter->iter); in __vmw_piter_sg_page() 291 return sg_page_iter_dma_address(&viter->iter); in __vmw_piter_sg_addr() 329 __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl, in vmw_piter_start() 397 struct vmw_piter iter; in vmw_ttm_map_dma() local 453 for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) { in vmw_ttm_map_dma() 454 dma_addr_t cur = vmw_piter_dma_addr(&iter); in vmw_ttm_map_dma()
|
D | vmwgfx_mob.c | 119 struct vmw_piter iter; in vmw_setup_otable_base() local 125 vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); in vmw_setup_otable_base() 126 WARN_ON(!vmw_piter_next(&iter)); in vmw_setup_otable_base() 136 mob->pt_root_page = vmw_piter_dma_addr(&iter); in vmw_setup_otable_base() 139 mob->pt_root_page = vmw_piter_dma_addr(&iter); in vmw_setup_otable_base() 145 vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT); in vmw_setup_otable_base()
|
/linux-4.4.14/scripts/coccinelle/null/ |
D | deref_null.cocci | 57 iterator iter; 66 iter(subE,...) S4 // no use 118 iterator iter; 127 iter(subE,...) S4 // no use 178 iterator iter; 187 iter(subE,...) S4 // no use 251 iterator iter; 260 iter(subE,...) S4 // no use
|
/linux-4.4.14/drivers/scsi/qla2xxx/ |
D | qla_inline.h | 75 uint32_t iter = bsize >> 2; in host_to_fcp_swap() local 77 for (; iter ; iter--) in host_to_fcp_swap() 88 uint32_t iter = bsize >> 2; in host_to_adap() local 90 for (; iter ; iter--) in host_to_adap()
|
D | qla_attr.c | 270 uint32_t *iter; in qla2x00_sysfs_write_nvram() local 273 iter = (uint32_t *)buf; in qla2x00_sysfs_write_nvram() 276 chksum += le32_to_cpu(*iter++); in qla2x00_sysfs_write_nvram() 278 *iter = cpu_to_le32(chksum); in qla2x00_sysfs_write_nvram() 280 uint8_t *iter; in qla2x00_sysfs_write_nvram() local 283 iter = (uint8_t *)buf; in qla2x00_sysfs_write_nvram() 286 chksum += *iter++; in qla2x00_sysfs_write_nvram() 288 *iter = chksum; in qla2x00_sysfs_write_nvram() 639 uint16_t iter, addr, offset; in qla2x00_sysfs_read_sfp() local 659 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE; in qla2x00_sysfs_read_sfp() [all …]
|
D | qla_sup.c | 2665 uint32_t istart, iend, iter, vend; in qla2x00_get_fcode_version() local 2678 iter = istart; in qla2x00_get_fcode_version() 2679 while ((iter < iend) && !do_next) { in qla2x00_get_fcode_version() 2680 iter++; in qla2x00_get_fcode_version() 2681 if (qla2x00_read_flash_byte(ha, iter) == '/') { in qla2x00_get_fcode_version() 2682 if (qla2x00_read_flash_byte(ha, iter + 2) == in qla2x00_get_fcode_version() 2686 iter + 3) == '/') in qla2x00_get_fcode_version() 2695 while ((iter > istart) && !do_next) { in qla2x00_get_fcode_version() 2696 iter--; in qla2x00_get_fcode_version() 2697 if (qla2x00_read_flash_byte(ha, iter) == ' ') in qla2x00_get_fcode_version() [all …]
|
/linux-4.4.14/drivers/vfio/platform/ |
D | vfio_platform_common.c | 36 struct vfio_platform_reset_node *iter; in vfio_platform_lookup_reset() local 40 list_for_each_entry(iter, &reset_list, link) { in vfio_platform_lookup_reset() 41 if (!strcmp(iter->compat, compat) && in vfio_platform_lookup_reset() 42 try_module_get(iter->owner)) { in vfio_platform_lookup_reset() 43 *module = iter->owner; in vfio_platform_lookup_reset() 44 reset_fn = iter->reset; in vfio_platform_lookup_reset() 610 struct vfio_platform_reset_node *iter, *temp; in vfio_platform_unregister_reset() local 613 list_for_each_entry_safe(iter, temp, &reset_list, link) { in vfio_platform_unregister_reset() 614 if (!strcmp(iter->compat, compat) && (iter->reset == fn)) { in vfio_platform_unregister_reset() 615 list_del(&iter->link); in vfio_platform_unregister_reset()
|
/linux-4.4.14/drivers/scsi/ |
D | sd_dif.c | 132 struct bvec_iter iter; in sd_dif_prepare() local 141 bip_for_each_vec(iv, bip, iter) { in sd_dif_prepare() 184 struct bvec_iter iter; in sd_dif_complete() local 188 bip_for_each_vec(iv, bip, iter) { in sd_dif_complete()
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_cq.c | 75 struct hlist_node *iter; in ehca_cq_unassign_qp() local 80 hlist_for_each(iter, &cq->qp_hashtab[key]) { in ehca_cq_unassign_qp() 81 qp = hlist_entry(iter, struct ehca_qp, list_entries); in ehca_cq_unassign_qp() 83 hlist_del(iter); in ehca_cq_unassign_qp() 104 struct hlist_node *iter; in ehca_cq_get_qp() local 106 hlist_for_each(iter, &cq->qp_hashtab[key]) { in ehca_cq_get_qp() 107 qp = hlist_entry(iter, struct ehca_qp, list_entries); in ehca_cq_get_qp()
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | debugfs.c | 231 struct qp_iter *iter; in _qp_stats_seq_start() local 235 iter = qp_iter_init(s->private); in _qp_stats_seq_start() 236 if (!iter) in _qp_stats_seq_start() 240 if (qp_iter_next(iter)) { in _qp_stats_seq_start() 241 kfree(iter); in _qp_stats_seq_start() 246 return iter; in _qp_stats_seq_start() 252 struct qp_iter *iter = iter_ptr; in _qp_stats_seq_next() local 256 if (qp_iter_next(iter)) { in _qp_stats_seq_next() 257 kfree(iter); in _qp_stats_seq_next() 261 return iter; in _qp_stats_seq_next() [all …]
|
D | qp.c | 1552 struct qp_iter *iter; in qp_iter_init() local 1554 iter = kzalloc(sizeof(*iter), GFP_KERNEL); in qp_iter_init() 1555 if (!iter) in qp_iter_init() 1558 iter->dev = dev; in qp_iter_init() 1559 iter->specials = dev->ibdev.phys_port_cnt * 2; in qp_iter_init() 1560 if (qp_iter_next(iter)) { in qp_iter_init() 1561 kfree(iter); in qp_iter_init() 1565 return iter; in qp_iter_init() 1568 int qp_iter_next(struct qp_iter *iter) in qp_iter_next() argument 1570 struct hfi1_ibdev *dev = iter->dev; in qp_iter_next() [all …]
|
D | qp.h | 235 int qp_iter_next(struct qp_iter *iter); 242 void qp_iter_print(struct seq_file *s, struct qp_iter *iter);
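A sketch of the iterator protocol declared above, mirroring the hfi1 debugfs usage shown earlier: qp_iter_init() positions the iterator on the first QP, qp_iter_next() returns non-zero once the walk is exhausted, and the caller frees the iterator. dev (struct hfi1_ibdev *) and s (struct seq_file *) are assumed:

        struct qp_iter *iter = qp_iter_init(dev);

        while (iter) {
                qp_iter_print(s, iter);
                if (qp_iter_next(iter)) {
                        kfree(iter);
                        iter = NULL;
                }
        }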
|
/linux-4.4.14/net/switchdev/ |
D | switchdev.c | 188 struct list_head *iter; in switchdev_port_attr_get() local 206 netdev_for_each_lower_dev(dev, lower_dev, iter) { in switchdev_port_attr_get() 226 struct list_head *iter; in __switchdev_port_attr_set() local 242 netdev_for_each_lower_dev(dev, lower_dev, iter) { in __switchdev_port_attr_set() 361 struct list_head *iter; in __switchdev_port_obj_add() local 372 netdev_for_each_lower_dev(dev, lower_dev, iter) { in __switchdev_port_obj_add() 473 struct list_head *iter; in switchdev_port_obj_del_now() local 484 netdev_for_each_lower_dev(dev, lower_dev, iter) { in switchdev_port_obj_del_now() 547 struct list_head *iter; in switchdev_port_obj_dump() local 560 netdev_for_each_lower_dev(dev, lower_dev, iter) { in switchdev_port_obj_dump() [all …]
|
/linux-4.4.14/scripts/kconfig/ |
D | zconf.lex.c | 2408 struct file *iter; in zconf_nextfile() local 2424 for (iter = current_file->parent; iter; iter = iter->parent ) { in zconf_nextfile() 2425 if (!strcmp(current_file->name,iter->name) ) { in zconf_nextfile() 2430 iter = current_file->parent; in zconf_nextfile() 2431 while (iter && \ in zconf_nextfile() 2432 strcmp(iter->name,current_file->name)) { in zconf_nextfile() 2434 iter->name, iter->lineno-1); in zconf_nextfile() 2435 iter = iter->parent; in zconf_nextfile() 2437 if (iter) in zconf_nextfile() 2439 iter->name, iter->lineno+1); in zconf_nextfile()
|
D | gconf.c | 784 GtkTreeIter iter; in renderer_edited() local 789 if (!gtk_tree_model_get_iter(model2, &iter, path)) in renderer_edited() 792 gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); in renderer_edited() 795 gtk_tree_model_get(model2, &iter, COL_VALUE, &old_def, -1); in renderer_edited() 886 GtkTreeIter iter; in on_treeview2_button_press_event() local 903 if (!gtk_tree_model_get_iter(model2, &iter, path)) in on_treeview2_button_press_event() 905 gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); in on_treeview2_button_press_event() 943 GtkTreeIter iter; in on_treeview2_key_press_event() local 963 gtk_tree_model_get_iter(model2, &iter, path); in on_treeview2_key_press_event() 964 gtk_tree_model_get(model2, &iter, COL_MENU, &menu, -1); in on_treeview2_key_press_event() [all …]
|
/linux-4.4.14/kernel/events/ |
D | hw_breakpoint.c | 120 struct perf_event *iter; in task_bp_pinned() local 123 list_for_each_entry(iter, &bp_task_head, hw.bp_list) { in task_bp_pinned() 124 if (iter->hw.target == tsk && in task_bp_pinned() 125 find_slot_idx(iter) == type && in task_bp_pinned() 126 (iter->cpu < 0 || cpu == iter->cpu)) in task_bp_pinned() 127 count += hw_breakpoint_weight(iter); in task_bp_pinned()
|
/linux-4.4.14/arch/x86/kernel/ |
D | ftrace.c | 541 struct ftrace_rec_iter *iter; in ftrace_replace_code() local 547 for_ftrace_rec_iter(iter) { in ftrace_replace_code() 548 rec = ftrace_rec_iter_record(iter); in ftrace_replace_code() 561 for_ftrace_rec_iter(iter) { in ftrace_replace_code() 562 rec = ftrace_rec_iter_record(iter); in ftrace_replace_code() 575 for_ftrace_rec_iter(iter) { in ftrace_replace_code() 576 rec = ftrace_rec_iter_record(iter); in ftrace_replace_code() 591 for_ftrace_rec_iter(iter) { in ftrace_replace_code() 592 rec = ftrace_rec_iter_record(iter); in ftrace_replace_code()
|
/linux-4.4.14/kernel/locking/ |
D | lockdep_proc.c | 569 struct lock_stat_data *iter; in ls_start() local 574 iter = data->stats + (*pos - 1); in ls_start() 575 if (iter >= data->iter_end) in ls_start() 576 iter = NULL; in ls_start() 578 return iter; in ls_start() 619 struct lock_stat_data *iter = data->stats; in lock_stat_open() local 623 iter->class = class; in lock_stat_open() 624 iter->stats = lock_stats(class); in lock_stat_open() 625 iter++; in lock_stat_open() 627 data->iter_end = iter; in lock_stat_open()
|
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | t4vf_common.h | 236 #define for_each_port(adapter, iter) \ argument 237 for (iter = 0; iter < (adapter)->params.nports; iter++)
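A sketch of the for_each_port() macro above; adapter is an assumed struct adapter * and both the port[] array and the per-port action are assumptions for illustration only:

        int pidx;

        for_each_port(adapter, pidx) {
                struct net_device *dev = adapter->port[pidx];   /* port[] assumed */

                if (dev)
                        netif_carrier_off(dev);         /* illustrative action */
        }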
|
D | adapter.h | 348 #define for_each_ethrxq(sge, iter) \ argument 349 for (iter = 0; iter < (sge)->ethqsets; iter++)
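A companion sketch for for_each_ethrxq(); it only counts the Ethernet RX queue sets, so nothing beyond the macro itself and an assumed adapter->sge is relied on:

        int qs, nrxq = 0;

        for_each_ethrxq(&adapter->sge, qs)
                nrxq++;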
|
/linux-4.4.14/tools/perf/ |
D | builtin-report.c | 93 static int hist_iter__report_callback(struct hist_entry_iter *iter, in hist_iter__report_callback() argument 99 struct hist_entry *he = iter->he; in hist_iter__report_callback() 100 struct perf_evsel *evsel = iter->evsel; in hist_iter__report_callback() 107 hist__account_cycles(iter->sample->branch_stack, al, iter->sample, in hist_iter__report_callback() 146 struct hist_entry_iter iter = { in process_sample_event() local 173 iter.ops = &hist_iter_branch; in process_sample_event() 175 iter.ops = &hist_iter_mem; in process_sample_event() 177 iter.ops = &hist_iter_cumulative; in process_sample_event() 179 iter.ops = &hist_iter_normal; in process_sample_event() 185 ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep); in process_sample_event()
|
D | builtin-top.c | 682 static int hist_iter__top_callback(struct hist_entry_iter *iter, in hist_iter__top_callback() argument 687 struct hist_entry *he = iter->he; in hist_iter__top_callback() 688 struct perf_evsel *evsel = iter->evsel; in hist_iter__top_callback() 699 hist__account_cycles(iter->sample->branch_stack, al, iter->sample, in hist_iter__top_callback() 788 struct hist_entry_iter iter = { in perf_event__process_sample() local 795 iter.ops = &hist_iter_cumulative; in perf_event__process_sample() 797 iter.ops = &hist_iter_normal; in perf_event__process_sample() 801 err = hist_entry_iter__add(&iter, &al, top->max_stack, top); in perf_event__process_sample()
|
/linux-4.4.14/drivers/net/ethernet/qlogic/qlge/ |
D | qlge_ethtool.c | 247 u64 *iter = &qdev->nic_stats.tx_pkts; in ql_update_stats() local 265 *iter = data; in ql_update_stats() 266 iter++; in ql_update_stats() 279 *iter = data; in ql_update_stats() 280 iter++; in ql_update_stats() 284 iter += QLGE_RCV_MAC_ERR_STATS; in ql_update_stats() 296 *iter = data; in ql_update_stats() 297 iter++; in ql_update_stats() 310 *iter = data; in ql_update_stats() 311 iter++; in ql_update_stats() [all …]
|
/linux-4.4.14/fs/f2fs/ |
D | trace.c | 129 struct radix_tree_iter iter; in gang_lookup_pids() local 136 radix_tree_for_each_slot(slot, &pids, &iter, first_index) { in gang_lookup_pids() 137 results[ret] = iter.index; in gang_lookup_pids()
|
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb/ |
D | common.h | 314 #define for_each_port(adapter, iter) \ argument 315 for (iter = 0; iter < (adapter)->params.nports; ++iter)
|
/linux-4.4.14/fs/ocfs2/dlm/ |
D | dlmmaster.c | 733 struct dlm_node_iter iter; in dlm_get_lock_resource() local 956 dlm_node_iter_init(mle->vote_map, &iter); in dlm_get_lock_resource() 957 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { in dlm_get_lock_resource() 1191 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter, in dlm_bitmap_diff_iter_init() argument 1198 iter->curnode = -1; in dlm_bitmap_diff_iter_init() 1199 iter->orig_bm = orig_bm; in dlm_bitmap_diff_iter_init() 1200 iter->cur_bm = cur_bm; in dlm_bitmap_diff_iter_init() 1203 p1 = *(iter->orig_bm + i); in dlm_bitmap_diff_iter_init() 1204 p2 = *(iter->cur_bm + i); in dlm_bitmap_diff_iter_init() 1205 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); in dlm_bitmap_diff_iter_init() [all …]
|
D | dlmcommon.h | 1112 struct dlm_node_iter *iter) in dlm_node_iter_init() argument 1114 memcpy(iter->node_map, map, sizeof(iter->node_map)); in dlm_node_iter_init() 1115 iter->curnode = -1; in dlm_node_iter_init() 1118 static inline int dlm_node_iter_next(struct dlm_node_iter *iter) in dlm_node_iter_next() argument 1121 bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1); in dlm_node_iter_next() 1123 iter->curnode = O2NM_MAX_NODES; in dlm_node_iter_next() 1126 iter->curnode = bit; in dlm_node_iter_next()
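A sketch of the bitmap iterator above, following the dlmmaster.c pattern earlier in this listing; live_map is an assumed unsigned long bitmap sized for O2NM_MAX_NODES and the accounting is illustrative:

        struct dlm_node_iter iter;
        int node, nodes_up = 0;

        dlm_node_iter_init(live_map, &iter);
        while ((node = dlm_node_iter_next(&iter)) >= 0)
                nodes_up++;             /* node holds each set bit in turn */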
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/pm/ |
D | base.c | 447 args->v0.version, args->v0.iter); in nvkm_perfmon_mthd_query_domain() 448 di = (args->v0.iter & 0xff) - 1; in nvkm_perfmon_mthd_query_domain() 471 args->v0.iter = ++di; in nvkm_perfmon_mthd_query_domain() 475 args->v0.iter = 0xff; in nvkm_perfmon_mthd_query_domain() 499 args->v0.version, args->v0.domain, args->v0.iter); in nvkm_perfmon_mthd_query_signal() 500 si = (args->v0.iter & 0xffff) - 1; in nvkm_perfmon_mthd_query_signal() 524 args->v0.iter = ++si; in nvkm_perfmon_mthd_query_signal() 529 args->v0.iter = 0xffff; in nvkm_perfmon_mthd_query_signal() 553 args->v0.iter); in nvkm_perfmon_mthd_query_source() 554 si = (args->v0.iter & 0xff) - 1; in nvkm_perfmon_mthd_query_source() [all …]
|
/linux-4.4.14/include/trace/ |
D | trace_events.h | 307 trace_raw_output_##call(struct trace_iterator *iter, int flags, \ 310 struct trace_seq *s = &iter->seq; \ 311 struct trace_seq __maybe_unused *p = &iter->tmp_seq; \ 315 field = (typeof(field))iter->ent; \ 317 ret = trace_raw_output_prep(iter, trace_event); \ 332 trace_raw_output_##call(struct trace_iterator *iter, int flags, \ 337 struct trace_seq *p = &iter->tmp_seq; \ 339 entry = iter->ent; \ 349 return trace_output_call(iter, #call, print); \
|
/linux-4.4.14/fs/9p/ |
D | vfs_addr.c | 248 v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) in v9fs_direct_IO() argument 253 if (iov_iter_rw(iter) == WRITE) { in v9fs_direct_IO() 254 n = p9_client_write(file->private_data, pos, iter, &err); in v9fs_direct_IO() 262 n = p9_client_read(file->private_data, pos, iter, &err); in v9fs_direct_IO()
|
/linux-4.4.14/drivers/misc/mic/scif/ |
D | scif_rma.h | 369 scif_init_window_iter(struct scif_window *window, struct scif_window_iter *iter) in scif_init_window_iter() argument 371 iter->offset = window->offset; in scif_init_window_iter() 372 iter->index = 0; in scif_init_window_iter() 377 struct scif_window_iter *iter);
|
/linux-4.4.14/net/core/ |
D | dev.c | 1493 struct list_head *iter; in dev_disable_lro() local 1501 netdev_for_each_lower_dev(dev, lower_dev, iter) in dev_disable_lro() 4996 struct list_head **iter) in netdev_upper_get_next_dev_rcu() argument 5002 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); in netdev_upper_get_next_dev_rcu() 5007 *iter = &upper->list; in netdev_upper_get_next_dev_rcu() 5022 struct list_head **iter) in netdev_all_upper_get_next_dev_rcu() argument 5028 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); in netdev_all_upper_get_next_dev_rcu() 5033 *iter = &upper->list; in netdev_all_upper_get_next_dev_rcu() 5051 struct list_head **iter) in netdev_lower_get_next_private() argument 5055 lower = list_entry(*iter, struct netdev_adjacent, list); in netdev_lower_get_next_private() [all …]
|
/linux-4.4.14/arch/sh/mm/ |
D | pmb.c | 145 struct pmb_entry *pmbe, *iter; in pmb_mapping_exists() local 175 for (iter = pmbe->link; iter; iter = iter->link) in pmb_mapping_exists() 176 span += iter->size; in pmb_mapping_exists() 815 static int pmb_seq_show(struct seq_file *file, void *iter) in pmb_seq_show() argument
|
/linux-4.4.14/fs/jfs/ |
D | inode.c | 334 static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, in jfs_direct_IO() argument 340 size_t count = iov_iter_count(iter); in jfs_direct_IO() 343 ret = blockdev_direct_IO(iocb, inode, iter, offset, jfs_get_block); in jfs_direct_IO() 349 if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { in jfs_direct_IO()
|
/linux-4.4.14/security/selinux/ss/ |
D | ebitmap.c | 94 unsigned int iter; in ebitmap_netlbl_export() local 108 for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) { in ebitmap_netlbl_export() 109 e_map = e_iter->maps[iter]; in ebitmap_netlbl_export()
|
/linux-4.4.14/drivers/block/ |
D | ps3disk.c | 96 struct req_iterator iter; in ps3disk_scatter_gather() local 102 rq_for_each_segment(bvec, req, iter) { in ps3disk_scatter_gather() 105 __func__, __LINE__, i, bio_sectors(iter.bio), in ps3disk_scatter_gather() 106 iter.bio->bi_iter.bi_sector); in ps3disk_scatter_gather() 133 struct req_iterator iter; in ps3disk_submit_request_sg() local 135 rq_for_each_segment(bv, req, iter) in ps3disk_submit_request_sg()
|
/linux-4.4.14/drivers/target/ |
D | target_core_file.c | 254 struct iov_iter iter; in fd_do_rw() local 274 iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len); in fd_do_rw() 276 ret = vfs_iter_write(fd, &iter, &pos); in fd_do_rw() 278 ret = vfs_iter_read(fd, &iter, &pos); in fd_do_rw() 363 struct iov_iter iter; in fd_execute_write_same() local 400 iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len); in fd_execute_write_same() 401 ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos); in fd_execute_write_same()
|
/linux-4.4.14/drivers/nvdimm/ |
D | blk.c | 170 struct bvec_iter iter; in nd_blk_make_request() local 191 bio_for_each_segment(bvec, bio, iter) { in nd_blk_make_request() 196 bvec.bv_offset, rw, iter.bi_sector); in nd_blk_make_request() 201 (unsigned long long) iter.bi_sector, len); in nd_blk_make_request()
|
/linux-4.4.14/drivers/iommu/ |
D | dmar.c | 548 struct acpi_dmar_header *iter, *next; in dmar_walk_remapping_entries() local 551 for (iter = start; iter < end && ret == 0; iter = next) { in dmar_walk_remapping_entries() 552 next = (void *)iter + iter->length; in dmar_walk_remapping_entries() 553 if (iter->length == 0) { in dmar_walk_remapping_entries() 565 dmar_table_print_dmar_entry(iter); in dmar_walk_remapping_entries() 567 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) { in dmar_walk_remapping_entries() 570 iter->type); in dmar_walk_remapping_entries() 571 } else if (cb->cb[iter->type]) { in dmar_walk_remapping_entries() 572 ret = cb->cb[iter->type](iter, cb->arg[iter->type]); in dmar_walk_remapping_entries() 575 iter->type); in dmar_walk_remapping_entries()
|
/linux-4.4.14/fs/proc/ |
D | base.c | 3000 static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter) in next_tgid() argument 3004 if (iter.task) in next_tgid() 3005 put_task_struct(iter.task); in next_tgid() 3008 iter.task = NULL; in next_tgid() 3009 pid = find_ge_pid(iter.tgid, ns); in next_tgid() 3011 iter.tgid = pid_nr_ns(pid, ns); in next_tgid() 3012 iter.task = pid_task(pid, PIDTYPE_PID); in next_tgid() 3025 if (!iter.task || !has_group_leader_pid(iter.task)) { in next_tgid() 3026 iter.tgid += 1; in next_tgid() 3029 get_task_struct(iter.task); in next_tgid() [all …]
|
/linux-4.4.14/fs/btrfs/tests/ |
D | btrfs-tests.c | 130 struct radix_tree_iter iter; in btrfs_free_dummy_fs_info() local 135 radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) { in btrfs_free_dummy_fs_info()
|
/linux-4.4.14/drivers/hv/ |
D | vmbus_drv.c | 1143 struct resource *iter; in vmbus_allocate_mmio() local 1149 for (iter = hyperv_mmio; iter; iter = iter->sibling) { in vmbus_allocate_mmio() 1150 if ((iter->start >= max) || (iter->end <= min)) in vmbus_allocate_mmio() 1153 range_min = iter->start; in vmbus_allocate_mmio() 1154 range_max = iter->end; in vmbus_allocate_mmio()
|
/linux-4.4.14/scripts/coccinelle/free/ |
D | kfree.cocci | 72 iterator iter; 80 iter(...,subE,...) S // no use
|
/linux-4.4.14/tools/perf/tests/ |
D | hists_cumulate.c | 89 struct hist_entry_iter iter = { in add_hist_entries() local 96 iter.ops = &hist_iter_cumulative; in add_hist_entries() 98 iter.ops = &hist_iter_normal; in add_hist_entries() 109 if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH, in add_hist_entries()
|
/linux-4.4.14/drivers/staging/unisys/visorhba/ |
D | visorhba_main.c | 148 #define for_each_vdisk_match(iter, list, match) \ argument 149 for (iter = &list->head; iter->next; iter = iter->next) \ 150 if ((iter->channel == match->channel) && \ 151 (iter->id == match->id) && \ 152 (iter->lun == match->lun))
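A sketch of for_each_vdisk_match(); the iterator type is assumed to be the driver's struct visordisk_info, devdata is an assumed list owner with a head member, match is assumed to carry channel/id/lun, and the body only counts matching entries:

        struct visordisk_info *vdisk;
        int matches = 0;

        for_each_vdisk_match(vdisk, devdata, match)
                matches++;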
|
/linux-4.4.14/fs/ext4/ |
D | indirect.c | 647 ssize_t ext4_ind_direct_IO(struct kiocb *iocb, struct iov_iter *iter, in ext4_ind_direct_IO() argument 656 size_t count = iov_iter_count(iter); in ext4_ind_direct_IO() 659 if (iov_iter_rw(iter) == WRITE) { in ext4_ind_direct_IO() 681 if (iov_iter_rw(iter) == READ && ext4_should_dioread_nolock(inode)) { in ext4_ind_direct_IO() 695 ret = dax_do_io(iocb, inode, iter, offset, in ext4_ind_direct_IO() 699 inode->i_sb->s_bdev, iter, in ext4_ind_direct_IO() 706 ret = dax_do_io(iocb, inode, iter, offset, in ext4_ind_direct_IO() 709 ret = blockdev_direct_IO(iocb, inode, iter, offset, in ext4_ind_direct_IO() 712 if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { in ext4_ind_direct_IO()
|
/linux-4.4.14/drivers/hwspinlock/ |
D | hwspinlock_core.c | 299 struct radix_tree_iter iter; in of_hwspin_lock_get_id() local 312 radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) { in of_hwspin_lock_get_id() 317 slot = radix_tree_iter_retry(&iter); in of_hwspin_lock_get_id()
|
/linux-4.4.14/drivers/iio/ |
D | industrialio-trigger.c | 111 struct iio_trigger *trig = NULL, *iter; in iio_trigger_find_by_name() local 114 list_for_each_entry(iter, &iio_trigger_list, list) in iio_trigger_find_by_name() 115 if (sysfs_streq(iter->name, name)) { in iio_trigger_find_by_name() 116 trig = iter; in iio_trigger_find_by_name()
|