head 400 arch/arc/kernel/kprobes.c struct hlist_head *head, empty_rp;
head 406 arch/arc/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags);
head 421 arch/arc/kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 50 arch/arm/crypto/ghash-ce-glue.c const char *head);
head 54 arch/arm/crypto/ghash-ce-glue.c const char *head);
head 58 arch/arm/crypto/ghash-ce-glue.c const char *head);
head 69 arch/arm/crypto/ghash-ce-glue.c struct ghash_key *key, const char *head)
head 73 arch/arm/crypto/ghash-ce-glue.c pmull_ghash_update(blocks, dg, src, key, head);
head 81 arch/arm/crypto/ghash-ce-glue.c if (head) {
head 82 arch/arm/crypto/ghash-ce-glue.c in = head;
head 84 arch/arm/crypto/ghash-ce-glue.c head = NULL;
head 455 arch/arm/kernel/bios32.c struct list_head *head)
head 516 arch/arm/kernel/bios32.c list_add(&sys->node, head);
head 528 arch/arm/kernel/bios32.c LIST_HEAD(head);
head 533 arch/arm/kernel/bios32.c pcibios_init_hw(parent, hw, &head);
head 537 arch/arm/kernel/bios32.c list_for_each_entry(sys, &head, node) {
head 175 arch/arm/kernel/machine_kexec.c page_list = image->head & PAGE_MASK;
head 417 arch/arm/probes/kprobes/core.c struct hlist_head *head, empty_rp;
head 424 arch/arm/probes/kprobes/core.c kretprobe_hash_lock(current, &head, &flags);
head 439 arch/arm/probes/kprobes/core.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 458 arch/arm/probes/kprobes/core.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 55 arch/arm64/crypto/ghash-ce-glue.c const char *head);
head 59 arch/arm64/crypto/ghash-ce-glue.c const char *head);
head 82 arch/arm64/crypto/ghash-ce-glue.c struct ghash_key *key, const char *head,
head 86 arch/arm64/crypto/ghash-ce-glue.c const char *head))
head 90 arch/arm64/crypto/ghash-ce-glue.c simd_update(blocks, dg, src, key, head);
head 98 arch/arm64/crypto/ghash-ce-glue.c if (head) {
head 99 arch/arm64/crypto/ghash-ce-glue.c in = head;
head 101 arch/arm64/crypto/ghash-ce-glue.c head = NULL;
head 123 arch/arm64/crypto/ghash-ce-glue.c const char *head))
head 507 arch/arm64/crypto/ghash-ce-glue.c u8 *head = NULL;
head 513 arch/arm64/crypto/ghash-ce-glue.c head = dst;
head 520 arch/arm64/crypto/ghash-ce-glue.c ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
head 641 arch/arm64/crypto/ghash-ce-glue.c const u8 *head = NULL;
head 645 arch/arm64/crypto/ghash-ce-glue.c head = src;
head 652 arch/arm64/crypto/ghash-ce-glue.c ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head,
head 74 arch/arm64/include/uapi/asm/sigcontext.h struct _aarch64_ctx head;
head 93 arch/arm64/include/uapi/asm/sigcontext.h struct _aarch64_ctx head;
head 126 arch/arm64/include/uapi/asm/sigcontext.h struct _aarch64_ctx head;
head 135 arch/arm64/include/uapi/asm/sigcontext.h struct _aarch64_ctx head;
head 43 arch/arm64/kernel/machine_kexec.c pr_debug(" head: %lx\n", kimage->head);
head 87 arch/arm64/kernel/machine_kexec.c for (entry = &kimage->head; ; entry++) {
head 199 arch/arm64/kernel/machine_kexec.c if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE))
head 218 arch/arm64/kernel/machine_kexec.c cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
head 478 arch/arm64/kernel/probes/kprobes.c struct hlist_head *head, empty_rp;
head 486 arch/arm64/kernel/probes/kprobes.c kretprobe_hash_lock(current, &head, &flags);
head 501 arch/arm64/kernel/probes/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 520 arch/arm64/kernel/probes/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
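Note: the kprobes hits above (arc, arm, arm64) all drain a kretprobe hash list with the same idiom: walk an intrusive list safely while unlinking nodes, recovering the containing object from the embedded node via container_of. A minimal userspace sketch of that idiom, with container_of written out and illustrative names (struct frame, drain_all), not the kernel's:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node { struct node *next; };          /* stand-in for hlist_node */
struct frame { int id; struct node link; };  /* object with embedded node */

/* Safe iteration: fetch ->next before the body frees the node. */
static void drain_all(struct node **head)
{
	struct node *pos = *head, *tmp;

	while (pos) {
		tmp = pos->next;
		struct frame *f = container_of(pos, struct frame, link);
		printf("freeing frame %d\n", f->id);
		free(f);
		pos = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct frame *f = malloc(sizeof(*f));
		f->id = i;
		f->link.next = head;
		head = &f->link;
	}
	drain_all(&head);
	return 0;
}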
head 182 arch/arm64/kernel/signal.c __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
head 183 arch/arm64/kernel/signal.c __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);
head 195 arch/arm64/kernel/signal.c __get_user_error(magic, &ctx->head.magic, err);
head 196 arch/arm64/kernel/signal.c __get_user_error(size, &ctx->head.size, err);
head 237 arch/arm64/kernel/signal.c __put_user_error(SVE_MAGIC, &ctx->head.magic, err);
head 239 arch/arm64/kernel/signal.c &ctx->head.size, err);
head 270 arch/arm64/kernel/signal.c if (sve.head.size <= sizeof(*user->sve)) {
head 277 arch/arm64/kernel/signal.c if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
head 328 arch/arm64/kernel/signal.c struct _aarch64_ctx __user *head;
head 351 arch/arm64/kernel/signal.c if (limit - offset < sizeof(*head))
head 357 arch/arm64/kernel/signal.c head = (struct _aarch64_ctx __user *)(base + offset);
head 358 arch/arm64/kernel/signal.c __get_user_error(magic, &head->magic, err);
head 359 arch/arm64/kernel/signal.c __get_user_error(size, &head->size, err);
head 380 arch/arm64/kernel/signal.c user->fpsimd = (struct fpsimd_context __user *)head;
head 397 arch/arm64/kernel/signal.c user->sve = (struct sve_context __user *)head;
head 407 arch/arm64/kernel/signal.c userp = (char const __user *)head;
head 466 arch/arm64/kernel/signal.c if (size < sizeof(*head))
head 637 arch/arm64/kernel/signal.c __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
head 638 arch/arm64/kernel/signal.c __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
head 673 arch/arm64/kernel/signal.c __put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
head 674 arch/arm64/kernel/signal.c __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
head 410 arch/ia64/kernel/kprobes.c struct hlist_head *head, empty_rp;
head 417 arch/ia64/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags);
head 432 arch/ia64/kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 449 arch/ia64/kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 131 arch/ia64/kernel/machine_kexec.c (*rnk)(image->head, image->start, ia64_boot_param,
head 105 arch/ia64/kernel/mca_drv.h #define slidx_foreach_entry(pos, head) \
head 106 arch/ia64/kernel/mca_drv.h list_for_each_entry(pos, head, list)
head 107 arch/ia64/kernel/mca_drv.h #define slidx_first_entry(head) \
head 108 arch/ia64/kernel/mca_drv.h (((head)->next != (head)) ? list_entry((head)->next, typeof(slidx_list_t), list) : NULL)
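Note: the arch/arm64/kernel/signal.c hits trace one record walker: every block in the signal frame starts with a struct _aarch64_ctx { u32 magic; u32 size; } header, and the parser advances by head->size while checking both the remaining limit and a minimum size, the guards at lines 351 and 466 above. A minimal sketch of that bounded tag-length walk over an in-memory buffer (parse_records and the record layout are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ctx_hdr { uint32_t magic; uint32_t size; };  /* like _aarch64_ctx */

static int parse_records(const uint8_t *base, size_t limit)
{
	size_t offset = 0;

	while (offset < limit) {
		struct ctx_hdr head;

		if (limit - offset < sizeof(head))    /* truncated header */
			return -1;
		memcpy(&head, base + offset, sizeof(head));
		if (head.magic == 0)                  /* terminator record */
			return 0;
		if (head.size < sizeof(head) ||       /* size must cover header */
		    head.size > limit - offset)       /* and stay in bounds */
			return -1;
		printf("record magic=%#x size=%u\n", head.magic, head.size);
		offset += head.size;
	}
	return -1;  /* ran off the end without a terminator */
}

int main(void)
{
	uint8_t buf[32] = { 0 };
	struct ctx_hdr r = { 0x46508001u, 16 };   /* one 16-byte record */

	memcpy(buf, &r, sizeof(r));               /* zero terminator follows */
	return parse_records(buf, sizeof(buf)) ? 1 : 0;
}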
head 1271 arch/ia64/kernel/unwind.c unsigned short head;
head 1279 arch/ia64/kernel/unwind.c head = unw.lru_head;
head 1280 arch/ia64/kernel/unwind.c script = unw.cache + head;
head 1293 arch/ia64/kernel/unwind.c unw.cache[unw.lru_tail].lru_chain = head;
head 1294 arch/ia64/kernel/unwind.c unw.lru_tail = head;
head 56 arch/m68k/kernel/machine_kexec.c ((relocate_kernel_t) reboot_code_buffer)(image->head & PAGE_MASK,
head 563 arch/m68k/mac/via.c void via1_set_head(int head)
head 565 arch/m68k/mac/via.c if (head == 0)
head 669 arch/mips/include/asm/octeon/cvmx-pow.h uint64_t head:1;
head 692 arch/mips/include/asm/octeon/cvmx-pow.h uint64_t head:1;
head 724 arch/mips/include/asm/octeon/cvmx-pow.h uint64_t head:1;
head 747 arch/mips/include/asm/octeon/cvmx-pow.h uint64_t head:1;
head 492 arch/mips/kernel/kprobes.c struct hlist_head *head, empty_rp;
head 498 arch/mips/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags);
head 513 arch/mips/kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 40 arch/mips/kernel/machine_kexec.c pr_debug(" head: %lx\n", kimage->head);
head 222 arch/mips/kernel/machine_kexec.c (unsigned long) phys_to_virt(image->head & PAGE_MASK);
head 224 arch/mips/kernel/machine_kexec.c kexec_indirection_page = (unsigned long)&image->head;
head 236 arch/mips/kernel/machine_kexec.c for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE);
head 10 arch/parisc/kernel/kexec.c extern void relocate_new_kernel(unsigned long head,
head 38 arch/parisc/kernel/kexec.c pr_debug(" head: %lx\n", kimage->head);
head 74 arch/parisc/kernel/kexec.c void (*reloc)(unsigned long head,
head 105 arch/parisc/kernel/kexec.c reloc(image->head & PAGE_MASK, image->start, phys);
head 195 arch/parisc/kernel/kprobes.c struct hlist_head *head, empty_rp;
head 202 arch/parisc/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags);
head 217 arch/parisc/kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 236 arch/parisc/kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 38 arch/powerpc/include/asm/hvcserver.h extern int hvcs_free_partner_info(struct list_head *head);
head 40 arch/powerpc/include/asm/hvcserver.h struct list_head *head, unsigned long *pi_buff);
head 93 arch/powerpc/include/asm/ps3.h struct list_head head;
head 34 arch/powerpc/include/asm/ps3gpu.h static inline int lv1_gpu_display_sync(u64 context_handle, u64 head,
head 39 arch/powerpc/include/asm/ps3gpu.h head, ddr_offset, 0, 0);
head 42 arch/powerpc/include/asm/ps3gpu.h static inline int lv1_gpu_display_flip(u64 context_handle, u64 head,
head 47 arch/powerpc/include/asm/ps3gpu.h head, ddr_offset, 0, 0);
head 400 arch/powerpc/kernel/kprobes.c struct hlist_head *head, empty_rp;
head 406 arch/powerpc/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags);
head 421 arch/powerpc/kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 43 arch/powerpc/kernel/machine_kexec_32.c page_list = image->head;
head 122 arch/powerpc/kernel/machine_kexec_64.c copy_segments(image->head);
head 931 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long head, i, j;
head 948 arch/powerpc/kvm/book3s_64_mmu_hv.c i = head = *rmapp & KVMPPC_RMAP_INDEX;
head 976 arch/powerpc/kvm/book3s_64_mmu_hv.c } while ((i = j) != head);
head 994 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long head, i, j;
head 1008 arch/powerpc/kvm/book3s_64_mmu_hv.c i = head = *rmapp & KVMPPC_RMAP_INDEX;
head 1014 arch/powerpc/kvm/book3s_64_mmu_hv.c } while ((i = j) != head);
head 1051 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long head, i, j;
head 1064 arch/powerpc/kvm/book3s_64_mmu_hv.c i = head = *rmapp & KVMPPC_RMAP_INDEX;
head 1123 arch/powerpc/kvm/book3s_64_mmu_hv.c } while ((i = j) != head);
head 49 arch/powerpc/kvm/book3s_64_vio.c static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
head 51 arch/powerpc/kvm/book3s_64_vio.c struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
head 177 arch/powerpc/kvm/book3s_64_vio.c static void release_spapr_tce_table(struct rcu_head *head)
head 179 arch/powerpc/kvm/book3s_64_vio.c struct kvmppc_spapr_tce_table *stt = container_of(head,
head 84 arch/powerpc/kvm/book3s_hv_rm_mmu.c struct revmap_entry *head, *tail;
head 89 arch/powerpc/kvm/book3s_hv_rm_mmu.c head = &kvm->arch.hpt.rev[i];
head 91 arch/powerpc/kvm/book3s_hv_rm_mmu.c head = real_vmalloc_addr(head);
head 92 arch/powerpc/kvm/book3s_hv_rm_mmu.c tail = &kvm->arch.hpt.rev[head->back];
head 96 arch/powerpc/kvm/book3s_hv_rm_mmu.c rev->back = head->back;
head 98 arch/powerpc/kvm/book3s_hv_rm_mmu.c head->back = pte_index;
head 165 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long ptel, head;
head 178 arch/powerpc/kvm/book3s_hv_rm_mmu.c head = *rmap & KVMPPC_RMAP_INDEX;
head 183 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (head == pte_index) {
head 184 arch/powerpc/kvm/book3s_hv_rm_mmu.c head = rev->forw;
head 185 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (head == pte_index)
head 188 arch/powerpc/kvm/book3s_hv_rm_mmu.c *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
head 95 arch/powerpc/kvm/book3s_mmu_hpte.c static void free_pte_rcu(struct rcu_head *head)
head 97 arch/powerpc/kvm/book3s_mmu_hpte.c struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
head 231 arch/powerpc/mm/book3s64/iommu_api.c static void mm_iommu_free(struct rcu_head *head)
head 233 arch/powerpc/mm/book3s64/iommu_api.c struct mm_iommu_table_group_mem_t *mem = container_of(head,
head 261 arch/powerpc/mm/hugetlbpage.c static void hugepd_free_rcu_callback(struct rcu_head *head)
head 264 arch/powerpc/mm/hugetlbpage.c container_of(head, struct hugepd_freelist, rcu);
head 74 arch/powerpc/oprofile/cell/pr_util.h unsigned int head, tail;
head 55 arch/powerpc/oprofile/cell/spu_task_sync.c if (spu_buff[spu].head >= spu_buff[spu].tail) {
head 56 arch/powerpc/oprofile/cell/spu_task_sync.c if ((spu_buff[spu].head - spu_buff[spu].tail)
head 60 arch/powerpc/oprofile/cell/spu_task_sync.c } else if (spu_buff[spu].tail > spu_buff[spu].head) {
head 61 arch/powerpc/oprofile/cell/spu_task_sync.c if ((spu_buff[spu].tail - spu_buff[spu].head)
head 67 arch/powerpc/oprofile/cell/spu_task_sync.c spu_buff[spu].buff[spu_buff[spu].head] = value;
head 68 arch/powerpc/oprofile/cell/spu_task_sync.c spu_buff[spu].head++;
head 70 arch/powerpc/oprofile/cell/spu_task_sync.c if (spu_buff[spu].head >= max_spu_buff)
head 71 arch/powerpc/oprofile/cell/spu_task_sync.c spu_buff[spu].head = 0;
head 106 arch/powerpc/oprofile/cell/spu_task_sync.c curr_head = spu_buff[spu].head;
head 466 arch/powerpc/oprofile/cell/spu_task_sync.c spu_buff[spu].head = 0;
head 2305 arch/powerpc/platforms/cell/spufs/file.c return (ctx->switch_log->head - ctx->switch_log->tail) %
head 2336 arch/powerpc/platforms/cell/spufs/file.c ctx->switch_log->head = ctx->switch_log->tail = 0;
head 2490 arch/powerpc/platforms/cell/spufs/file.c p = ctx->switch_log->log + ctx->switch_log->head;
head 2497 arch/powerpc/platforms/cell/spufs/file.c ctx->switch_log->head =
head 2498 arch/powerpc/platforms/cell/spufs/file.c (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
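Note: the container_of(head, ..., rcu)/rcu_head clusters above (book3s_64_vio, book3s_mmu_hpte, iommu_api, hugetlbpage) are all the same deferred-free shape: the object embeds a callback node, the callback receives only that node, and container_of recovers the whole object. A minimal userspace sketch of the shape, with a plain callback queue standing in for call_rcu (defer_call and struct defer_node are illustrative):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct defer_node {                       /* stand-in for struct rcu_head */
	struct defer_node *next;
	void (*func)(struct defer_node *);
};

struct table {                            /* object with embedded node */
	int entries;
	struct defer_node rcu;
};

static struct defer_node *pending;

static void defer_call(struct defer_node *n, void (*f)(struct defer_node *))
{
	n->func = f;                      /* like call_rcu(&obj->rcu, f) */
	n->next = pending;
	pending = n;
}

static void table_free(struct defer_node *head)
{
	struct table *t = container_of(head, struct table, rcu);
	printf("freeing table with %d entries\n", t->entries);
	free(t);
}

int main(void)
{
	struct table *t = malloc(sizeof(*t));
	t->entries = 42;
	defer_call(&t->rcu, table_free);

	/* "grace period" elapses; run the queued callbacks */
	while (pending) {
		struct defer_node *n = pending;
		pending = n->next;
		n->func(n);
	}
	return 0;
}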
head 56 arch/powerpc/platforms/cell/spufs/spufs.h unsigned long head;
head 343 arch/powerpc/platforms/powernv/pci-ioda-tce.c static void pnv_iommu_table_group_link_free(struct rcu_head *head)
head 345 arch/powerpc/platforms/powernv/pci-ioda-tce.c struct iommu_table_group_link *tgl = container_of(head,
head 401 arch/powerpc/platforms/ps3/mm.c list_for_each_entry(c, &r->chunk_list.head, link) {
head 430 arch/powerpc/platforms/ps3/mm.c list_for_each_entry(c, &r->chunk_list.head, link) {
head 536 arch/powerpc/platforms/ps3/mm.c list_add(&c->link, &r->chunk_list.head);
head 570 arch/powerpc/platforms/ps3/mm.c if (list_empty(&r->chunk_list.head)) {
head 575 arch/powerpc/platforms/ps3/mm.c last = list_entry(r->chunk_list.head.next,
head 606 arch/powerpc/platforms/ps3/mm.c list_add(&c->link, &r->chunk_list.head);
head 656 arch/powerpc/platforms/ps3/mm.c INIT_LIST_HEAD(&r->chunk_list.head);
head 678 arch/powerpc/platforms/ps3/mm.c INIT_LIST_HEAD(&r->chunk_list.head);
head 718 arch/powerpc/platforms/ps3/mm.c list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
head 741 arch/powerpc/platforms/ps3/mm.c list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
head 62 arch/powerpc/platforms/pseries/hvcserver.c int hvcs_free_partner_info(struct list_head *head)
head 67 arch/powerpc/platforms/pseries/hvcserver.c if (!head)
head 70 arch/powerpc/platforms/pseries/hvcserver.c while (!list_empty(head)) {
head 71 arch/powerpc/platforms/pseries/hvcserver.c element = head->next;
head 119 arch/powerpc/platforms/pseries/hvcserver.c int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
head 133 arch/powerpc/platforms/pseries/hvcserver.c if (!head || !pi_buff)
head 138 arch/powerpc/platforms/pseries/hvcserver.c INIT_LIST_HEAD(head);
head 148 arch/powerpc/platforms/pseries/hvcserver.c if (!list_empty(head))
head 169 arch/powerpc/platforms/pseries/hvcserver.c hvcs_free_partner_info(head);
head 183 arch/powerpc/platforms/pseries/hvcserver.c list_add_tail(&(next_partner_info->node), head);
head 79 arch/s390/include/asm/gmap.h #define gmap_for_each_rmap(pos, head) \
head 80 arch/s390/include/asm/gmap.h for (pos = (head); pos; pos = pos->next)
head 82 arch/s390/include/asm/gmap.h #define gmap_for_each_rmap_safe(pos, n, head) \
head 83 arch/s390/include/asm/gmap.h for (pos = (head); n = pos ? pos->next : NULL, pos; pos = n)
head 368 arch/s390/kernel/kprobes.c struct hlist_head *head, empty_rp;
head 375 arch/s390/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags);
head 394 arch/s390/kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 413 arch/s390/kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 287 arch/s390/kernel/machine_kexec.c (*data_mover)(&image->head, image->start);
head 80 arch/s390/kernel/perf_cpum_sf.c unsigned long head; /* index of SDB of buffer head */
head 1346 arch/s390/kernel/perf_cpum_sf.c #define AUX_SDB_NUM_ALERT(aux) AUX_SDB_NUM(aux, aux->head, aux->alert_mark)
head 1347 arch/s390/kernel/perf_cpum_sf.c #define AUX_SDB_NUM_EMPTY(aux) AUX_SDB_NUM(aux, aux->head, aux->empty_mark)
head 1382 arch/s390/kernel/perf_cpum_sf.c for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
head 1412 arch/s390/kernel/perf_cpum_sf.c unsigned long head, base, offset;
head 1415 arch/s390/kernel/perf_cpum_sf.c if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
head 1418 arch/s390/kernel/perf_cpum_sf.c aux->head = handle->head >> PAGE_SHIFT;
head 1437 arch/s390/kernel/perf_cpum_sf.c aux->empty_mark = aux->head + range - 1;
head 1441 arch/s390/kernel/perf_cpum_sf.c aux->alert_mark = aux->head + range/2 - 1;
head 1446 arch/s390/kernel/perf_cpum_sf.c head = AUX_SDB_INDEX(aux, aux->head);
head 1447 arch/s390/kernel/perf_cpum_sf.c base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE];
head 1448 arch/s390/kernel/perf_cpum_sf.c offset = head % CPUM_SF_SDB_PER_TABLE;
head 1450 arch/s390/kernel/perf_cpum_sf.c cpuhw->lsctl.dear = aux->sdb_index[head];
head 1456 arch/s390/kernel/perf_cpum_sf.c aux->head, aux->alert_mark, aux->empty_mark,
head 1458 arch/s390/kernel/perf_cpum_sf.c head / CPUM_SF_SDB_PER_TABLE,
head 1566 arch/s390/kernel/perf_cpum_sf.c aux->empty_mark = aux->head + range - 1;
head 1607 arch/s390/kernel/perf_cpum_sf.c aux->head = handle->head >> PAGE_SHIFT;
head 1610 arch/s390/kernel/perf_cpum_sf.c aux->alert_mark = aux->head;
head 1612 arch/s390/kernel/perf_cpum_sf.c aux->alert_mark = aux->head + range/2 - 1;
head 1625 arch/s390/kernel/perf_cpum_sf.c aux->head, range, overflow);
head 1631 arch/s390/kernel/perf_cpum_sf.c aux->head, aux->alert_mark);
head 1638 arch/s390/kernel/perf_cpum_sf.c aux->head, aux->alert_mark, aux->empty_mark,
head 258 arch/s390/kernel/vtime.c static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
head 262 arch/s390/kernel/vtime.c list_for_each_entry(tmp, head, entry) {
head 268 arch/s390/kernel/vtime.c list_add_tail(&timer->entry, head);
head 152 arch/s390/mm/gmap.c struct gmap_rmap *rmap, *rnext, *head;
head 170 arch/s390/mm/gmap.c head = radix_tree_delete(root, index);
head 171 arch/s390/mm/gmap.c gmap_for_each_rmap_safe(rmap, rnext, head)
head 2176 arch/s390/mm/gmap.c struct gmap_rmap *rmap, *rnext, *head;
head 2199 arch/s390/mm/gmap.c head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
head 2200 arch/s390/mm/gmap.c gmap_for_each_rmap_safe(rmap, rnext, head) {
head 306 arch/sh/kernel/kprobes.c struct hlist_head *head, empty_rp;
head 312 arch/sh/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags);
head 327 arch/sh/kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 85 arch/sh/kernel/machine_kexec.c for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
head 103 arch/sh/kernel/machine_kexec.c page_list = image->head;
head 130 arch/sh/kernel/machine_kexec.c for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
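Note: arch/s390/kernel/vtime.c's list_add_sorted (lines 258-268 above) is the textbook sorted insert on a struct list_head: scan for the first entry that should follow the new one, link in front of it, and fall back to a tail insert when the scan runs off the end. A minimal userspace sketch with a hand-rolled circular list (the list primitives are re-derived here, not taken from the kernel headers):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_before(struct list_head *n, struct list_head *pos)
{
	n->prev = pos->prev;
	n->next = pos;
	pos->prev->next = n;
	pos->prev = n;
}

struct vtimer { unsigned long expires; struct list_head entry; };

/* Insert keeping ->expires ascending, like vtime.c's list_add_sorted(). */
static void list_add_sorted(struct vtimer *timer, struct list_head *head)
{
	struct list_head *pos;

	for (pos = head->next; pos != head; pos = pos->next) {
		struct vtimer *tmp = container_of(pos, struct vtimer, entry);
		if (tmp->expires > timer->expires) {
			list_add_before(&timer->entry, pos);
			return;
		}
	}
	list_add_before(&timer->entry, head);  /* tail insert */
}

int main(void)
{
	struct list_head head;
	struct vtimer a = { 30 }, b = { 10 }, c = { 20 };

	list_init(&head);
	list_add_sorted(&a, &head);
	list_add_sorted(&b, &head);
	list_add_sorted(&c, &head);
	for (struct list_head *p = head.next; p != &head; p = p->next)
		printf("%lu\n", container_of(p, struct vtimer, entry)->expires);
	return 0;
}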
head 655 arch/sh/mm/pmb.c static void __init pmb_merge(struct pmb_entry *head)
head 661 arch/sh/mm/pmb.c span = newsize = head->size;
head 663 arch/sh/mm/pmb.c tail = head->link;
head 686 arch/sh/mm/pmb.c head->flags &= ~PMB_SZ_MASK;
head 687 arch/sh/mm/pmb.c head->flags |= pmb_size_to_flags(newsize);
head 689 arch/sh/mm/pmb.c head->size = newsize;
head 691 arch/sh/mm/pmb.c __pmb_unmap_entry(head->link, depth);
head 692 arch/sh/mm/pmb.c __set_pmb_entry(head);
head 469 arch/sparc/kernel/kprobes.c struct hlist_head *head, empty_rp;
head 475 arch/sparc/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags);
head 490 arch/sparc/kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 320 arch/sparc/kernel/ldc.c static int __set_rx_head(struct ldc_channel *lp, unsigned long head)
head 327 arch/sparc/kernel/ldc.c err = sun4v_ldc_rx_set_qhead(lp->id, head);
head 760 arch/sparc/kernel/ldc.c unsigned long head = lp->tx_acked;
head 764 arch/sparc/kernel/ldc.c struct ldc_packet *p = lp->tx_base + (head / LDC_PACKET_SIZE);
head 766 arch/sparc/kernel/ldc.c head = tx_advance(lp, head);
head 769 arch/sparc/kernel/ldc.c lp->tx_acked = head;
head 772 arch/sparc/kernel/ldc.c if (head == lp->tx_tail)
head 1706 arch/sparc/kernel/ldc.c static int rx_set_head(struct ldc_channel *lp, unsigned long head)
head 1708 arch/sparc/kernel/ldc.c int err = __set_rx_head(lp, head);
head 1713 arch/sparc/kernel/ldc.c lp->rx_head = head;
head 151 arch/sparc/kernel/pci_fire.c unsigned long *head)
head 153 arch/sparc/kernel/pci_fire.c *head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
head 158 arch/sparc/kernel/pci_fire.c unsigned long *head, unsigned long *msi)
head 164 arch/sparc/kernel/pci_fire.c ep = &base[*head];
head 185 arch/sparc/kernel/pci_fire.c (*head)++;
head 186 arch/sparc/kernel/pci_fire.c if (*head >= pbm->msiq_ent_count)
head 187 arch/sparc/kernel/pci_fire.c *head = 0;
head 193 arch/sparc/kernel/pci_fire.c unsigned long head)
head 195 arch/sparc/kernel/pci_fire.c upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
head 38 arch/sparc/kernel/pci_impl.h unsigned long *head);
head 40 arch/sparc/kernel/pci_impl.h unsigned long *head, unsigned long *msi);
head 42 arch/sparc/kernel/pci_impl.h unsigned long head);
head 19 arch/sparc/kernel/pci_msi.c unsigned long orig_head, head;
head 24 arch/sparc/kernel/pci_msi.c err = ops->get_head(pbm, msiqid, &head);
head 28 arch/sparc/kernel/pci_msi.c orig_head = head;
head 32 arch/sparc/kernel/pci_msi.c err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
head 46 arch/sparc/kernel/pci_msi.c if (likely(head != orig_head)) {
head 47 arch/sparc/kernel/pci_msi.c err = ops->set_head(pbm, msiqid, head);
head 61 arch/sparc/kernel/pci_msi.c head, msiqid, err);
head 67 arch/sparc/kernel/pci_msi.c head, msiqid, err);
head 962 arch/sparc/kernel/pci_sun4v.c unsigned long *head)
head 966 arch/sparc/kernel/pci_sun4v.c err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
head 971 arch/sparc/kernel/pci_sun4v.c if (unlikely(*head >= limit))
head 978 arch/sparc/kernel/pci_sun4v.c unsigned long msiqid, unsigned long *head,
head 988 arch/sparc/kernel/pci_sun4v.c *head);
head 1009 arch/sparc/kernel/pci_sun4v.c (*head) += sizeof(struct pci_sun4v_msiq_entry);
head 1010 arch/sparc/kernel/pci_sun4v.c if (*head >=
head 1012 arch/sparc/kernel/pci_sun4v.c *head = 0;
head 1018 arch/sparc/kernel/pci_sun4v.c unsigned long head)
head 1022 arch/sparc/kernel/pci_sun4v.c err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
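Note: the sparc pci_fire.c/pci_msi.c/pci_sun4v.c hits above are one protocol: read the hardware queue's head index, dequeue entries while advancing head with wraparound (lines 185-187), and only write the new head back to the device if anything was consumed (head != orig_head, line 46). A minimal sketch of that consumer loop over a plain array queue (get_head/set_head are illustrative stand-ins for the register accessors):

#include <stdio.h>

#define QUEUE_ENTRIES 8

static unsigned long hw_head;                 /* stand-in device register */
static unsigned long queue[QUEUE_ENTRIES];    /* stand-in event queue */

static unsigned long get_head(void) { return hw_head; }
static void set_head(unsigned long h) { hw_head = h; }

/* Returns number of events consumed. */
static int process_queue(unsigned long tail)
{
	unsigned long head = get_head(), orig_head = head;
	int seen = 0;

	while (head != tail) {
		printf("event %lu at slot %lu\n", queue[head], head);
		head++;                       /* advance with wraparound */
		if (head >= QUEUE_ENTRIES)
			head = 0;
		seen++;
	}
	if (head != orig_head)                /* write back only if we moved */
		set_head(head);
	return seen;
}

int main(void)
{
	for (int i = 0; i < QUEUE_ENTRIES; i++)
		queue[i] = 100 + i;
	hw_head = 6;                          /* consumer position */
	return process_queue(2) == 4 ? 0 : 1; /* slots 6,7 wrap to 0,1 */
}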
head 54 arch/sparc/kernel/pci_sun4v.h unsigned long *head);
head 57 arch/sparc/kernel/pci_sun4v.h unsigned long head);
head 60 arch/sparc/kernel/pci_sun4v.h unsigned long *head);
head 425 arch/sparc/mm/srmmu.c static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
head 427 arch/sparc/mm/srmmu.c entry->next = head;
head 428 arch/sparc/mm/srmmu.c (entry->prev = head->prev)->next = entry;
head 429 arch/sparc/mm/srmmu.c head->prev = entry;
head 43 arch/um/drivers/line.c n = line->head - line->tail;
head 97 arch/um/drivers/line.c line->head = line->buffer;
head 134 arch/um/drivers/line.c if ((line->buffer == NULL) || (line->head == line->tail))
head 137 arch/um/drivers/line.c if (line->tail < line->head) {
head 139 arch/um/drivers/line.c count = line->buffer + LINE_BUFSIZE - line->head;
head 141 arch/um/drivers/line.c n = write_chan(line->chan_out, line->head, count,
head 150 arch/um/drivers/line.c line->head = line->buffer;
head 152 arch/um/drivers/line.c line->head += n;
head 157 arch/um/drivers/line.c count = line->tail - line->head;
head 158 arch/um/drivers/line.c n = write_chan(line->chan_out, line->head, count,
head 164 arch/um/drivers/line.c line->head += n;
head 165 arch/um/drivers/line.c return line->head == line->tail;
head 199 arch/um/drivers/line.c if (line->head != line->tail)
head 257 arch/um/drivers/line.c line->head = line->buffer;
head 51 arch/um/drivers/line.h char *head;
head 228 arch/um/drivers/vector_kern.c qi->head =
head 229 arch/um/drivers/vector_kern.c (qi->head + advance)
head 241 arch/um/drivers/vector_kern.c qi->head = 0;
head 361 arch/um/drivers/vector_kern.c for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
head 398 arch/um/drivers/vector_kern.c send_from += qi->head;
head 400 arch/um/drivers/vector_kern.c if (send_len + qi->head > qi->max_depth)
head 401 arch/um/drivers/vector_kern.c send_len = qi->max_depth - qi->head;
head 572 arch/um/drivers/vector_kern.c result->head = 0;
head 43 arch/um/drivers/vector_kern.h int queue_depth, head, tail, max_depth, max_iov_frags;
head 58 arch/x86/events/intel/bts.c local_t head;
head 152 arch/x86/events/intel/bts.c index = local_read(&buf->head);
head 176 arch/x86/events/intel/bts.c static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head)
head 178 arch/x86/events/intel/bts.c unsigned long index = head - phys->offset;
head 188 arch/x86/events/intel/bts.c unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;
head 193 arch/x86/events/intel/bts.c head = index + bts_buffer_offset(buf, buf->cur_buf);
head 194 arch/x86/events/intel/bts.c old = local_xchg(&buf->head, head);
head 197 arch/x86/events/intel/bts.c if (old == head)
head 208 arch/x86/events/intel/bts.c local_add(head - old, &buf->data_size);
head 210 arch/x86/events/intel/bts.c local_set(&buf->data_size, head);
head 319 arch/x86/events/intel/bts.c bts->handle.head =
head 371 arch/x86/events/intel/bts.c unsigned long head, space, next_space, pad, gap, skip, wakeup;
head 379 arch/x86/events/intel/bts.c head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
head 382 arch/x86/events/intel/bts.c space = phys->offset + phys->displacement + phys->size - head;
head 405 arch/x86/events/intel/bts.c bts_buffer_pad_out(phys, head);
head 412 arch/x86/events/intel/bts.c head = phys->offset + phys->displacement;
head 419 arch/x86/events/intel/bts.c local_set(&buf->head, head);
head 426 arch/x86/events/intel/bts.c handle->head;
head 432 arch/x86/events/intel/bts.c buf->end = head + space;
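Note: the arch/um/drivers/line.c hits (lines 134-165) show the classic two-segment flush of a circular byte buffer: when head has wrapped past tail the writer first drains from head to the end of the buffer, resets head to the start, then drains the linear head..tail span. A minimal sketch of that shape (write_chan is replaced by an illustrative sink that may accept fewer bytes than asked):

#include <stdio.h>
#include <string.h>

#define LINE_BUFSIZE 16

static char buffer[LINE_BUFSIZE];
static char *head, *tail;   /* head = oldest unwritten, tail = next free */

/* Illustrative sink: pretend the channel accepted everything. */
static int write_chan(const char *p, int count)
{
	fwrite(p, 1, count, stdout);
	return count;
}

/* Returns 1 when the buffer drained completely, 0 otherwise. */
static int flush_buffer(void)
{
	int n, count;

	if (tail < head) {                     /* wrapped: drain head..end */
		count = buffer + LINE_BUFSIZE - head;
		n = write_chan(head, count);
		if (n < 0)
			return n;
		if (n == count)
			head = buffer;         /* wrap head to the start */
		else {
			head += n;
			return 0;              /* short write, stop here */
		}
	}
	count = tail - head;                   /* linear part: head..tail */
	n = write_chan(head, count);
	if (n < 0)
		return n;
	head += n;
	return head == tail;
}

int main(void)
{
	/* Simulate wrapped data: "AB" at the end of the buffer, "CD" at
	 * the start, so the valid bytes are head..end then start..tail. */
	memcpy(buffer, "CD", 2);
	memcpy(buffer + LINE_BUFSIZE - 2, "AB", 2);
	head = buffer + LINE_BUFSIZE - 2;
	tail = buffer + 2;
	return flush_buffer() == 1 ? 0 : 1;    /* prints "ABCD" */
}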
head 479 arch/x86/events/intel/bts.c old_head = local_read(&buf->head);
head 483 arch/x86/events/intel/bts.c if (old_head == local_read(&buf->head))
head 815 arch/x86/events/intel/pt.c old = (local64_xchg(&buf->head, base) &
head 883 arch/x86/events/intel/pt.c void *head = pt_buffer_region(buf);
head 886 arch/x86/events/intel/pt.c memset(head + buf->output_off, 0,
head 1030 arch/x86/events/intel/pt.c unsigned long head = local64_read(&buf->head);
head 1057 arch/x86/events/intel/pt.c if (!offset_in_page(head + handle->size + 1))
head 1060 arch/x86/events/intel/pt.c idx = (head >> PAGE_SHIFT) + npages;
head 1072 arch/x86/events/intel/pt.c idx = (head >> PAGE_SHIFT) + npages - 1;
head 1105 arch/x86/events/intel/pt.c static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
head 1112 arch/x86/events/intel/pt.c head &= (buf->nr_pages << PAGE_SHIFT) - 1;
head 1114 arch/x86/events/intel/pt.c pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
head 1120 arch/x86/events/intel/pt.c buf->output_off = head & (pt_buffer_region_size(buf) - 1);
head 1122 arch/x86/events/intel/pt.c local64_set(&buf->head, head);
head 1300 arch/x86/events/intel/pt.c struct perf_addr_filters_head *head = perf_event_addr_filters(event);
head 1310 arch/x86/events/intel/pt.c list_for_each_entry(filter, &head->list, entry) {
head 1374 arch/x86/events/intel/pt.c pt_buffer_reset_offsets(buf, pt->handle.head);
head 1438 arch/x86/events/intel/pt.c pt_buffer_reset_offsets(buf, pt->handle.head);
head 1492 arch/x86/events/intel/pt.c pt->handle.head =
head 81 arch/x86/events/intel/pt.h local64_t head;
head 497 arch/x86/include/asm/kvm_host.h struct list_head head;
head 78 arch/x86/kernel/apic/io_apic.c #define for_each_irq_pin(entry, head) \
head 79 arch/x86/kernel/apic/io_apic.c list_for_each_entry(entry, &head, list)
head 1281 arch/x86/kernel/cpu/mce/amd.c struct list_head *head = &b->blocks->miscj;
head 1290 arch/x86/kernel/cpu/mce/amd.c list_for_each_entry_safe(pos, tmp, head, miscj) {
head 1294 arch/x86/kernel/cpu/mce/amd.c list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
head 1380 arch/x86/kernel/cpu/mce/amd.c struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
head 1382 arch/x86/kernel/cpu/mce/amd.c if (!head)
head 1385 arch/x86/kernel/cpu/mce/amd.c list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
head 1390 arch/x86/kernel/cpu/mce/amd.c kobject_put(&head->blocks->kobj);
head 57 arch/x86/kernel/cpu/mce/genpool.c struct llist_node *head;
head 61 arch/x86/kernel/cpu/mce/genpool.c head = llist_del_all(&mce_event_llist);
head 62 arch/x86/kernel/cpu/mce/genpool.c if (!head)
head 66 arch/x86/kernel/cpu/mce/genpool.c llist_for_each_entry_safe(node, t, head, llnode) {
head 76 arch/x86/kernel/cpu/mce/genpool.c struct llist_node *head;
head 80 arch/x86/kernel/cpu/mce/genpool.c head = llist_del_all(&mce_event_llist);
head 81 arch/x86/kernel/cpu/mce/genpool.c if (!head)
head 84 arch/x86/kernel/cpu/mce/genpool.c head = llist_reverse_order(head);
head 85 arch/x86/kernel/cpu/mce/genpool.c llist_for_each_entry_safe(node, tmp, head, llnode) {
head 301 arch/x86/kernel/cpu/resctrl/monitor.c struct list_head *head;
head 311 arch/x86/kernel/cpu/resctrl/monitor.c head = &rdtgrp->mon.crdtgrp_list;
head 314 arch/x86/kernel/cpu/resctrl/monitor.c list_for_each_entry(entry, head, mon.crdtgrp_list) {
head 360 arch/x86/kernel/cpu/resctrl/monitor.c struct list_head *head;
head 385 arch/x86/kernel/cpu/resctrl/monitor.c head = &rgrp->mon.crdtgrp_list;
head 386 arch/x86/kernel/cpu/resctrl/monitor.c list_for_each_entry(entry, head, mon.crdtgrp_list) {
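Note: the mce/genpool.c hits show the lock-free llist pattern: producers push with a CAS, the consumer detaches the whole list at once (llist_del_all), and, since pushes build the list LIFO, reverses it (llist_reverse_order) before processing in arrival order. A minimal C11 sketch of the same three steps (type and function names here are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct lnode { struct lnode *next; int val; };

static _Atomic(struct lnode *) event_list;

static void push(struct lnode *n)              /* like llist_add() */
{
	struct lnode *old = atomic_load(&event_list);
	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&event_list, &old, n));
}

static struct lnode *del_all(void)             /* like llist_del_all() */
{
	return atomic_exchange(&event_list, NULL);
}

static struct lnode *reverse(struct lnode *head)  /* llist_reverse_order() */
{
	struct lnode *new_head = NULL;

	while (head) {
		struct lnode *next = head->next;
		head->next = new_head;
		new_head = head;
		head = next;
	}
	return new_head;
}

int main(void)
{
	static struct lnode n[3] = { { .val = 0 }, { .val = 1 }, { .val = 2 } };

	for (int i = 0; i < 3; i++)
		push(&n[i]);
	for (struct lnode *p = reverse(del_all()); p; p = p->next)
		printf("%d\n", p->val);        /* 0 1 2: arrival order */
	return 0;
}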
head 430 arch/x86/kernel/cpu/resctrl/monitor.c list_for_each_entry(entry, head, mon.crdtgrp_list) {
head 512 arch/x86/kernel/cpu/resctrl/monitor.c struct list_head *head;
head 527 arch/x86/kernel/cpu/resctrl/monitor.c head = &prgrp->mon.crdtgrp_list;
head 528 arch/x86/kernel/cpu/resctrl/monitor.c list_for_each_entry(crgrp, head, mon.crdtgrp_list)
head 337 arch/x86/kernel/cpu/resctrl/rdtgroup.c struct list_head *head;
head 360 arch/x86/kernel/cpu/resctrl/rdtgroup.c head = &prgrp->mon.crdtgrp_list;
head 361 arch/x86/kernel/cpu/resctrl/rdtgroup.c list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
head 390 arch/x86/kernel/cpu/resctrl/rdtgroup.c struct list_head *head;
head 431 arch/x86/kernel/cpu/resctrl/rdtgroup.c head = &rdtgrp->mon.crdtgrp_list;
head 432 arch/x86/kernel/cpu/resctrl/rdtgroup.c list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
head 515 arch/x86/kernel/cpu/resctrl/rdtgroup.c static void move_myself(struct callback_head *head)
head 520 arch/x86/kernel/cpu/resctrl/rdtgroup.c callback = container_of(head, struct task_move_callback, work);
head 2215 arch/x86/kernel/cpu/resctrl/rdtgroup.c struct list_head *head;
head 2217 arch/x86/kernel/cpu/resctrl/rdtgroup.c head = &rdtgrp->mon.crdtgrp_list;
head 2218 arch/x86/kernel/cpu/resctrl/rdtgroup.c list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
head 2409 arch/x86/kernel/cpu/resctrl/rdtgroup.c struct list_head *head;
head 2418 arch/x86/kernel/cpu/resctrl/rdtgroup.c head = &prgrp->mon.crdtgrp_list;
head 2419 arch/x86/kernel/cpu/resctrl/rdtgroup.c list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
head 64 arch/x86/kernel/head64.c #define __head __section(.head.text)
head 760 arch/x86/kernel/kprobes/core.c struct hlist_head *head, empty_rp;
head 780 arch/x86/kernel/kprobes/core.c kretprobe_hash_lock(current, &head, &flags);
head 805 arch/x86/kernel/kprobes/core.c hlist_for_each_entry(ri, head, hlist) {
head 841 arch/x86/kernel/kprobes/core.c hlist_for_each_entry_safe(ri, tmp, head, hlist) {
head 240 arch/x86/kernel/machine_kexec_32.c image->start = relocate_kernel_ptr((unsigned long)image->head,
head 434 arch/x86/kernel/machine_kexec_64.c image->start = relocate_kernel((unsigned long)image->head,
head 46 arch/x86/kernel/nmi.c struct list_head head;
head 53 arch/x86/kernel/nmi.c .head = LIST_HEAD_INIT(nmi_desc[0].head),
head 57 arch/x86/kernel/nmi.c .head = LIST_HEAD_INIT(nmi_desc[1].head),
head 61 arch/x86/kernel/nmi.c .head = LIST_HEAD_INIT(nmi_desc[2].head),
head 65 arch/x86/kernel/nmi.c .head = LIST_HEAD_INIT(nmi_desc[3].head),
head 139 arch/x86/kernel/nmi.c list_for_each_entry_rcu(a, &desc->head, list) {
head 173 arch/x86/kernel/nmi.c WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
head 174 arch/x86/kernel/nmi.c WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
head 181 arch/x86/kernel/nmi.c list_add_rcu(&action->list, &desc->head);
head 183 arch/x86/kernel/nmi.c list_add_tail_rcu(&action->list, &desc->head);
head 198 arch/x86/kernel/nmi.c list_for_each_entry_rcu(n, &desc->head, list) {
head 369 arch/x86/kvm/mtrr.c list_for_each_entry(tmp, &mtrr_state->head, node)
head 441 arch/x86/kvm/mtrr.c INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
head 522 arch/x86/kvm/mtrr.c list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
head 537 arch/x86/kvm/mtrr.c iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
head 164 arch/x86/kvm/page_track.c struct kvm_page_track_notifier_head *head;
head 166 arch/x86/kvm/page_track.c head = &kvm->arch.track_notifier_head;
head 167 arch/x86/kvm/page_track.c cleanup_srcu_struct(&head->track_srcu);
head 172 arch/x86/kvm/page_track.c struct kvm_page_track_notifier_head *head;
head 174 arch/x86/kvm/page_track.c head = &kvm->arch.track_notifier_head;
head 175 arch/x86/kvm/page_track.c init_srcu_struct(&head->track_srcu);
head 176 arch/x86/kvm/page_track.c INIT_HLIST_HEAD(&head->track_notifier_list);
head 187 arch/x86/kvm/page_track.c struct kvm_page_track_notifier_head *head;
head 189 arch/x86/kvm/page_track.c head = &kvm->arch.track_notifier_head;
head 192 arch/x86/kvm/page_track.c hlist_add_head_rcu(&n->node, &head->track_notifier_list);
head 205 arch/x86/kvm/page_track.c struct kvm_page_track_notifier_head *head;
head 207 arch/x86/kvm/page_track.c head = &kvm->arch.track_notifier_head;
head 212 arch/x86/kvm/page_track.c synchronize_srcu(&head->track_srcu);
head 226 arch/x86/kvm/page_track.c struct kvm_page_track_notifier_head *head;
head 230 arch/x86/kvm/page_track.c head = &vcpu->kvm->arch.track_notifier_head;
head 232 arch/x86/kvm/page_track.c if (hlist_empty(&head->track_notifier_list))
head 235 arch/x86/kvm/page_track.c idx = srcu_read_lock(&head->track_srcu);
head 236 arch/x86/kvm/page_track.c hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
head 239 arch/x86/kvm/page_track.c srcu_read_unlock(&head->track_srcu, idx);
head 251 arch/x86/kvm/page_track.c struct kvm_page_track_notifier_head *head;
head 255 arch/x86/kvm/page_track.c head = &kvm->arch.track_notifier_head;
head 257 arch/x86/kvm/page_track.c if (hlist_empty(&head->track_notifier_list))
head 260 arch/x86/kvm/page_track.c idx = srcu_read_lock(&head->track_srcu);
head 261 arch/x86/kvm/page_track.c hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
head 264 arch/x86/kvm/page_track.c srcu_read_unlock(&head->track_srcu, idx);
head 1943 arch/x86/kvm/svm.c struct list_head *head = &sev->regions_list;
head 1955 arch/x86/kvm/svm.c if (!list_empty(head)) {
head 1956 arch/x86/kvm/svm.c list_for_each_safe(pos, q, head) {
head 7122 arch/x86/kvm/svm.c struct list_head *head = &sev->regions_list;
head 7125 arch/x86/kvm/svm.c list_for_each_entry(i, head, list) {
head 111 arch/x86/mm/kmmio.c struct list_head *head;
head 119 arch/x86/mm/kmmio.c head = kmmio_page_list(addr);
head 120 arch/x86/mm/kmmio.c list_for_each_entry_rcu(f, head, list) {
head 478 arch/x86/mm/kmmio.c static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
head 481 arch/x86/mm/kmmio.c head,
head 494 arch/x86/mm/kmmio.c static void remove_kmmio_fault_pages(struct rcu_head *head)
head 497 arch/x86/mm/kmmio.c container_of(head, struct kmmio_delayed_release, rcu);
head 23 arch/x86/oprofile/backtrace.c dump_user_backtrace_32(struct stack_frame_ia32 *head)
head 30 arch/x86/oprofile/backtrace.c bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
head 40 arch/x86/oprofile/backtrace.c if (head >= fp)
head 49 arch/x86/oprofile/backtrace.c struct stack_frame_ia32 *head;
head 55 arch/x86/oprofile/backtrace.c head = (struct stack_frame_ia32 *) regs->bp;
head 56 arch/x86/oprofile/backtrace.c while (depth-- && head)
head 57 arch/x86/oprofile/backtrace.c head = dump_user_backtrace_32(head);
head 70 arch/x86/oprofile/backtrace.c static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
head 76 arch/x86/oprofile/backtrace.c bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
head 84 arch/x86/oprofile/backtrace.c if (head >= bufhead[0].next_frame)
head 93 arch/x86/oprofile/backtrace.c struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
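Note: the oprofile backtrace.c hits here walk a user frame-pointer chain: copy one struct stack_frame { next_frame; return_address } at head, record the return address, and stop unless the chain moves strictly upward (the head >= next_frame check at line 84 guards against loops). A minimal sketch over a synthetic in-memory chain (safe_copy stands in for copy_from_user_nmi):

#include <stdio.h>
#include <string.h>

struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

/* Stand-in for copy_from_user_nmi(): a plain memcpy that "succeeds". */
static int safe_copy(struct stack_frame *dst, const struct stack_frame *src)
{
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

static struct stack_frame *dump_frame(struct stack_frame *head)
{
	struct stack_frame buf;

	if (safe_copy(&buf, head))
		return NULL;
	printf("ret addr %#lx\n", buf.return_address);
	if (head >= buf.next_frame)     /* chain must move up: no loops */
		return NULL;
	return buf.next_frame;
}

int main(void)
{
	struct stack_frame frames[3] = {
		{ &frames[1], 0x1000 },
		{ &frames[2], 0x2000 },
		{ &frames[0], 0x3000 },   /* cycles back: the guard stops here */
	};
	int depth = 8;
	struct stack_frame *head = &frames[0];

	while (depth-- && head)
		head = dump_frame(head);
	return 0;
}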
head 125 arch/x86/oprofile/backtrace.c while (depth-- && head)
head 126 arch/x86/oprofile/backtrace.c head = dump_user_backtrace(head);
head 156 arch/x86/platform/uv/uv_time.c struct uv_rtc_timer_head *head = blade_info[bid];
head 158 arch/x86/platform/uv/uv_time.c if (!head) {
head 159 arch/x86/platform/uv/uv_time.c head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
head 163 arch/x86/platform/uv/uv_time.c if (!head) {
head 167 arch/x86/platform/uv/uv_time.c spin_lock_init(&head->lock);
head 168 arch/x86/platform/uv/uv_time.c head->ncpus = uv_blade_nr_possible_cpus(bid);
head 169 arch/x86/platform/uv/uv_time.c head->next_cpu = -1;
head 170 arch/x86/platform/uv/uv_time.c blade_info[bid] = head;
head 173 arch/x86/platform/uv/uv_time.c head->cpu[bcpu].lcpu = cpu;
head 174 arch/x86/platform/uv/uv_time.c head->cpu[bcpu].expires = ULLONG_MAX;
head 181 arch/x86/platform/uv/uv_time.c static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
head 186 arch/x86/platform/uv/uv_time.c head->next_cpu = -1;
head 187 arch/x86/platform/uv/uv_time.c for (c = 0; c < head->ncpus; c++) {
head 188 arch/x86/platform/uv/uv_time.c u64 exp = head->cpu[c].expires;
head 195 arch/x86/platform/uv/uv_time.c head->next_cpu = bcpu;
head 196 arch/x86/platform/uv/uv_time.c c = head->cpu[bcpu].lcpu;
head 215 arch/x86/platform/uv/uv_time.c struct uv_rtc_timer_head *head = blade_info[bid];
head 217 arch/x86/platform/uv/uv_time.c u64 *t = &head->cpu[bcpu].expires;
head 221 arch/x86/platform/uv/uv_time.c spin_lock_irqsave(&head->lock, flags);
head 223 arch/x86/platform/uv/uv_time.c next_cpu = head->next_cpu;
head 228 arch/x86/platform/uv/uv_time.c expires < head->cpu[next_cpu].expires) {
head 229 arch/x86/platform/uv/uv_time.c head->next_cpu = bcpu;
head 232 arch/x86/platform/uv/uv_time.c uv_rtc_find_next_timer(head, pnode);
head 233 arch/x86/platform/uv/uv_time.c spin_unlock_irqrestore(&head->lock, flags);
head 238 arch/x86/platform/uv/uv_time.c spin_unlock_irqrestore(&head->lock, flags);
head 251 arch/x86/platform/uv/uv_time.c struct uv_rtc_timer_head *head = blade_info[bid];
head 253 arch/x86/platform/uv/uv_time.c u64 *t = &head->cpu[bcpu].expires;
head 257 arch/x86/platform/uv/uv_time.c spin_lock_irqsave(&head->lock, flags);
head 259 arch/x86/platform/uv/uv_time.c if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
head 265 arch/x86/platform/uv/uv_time.c if (head->next_cpu == bcpu)
head 266 arch/x86/platform/uv/uv_time.c uv_rtc_find_next_timer(head, pnode);
head 269 arch/x86/platform/uv/uv_time.c spin_unlock_irqrestore(&head->lock, flags);
head 1393 block/blk-core.c list->head = rq->bio;
head 32 block/blk-ioc.c static void icq_free_icq_rcu(struct rcu_head *head)
head 34 block/blk-ioc.c struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
head 3216 block/blk-mq.c static bool blk_mq_elv_switch_none(struct list_head *head,
head 3231 block/blk-mq.c list_add(&qe->node, head);
head 3248 block/blk-mq.c static void blk_mq_elv_switch_back(struct list_head *head,
head 3254 block/blk-mq.c list_for_each_entry(qe, head, node)
head 3275 block/blk-mq.c LIST_HEAD(head);
head 3297 block/blk-mq.c if (!blk_mq_elv_switch_none(&head, q))
head 3328 block/blk-mq.c blk_mq_elv_switch_back(&head, q);
head 167 block/blk-stat.c static void blk_stat_free_callback_rcu(struct rcu_head *head)
head 171 block/blk-stat.c cb = container_of(head, struct blk_stat_callback, rcu);
head 596 block/kyber-iosched.c struct list_head *head = &kcq->rq_list[sched_domain];
head 600 block/kyber-iosched.c list_move(&rq->queuelist, head);
head 602 block/kyber-iosched.c list_move_tail(&rq->queuelist, head);
head 34 block/partitions/ibm.c __u16 head;
head 40 block/partitions/ibm.c head = ptr->hh & 0x000F;
head 42 block/partitions/ibm.c head * geo->sectors;
head 52 block/partitions/ibm.c __u16 head;
head 58 block/partitions/ibm.c head = ptr->hh & 0x000F;
head 60 block/partitions/ibm.c head * geo->sectors +
head 870 crypto/algapi.c unsigned int head)
head 876 crypto/algapi.c p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
head 881 crypto/algapi.c inst = (void *)(p + head);
head 224 crypto/cryptd.c static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
head 231 crypto/cryptd.c p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
head 235 crypto/cryptd.c inst = (void *)(p + head);
head 28 drivers/acpi/acpi_dbg.c (CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
head 30 drivers/acpi/acpi_dbg.c (CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
head 32 drivers/acpi/acpi_dbg.c (CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
head 34 drivers/acpi/acpi_dbg.c (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
head 269 drivers/acpi/acpi_dbg.c p = &crc->buf[crc->head];
head 274 drivers/acpi/acpi_dbg.c crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
head 520 drivers/acpi/acpi_dbg.c acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
head 521 drivers/acpi/acpi_dbg.c acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
head 661 drivers/acpi/acpi_dbg.c p = &crc->buf[crc->head];
head 669 drivers/acpi/acpi_dbg.c crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
head 28 drivers/acpi/acpi_ipmi.c struct list_head head;
head 60 drivers/acpi/acpi_ipmi.c struct list_head head;
head 122 drivers/acpi/acpi_ipmi.c INIT_LIST_HEAD(&ipmi_device->head);
head 158 drivers/acpi/acpi_ipmi.c list_del(&ipmi_device->head);
head 205 drivers/acpi/acpi_ipmi.c INIT_LIST_HEAD(&ipmi_msg->head);
head 341 drivers/acpi/acpi_ipmi.c head);
head 342 drivers/acpi/acpi_ipmi.c list_del(&tx_msg->head);
head 361 drivers/acpi/acpi_ipmi.c list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
head 364 drivers/acpi/acpi_ipmi.c list_del(&tx_msg->head);
head 390 drivers/acpi/acpi_ipmi.c list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
head 393 drivers/acpi/acpi_ipmi.c list_del(&tx_msg->head);
head 460 drivers/acpi/acpi_ipmi.c list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
head 470 drivers/acpi/acpi_ipmi.c list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
head 491 drivers/acpi/acpi_ipmi.c &driver_data.ipmi_devices, head) {
head 501 drivers/acpi/acpi_ipmi.c struct acpi_ipmi_device, head);
head 561 drivers/acpi/acpi_ipmi.c list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
head 627 drivers/acpi/acpi_ipmi.c head);
head 75 drivers/acpi/acpica/acconvert.h struct acpi_file_node *head);
head 652 drivers/acpi/apei/ghes.c static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
head 656 drivers/acpi/apei/ghes.c cache = container_of(head, struct ghes_estatus_cache, rcu);
head 796 drivers/acpi/arm64/iort.c int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
head 840 drivers/acpi/arm64/iort.c list_add_tail(&region->list, head);
head 1008 drivers/acpi/arm64/iort.c int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
head 1018 drivers/acpi/property.c const struct list_head *head;
head 1025 drivers/acpi/property.c head = &adev->children;
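Note: the crypto/algapi.c and crypto/cryptd.c hits above share one allocation trick: kzalloc(head + sizeof(*inst) + tail) reserves `head` bytes of caller-private storage in front of the instance and hands back the interior pointer p + head. A minimal userspace sketch of the layout; the one thing the idiom demands is that the eventual free uses the base pointer, not the interior one (names here are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct instance { char name[16]; };

/* Allocate an instance with `head` private bytes in front of it. */
static struct instance *alloc_instance(size_t head, size_t tail)
{
	char *p = calloc(1, head + sizeof(struct instance) + tail);

	if (!p)
		return NULL;
	return (struct instance *)(p + head);   /* interior pointer */
}

static void free_instance(struct instance *inst, size_t head)
{
	free((char *)inst - head);              /* back to the base */
}

int main(void)
{
	size_t head = 32;                       /* caller-private header room */
	struct instance *inst = alloc_instance(head, 0);

	if (!inst)
		return 1;
	strcpy(inst->name, "demo");
	memset((char *)inst - head, 0xAA, head); /* the private area is ours */
	printf("%s\n", inst->name);
	free_instance(inst, head);
	return 0;
}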
head 1029 drivers/acpi/property.c if (list_empty(head))
head 1035 drivers/acpi/property.c if (next == head) {
head 1041 drivers/acpi/property.c child_adev = list_first_entry(head, struct acpi_device,
head 1061 drivers/acpi/property.c head = &adev->data.subnodes;
head 1063 drivers/acpi/property.c head = &data->data.subnodes;
head 1067 drivers/acpi/property.c if (list_empty(head))
head 1073 drivers/acpi/property.c if (next == head)
head 1078 drivers/acpi/property.c dn = list_first_entry(head, struct acpi_data_node, sibling);
head 50 drivers/acpi/utils.c u8 *head = NULL;
head 180 drivers/acpi/utils.c head = buffer->pointer;
head 196 drivers/acpi/utils.c *((u64 *) head) =
head 198 drivers/acpi/utils.c head += sizeof(u64);
head 201 drivers/acpi/utils.c pointer = (u8 **) head;
head 205 drivers/acpi/utils.c head += sizeof(u64 *);
head 221 drivers/acpi/utils.c pointer = (u8 **) head;
head 225 drivers/acpi/utils.c head += sizeof(char *);
head 232 drivers/acpi/utils.c pointer = (u8 **) head;
head 236 drivers/acpi/utils.c head += sizeof(u8 *);
head 247 drivers/acpi/utils.c *(void **)head =
head 249 drivers/acpi/utils.c head += sizeof(void *);
head 700 drivers/ata/libata-core.c u32 cyl, head, sect;
head 703 drivers/ata/libata-core.c head = tf->device & 0xf;
head 712 drivers/ata/libata-core.c block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
head 813 drivers/ata/libata-core.c u32 sect, head, cyl, track;
head 825 drivers/ata/libata-core.c head = track % dev->heads;
head 829 drivers/ata/libata-core.c (u32)block, track, cyl, head, sect);
head 835 drivers/ata/libata-core.c if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
head 842 drivers/ata/libata-core.c tf->device |= head;
head 1744 drivers/ata/libata-scsi.c u32 sect, head, cyl, track;
head 1752 drivers/ata/libata-scsi.c head = track % dev->heads;
head 1756 drivers/ata/libata-scsi.c (u32)block, track, cyl, head, sect);
head 1762 drivers/ata/libata-scsi.c if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
head 1770 drivers/ata/libata-scsi.c tf->device |= head;
head 245 drivers/ata/sata_nv.c unsigned int head;
head 1694 drivers/ata/sata_nv.c WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
head 1705 drivers/ata/sata_nv.c if (dq->head == dq->tail) /* null queue */
head 1708 drivers/ata/sata_nv.c tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
head 1709 drivers/ata/sata_nv.c dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
head 1731 drivers/ata/sata_nv.c dq->head = 0;
head 790 drivers/atm/ambassador.c skb, skb->head, (long) skb_end_offset(skb));
head 1390 drivers/atm/ambassador.c skb->data = skb->head;
head 172 drivers/atm/atmtcp.c struct hlist_head *head = &vcc_hash[i];
head 174 drivers/atm/atmtcp.c sk_for_each(s, head) {
head 265 drivers/atm/atmtcp.c struct hlist_head *head;
head 269 drivers/atm/atmtcp.c head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
head 271 drivers/atm/atmtcp.c sk_for_each(s, head) {
head 2177 drivers/atm/eni.c struct hlist_head *head = &vcc_hash[i];
head 2179 drivers/atm/eni.c sk_for_each(s, head) {
head 1491 drivers/atm/firestream.c skb, ne, skb->data, skb->head);
head 559 drivers/atm/fore200e.c struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
head 564 drivers/atm/fore200e.c FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
head 925 drivers/atm/fore200e.c entry = &bsq->host_entry[ bsq->head ];
head 948 drivers/atm/fore200e.c FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
head 1096 drivers/atm/fore200e.c entry = &rxq->host_entry[ rxq->head ];
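Note: the libata-core.c/libata-scsi.c hits spell out classic CHS addressing: LBA = (cyl * heads + head) * sectors + sect - 1, with the range check (cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect) enforcing the 16/4/8-bit CHS field widths (sector numbering starts at 1). A small self-contained round-trip of that arithmetic:

#include <stdio.h>

struct geom { unsigned heads, sectors; };

/* CHS -> LBA, as in libata: sectors are numbered from 1. */
static long chs_to_lba(const struct geom *g, unsigned cyl, unsigned head,
		       unsigned sect)
{
	if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
		return -1;   /* outside the 16/4/8-bit CHS fields */
	return (long)(cyl * g->heads + head) * g->sectors + sect - 1;
}

/* LBA -> CHS, the inverse split used when building a taskfile. */
static void lba_to_chs(const struct geom *g, long block, unsigned *cyl,
		       unsigned *head, unsigned *sect)
{
	long track = block / g->sectors;

	*sect = block % g->sectors + 1;
	*head = track % g->heads;
	*cyl = track / g->heads;
}

int main(void)
{
	struct geom g = { .heads = 16, .sectors = 63 };
	unsigned cyl, head, sect;

	lba_to_chs(&g, 123456, &cyl, &head, &sect);
	printf("lba 123456 -> C/H/S %u/%u/%u -> lba %ld\n",
	       cyl, head, sect, chs_to_lba(&g, cyl, head, sect));
	return 0;
}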
head 1127 drivers/atm/fore200e.c FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
head 1228 drivers/atm/fore200e.c struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
head 1235 drivers/atm/fore200e.c FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
head 1561 drivers/atm/fore200e.c entry = &txq->host_entry[ txq->head ];
head 1613 drivers/atm/fore200e.c FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
head 1672 drivers/atm/fore200e.c struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
head 1688 drivers/atm/fore200e.c FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
head 1743 drivers/atm/fore200e.c struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
head 1750 drivers/atm/fore200e.c FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
head 1783 drivers/atm/fore200e.c struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
head 1789 drivers/atm/fore200e.c FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
head 2188 drivers/atm/fore200e.c rxq->head = 0;
head 2249 drivers/atm/fore200e.c txq->head = 0;
head 2291 drivers/atm/fore200e.c cmdq->head = 0;
head 603 drivers/atm/fore200e.h int head; /* head of cmd queue */
head 612 drivers/atm/fore200e.h int head; /* head of tx queue */
head 624 drivers/atm/fore200e.h int head; /* head of rx queue */
head 634 drivers/atm/fore200e.h int head; /* head of buffer supply queue */
head 330 drivers/atm/he.c struct hlist_head *head;
head 338 drivers/atm/he.c head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
head 340 drivers/atm/he.c sk_for_each(s, head) {
head 1239 drivers/atm/idt77252.c u32 head, tail;
head 1253 drivers/atm/idt77252.c head = IDT77252_PRV_PADDR(queue) + (queue->data - queue->head - 16);
head 1260 drivers/atm/idt77252.c while (head != tail) {
head 1333 drivers/atm/idt77252.c head = IDT77252_PRV_PADDR(queue)
head 1334 drivers/atm/idt77252.c + (queue->data - queue->head - 16);
head 1340 drivers/atm/idt77252.c head = le32_to_cpu(*(u32 *) &queue->data[0]);
head 1804 drivers/atm/idt77252.c skb->data = skb->head;
head 2085 drivers/atm/nicstar.c iovb->data = iovb->head;
head 2097 drivers/atm/nicstar.c iovb->data = iovb->head;
head 899 drivers/atm/solos-pci.c struct hlist_head *head;
head 904 drivers/atm/solos-pci.c head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
head 905 drivers/atm/solos-pci.c sk_for_each(s, head) {
head 214 drivers/atm/zatm.c struct rx_buffer_head *head;
head 225 drivers/atm/zatm.c head = (struct rx_buffer_head *) skb->data;
head 227 drivers/atm/zatm.c if (!first) first = head;
head 229 drivers/atm/zatm.c head->buffer = virt_to_bus(skb->data);
head 230 drivers/atm/zatm.c head->link = 0;
head 231 drivers/atm/zatm.c head->skb = skb;
head 233 drivers/atm/zatm.c (unsigned long) head);
head 237 drivers/atm/zatm.c data))[-1].link = virt_to_bus(head);
head 182 drivers/base/attribute_container.c #define klist_for_each_entry(pos, head, member, iter) \
head 183 drivers/base/attribute_container.c for (klist_iter_init(head, iter); (pos = ({ \
head 51 drivers/base/firmware_loader/main.c struct list_head head;
head 203 drivers/base/firmware_loader/main.c list_for_each_entry(tmp, &fwc->head, list)
head 233 drivers/base/firmware_loader/main.c list_add(&tmp->list, &fwc->head);
head 1204 drivers/base/firmware_loader/main.c struct list_head *head = data;
head 1209 drivers/base/firmware_loader/main.c list_add(&fce->list, head);
head 1419 drivers/base/firmware_loader/main.c INIT_LIST_HEAD(&fw_cache.head);
head 1584 drivers/block/amiflop.c getprm.head=p->type->heads;
head 119 drivers/block/aoe/aoe.h struct list_head head;
head 55 drivers/block/aoe/aoecmd.c struct list_head head;
head 81 drivers/block/aoe/aoecmd.c struct list_head *head, *pos, *nx;
head 84 drivers/block/aoe/aoecmd.c head = &d->rexmitq;
head 85 drivers/block/aoe/aoecmd.c list_for_each_safe(pos, nx, head) {
head 86 drivers/block/aoe/aoecmd.c f = list_entry(pos, struct frame, head);
head 99 drivers/block/aoe/aoecmd.c struct list_head *head, *pos, *nx;
head 103 drivers/block/aoe/aoecmd.c head = &d->factive[n];
head 104 drivers/block/aoe/aoecmd.c list_for_each_safe(pos, nx, head) {
head 105 drivers/block/aoe/aoecmd.c f = list_entry(pos, struct frame, head);
head 202 drivers/block/aoe/aoecmd.c list_add(&f->head, &t->ffree);
head 223 drivers/block/aoe/aoecmd.c f = list_entry(pos, struct frame, head);
head 314 drivers/block/aoe/aoecmd.c list_add_tail(&f->head, &d->factive[n]);
head 643 drivers/block/aoe/aoecmd.c struct list_head *pos, *nx, *head;
head 649 drivers/block/aoe/aoecmd.c head = &d->rexmitq;
head 650 drivers/block/aoe/aoecmd.c list_for_each_safe(pos, nx, head) {
head 651 drivers/block/aoe/aoecmd.c f = list_entry(pos, struct frame, head);
head 662 drivers/block/aoe/aoecmd.c list_replace(&f->head, &nf->head);
head 663 drivers/block/aoe/aoecmd.c pos = &nf->head;
head 733 drivers/block/aoe/aoecmd.c struct list_head *head, *pos, *nx;
head 757 drivers/block/aoe/aoecmd.c head = &d->factive[i];
head 758 drivers/block/aoe/aoecmd.c list_for_each_safe(pos, nx, head) {
head 759 drivers/block/aoe/aoecmd.c f = list_entry(pos, struct frame, head);
head 770 drivers/block/aoe/aoecmd.c f = list_entry(pos, struct frame, head);
head 1203 drivers/block/aoe/aoecmd.c if (list_empty(&iocq[id].head))
head 1205 drivers/block/aoe/aoecmd.c pos = iocq[id].head.next;
head 1207 drivers/block/aoe/aoecmd.c f = list_entry(pos, struct frame, head);
head 1296 drivers/block/aoe/aoecmd.c list_add_tail(&f->head, &iocq[id].head);
head 1662 drivers/block/aoe/aoecmd.c list_splice_init(&iocq[id].head, &flist);
head 1667 drivers/block/aoe/aoecmd.c f = list_entry(pos, struct frame, head);
head 1716 drivers/block/aoe/aoecmd.c INIT_LIST_HEAD(&iocq[i].head);
head 187 drivers/block/aoe/aoedev.c f = list_entry(pos, struct frame, head);
head 200 drivers/block/aoe/aoedev.c struct list_head *head, *pos, *nx;
head 207 drivers/block/aoe/aoedev.c head = &d->factive[i];
head 208 drivers/block/aoe/aoedev.c list_for_each_safe(pos, nx, head)
head 211 drivers/block/aoe/aoedev.c head = &d->rexmitq;
head 212 drivers/block/aoe/aoedev.c list_for_each_safe(pos, nx, head)
head 502 drivers/block/aoe/aoedev.c struct list_head *pos, *nx, *head;
head 511 drivers/block/aoe/aoedev.c head = &t->ffree;
head 512 drivers/block/aoe/aoedev.c list_for_each_safe(pos, nx, head) {
head 514 drivers/block/aoe/aoedev.c f = list_entry(pos, struct frame, head);
head 163 drivers/block/ataflop.c int head; /* "" "" */
head 750 drivers/block/ataflop.c if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
head 769 drivers/block/ataflop.c *p++ = desc->head;
head 786 drivers/block/ataflop.c ReqSide = desc->head;
head 1595 drivers/block/ataflop.c getprm.head = 2;
head 1701 drivers/block/ataflop.c setprm.head != 2)
head 4979 drivers/block/drbd/drbd_nl.c LIST_HEAD(head);
head 4989 drivers/block/drbd/drbd_nl.c list_add(&head, &state_change->list);
head 4990 drivers/block/drbd/drbd_nl.c free_state_changes(&head);
head 5002 drivers/block/drbd/drbd_nl.c if (!list_empty(&head))
head 5003 drivers/block/drbd/drbd_nl.c free_state_changes(&head);
head 5008 drivers/block/drbd/drbd_nl.c list_add_tail(&state_change->list, &head);
head 5013 drivers/block/drbd/drbd_nl.c if (!list_empty(&head)) {
head 5015 drivers/block/drbd/drbd_nl.c list_entry(head.next, struct drbd_state_change, list);
head 5018 drivers/block/drbd/drbd_nl.c list_del(&head); /* detach list from head */
head 9 drivers/block/drbd/drbd_nla.c struct nlattr *head = nla_data(nla);
head 21 drivers/block/drbd/drbd_nla.c nla_for_each_attr(nla, head, len, rem) {
head 75 drivers/block/drbd/drbd_receiver.c static struct page *page_chain_del(struct page **head, int n)
head 81 drivers/block/drbd/drbd_receiver.c BUG_ON(!head);
head 83 drivers/block/drbd/drbd_receiver.c page = *head;
head 101 drivers/block/drbd/drbd_receiver.c page = *head;
head 102 drivers/block/drbd/drbd_receiver.c *head = tmp;
head 131 drivers/block/drbd/drbd_receiver.c static void page_chain_add(struct page **head,
head 141 drivers/block/drbd/drbd_receiver.c set_page_private(chain_last, (unsigned long)*head);
head 142 drivers/block/drbd/drbd_receiver.c *head = chain_first;
head 475 drivers/block/drbd/drbd_receiver.c struct list_head *head)
head 481 drivers/block/drbd/drbd_receiver.c while (!list_empty(head)) {
head 491 drivers/block/drbd/drbd_receiver.c struct list_head *head)
head 494 drivers/block/drbd/drbd_receiver.c _drbd_wait_ee_list_empty(device, head);
head 319 drivers/block/floppy.c #define PH_HEAD(floppy, head) (((((floppy)->stretch & 2) >> 1) ^ head) << 2)
head 2106 drivers/block/floppy.c unsigned char track, head, sect, size;
head 2117 drivers/block/floppy.c DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head);
head 2136 drivers/block/floppy.c n = (track_shift * format_req.track + head_shift * format_req.head)
head 2147 drivers/block/floppy.c here[count].head = format_req.head;
head 2196 drivers/block/floppy.c tmp_format_req->head >= _floppy->head ||
head 2551 drivers/block/floppy.c max_sector = _floppy->sect * _floppy->head;
head 3240 drivers/block/floppy.c (int)g->head <= 0 ||
head 3242 drivers/block/floppy.c (int)(g->sect * g->head) <= 0 ||
head 3382 drivers/block/floppy.c geo->heads = g->head;
head 218 drivers/block/mtip32xx/mtip32xx.h unsigned char head;
head 1289 drivers/block/pktcdvd.c bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
head 82 drivers/block/rsxx/dma.c int head;
head 142 drivers/block/rsxx/dma.c tag = trackers->head;
head 144 drivers/block/rsxx/dma.c trackers->head = trackers->list[tag].next_tag;
head 155 drivers/block/rsxx/dma.c trackers->list[tag].next_tag = trackers->head;
head 156 drivers/block/rsxx/dma.c trackers->head = tag;
head 815 drivers/block/rsxx/dma.c ctrl->trackers->head = 0;
head 355 drivers/block/swim.c static inline void swim_head(struct swim __iomem *base, enum head head)
head 359 drivers/block/swim.c if (head == UPPER_HEAD)
head 361 drivers/block/swim.c else if (head == LOWER_HEAD)
head 601 drivers/block/swim.c fs->secpercyl = g->head * g->sect;
head 723 drivers/block/swim.c geo->heads = g->head;
head 184 drivers/block/swim3.c int head; /* head number ditto */
head 350 drivers/block/swim3.c fs->head = x / fs->secpertrack;
head 437 drivers/block/swim3.c fs->req_sector, fs->secpertrack, fs->head, n);
head 440 drivers/block/swim3.c swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
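Note: the rsxx/dma.c hits above (lines 82-156) implement a freelist without pointers: tags live in an array, each slot stores the index of the next free slot, and `head` is just the index of the first free tag; pop and push are two assignments each. A minimal sketch of that index-chained freelist (names are illustrative):

#include <stdio.h>

#define NR_TAGS 8

struct tracker { int next_tag; };

static struct tracker list[NR_TAGS];
static int head;                     /* index of first free tag, -1 = empty */

static void freelist_init(void)
{
	for (int i = 0; i < NR_TAGS; i++)
		list[i].next_tag = i + 1;
	list[NR_TAGS - 1].next_tag = -1;
	head = 0;
}

static int pop_tag(void)
{
	int tag = head;

	if (tag != -1)
		head = list[tag].next_tag;
	return tag;
}

static void push_tag(int tag)
{
	list[tag].next_tag = head;
	head = tag;
}

int main(void)
{
	freelist_init();
	int a = pop_tag(), b = pop_tag();
	printf("got tags %d %d\n", a, b);
	push_tag(a);                     /* a becomes the freelist head again */
	printf("next tag %d\n", pop_tag());
	return 0;
}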
READ_DATA_1: READ_DATA_0); head 769 drivers/block/swim3.c if (++fs->head > 1) { head 770 drivers/block/swim3.c fs->head = 0; head 389 drivers/block/sx8.c __le16 head; head 835 drivers/block/sx8.c port->dev_geom_head = le16_to_cpu(desc->head); head 2110 drivers/block/xen-blkfront.c merge_bio.head = shadow[j].request->bio; head 23 drivers/char/agp/isoch.c static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new) head 28 drivers/char/agp/isoch.c list_for_each(pos, head) { head 40 drivers/char/agp/isoch.c struct list_head *pos, *tmp, *head = &list->list, *start = head->next; head 43 drivers/char/agp/isoch.c INIT_LIST_HEAD(head); head 45 drivers/char/agp/isoch.c for (pos=start; pos!=head; ) { head 54 drivers/char/agp/isoch.c agp_3_5_dev_list_insert(head, tmp); head 81 drivers/char/agp/isoch.c struct list_head *head = &dev_list->list, *pos; head 137 drivers/char/agp/isoch.c list_for_each(pos, head) { head 292 drivers/char/agp/isoch.c struct list_head *head = &dev_list->list, *pos; head 304 drivers/char/agp/isoch.c for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) { head 327 drivers/char/agp/isoch.c struct list_head *head, *pos; head 347 drivers/char/agp/isoch.c head = &dev_list->list; head 348 drivers/char/agp/isoch.c INIT_LIST_HEAD(head); head 379 drivers/char/agp/isoch.c list_add(pos, head); head 394 drivers/char/agp/isoch.c list_for_each(pos, head) { head 462 drivers/char/agp/isoch.c for (pos=head->next; pos!=head; ) { head 294 drivers/char/tpm/tpm.h struct tpm_header *head = (struct tpm_header *)buf->data; head 296 drivers/char/tpm/tpm.h head->tag = cpu_to_be16(tag); head 297 drivers/char/tpm/tpm.h head->length = cpu_to_be32(sizeof(*head)); head 298 drivers/char/tpm/tpm.h head->ordinal = cpu_to_be32(ordinal); head 321 drivers/char/tpm/tpm.h struct tpm_header *head = (struct tpm_header *)buf->data; head 323 drivers/char/tpm/tpm.h return be32_to_cpu(head->length); head 328 drivers/char/tpm/tpm.h struct tpm_header *head = (struct tpm_header *)buf->data; head 330 drivers/char/tpm/tpm.h return be16_to_cpu(head->tag); head 337 drivers/char/tpm/tpm.h struct tpm_header *head = (struct tpm_header *)buf->data; head 351 drivers/char/tpm/tpm.h head->length = cpu_to_be32(len + new_len); head 1283 drivers/char/xillybus/xillybus_core.c unsigned char *head = head 1297 drivers/char/xillybus/xillybus_core.c *head++ = channel->rd_leftovers[i]; head 4210 drivers/clk/clk.c if (!cn->notifier_head.head) { head 466 drivers/cpufreq/cpufreq.c for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next) head 1081 drivers/crypto/amcc/crypto4xx_core.c u32 head = core_dev->dev->pdr_head; head 1096 drivers/crypto/amcc/crypto4xx_core.c } while (head != tail); head 57 drivers/crypto/caam/intern.h int head; /* entinfo (s/w ring) head index */ head 194 drivers/crypto/caam/jr.c int hw_idx, sw_idx, i, head, tail; head 205 drivers/crypto/caam/jr.c head = READ_ONCE(jrp->head); head 210 drivers/crypto/caam/jr.c for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { head 218 drivers/crypto/caam/jr.c BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); head 258 drivers/crypto/caam/jr.c } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && head 361 drivers/crypto/caam/jr.c int head, tail, desc_size; head 373 drivers/crypto/caam/jr.c head = jrp->head; head 377 drivers/crypto/caam/jr.c CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { head 383 drivers/crypto/caam/jr.c head_entry = &jrp->entinfo[head]; head 390 drivers/crypto/caam/jr.c jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma)); head 
399 drivers/crypto/caam/jr.c jrp->head = (head + 1) & (JOBR_DEPTH - 1); head 458 drivers/crypto/caam/jr.c jrp->head = 0; head 45 drivers/crypto/cavium/cpt/cptvf.h u8 *head; head 77 drivers/crypto/cavium/cpt/cptvf.h struct pending_entry *head; /* head of the queue */ head 87 drivers/crypto/cavium/cpt/cptvf_main.c if (!queue->head) head 91 drivers/crypto/cavium/cpt/cptvf_main.c kzfree((queue->head)); head 117 drivers/crypto/cavium/cpt/cptvf_main.c queue->head = kzalloc((size), GFP_KERNEL); head 118 drivers/crypto/cavium/cpt/cptvf_main.c if (!queue->head) { head 187 drivers/crypto/cavium/cpt/cptvf_main.c chunk->head, head 189 drivers/crypto/cavium/cpt/cptvf_main.c chunk->head = NULL; head 236 drivers/crypto/cavium/cpt/cptvf_main.c curr->head = (u8 *)dma_alloc_coherent(&pdev->dev, head 240 drivers/crypto/cavium/cpt/cptvf_main.c if (!curr->head) { head 260 drivers/crypto/cavium/cpt/cptvf_main.c *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr; head 268 drivers/crypto/cavium/cpt/cptvf_main.c *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr; head 19 drivers/crypto/cavium/cpt/cptvf_reqmanager.c ent = &q->head[q->rear]; head 243 drivers/crypto/cavium/cpt/cptvf_reqmanager.c ent = &queue->qhead->head[queue->idx * qinfo->cmd_size]; head 341 drivers/crypto/cavium/cpt/cptvf_reqmanager.c pentry = &pqueue->head[pqueue->front]; head 349 drivers/crypto/ccp/ccp-crypto-aes-cmac.c int ccp_register_aes_cmac_algs(struct list_head *head) head 398 drivers/crypto/ccp/ccp-crypto-aes-cmac.c list_add(&ccp_alg->entry, head); head 210 drivers/crypto/ccp/ccp-crypto-aes-galois.c static int ccp_register_aes_aead(struct list_head *head, head 242 drivers/crypto/ccp/ccp-crypto-aes-galois.c list_add(&ccp_aead->entry, head); head 247 drivers/crypto/ccp/ccp-crypto-aes-galois.c int ccp_register_aes_aeads(struct list_head *head) head 255 drivers/crypto/ccp/ccp-crypto-aes-galois.c ret = ccp_register_aes_aead(head, &aes_aead_algs[i]); head 227 drivers/crypto/ccp/ccp-crypto-aes-xts.c static int ccp_register_aes_xts_alg(struct list_head *head, head 270 drivers/crypto/ccp/ccp-crypto-aes-xts.c list_add(&ccp_alg->entry, head); head 275 drivers/crypto/ccp/ccp-crypto-aes-xts.c int ccp_register_aes_xts_algs(struct list_head *head) head 280 drivers/crypto/ccp/ccp-crypto-aes-xts.c ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]); head 323 drivers/crypto/ccp/ccp-crypto-aes.c static int ccp_register_aes_alg(struct list_head *head, head 355 drivers/crypto/ccp/ccp-crypto-aes.c list_add(&ccp_alg->entry, head); head 360 drivers/crypto/ccp/ccp-crypto-aes.c int ccp_register_aes_algs(struct list_head *head) head 368 drivers/crypto/ccp/ccp-crypto-aes.c ret = ccp_register_aes_alg(head, &aes_algs[i]); head 186 drivers/crypto/ccp/ccp-crypto-des3.c static int ccp_register_des3_alg(struct list_head *head, head 218 drivers/crypto/ccp/ccp-crypto-des3.c list_add(&ccp_alg->entry, head); head 223 drivers/crypto/ccp/ccp-crypto-des3.c int ccp_register_des3_algs(struct list_head *head) head 231 drivers/crypto/ccp/ccp-crypto-des3.c ret = ccp_register_des3_alg(head, &des3_algs[i]); head 245 drivers/crypto/ccp/ccp-crypto-rsa.c static int ccp_register_rsa_alg(struct list_head *head, head 271 drivers/crypto/ccp/ccp-crypto-rsa.c list_add(&ccp_alg->entry, head); head 276 drivers/crypto/ccp/ccp-crypto-rsa.c int ccp_register_rsa_algs(struct list_head *head) head 287 drivers/crypto/ccp/ccp-crypto-rsa.c ret = ccp_register_rsa_alg(head, &rsa_algs[i]); head 416 drivers/crypto/ccp/ccp-crypto-sha.c static int ccp_register_hmac_alg(struct list_head *head, 
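The aoe entries earlier in this listing (drivers/block/aoe/aoecmd.c, `list_entry(pos, struct frame, head)`) and the ccp_register_* functions just above all use the kernel's intrusive-list idiom: a `struct list_head` member (here named `head` or `entry`) is embedded in the object, and `list_entry()`/`container_of()` recovers the containing object from the node pointer. A minimal runnable sketch of the same mechanics, with hand-rolled stand-ins for the <linux/list.h> primitives; `struct frame` here is illustrative, not the aoe structure:

```c
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the <linux/list.h> primitives seen in the entries above. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
	     pos = n, n = pos->next)

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;
}

/* Illustrative payload; aoe's real struct frame carries much more. */
struct frame {
	int tag;
	struct list_head head;	/* links the frame into a queue */
};

int main(void)
{
	struct list_head rexmitq = LIST_HEAD_INIT(rexmitq);
	struct frame frames[3] = { { .tag = 1 }, { .tag = 2 }, { .tag = 3 } };
	struct list_head *pos, *nx;
	struct frame *f;
	int i;

	for (i = 0; i < 3; i++)
		list_add_tail(&frames[i].head, &rexmitq);

	/* _safe variant: nx is saved up front, so the current node
	 * may be unlinked (or freed) inside the loop body. */
	list_for_each_safe(pos, nx, &rexmitq) {
		f = list_entry(pos, struct frame, head);
		printf("frame tag %d\n", f->tag);
		list_del(pos);
	}
	return 0;
}
```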
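In the floppy-era block drivers earlier in this listing (ataflop.c, floppy.c, swim.c, swim3.c), `head` is the physical disk head: geometry checks such as `desc->head >= 2`, `fs->secpercyl = g->head * g->sect`, and swim3's `fs->head = x / fs->secpertrack`. A small runnable sketch of the usual LBA-to-CHS decomposition those expressions implement; the function and type names are illustrative:

```c
#include <stdio.h>

struct chs { unsigned int cyl, head, sect; };

/* Decompose a linear block address into cylinder/head/sector,
 * with secpercyl = heads * sectors_per_track as in swim.c. */
static struct chs lba_to_chs(unsigned int lba,
			     unsigned int heads, unsigned int spt)
{
	struct chs c;
	unsigned int secpercyl = heads * spt;
	unsigned int x = lba % secpercyl;	/* offset within the cylinder */

	c.cyl  = lba / secpercyl;
	c.head = x / spt;	/* swim3: fs->head = x / fs->secpertrack */
	c.sect = x % spt;	/* 0-based; classic CHS numbers sectors from 1 */
	return c;
}

int main(void)
{
	/* 1.44M floppy geometry: 2 heads, 18 sectors per track. */
	struct chs c = lba_to_chs(19, 2, 18);
	printf("cyl %u head %u sect %u\n", c.cyl, c.head, c.sect);	/* 0 1 1 */
	return 0;
}
```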
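The CAAM job ring immediately above (drivers/crypto/caam/jr.c) is the canonical power-of-two ring: `CIRC_SPACE()` gates enqueue, `CIRC_CNT()` gates dequeue, and the head index advances with a mask, `(head + 1) & (JOBR_DEPTH - 1)` at jr.c:399. A runnable sketch using the macro definitions from include/linux/circ_buf.h; `DEPTH` stands in for JOBR_DEPTH:

```c
#include <stdio.h>

/* As defined in include/linux/circ_buf.h; size must be a power of two. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define DEPTH 8	/* stand-in for JOBR_DEPTH */

int main(void)
{
	unsigned int head = 0, tail = 0, i;

	/* Producer: refuse to enqueue when no space remains. One slot is
	 * sacrificed so that head == tail unambiguously means "empty". */
	for (i = 0; i < 10; i++) {
		if (CIRC_SPACE(head, tail, DEPTH) <= 0) {
			printf("ring full at %u entries\n",
			       CIRC_CNT(head, tail, DEPTH));
			break;
		}
		head = (head + 1) & (DEPTH - 1);	/* mask advance, as in jr.c */
	}

	/* Consumer: drain while entries are pending, as the jr.c
	 * dequeue loop does with CIRC_CNT(head, tail, JOBR_DEPTH) >= 1. */
	while (CIRC_CNT(head, tail, DEPTH) >= 1)
		tail = (tail + 1) & (DEPTH - 1);

	printf("drained, %u left\n", CIRC_CNT(head, tail, DEPTH));
	return 0;
}
```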
head 456 drivers/crypto/ccp/ccp-crypto-sha.c list_add(&ccp_alg->entry, head); head 461 drivers/crypto/ccp/ccp-crypto-sha.c static int ccp_register_sha_alg(struct list_head *head, head 513 drivers/crypto/ccp/ccp-crypto-sha.c list_add(&ccp_alg->entry, head); head 515 drivers/crypto/ccp/ccp-crypto-sha.c ret = ccp_register_hmac_alg(head, def, ccp_alg); head 520 drivers/crypto/ccp/ccp-crypto-sha.c int ccp_register_sha_algs(struct list_head *head) head 528 drivers/crypto/ccp/ccp-crypto-sha.c ret = ccp_register_sha_alg(head, &sha_algs[i]); head 274 drivers/crypto/ccp/ccp-crypto.h int ccp_register_aes_algs(struct list_head *head); head 275 drivers/crypto/ccp/ccp-crypto.h int ccp_register_aes_cmac_algs(struct list_head *head); head 276 drivers/crypto/ccp/ccp-crypto.h int ccp_register_aes_xts_algs(struct list_head *head); head 277 drivers/crypto/ccp/ccp-crypto.h int ccp_register_aes_aeads(struct list_head *head); head 278 drivers/crypto/ccp/ccp-crypto.h int ccp_register_sha_algs(struct list_head *head); head 279 drivers/crypto/ccp/ccp-crypto.h int ccp_register_des3_algs(struct list_head *head); head 280 drivers/crypto/ccp/ccp-crypto.h int ccp_register_rsa_algs(struct list_head *head); head 579 drivers/crypto/ccree/cc_request_mgr.c unsigned int *head = &request_mgr_handle->req_queue_head; head 587 drivers/crypto/ccree/cc_request_mgr.c if (*head == *tail) { head 593 drivers/crypto/ccree/cc_request_mgr.c *head); head 1348 drivers/crypto/chelsio/chtls/chtls_io.c req = (struct cpl_rx_data_ack *)skb->head; head 173 drivers/crypto/hisilicon/qm.c (qc)->head = 0; \ head 209 drivers/crypto/hisilicon/qm.c __le16 head; head 224 drivers/crypto/hisilicon/qm.c __le16 head; head 238 drivers/crypto/hisilicon/qm.c __le16 head; head 248 drivers/crypto/hisilicon/qm.c __le16 head; head 389 drivers/crypto/hisilicon/sec/sec_algs.c list_for_each_entry_safe(el, temp, &sec_req->elements, head) { head 436 drivers/crypto/hisilicon/sec/sec_algs.c head); head 514 drivers/crypto/hisilicon/sec/sec_algs.c list_del(&sec_req_el->head); head 786 drivers/crypto/hisilicon/sec/sec_algs.c list_add_tail(&el->head, &sec_req->elements); head 838 drivers/crypto/hisilicon/sec/sec_algs.c list_for_each_entry_safe(el, temp, &sec_req->elements, head) { head 839 drivers/crypto/hisilicon/sec/sec_algs.c list_del(&el->head); head 303 drivers/crypto/hisilicon/sec/sec_drv.h struct list_head head; head 394 drivers/crypto/hisilicon/zip/zip_crypto.c const u8 *head = TO_HEAD(req_type); head 397 drivers/crypto/hisilicon/zip/zip_crypto.c ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); head 57 drivers/crypto/n2_core.c unsigned long head; head 134 drivers/crypto/n2_core.c for (off = q->head; off != new_head; off = spu_next_offset(q, off)) { head 140 drivers/crypto/n2_core.c q->head = new_head; head 150 drivers/crypto/n2_core.c unsigned long head, hv_ret; head 157 drivers/crypto/n2_core.c hv_ret = sun4v_ncs_gethead(q->qhandle, &head); head 160 drivers/crypto/n2_core.c smp_processor_id(), head, hv_ret); head 162 drivers/crypto/n2_core.c sun4v_ncs_sethead_marker(q->qhandle, head); head 176 drivers/crypto/n2_core.c unsigned long head = q->head; head 181 drivers/crypto/n2_core.c if (head > tail) head 182 drivers/crypto/n2_core.c diff = head - tail; head 184 drivers/crypto/n2_core.c diff = (end - tail) + head; head 242 drivers/crypto/n2_core.c qp->head != qp->tail) head 493 drivers/crypto/n2_core.c unsigned long head, hv_ret; head 496 drivers/crypto/n2_core.c hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); head 501 drivers/crypto/n2_core.c if (head == 
qp->tail) { head 502 drivers/crypto/n2_core.c qp->head = head; head 221 drivers/crypto/n2_core.h unsigned long *head); head 229 drivers/crypto/n2_core.h unsigned long head); head 148 drivers/crypto/qat/qat_common/adf_cfg.c static void adf_cfg_section_del_all(struct list_head *head); head 192 drivers/crypto/qat/qat_common/adf_cfg.c static void adf_cfg_keyval_del_all(struct list_head *head) head 196 drivers/crypto/qat/qat_common/adf_cfg.c list_for_each_prev_safe(list_ptr, tmp, head) { head 204 drivers/crypto/qat/qat_common/adf_cfg.c static void adf_cfg_section_del_all(struct list_head *head) head 209 drivers/crypto/qat/qat_common/adf_cfg.c list_for_each_prev_safe(list, tmp, head) { head 140 drivers/crypto/qat/qat_common/adf_transport.c uint32_t *msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); head 146 drivers/crypto/qat/qat_common/adf_transport.c ring->head = adf_modulo(ring->head + head 150 drivers/crypto/qat/qat_common/adf_transport.c msg = (uint32_t *)((uintptr_t)ring->base_addr + ring->head); head 155 drivers/crypto/qat/qat_common/adf_transport.c ring->ring_number, ring->head); head 283 drivers/crypto/qat/qat_common/adf_transport.c ring->head = 0; head 92 drivers/crypto/qat/qat_common/adf_transport_debug.c int head, tail, empty; head 94 drivers/crypto/qat/qat_common/adf_transport_debug.c head = READ_CSR_RING_HEAD(csr, bank->bank_number, head 106 drivers/crypto/qat/qat_common/adf_transport_debug.c head, tail, (empty & 1 << ring->ring_number) head 210 drivers/crypto/qat/qat_common/adf_transport_debug.c int head, tail, empty; head 215 drivers/crypto/qat/qat_common/adf_transport_debug.c head = READ_CSR_RING_HEAD(csr, bank->bank_number, head 223 drivers/crypto/qat/qat_common/adf_transport_debug.c ring->ring_number, head, tail, head 66 drivers/crypto/qat/qat_common/adf_transport_internal.h uint16_t head; head 277 drivers/crypto/talitos.c int head; head 288 drivers/crypto/talitos.c head = priv->chan[ch].head; head 289 drivers/crypto/talitos.c request = &priv->chan[ch].fifo[head]; head 306 drivers/crypto/talitos.c priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1); head 101 drivers/crypto/talitos.h int head; head 35 drivers/dma-buf/dma-buf.c struct list_head head; head 567 drivers/dma-buf/dma-buf.c list_add(&dmabuf->list_node, &db_list.head); head 1175 drivers/dma-buf/dma-buf.c list_for_each_entry(buf_obj, &db_list.head, list_node) { head 1291 drivers/dma-buf/dma-buf.c INIT_LIST_HEAD(&db_list.head); head 145 drivers/dma-buf/dma-fence-chain.c struct dma_fence_chain *head = to_dma_fence_chain(fence); head 147 drivers/dma-buf/dma-fence-chain.c dma_fence_get(&head->base); head 148 drivers/dma-buf/dma-fence-chain.c dma_fence_chain_for_each(fence, &head->base) { head 153 drivers/dma-buf/dma-fence-chain.c if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) { head 159 drivers/dma-buf/dma-fence-chain.c dma_fence_put(&head->base); head 122 drivers/dma-buf/udmabuf.c static long udmabuf_create(const struct udmabuf_create_list *head, head 139 drivers/dma-buf/udmabuf.c for (i = 0; i < head->count; i++) { head 156 drivers/dma-buf/udmabuf.c for (i = 0; i < head->count; i++) { head 197 drivers/dma-buf/udmabuf.c if (head->flags & UDMABUF_FLAGS_CLOEXEC) head 214 drivers/dma-buf/udmabuf.c struct udmabuf_create_list head; head 221 drivers/dma-buf/udmabuf.c head.flags = create.flags; head 222 drivers/dma-buf/udmabuf.c head.count = 1; head 227 drivers/dma-buf/udmabuf.c return udmabuf_create(&head, &list); head 232 drivers/dma-buf/udmabuf.c struct udmabuf_create_list head; 
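The n2_core.c entries just above (lines 176-184) compute ring occupancy without the power-of-two mask trick, so the queue size can be arbitrary: if the head has not wrapped, the count is `head - tail`; otherwise it is the stretch from tail to the end plus the wrapped portion. A runnable sketch mirroring that arithmetic; `end` is the total number of slots:

```c
#include <stdio.h>

/* Occupancy of a ring whose size need not be a power of two,
 * following the head/tail branches in n2_core.c above. */
static unsigned long ring_used(unsigned long head, unsigned long tail,
			       unsigned long end)
{
	if (head > tail)
		return head - tail;
	/* wrapped: tail..end plus 0..head (the n2 code's else branch) */
	return (end - tail) + head;
}

int main(void)
{
	printf("%lu\n", ring_used(5, 2, 12));	/* not wrapped: 3 in flight */
	printf("%lu\n", ring_used(2, 9, 12));	/* wrapped: 5 in flight */
	return 0;
}
```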
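The udmabuf.c entries around here (lines 214-246, continuing just below) show a common ioctl copy-in shape: read a fixed header from userspace, bound-check its `count`, then `memdup_user()` the variable-length array that follows the header in the same user buffer. A hedged kernel-context sketch of that shape, not the driver's actual code; `my_hdr`, `my_item`, and the limit are illustrative:

```c
/* Kernel-context sketch of the header-plus-array copy-in pattern used by
 * the udmabuf list ioctl; types and the limit below are illustrative. */
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/err.h>

struct my_hdr  { u32 flags; u32 count; };
struct my_item { u64 value; };

#define MY_ITEM_LIMIT 1024	/* illustrative cap, like udmabuf's list_limit */

static long my_copy_in(unsigned long arg)
{
	struct my_hdr head;
	struct my_item *list;
	size_t lsize;

	if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
		return -EFAULT;
	if (head.count > MY_ITEM_LIMIT)	/* bound count before multiplying */
		return -EINVAL;

	lsize = sizeof(struct my_item) * head.count;
	/* The array immediately follows the header in the user buffer. */
	list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
	if (IS_ERR(list))
		return PTR_ERR(list);

	/* ... consume head.flags and list[0..head.count-1] ... */
	kfree(list);
	return 0;
}
```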
head 237 drivers/dma-buf/udmabuf.c if (copy_from_user(&head, (void __user *)arg, sizeof(head))) head 239 drivers/dma-buf/udmabuf.c if (head.count > list_limit) head 241 drivers/dma-buf/udmabuf.c lsize = sizeof(struct udmabuf_create_item) * head.count; head 242 drivers/dma-buf/udmabuf.c list = memdup_user((void __user *)(arg + sizeof(head)), lsize); head 246 drivers/dma-buf/udmabuf.c ret = udmabuf_create(&head, list); head 1523 drivers/dma/amba-pl08x.c LIST_HEAD(head); head 1525 drivers/dma/amba-pl08x.c vchan_get_all_descriptors(&plchan->vc, &head); head 1526 drivers/dma/amba-pl08x.c vchan_dma_desc_free_list(&plchan->vc, &head); head 794 drivers/dma/bcm2835-dma.c LIST_HEAD(head); head 808 drivers/dma/bcm2835-dma.c vchan_get_all_descriptors(&c->vc, &head); head 810 drivers/dma/bcm2835-dma.c vchan_dma_desc_free_list(&c->vc, &head); head 58 drivers/dma/coh901318_lli.c struct coh901318_lli *head; head 68 drivers/dma/coh901318_lli.c head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy); head 70 drivers/dma/coh901318_lli.c if (head == NULL) head 75 drivers/dma/coh901318_lli.c lli = head; head 99 drivers/dma/coh901318_lli.c return head; head 108 drivers/dma/coh901318_lli.c coh901318_lli_free(pool, &head); head 431 drivers/dma/dma-axi-dmac.c LIST_HEAD(head); head 436 drivers/dma/dma-axi-dmac.c vchan_get_all_descriptors(&chan->vchan, &head); head 437 drivers/dma/dma-axi-dmac.c list_splice_tail_init(&chan->active_descs, &head); head 440 drivers/dma/dma-axi-dmac.c vchan_dma_desc_free_list(&chan->vchan, &head); head 571 drivers/dma/dma-jz4780.c LIST_HEAD(head); head 584 drivers/dma/dma-jz4780.c vchan_get_all_descriptors(&jzchan->vchan, &head); head 588 drivers/dma/dma-jz4780.c vchan_dma_desc_free_list(&jzchan->vchan, &head); head 631 drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c LIST_HEAD(head); head 637 drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c vchan_get_all_descriptors(&chan->vc, &head); head 643 drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c vchan_dma_desc_free_list(&chan->vc, &head); head 246 drivers/dma/dw-edma/dw-edma-core.c LIST_HEAD(head); head 322 drivers/dma/dw/core.c struct list_head *head, *active = dwc->tx_node_active; head 330 drivers/dma/dw/core.c head = &desc->tx_list; head 331 drivers/dma/dw/core.c if (active != head) { head 333 drivers/dma/dw/core.c if (active == head->next) head 159 drivers/dma/fsl-edma-common.c LIST_HEAD(head); head 165 drivers/dma/fsl-edma-common.c vchan_get_all_descriptors(&fsl_chan->vchan, &head); head 167 drivers/dma/fsl-edma-common.c vchan_dma_desc_free_list(&fsl_chan->vchan, &head); head 633 drivers/dma/fsl-edma-common.c LIST_HEAD(head); head 639 drivers/dma/fsl-edma-common.c vchan_get_all_descriptors(&fsl_chan->vchan, &head); head 643 drivers/dma/fsl-edma-common.c vchan_dma_desc_free_list(&fsl_chan->vchan, &head); head 299 drivers/dma/fsl-qdma.c LIST_HEAD(head); head 302 drivers/dma/fsl-qdma.c vchan_get_all_descriptors(&fsl_chan->vchan, &head); head 305 drivers/dma/fsl-qdma.c vchan_dma_desc_free_list(&fsl_chan->vchan, &head); head 1004 drivers/dma/fsl-qdma.c LIST_HEAD(head); head 1009 drivers/dma/fsl-qdma.c vchan_get_all_descriptors(&fsl_chan->vchan, &head); head 1011 drivers/dma/fsl-qdma.c vchan_dma_desc_free_list(&fsl_chan->vchan, &head); head 387 drivers/dma/hsu/hsu.c LIST_HEAD(head); head 397 drivers/dma/hsu/hsu.c vchan_get_all_descriptors(&hsuc->vchan, &head); head 399 drivers/dma/hsu/hsu.c vchan_dma_desc_free_list(&hsuc->vchan, &head); head 474 drivers/dma/idma64.c LIST_HEAD(head); head 483 drivers/dma/idma64.c 
vchan_get_all_descriptors(&idma64c->vchan, &head); head 486 drivers/dma/idma64.c vchan_dma_desc_free_list(&idma64c->vchan, &head); head 695 drivers/dma/img-mdc-dma.c LIST_HEAD(head); head 706 drivers/dma/img-mdc-dma.c vchan_get_all_descriptors(&mchan->vc, &head); head 712 drivers/dma/img-mdc-dma.c vchan_dma_desc_free_list(&mchan->vc, &head); head 1062 drivers/dma/imx-sdma.c LIST_HEAD(head); head 1073 drivers/dma/imx-sdma.c vchan_get_all_descriptors(&sdmac->vc, &head); head 1076 drivers/dma/imx-sdma.c vchan_dma_desc_free_list(&sdmac->vc, &head); head 162 drivers/dma/ioat/dma.c ioat_chan->issued = ioat_chan->head; head 167 drivers/dma/ioat/dma.c __func__, ioat_chan->head, ioat_chan->tail, head 208 drivers/dma/ioat/dma.c __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued); head 209 drivers/dma/ioat/dma.c desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); head 225 drivers/dma/ioat/dma.c ioat_chan->head += 1; head 246 drivers/dma/ioat/dma.c __func__, ioat_chan->head, ioat_chan->tail, head 316 drivers/dma/ioat/dma.c ioat_chan->head += ioat_chan->produce; head 459 drivers/dma/ioat/dma.c __func__, num_descs, ioat_chan->head, head 468 drivers/dma/ioat/dma.c __func__, num_descs, ioat_chan->head, head 585 drivers/dma/ioat/dma.c __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued); head 123 drivers/dma/ioat/dma.h u16 head; head 316 drivers/dma/ioat/dma.h return CIRC_CNT(ioat_chan->head, ioat_chan->tail, head 323 drivers/dma/ioat/dma.h return CIRC_CNT(ioat_chan->head, ioat_chan->issued, head 639 drivers/dma/ioat/init.c desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i); head 715 drivers/dma/ioat/init.c ioat_chan->head = 0; head 118 drivers/dma/ioat/prep.c idx = ioat_chan->head; head 187 drivers/dma/ioat/prep.c idx = ioat_chan->head; head 383 drivers/dma/ioat/prep.c idx = ioat_chan->head; head 489 drivers/dma/ioat/prep.c idx = ioat_chan->head; head 715 drivers/dma/ioat/prep.c desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); head 727 drivers/dma/k3dma.c LIST_HEAD(head); head 738 drivers/dma/k3dma.c vchan_get_all_descriptors(&c->vc, &head); head 751 drivers/dma/k3dma.c vchan_dma_desc_free_list(&c->vc, &head); head 555 drivers/dma/mediatek/mtk-cqdma.c LIST_HEAD(head); head 562 drivers/dma/mediatek/mtk-cqdma.c list_splice_tail_init(&vc->desc_allocated, &head); head 563 drivers/dma/mediatek/mtk-cqdma.c list_splice_tail_init(&vc->desc_submitted, &head); head 564 drivers/dma/mediatek/mtk-cqdma.c list_splice_tail_init(&vc->desc_issued, &head); head 568 drivers/dma/mediatek/mtk-cqdma.c vchan_dma_desc_free_list(vc, &head); head 742 drivers/dma/mediatek/mtk-hsdma.c LIST_HEAD(head); head 745 drivers/dma/mediatek/mtk-hsdma.c list_splice_tail_init(&vc->desc_allocated, &head); head 746 drivers/dma/mediatek/mtk-hsdma.c list_splice_tail_init(&vc->desc_submitted, &head); head 747 drivers/dma/mediatek/mtk-hsdma.c list_splice_tail_init(&vc->desc_issued, &head); head 751 drivers/dma/mediatek/mtk-hsdma.c vchan_dma_desc_free_list(vc, &head); head 397 drivers/dma/mediatek/mtk-uart-apdma.c LIST_HEAD(head); head 432 drivers/dma/mediatek/mtk-uart-apdma.c vchan_get_all_descriptors(&c->vc, &head); head 433 drivers/dma/mediatek/mtk-uart-apdma.c vchan_dma_desc_free_list(&c->vc, &head); head 46 drivers/dma/mic_x100_dma.c ch->head = mic_dma_hw_ring_inc(ch->head); head 107 drivers/dma/mic_x100_dma.c static u32 mic_dma_ring_count(u32 head, u32 tail) head 111 drivers/dma/mic_x100_dma.c if (head >= tail) head 112 drivers/dma/mic_x100_dma.c count = (tail - 0) + (MIC_DMA_DESC_RX_SIZE - head); head 114 
drivers/dma/mic_x100_dma.c count = tail - head; head 124 drivers/dma/mic_x100_dma.c count = mic_dma_ring_count(ch->head, ch->last_tail); head 127 drivers/dma/mic_x100_dma.c count = mic_dma_ring_count(ch->head, ch->last_tail); head 158 drivers/dma/mic_x100_dma.c mic_dma_memcpy_desc(&ch->desc_ring[ch->head], head 171 drivers/dma/mic_x100_dma.c mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0, head 174 drivers/dma/mic_x100_dma.c mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0, head 195 drivers/dma/mic_x100_dma.c mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0, head 251 drivers/dma/mic_x100_dma.c mic_ch->submitted = mic_ch->head; head 260 drivers/dma/mic_x100_dma.c u32 idx = mic_dma_hw_ring_dec(ch->head); head 280 drivers/dma/mic_x100_dma.c mic_dma_prep_status_desc(&mic_ch->desc_ring[mic_ch->head], src_val, dst, head 449 drivers/dma/mic_x100_dma.c ch->head = ch->last_tail; head 105 drivers/dma/mic_x100_dma.h u32 head; head 190 drivers/dma/moxart-dma.c LIST_HEAD(head); head 206 drivers/dma/moxart-dma.c vchan_get_all_descriptors(&ch->vc, &head); head 208 drivers/dma/moxart-dma.c vchan_dma_desc_free_list(&ch->vc, &head); head 690 drivers/dma/nbpfaxi.c LIST_HEAD(head); head 720 drivers/dma/nbpfaxi.c list_add_tail(&desc->node, &head); head 729 drivers/dma/nbpfaxi.c list_splice_tail(&head, &chan->free); head 754 drivers/dma/nbpfaxi.c LIST_HEAD(head); head 759 drivers/dma/nbpfaxi.c list_move(&desc->node, &head); head 764 drivers/dma/nbpfaxi.c list_for_each_entry_safe(desc, tmp, &head, node) { head 837 drivers/dma/nbpfaxi.c LIST_HEAD(head); head 841 drivers/dma/nbpfaxi.c list_splice_init(&chan->done, &head); head 842 drivers/dma/nbpfaxi.c list_splice_init(&chan->active, &head); head 843 drivers/dma/nbpfaxi.c list_splice_init(&chan->queued, &head); head 849 drivers/dma/nbpfaxi.c list_for_each_entry_safe(desc, tmp, &head, node) { head 662 drivers/dma/owl-dma.c LIST_HEAD(head); head 674 drivers/dma/owl-dma.c vchan_get_all_descriptors(&vchan->vc, &head); head 675 drivers/dma/owl-dma.c vchan_dma_desc_free_list(&vchan->vc, &head); head 1092 drivers/dma/pxa_dma.c LIST_HEAD(head); head 1098 drivers/dma/pxa_dma.c vchan_get_all_descriptors(&chan->vc, &head); head 1100 drivers/dma/pxa_dma.c list_for_each_entry(vd, &head, node) { head 1116 drivers/dma/pxa_dma.c vchan_dma_desc_free_list(&chan->vc, &head); head 344 drivers/dma/qcom/bam_dma.c #define IS_BUSY(chan) (CIRC_SPACE(bchan->tail, bchan->head,\ head 363 drivers/dma/qcom/bam_dma.c unsigned short head; /* start of active descriptor entries */ head 489 drivers/dma/qcom/bam_dma.c bchan->head = 0; head 693 drivers/dma/qcom/bam_dma.c LIST_HEAD(head); head 722 drivers/dma/qcom/bam_dma.c vchan_get_all_descriptors(&bchan->vc, &head); head 725 drivers/dma/qcom/bam_dma.c vchan_dma_desc_free_list(&bchan->vc, &head); head 819 drivers/dma/qcom/bam_dma.c avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1); head 821 drivers/dma/qcom/bam_dma.c if (offset < bchan->head) head 831 drivers/dma/qcom/bam_dma.c bchan->head += async_desc->xfer_len; head 832 drivers/dma/qcom/bam_dma.c bchan->head %= MAX_DESCRIPTORS; head 1013 drivers/dma/qcom/bam_dma.c avail = CIRC_SPACE(bchan->tail, bchan->head, head 525 drivers/dma/s3c24xx-dma.c LIST_HEAD(head); head 527 drivers/dma/s3c24xx-dma.c vchan_get_all_descriptors(&s3cchan->vc, &head); head 528 drivers/dma/s3c24xx-dma.c vchan_dma_desc_free_list(&s3cchan->vc, &head); head 757 drivers/dma/sa11x0-dma.c LIST_HEAD(head); head 763 drivers/dma/sa11x0-dma.c vchan_get_all_descriptors(&c->vc, &head); head 776 
drivers/dma/sa11x0-dma.c list_add_tail(&p->txd_load->vd.node, &head); head 780 drivers/dma/sa11x0-dma.c list_add_tail(&p->txd_done->vd.node, &head); head 790 drivers/dma/sa11x0-dma.c vchan_dma_desc_free_list(&c->vc, &head); head 450 drivers/dma/sh/usb-dmac.c LIST_HEAD(head); head 455 drivers/dma/sh/usb-dmac.c vchan_get_all_descriptors(&uchan->vc, &head); head 462 drivers/dma/sh/usb-dmac.c vchan_dma_desc_free_list(&uchan->vc, &head); head 1044 drivers/dma/sprd-dma.c LIST_HEAD(head); head 1052 drivers/dma/sprd-dma.c vchan_get_all_descriptors(&schan->vc, &head); head 1058 drivers/dma/sprd-dma.c vchan_dma_desc_free_list(&schan->vc, &head); head 659 drivers/dma/st_fdma.c LIST_HEAD(head); head 669 drivers/dma/st_fdma.c vchan_get_all_descriptors(&fchan->vchan, &head); head 671 drivers/dma/st_fdma.c vchan_dma_desc_free_list(&fchan->vchan, &head); head 487 drivers/dma/stm32-dma.c LIST_HEAD(head); head 496 drivers/dma/stm32-dma.c vchan_get_all_descriptors(&chan->vchan, &head); head 498 drivers/dma/stm32-dma.c vchan_dma_desc_free_list(&chan->vchan, &head); head 1242 drivers/dma/stm32-mdma.c LIST_HEAD(head); head 1249 drivers/dma/stm32-mdma.c vchan_get_all_descriptors(&chan->vchan, &head); head 1252 drivers/dma/stm32-mdma.c vchan_dma_desc_free_list(&chan->vchan, &head); head 867 drivers/dma/sun4i-dma.c LIST_HEAD(head); head 871 drivers/dma/sun4i-dma.c vchan_get_all_descriptors(&vchan->vc, &head); head 888 drivers/dma/sun4i-dma.c vchan_dma_desc_free_list(&vchan->vc, &head); head 890 drivers/dma/sun6i-dma.c LIST_HEAD(head); head 908 drivers/dma/sun6i-dma.c vchan_get_all_descriptors(&vchan->vc, &head); head 922 drivers/dma/sun6i-dma.c vchan_dma_desc_free_list(&vchan->vc, &head); head 492 drivers/dma/tegra210-adma.c LIST_HEAD(head); head 500 drivers/dma/tegra210-adma.c vchan_get_all_descriptors(&tdc->vc, &head); head 502 drivers/dma/tegra210-adma.c vchan_dma_desc_free_list(&tdc->vc, &head); head 859 drivers/dma/ti/edma.c LIST_HEAD(head); head 878 drivers/dma/ti/edma.c vchan_get_all_descriptors(&echan->vchan, &head); head 880 drivers/dma/ti/edma.c vchan_dma_desc_free_list(&echan->vchan, &head); head 1318 drivers/dma/ti/omap-dma.c LIST_HEAD(head); head 1338 drivers/dma/ti/omap-dma.c vchan_get_all_descriptors(&c->vc, &head); head 1340 drivers/dma/ti/omap-dma.c vchan_dma_desc_free_list(&c->vc, &head); head 258 drivers/dma/uniphier-mdmac.c LIST_HEAD(head); head 267 drivers/dma/uniphier-mdmac.c vchan_get_all_descriptors(vc, &head); head 271 drivers/dma/uniphier-mdmac.c vchan_dma_desc_free_list(vc, &head); head 88 drivers/dma/virt-dma.c LIST_HEAD(head); head 91 drivers/dma/virt-dma.c list_splice_tail_init(&vc->desc_completed, &head); head 103 drivers/dma/virt-dma.c list_for_each_entry_safe(vd, _vd, &head, node) { head 112 drivers/dma/virt-dma.c void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) head 116 drivers/dma/virt-dma.c list_for_each_entry_safe(vd, _vd, head, node) { head 44 drivers/dma/virt-dma.h void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); head 176 drivers/dma/virt-dma.h struct list_head *head) head 178 drivers/dma/virt-dma.h list_splice_tail_init(&vc->desc_allocated, head); head 179 drivers/dma/virt-dma.h list_splice_tail_init(&vc->desc_submitted, head); head 180 drivers/dma/virt-dma.h list_splice_tail_init(&vc->desc_issued, head); head 181 drivers/dma/virt-dma.h list_splice_tail_init(&vc->desc_completed, head); head 188 drivers/dma/virt-dma.h LIST_HEAD(head); head 191 drivers/dma/virt-dma.h vchan_get_all_descriptors(vc, &head); head 192 
drivers/dma/virt-dma.h list_for_each_entry(vd, &head, node) head 196 drivers/dma/virt-dma.h vchan_dma_desc_free_list(vc, &head); head 219 drivers/dma/xgene-dma.c u16 head; head 598 drivers/dma/xgene-dma.c desc_hw = &ring->desc_hw[ring->head]; head 604 drivers/dma/xgene-dma.c if (++ring->head == ring->slots) head 605 drivers/dma/xgene-dma.c ring->head = 0; head 615 drivers/dma/xgene-dma.c desc_hw = &ring->desc_hw[ring->head]; head 617 drivers/dma/xgene-dma.c if (++ring->head == ring->slots) head 618 drivers/dma/xgene-dma.c ring->head = 0; head 702 drivers/dma/xgene-dma.c desc_hw = &ring->desc_hw[ring->head]; head 709 drivers/dma/xgene-dma.c if (++ring->head == ring->slots) head 710 drivers/dma/xgene-dma.c ring->head = 0; head 665 drivers/dma/zx_dma.c LIST_HEAD(head); head 676 drivers/dma/zx_dma.c vchan_get_all_descriptors(&c->vc, &head); head 686 drivers/dma/zx_dma.c vchan_dma_desc_free_list(&c->vc, &head); head 550 drivers/edac/thunderx_edac.c unsigned long head = ring_pos(lmc->ring_head, ARRAY_SIZE(lmc->err_ctx)); head 551 drivers/edac/thunderx_edac.c struct lmc_err_ctx *ctx = &lmc->err_ctx[head]; head 1082 drivers/edac/thunderx_edac.c unsigned long head = ring_pos(ocx->com_ring_head, head 1084 drivers/edac/thunderx_edac.c struct ocx_com_err_ctx *ctx = &ocx->com_err_ctx[head]; head 1173 drivers/edac/thunderx_edac.c unsigned long head = ring_pos(ocx->link_ring_head, head 1175 drivers/edac/thunderx_edac.c struct ocx_link_err_ctx *ctx = &ocx->link_err_ctx[head]; head 1766 drivers/edac/thunderx_edac.c unsigned long head = ring_pos(tad->ring_head, ARRAY_SIZE(tad->err_ctx)); head 1767 drivers/edac/thunderx_edac.c struct l2c_err_ctx *ctx = &tad->err_ctx[head]; head 1798 drivers/edac/thunderx_edac.c unsigned long head = ring_pos(cbc->ring_head, ARRAY_SIZE(cbc->err_ctx)); head 1799 drivers/edac/thunderx_edac.c struct l2c_err_ctx *ctx = &cbc->err_ctx[head]; head 1827 drivers/edac/thunderx_edac.c unsigned long head = ring_pos(mci->ring_head, ARRAY_SIZE(mci->err_ctx)); head 1828 drivers/edac/thunderx_edac.c struct l2c_err_ctx *ctx = &mci->err_ctx[head]; head 63 drivers/firewire/nosy.c struct packet *head, *tail; head 120 drivers/firewire/nosy.c buffer->head = (struct packet *) buffer->data; head 154 drivers/firewire/nosy.c length = buffer->head->length; head 156 drivers/firewire/nosy.c if (&buffer->head->data[length] < end) { head 157 drivers/firewire/nosy.c if (copy_to_user(data, buffer->head->data, length)) head 159 drivers/firewire/nosy.c buffer->head = (struct packet *) &buffer->head->data[length]; head 161 drivers/firewire/nosy.c size_t split = end - buffer->head->data; head 163 drivers/firewire/nosy.c if (copy_to_user(data, buffer->head->data, split)) head 167 drivers/firewire/nosy.c buffer->head = (struct packet *) &buffer->data[length - split]; head 948 drivers/firmware/dmi_scan.c const struct list_head *head = from ? 
&from->list : &dmi_devices; head 951 drivers/firmware/dmi_scan.c for (d = head->next; d != &dmi_devices; d = d->next) { head 111 drivers/firmware/efi/efi-pstore.c struct list_head *head) head 114 drivers/firmware/efi/efi-pstore.c if (&next->list != head) head 147 drivers/firmware/efi/efi-pstore.c struct list_head *head, bool stop) head 155 drivers/firmware/efi/efi-pstore.c ret = __efi_pstore_scan_sysfs_exit(next, &next->list != head); head 172 drivers/firmware/efi/efi-pstore.c struct list_head *head = &efivar_sysfs_list; head 177 drivers/firmware/efi/efi-pstore.c list_for_each_entry_safe(entry, n, head, list) { head 178 drivers/firmware/efi/efi-pstore.c efi_pstore_scan_sysfs_enter(entry, n, head); head 181 drivers/firmware/efi/efi-pstore.c ret = efi_pstore_scan_sysfs_exit(entry, n, head, head 192 drivers/firmware/efi/efi-pstore.c list_for_each_entry_safe_from((*pos), n, head, list) { head 193 drivers/firmware/efi/efi-pstore.c efi_pstore_scan_sysfs_enter((*pos), n, head); head 196 drivers/firmware/efi/efi-pstore.c ret = efi_pstore_scan_sysfs_exit((*pos), n, head, size < 0); head 338 drivers/firmware/efi/vars.c struct list_head *head) head 345 drivers/firmware/efi/vars.c list_for_each_entry_safe(entry, n, head, list) { head 427 drivers/firmware/efi/vars.c void *data, bool duplicates, struct list_head *head) head 481 drivers/firmware/efi/vars.c head)) { head 526 drivers/firmware/efi/vars.c int efivar_entry_add(struct efivar_entry *entry, struct list_head *head) head 530 drivers/firmware/efi/vars.c list_add(&entry->list, head); head 660 drivers/firmware/efi/vars.c unsigned long size, void *data, struct list_head *head) head 675 drivers/firmware/efi/vars.c if (head && efivar_entry_find(name, vendor, head, false)) { head 815 drivers/firmware/efi/vars.c struct list_head *head, bool remove) head 821 drivers/firmware/efi/vars.c list_for_each_entry_safe(entry, n, head, list) { head 1086 drivers/firmware/efi/vars.c struct list_head *head, void *data, head 1093 drivers/firmware/efi/vars.c list_for_each_entry_safe(entry, n, head, list) { head 1106 drivers/firmware/efi/vars.c list_for_each_entry_safe_continue((*prev), n, head, list) { head 1131 drivers/firmware/efi/vars.c struct list_head *head, void *data) head 1138 drivers/firmware/efi/vars.c err = __efivar_entry_iter(func, head, data, NULL); head 816 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c head) { head 450 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c INIT_LIST_HEAD(&entry->head); head 455 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c list_add_tail(&entry->head, &process_info->userptr_valid_list); head 457 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c list_add_tail(&entry->head, &process_info->kfd_bo_list); head 468 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c list_del(&bo_list_entry->head); head 584 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c list_add(&ctx->kfd_bo.tv.head, &ctx->list); head 647 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c list_add(&ctx->kfd_bo.tv.head, &ctx->list); head 1256 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c list_del(&bo_list_entry->head); head 1557 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c list_del_init(&mem->validate_list.head); head 1697 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c validate_list.head) { head 1714 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c list_move_tail(&mem->validate_list.head, head 1723 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c validate_list.head) { head 1794 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c validate_list.head) { head 1795 
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c list_add_tail(&mem->resv_list.head, &resv_list); head 1816 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c validate_list.head) { head 1831 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c list_move_tail(&mem->validate_list.head, head 1992 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c validate_list.head) { head 1994 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c list_add_tail(&mem->resv_list.head, &ctx.list); head 2021 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c validate_list.head) { head 2078 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c validate_list.head) head 208 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c list_add_tail(&e->tv.head, &bucket[priority]); head 372 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c struct drm_display_mode, head); head 618 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { head 628 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { head 1026 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { head 1518 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) { head 1538 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { head 463 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c for (;&p->evictable->tv.head != &p->validated; head 464 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c p->evictable = list_prev_entry(p->evictable, tv.head)) { head 504 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c p->evictable = list_prev_entry(p->evictable, tv.head); head 505 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c list_move(&candidate->tv.head, &p->validated); head 537 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c list_for_each_entry(lobj, validated, tv.head) { head 613 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c list_add(&p->uf_entry.tv.head, &p->validated); head 662 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c tv.head); head 727 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c list_for_each_entry(e, &p->validated, tv.head) { head 76 drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c INIT_LIST_HEAD(&csa_tv.head); head 80 drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c list_add(&csa_tv.head, &list); head 3039 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) { head 3044 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { head 3159 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { head 3190 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) { head 3610 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { head 3628 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c gmc.xgmi.head) { head 3638 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c gmc.xgmi.head) { head 3645 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { head 3827 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_add_tail(&adev->gmc.xgmi.head, &device_list); head 3835 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) head 3839 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_for_each_entry(tmp_adev, device_list_handle, 
gmc.xgmi.head) { head 3884 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { head 3916 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { head 289 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) head 377 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) { head 411 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { head 697 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { head 128 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c &ddev->mode_config.crtc_list, head) { head 148 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { head 174 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { head 1267 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) { head 45 drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) { head 47 drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { head 66 drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) { head 85 drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) { head 101 drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) { head 119 drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { head 174 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c list_add(&tv.head, &list); head 608 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c list_add(&tv.head, &list); head 122 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h struct list_head head; head 92 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c list_for_each_entry(connector, &mode_config->connector_list, head) head 438 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c struct list_head *head; head 451 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c head = bo->mn_list.next; head 456 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c if (list_empty(head)) { head 459 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c node = container_of(head, struct amdgpu_mn_node, bos); head 267 drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { head 293 drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { head 329 drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { head 73 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .head = obj->head, head 155 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c data->head.block = block_id; head 158 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; head 160 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; head 172 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c data->head.sub_block_index = sub_block; head 256 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (!amdgpu_ras_is_supported(adev, data.head.block)) head 261 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c ret = 
amdgpu_ras_feature_enable(adev, &data.head, 0); head 264 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c ret = amdgpu_ras_feature_enable(adev, &data.head, 1); head 299 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .head = obj->head, head 320 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name); head 326 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_common_if *head) head 334 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (head->block >= AMDGPU_RAS_BLOCK_COUNT) head 337 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c obj = &con->objs[head->block]; head 342 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c obj->head = *head; head 344 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c list_add(&obj->node, &con->head); head 352 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_common_if *head) head 361 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (head) { head 362 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (head->block >= AMDGPU_RAS_BLOCK_COUNT) head 365 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c obj = &con->objs[head->block]; head 368 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c WARN_ON(head->block != obj->head.block); head 375 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c WARN_ON(i != obj->head.block); head 387 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_common_if *head) head 391 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c return con->hw_supported & BIT(head->block); head 395 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_common_if *head) head 399 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c return con->features & BIT(head->block); head 407 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_common_if *head, int enable) head 410 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); head 418 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (!amdgpu_ras_is_feature_allowed(adev, head)) head 420 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) head 425 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c obj = amdgpu_ras_create_obj(adev, head); head 432 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c con->features |= BIT(head->block); head 434 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (obj && amdgpu_ras_is_feature_enabled(adev, head)) { head 435 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c con->features &= ~BIT(head->block); head 445 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_common_if *head, bool enable) head 456 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .block_id = amdgpu_ras_block_to_ta(head->block), head 457 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .error_type = amdgpu_ras_error_to_ta(head->type), head 461 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .block_id = amdgpu_ras_block_to_ta(head->block), head 462 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .error_type = amdgpu_ras_error_to_ta(head->type), head 467 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head)); head 469 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) head 476 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c ras_block_str(head->block), head 484 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c __amdgpu_ras_feature_enable(adev, head, enable); head 491 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_common_if *head, bool enable) head 507 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c ret = amdgpu_ras_feature_enable(adev, head, 1); head 513 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c ret = __amdgpu_ras_feature_enable(adev, head, 1); head 516 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 
ras_block_str(head->block)); head 520 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c ret = __amdgpu_ras_feature_enable(adev, head, 1); head 524 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c ret = amdgpu_ras_feature_enable(adev, head, 0); head 527 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c ret = amdgpu_ras_feature_enable(adev, head, enable); head 538 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c list_for_each_entry_safe(obj, tmp, &con->head, node) { head 543 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (__amdgpu_ras_feature_enable(adev, &obj->head, 0)) head 546 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (amdgpu_ras_feature_enable(adev, &obj->head, 0)) head 564 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_common_if head = { head 569 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c strcpy(head.name, ras_block_str(i)); head 575 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (__amdgpu_ras_feature_enable(adev, &head, 1)) head 578 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (amdgpu_ras_feature_enable(adev, &head, 1)) head 591 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); head 597 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c switch (info->head.block) { head 627 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c obj->err_data.ce_count, ras_block_str(info->head.block)); head 630 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c obj->err_data.ue_count, ras_block_str(info->head.block)); head 639 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); head 641 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .block_id = amdgpu_ras_block_to_ta(info->head.block), head 642 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .inject_error_type = amdgpu_ras_error_to_ta(info->head.type), head 643 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .sub_block_index = info->head.sub_block_index, head 652 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c switch (info->head.block) { head 665 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c ras_block_str(info->head.block)); head 671 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c ras_block_str(info->head.block), head 695 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c list_for_each_entry(obj, &con->head, node) { head 697 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .head = obj->head, head 858 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_fs_if *head) head 860 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); head 868 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c head->sysfs_name, head 893 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_common_if *head) head 895 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); head 914 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c list_for_each_entry_safe(obj, tmp, &con->head, node) { head 915 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c amdgpu_ras_sysfs_remove(adev, &obj->head); head 936 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_fs_if *head) head 939 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head); head 947 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c head->debugfs_name, head 956 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_common_if *head) head 958 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); head 973 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c list_for_each_entry_safe(obj, tmp, &con->head, node) { head 974 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c amdgpu_ras_debugfs_remove(adev, &obj->head); head 1053 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); head 1078 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); head 1100 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); head 1105 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c obj = amdgpu_ras_create_obj(adev, &info->head); head 1143 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c list_for_each_entry_safe(obj, tmp, &con->head, node) { head 1145 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .head = obj->head, head 1453 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c INIT_LIST_HEAD(&con->head); head 1505 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c list_for_each_entry_safe(obj, tmp, &con->head, node) { head 1506 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (!amdgpu_ras_is_supported(adev, obj->head.block)) { head 1507 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c amdgpu_ras_feature_enable(adev, &obj->head, 0); head 317 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct list_head head; head 387 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if head; head 417 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if head; head 423 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if head; head 429 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if head; head 435 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if head; head 440 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if head; head 445 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if head; head 451 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if head; head 571 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if *head, bool enable); head 574 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if *head, bool enable); head 577 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_fs_if *head); head 580 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if *head); head 583 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_fs_if *head); head 586 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct ras_common_if *head); head 566 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_add(&entry->tv.head, validated); head 368 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); head 369 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c list_for_each_entry(entry, &hive->device_list, head) head 375 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { head 389 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { head 1679 drivers/gpu/drm/amd/amdgpu/atombios_encoders.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { head 335 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) { head 387 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) { head 1233 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { head 1276 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { head 1342 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { head 2622 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { head 3470 
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 353 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 404 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 1259 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
head 1302 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
head 1368 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
head 2730 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 3596 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 286 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 329 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 1135 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
head 1176 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
head 1247 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
head 1642 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
head 2510 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 3280 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 280 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 322 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 1172 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
head 1231 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
head 1295 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
head 2530 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 3358 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 2340 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c .head = *ras_if,
head 4457 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ih_info.head = **ras_if;
head 4489 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ih_info.head = **ras_if;
head 4490 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c fs_info.head = **ras_if;
head 6058 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
head 6061 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c if (!ras_gfx_subblocks[info->head.sub_block_index].name)
head 6064 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
head 6065 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c info->head.type)) {
head 6067 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ras_gfx_subblocks[info->head.sub_block_index].name,
head 6068 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c info->head.type);
head 6072 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
head 6073 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c info->head.type)) {
head 6075 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ras_gfx_subblocks[info->head.sub_block_index].name,
head 6076 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c info->head.type);
head 6080 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
head 6082 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
head 6083 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
head 6171 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ih_data.head = *ras_if;
head 278 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c ih_data.head = *ras_if;
head 865 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c ih_info.head = **ras_if;
head 897 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c ih_info.head = **ras_if;
head 898 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c fs_info->head = **ras_if;
head 1320 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c .head = *ras_if,
head 1726 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c ih_info.head = **ras_if;
head 1758 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c ih_info.head = **ras_if;
head 1759 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c fs_info.head = **ras_if;
head 1865 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c .head = *ras_if,
head 2068 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c ih_data.head = *ras_if;
head 243 drivers/gpu/drm/amd/amdkfd/kfd_events.c list_for_each_entry(waiter, &ev->wq.head, wait.entry)
head 400 drivers/gpu/drm/amd/amdkfd/kfd_events.c list_for_each_entry(waiter, &ev->wq.head, wait.entry)
head 250 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 492 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c list_for_each_entry(mode, &connector->modes, head) {
head 904 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 970 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c head) {
head 1203 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
head 1656 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c &dev->mode_config.connector_list, head) {
head 3591 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
head 3602 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c head);
head 4518 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c list_add(&tv.head, &list);
head 4887 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c head) {
head 4961 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c list_for_each_entry(curmode, &connector->probed_modes, head) {
head 6546 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 73 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h struct list_head head;
head 117 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c struct list_head *handler_list = &irq_list_head->head;
head 164 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
head 295 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
head 386 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c INIT_LIST_HEAD(&lh->head);
head 435 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
head 462 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
head 488 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
head 510 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
head 736 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 769 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 430 drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c list_for_each_entry_safe(obj, next, &config->privobj_list, head)
head 856 drivers/gpu/drm/arm/malidp_drv.c list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) {
head 888 drivers/gpu/drm/ast/ast_mode.c encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
head 227 drivers/gpu/drm/drm_agpsupport.c list_add(&entry->head, &dev->agp->memory);
head 259 drivers/gpu/drm/drm_agpsupport.c list_for_each_entry(entry, &dev->agp->memory, head) {
head 374 drivers/gpu/drm/drm_agpsupport.c list_del(&entry->head);
head 405 drivers/gpu/drm/drm_agpsupport.c struct drm_agp_head *head = NULL;
head 407 drivers/gpu/drm/drm_agpsupport.c head = kzalloc(sizeof(*head), GFP_KERNEL);
head 408 drivers/gpu/drm/drm_agpsupport.c if (!head)
head 410 drivers/gpu/drm/drm_agpsupport.c head->bridge = agp_find_bridge(dev->pdev);
head 411 drivers/gpu/drm/drm_agpsupport.c if (!head->bridge) {
head 412 drivers/gpu/drm/drm_agpsupport.c head->bridge = agp_backend_acquire(dev->pdev);
head 413 drivers/gpu/drm/drm_agpsupport.c if (!head->bridge) {
head 414 drivers/gpu/drm/drm_agpsupport.c kfree(head);
head 417 drivers/gpu/drm/drm_agpsupport.c agp_copy_info(head->bridge, &head->agp_info);
head 418 drivers/gpu/drm/drm_agpsupport.c agp_backend_release(head->bridge);
head 420 drivers/gpu/drm/drm_agpsupport.c agp_copy_info(head->bridge, &head->agp_info);
head 422 drivers/gpu/drm/drm_agpsupport.c if (head->agp_info.chipset == NOT_SUPPORTED) {
head 423 drivers/gpu/drm/drm_agpsupport.c kfree(head);
head 426 drivers/gpu/drm/drm_agpsupport.c INIT_LIST_HEAD(&head->memory);
head 427 drivers/gpu/drm/drm_agpsupport.c head->cant_use_aperture = head->agp_info.cant_use_aperture;
head 428 drivers/gpu/drm/drm_agpsupport.c head->page_mask = head->agp_info.page_mask;
head 429 drivers/gpu/drm/drm_agpsupport.c head->base = head->agp_info.aper_base;
head 430 drivers/gpu/drm/drm_agpsupport.c return head;
head 454 drivers/gpu/drm/drm_agpsupport.c list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
head 733 drivers/gpu/drm/drm_atomic.c list_add_tail(&obj->head, &dev->mode_config.privobj_list);
head 746 drivers/gpu/drm/drm_atomic.c list_del(&obj->head);
head 1461 drivers/gpu/drm/drm_atomic.c list_for_each_entry(plane, &config->plane_list, head) {
head 1469 drivers/gpu/drm/drm_atomic.c list_for_each_entry(crtc, &config->crtc_list, head) {
head 56 drivers/gpu/drm/drm_bufs.c list_for_each_entry(entry, &dev->maplist, head) {
head 299 drivers/gpu/drm/drm_bufs.c list_for_each_entry(entry, &dev->agp->memory, head) {
head 351 drivers/gpu/drm/drm_bufs.c list_add(&list->head, &dev->maplist);
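Nearly every hit above is the same idiom: the object embeds a struct list_head member named head, and list_for_each_entry() recovers the containing structure from that member by pointer arithmetic. Below is a minimal userspace sketch of the pattern; it mirrors include/linux/list.h but is a simplified rewrite (relying on GNU C typeof, as the kernel does), and struct demo_connector and its fields are hypothetical stand-ins, not DRM types.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
/* Recover the containing object from a pointer to its embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry(pos, list, member)                        \
	for (pos = container_of((list)->next, typeof(*pos), member);  \
	     &pos->member != (list);                                  \
	     pos = container_of(pos->member.next, typeof(*pos), member))

static void list_add_tail(struct list_head *new, struct list_head *list)
{
	new->prev = list->prev;
	new->next = list;
	list->prev->next = new;
	list->prev = new;
}

struct demo_connector {            /* hypothetical stand-in */
	const char *name;
	struct list_head head;     /* links the object into a list */
};

int main(void)
{
	struct list_head connector_list = LIST_HEAD_INIT(connector_list);
	struct demo_connector a = { .name = "DP-1" }, b = { .name = "HDMI-A-1" };
	struct demo_connector *conn;

	list_add_tail(&a.head, &connector_list);
	list_add_tail(&b.head, &connector_list);

	list_for_each_entry(conn, &connector_list, head)
		printf("%s\n", conn->name);
	return 0;
}

The node carries no data of its own, which is why the same struct list_head type can thread connectors, encoders, CRTCs, and display modes alike.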
head 395 drivers/gpu/drm/drm_bufs.c list_for_each_entry(_entry, &dev->maplist, head)
head 481 drivers/gpu/drm/drm_bufs.c r_list = list_entry(list, struct drm_map_list, head);
head 521 drivers/gpu/drm/drm_bufs.c list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
head 524 drivers/gpu/drm/drm_bufs.c list_del(&r_list->head);
head 589 drivers/gpu/drm/drm_bufs.c list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
head 602 drivers/gpu/drm/drm_bufs.c list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
head 634 drivers/gpu/drm/drm_bufs.c list_for_each_entry(r_list, &dev->maplist, head) {
head 757 drivers/gpu/drm/drm_bufs.c list_for_each_entry(agp_entry, &dev->agp->memory, head) {
head 1596 drivers/gpu/drm/drm_bufs.c list_for_each_entry(entry, &dev->maplist, head) {
head 122 drivers/gpu/drm/drm_client_modeset.c list_for_each_entry(mode, &connector->modes, head) {
head 151 drivers/gpu/drm/drm_client_modeset.c list_for_each_entry(mode, &connector->modes, head) {
head 184 drivers/gpu/drm/drm_client_modeset.c list_add(&mode->head, &connector->modes);
head 288 drivers/gpu/drm/drm_client_modeset.c list_for_each_entry(mode, &connectors[i]->modes, head) {
head 399 drivers/gpu/drm/drm_client_modeset.c list_for_each_entry(modes[i], &connector->modes, head)
head 615 drivers/gpu/drm/drm_client_modeset.c head);
head 259 drivers/gpu/drm/drm_connector.c list_add_tail(&connector->head, &config->connector_list);
head 420 drivers/gpu/drm/drm_connector.c list_del(&mode->head);
head 447 drivers/gpu/drm/drm_connector.c list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
head 450 drivers/gpu/drm/drm_connector.c list_for_each_entry_safe(mode, t, &connector->modes, head)
head 464 drivers/gpu/drm/drm_connector.c list_del(&connector->head);
head 680 drivers/gpu/drm/drm_connector.c lhead = old_conn ? &old_conn->head : &config->connector_list;
head 689 drivers/gpu/drm/drm_connector.c iter->conn = list_entry(lhead, struct drm_connector, head);
head 2173 drivers/gpu/drm/drm_connector.c list_for_each_entry(mode, &connector->modes, head)
head 41 drivers/gpu/drm/drm_context.c struct list_head head;
head 145 drivers/gpu/drm/drm_context.c list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
head 152 drivers/gpu/drm/drm_context.c list_del(&pos->head);
head 198 drivers/gpu/drm/drm_context.c list_for_each_entry(_entry, &dev->maplist, head) {
head 238 drivers/gpu/drm/drm_context.c list_for_each_entry(r_list, &dev->maplist, head) {
head 396 drivers/gpu/drm/drm_context.c INIT_LIST_HEAD(&ctx_entry->head);
head 401 drivers/gpu/drm/drm_context.c list_add(&ctx_entry->head, &dev->ctxlist);
head 512 drivers/gpu/drm/drm_context.c list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
head 514 drivers/gpu/drm/drm_context.c list_del(&pos->head);
head 279 drivers/gpu/drm/drm_crtc.c list_add_tail(&crtc->head, &config->crtc_list);
head 333 drivers/gpu/drm/drm_crtc.c list_del(&crtc->head);
head 179 drivers/gpu/drm/drm_debugfs_crc.c return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
head 187 drivers/gpu/drm/drm_debugfs_crc.c crc->head = 0;
head 393 drivers/gpu/drm/drm_debugfs_crc.c int head, tail;
head 404 drivers/gpu/drm/drm_debugfs_crc.c head = crc->head;
head 407 drivers/gpu/drm/drm_debugfs_crc.c if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
head 419 drivers/gpu/drm/drm_debugfs_crc.c entry = &crc->entries[head];
head 424 drivers/gpu/drm/drm_debugfs_crc.c head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
head 425 drivers/gpu/drm/drm_debugfs_crc.c crc->head = head;
head 1888 drivers/gpu/drm/drm_edid.c struct drm_display_mode, head);
head 1890 drivers/gpu/drm/drm_edid.c list_for_each_entry_safe(cur_mode, t, &connector->probed_modes, head) {
head 2170 drivers/gpu/drm/drm_edid.c list_for_each_entry(m, &connector->probed_modes, head)
head 2467 drivers/gpu/drm/drm_edid.c list_for_each_entry(m, &connector->probed_modes, head) {
head 3223 drivers/gpu/drm/drm_edid.c list_for_each_entry(mode, &connector->probed_modes, head) {
head 3267 drivers/gpu/drm/drm_edid.c list_add_tail(&newmode->head, &list);
head 3270 drivers/gpu/drm/drm_edid.c list_for_each_entry_safe(mode, tmp, &list, head) {
head 3271 drivers/gpu/drm/drm_edid.c list_del(&mode->head);
head 3440 drivers/gpu/drm/drm_edid.c list_for_each_entry(mode, &connector->probed_modes, head) {
head 3455 drivers/gpu/drm/drm_edid.c list_add_tail(&new_mode->head, &stereo_modes);
head 4982 drivers/gpu/drm/drm_edid.c list_for_each_entry(mode, &connector->probed_modes, head) {
head 142 drivers/gpu/drm/drm_encoder.c list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
head 181 drivers/gpu/drm/drm_encoder.c list_del(&encoder->head);
head 743 drivers/gpu/drm/drm_framebuffer.c list_add(&fb->head, &dev->mode_config.fb_list);
head 826 drivers/gpu/drm/drm_framebuffer.c list_del(&fb->head);
head 73 drivers/gpu/drm/drm_hashtab.c hlist_for_each_entry(entry, h_list, head)
head 86 drivers/gpu/drm/drm_hashtab.c hlist_for_each_entry(entry, h_list, head) {
head 88 drivers/gpu/drm/drm_hashtab.c return &entry->head;
head 104 drivers/gpu/drm/drm_hashtab.c hlist_for_each_entry_rcu(entry, h_list, head) {
head 106 drivers/gpu/drm/drm_hashtab.c return &entry->head;
head 124 drivers/gpu/drm/drm_hashtab.c hlist_for_each_entry(entry, h_list, head) {
head 129 drivers/gpu/drm/drm_hashtab.c parent = &entry->head;
head 132 drivers/gpu/drm/drm_hashtab.c hlist_add_behind_rcu(&item->head, parent);
head 134 drivers/gpu/drm/drm_hashtab.c hlist_add_head_rcu(&item->head, h_list);
head 178 drivers/gpu/drm/drm_hashtab.c *item = hlist_entry(list, struct drm_hash_item, head);
head 197 drivers/gpu/drm/drm_hashtab.c hlist_del_init_rcu(&item->head);
head 166 drivers/gpu/drm/drm_lease.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 148 drivers/gpu/drm/drm_legacy.h struct list_head head;
head 75 drivers/gpu/drm/drm_memory.c list_for_each_entry(agpmem, &dev->agp->memory, head)
head 80 drivers/gpu/drm/drm_memory.c if (&agpmem->head == &dev->agp->memory)
head 446 drivers/gpu/drm/drm_mode_config.c head) {
head 469 drivers/gpu/drm/drm_mode_config.c head) {
head 474 drivers/gpu/drm/drm_mode_config.c head) {
head 478 drivers/gpu/drm/drm_mode_config.c list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
head 496 drivers/gpu/drm/drm_mode_config.c list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
head 112 drivers/gpu/drm/drm_modes.c list_add_tail(&mode->head, &connector->probed_modes);
head 917 drivers/gpu/drm/drm_modes.c struct list_head head = dst->head;
head 920 drivers/gpu/drm/drm_modes.c dst->head = head;
head 1294 drivers/gpu/drm/drm_modes.c list_for_each_entry_safe(mode, t, mode_list, head) {
head 1296 drivers/gpu/drm/drm_modes.c list_del(&mode->head);
head 1324 drivers/gpu/drm/drm_modes.c struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
head 1325 drivers/gpu/drm/drm_modes.c struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
head 1373 drivers/gpu/drm/drm_modes.c list_for_each_entry_safe(pmode, pt, &connector->probed_modes, head) {
head 1378 drivers/gpu/drm/drm_modes.c list_for_each_entry(mode, &connector->modes, head) {
head 1406 drivers/gpu/drm/drm_modes.c list_del(&pmode->head);
head 1412 drivers/gpu/drm/drm_modes.c list_move_tail(&pmode->head, &connector->modes);
head 58 drivers/gpu/drm/drm_modeset_helper.c &dev->mode_config.connector_list, head) {
head 62 drivers/gpu/drm/drm_modeset_helper.c list_move_tail(&connector->head, &panel_list);
head 233 drivers/gpu/drm/drm_modeset_lock.c struct drm_modeset_lock, head);
head 266 drivers/gpu/drm/drm_modeset_lock.c WARN_ON(!list_empty(&lock->head));
head 267 drivers/gpu/drm/drm_modeset_lock.c list_add(&lock->head, &ctx->locked);
head 316 drivers/gpu/drm/drm_modeset_lock.c INIT_LIST_HEAD(&lock->head);
head 370 drivers/gpu/drm/drm_modeset_lock.c list_del_init(&lock->head);
head 262 drivers/gpu/drm/drm_plane.c list_add_tail(&plane->head, &config->plane_list);
head 365 drivers/gpu/drm/drm_plane.c BUG_ON(list_empty(&plane->head));
head 372 drivers/gpu/drm/drm_plane.c list_del(&plane->head);
head 149 drivers/gpu/drm/drm_probe_helper.c list_for_each_entry(mode, &connector->probed_modes, head) {
head 417 drivers/gpu/drm/drm_probe_helper.c list_for_each_entry(mode, &connector->modes, head)
head 505 drivers/gpu/drm/drm_probe_helper.c list_for_each_entry(mode, &connector->modes, head) {
head 533 drivers/gpu/drm/drm_probe_helper.c list_for_each_entry(mode, &connector->modes, head)
head 540 drivers/gpu/drm/drm_probe_helper.c list_for_each_entry(mode, &connector->modes, head) {
head 133 drivers/gpu/drm/drm_property.c list_add_tail(&property->head, &dev->mode_config.property_list);
head 411 drivers/gpu/drm/drm_property.c list_for_each_entry(prop_enum, &property->enum_list, head) {
head 429 drivers/gpu/drm/drm_property.c list_add_tail(&prop_enum->head, &property->enum_list);
head 446 drivers/gpu/drm/drm_property.c list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
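The drm_debugfs_crc.c hits above combine a masked advance, head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1), with the CIRC_CNT()/CIRC_SPACE() helpers. The two macros below match their definitions in include/linux/circ_buf.h; the demo ring around them is a hypothetical stand-in. The arithmetic requires a power-of-two size, and it deliberately keeps one slot free so that a full ring can be distinguished from an empty one.

#include <stdio.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define RING_SIZE 8   /* power of two, like DRM_CRC_ENTRIES_NR */

int main(void)
{
	int ring[RING_SIZE];
	int head = 0, tail = 0;

	/* Producer: stop when no space is left, as the CRC worker
	 * does when userspace reads too slowly. */
	for (int v = 0; v < 10; v++) {
		if (CIRC_SPACE(head, tail, RING_SIZE) < 1)
			break;                      /* ring full */
		ring[head] = v;
		head = (head + 1) & (RING_SIZE - 1);
	}
	printf("queued %d entries\n", CIRC_CNT(head, tail, RING_SIZE));
	return 0;
}

With RING_SIZE 8, exactly 7 entries fit before CIRC_SPACE() reports the ring full.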
head 447 drivers/gpu/drm/drm_property.c list_del(&prop_enum->head);
head 454 drivers/gpu/drm/drm_property.c list_del(&property->head);
head 498 drivers/gpu/drm/drm_property.c list_for_each_entry(prop_enum, &property->enum_list, head) {
head 232 drivers/gpu/drm/drm_sysfs.c list_for_each_entry(mode, &connector->modes, head) {
head 61 drivers/gpu/drm/drm_vm.c struct list_head head;
head 160 drivers/gpu/drm/drm_vm.c list_for_each_entry(agpmem, &dev->agp->memory, head) {
head 166 drivers/gpu/drm/drm_vm.c if (&agpmem->head == &dev->agp->memory)
head 251 drivers/gpu/drm/drm_vm.c list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
head 255 drivers/gpu/drm/drm_vm.c list_del(&pt->head);
head 266 drivers/gpu/drm/drm_vm.c list_for_each_entry(r_list, &dev->maplist, head) {
head 409 drivers/gpu/drm/drm_vm.c list_add(&vma_entry->head, &dev->vmalist);
head 431 drivers/gpu/drm/drm_vm.c list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
head 433 drivers/gpu/drm/drm_vm.c list_del(&pt->head);
head 668 drivers/gpu/drm/drm_vm.c list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
head 669 drivers/gpu/drm/drm_vm.c list_del(&vma->head);
head 298 drivers/gpu/drm/exynos/exynos_drm_drv.c list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
head 301 drivers/gpu/drm/exynos/exynos_drm_drv.c list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
head 155 drivers/gpu/drm/exynos/exynos_drm_g2d.c u32 head;
head 1276 drivers/gpu/drm/exynos/exynos_drm_g2d.c cmdlist->head = cmdlist->last / 2;
head 69 drivers/gpu/drm/exynos/exynos_drm_ipp.c list_add_tail(&ipp->head, &ipp_list);
head 87 drivers/gpu/drm/exynos/exynos_drm_ipp.c list_del(&ipp->head);
head 117 drivers/gpu/drm/exynos/exynos_drm_ipp.c list_for_each_entry(ipp, &ipp_list, head) {
head 132 drivers/gpu/drm/exynos/exynos_drm_ipp.c list_for_each_entry(ipp, &ipp_list, head)
head 802 drivers/gpu/drm/exynos/exynos_drm_ipp.c head);
head 803 drivers/gpu/drm/exynos/exynos_drm_ipp.c list_del_init(&task->head);
head 823 drivers/gpu/drm/exynos/exynos_drm_ipp.c list_add(&task->head, &ipp->todo_list);
head 840 drivers/gpu/drm/exynos/exynos_drm_ipp.c list_del_init(&task->head);
head 53 drivers/gpu/drm/exynos/exynos_drm_ipp.h struct list_head head;
head 85 drivers/gpu/drm/exynos/exynos_drm_ipp.h struct list_head head;
head 1009 drivers/gpu/drm/exynos/exynos_hdmi.c list_for_each_entry(m, &connector->modes, head) {
head 295 drivers/gpu/drm/gma500/cdv_device.c list_for_each_entry(connector, &dev->mode_config.connector_list, head)
head 369 drivers/gpu/drm/gma500/cdv_device.c list_for_each_entry(connector, &dev->mode_config.connector_list, head)
head 591 drivers/gpu/drm/gma500/cdv_intel_display.c list_for_each_entry(connector, &mode_config->connector_list, head) {
head 1001 drivers/gpu/drm/gma500/cdv_intel_dp.c list_for_each_entry(encoder, &mode_config->encoder_list, head) {
head 1788 drivers/gpu/drm/gma500/cdv_intel_dp.c head) {
head 271 drivers/gpu/drm/gma500/cdv_intel_lvds.c head) {
head 684 drivers/gpu/drm/gma500/cdv_intel_lvds.c list_for_each_entry(scan, &connector->probed_modes, head) {
head 578 drivers/gpu/drm/gma500/framebuffer.c head) {
head 32 drivers/gpu/drm/gma500/gma_display.c list_for_each_entry(l_entry, &mode_config->connector_list, head) {
head 729 drivers/gpu/drm/gma500/mdfld_intel_display.c list_for_each_entry(connector, &mode_config->connector_list, head) {
head 394 drivers/gpu/drm/gma500/oaktrail_crtc.c list_for_each_entry(connector, &mode_config->connector_list, head) {
head 113 drivers/gpu/drm/gma500/oaktrail_lvds.c list_for_each_entry(connector, &mode_config->connector_list, head) {
head 371 drivers/gpu/drm/gma500/oaktrail_lvds.c list_for_each_entry(scan, &connector->probed_modes, head) {
head 187 drivers/gpu/drm/gma500/psb_device.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 192 drivers/gpu/drm/gma500/psb_device.c list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
head 227 drivers/gpu/drm/gma500/psb_device.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
head 231 drivers/gpu/drm/gma500/psb_device.c list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
head 374 drivers/gpu/drm/gma500/psb_drv.c head) {
head 118 drivers/gpu/drm/gma500/psb_intel_display.c list_for_each_entry(connector, &mode_config->connector_list, head) {
head 537 drivers/gpu/drm/gma500/psb_intel_display.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 552 drivers/gpu/drm/gma500/psb_intel_display.c head) {
head 387 drivers/gpu/drm/gma500/psb_intel_lvds.c head) {
head 745 drivers/gpu/drm/gma500/psb_intel_lvds.c list_for_each_entry(scan, &connector->probed_modes, head) {
head 1239 drivers/gpu/drm/gma500/psb_intel_sdvo.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 1612 drivers/gpu/drm/gma500/psb_intel_sdvo.c list_for_each_entry(newmode, &connector->probed_modes, head) {
head 251 drivers/gpu/drm/i810/i810_dma.c ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
head 252 drivers/gpu/drm/i810/i810_dma.c ring->space = ring->head - (ring->tail + 8);
head 256 drivers/gpu/drm/i810/i810_dma.c if (ring->head != last_head) {
head 258 drivers/gpu/drm/i810/i810_dma.c last_head = ring->head;
head 279 drivers/gpu/drm/i810/i810_dma.c ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
head 281 drivers/gpu/drm/i810/i810_dma.c ring->space = ring->head - (ring->tail + 8);
head 328 drivers/gpu/drm/i810/i810_dma.c list_for_each_entry(r_list, &dev->maplist, head) {
head 77 drivers/gpu/drm/i810/i810_drv.h int head;
head 321 drivers/gpu/drm/i915/display/intel_display.h list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
head 326 drivers/gpu/drm/i915/display/intel_display.h base.head)
head 331 drivers/gpu/drm/i915/display/intel_display.h base.head) \
head 338 drivers/gpu/drm/i915/display/intel_display.h base.head) \
head 344 drivers/gpu/drm/i915/display/intel_display.h base.head)
head 349 drivers/gpu/drm/i915/display/intel_display.h base.head) \
head 355 drivers/gpu/drm/i915/display/intel_display.h base.head)
head 365 drivers/gpu/drm/i915/display/intel_display.h list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
head 369 drivers/gpu/drm/i915/display/intel_display.h list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
head 73 drivers/gpu/drm/i915/display/intel_panel.c list_for_each_entry(scan, &connector->base.probed_modes, head) {
head 117 drivers/gpu/drm/i915/display/intel_panel.c list_for_each_entry(scan, &connector->base.probed_modes, head) {
head 133 drivers/gpu/drm/i915/display/intel_panel.c typeof(*scan), head);
head 2851 drivers/gpu/drm/i915/display/intel_sdvo.c list_for_each_entry(mode, &connector->probed_modes, head) {
head 2935 drivers/gpu/drm/i915/display/intel_sdvo.c &dev->mode_config.connector_list, head) {
head 838 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct hlist_head *head;
head 841 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c head = &eb->buckets[hash_32(handle, eb->lut_size)];
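A second recurring shape is the hash-bucket variant: drm_hashtab.c earlier and the i915_gem_execbuffer.c lines just above keep items on singly linked hlists indexed by a hash of the key. The following is a compact standalone sketch of that lookup, not the kernel's include/linux/list.h macros; struct item and ITEM_BUCKETS are hypothetical.

#include <stddef.h>
#include <stdio.h>

struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define ITEM_BUCKETS 16

struct item {
	unsigned int key;
	struct hlist_node head;   /* links the item into its bucket */
};

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	h->first = n;
}

static struct item *lookup(struct hlist_head *buckets, unsigned int key)
{
	struct hlist_head *h = &buckets[key % ITEM_BUCKETS];

	/* Walk one bucket, recovering each item from its node. */
	for (struct hlist_node *n = h->first; n; n = n->next) {
		struct item *it = container_of(n, struct item, head);
		if (it->key == key)
			return it;
	}
	return NULL;
}

int main(void)
{
	struct hlist_head buckets[ITEM_BUCKETS] = { 0 };
	struct item a = { .key = 42 };

	hlist_add_head(&a.head, &buckets[a.key % ITEM_BUCKETS]);
	printf("found: %u\n", lookup(buckets, 42)->key);
	return 0;
}

An hlist_head is a single pointer rather than the two of a list_head, which halves the size of large bucket arrays at the cost of O(n) tail access.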
head 842 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c hlist_for_each_entry(vma, head, exec_node) {
head 137 drivers/gpu/drm/i915/gem/i915_gem_object.c static void __i915_gem_free_object_rcu(struct rcu_head *head)
head 140 drivers/gpu/drm/i915/gem/i915_gem_object.c container_of(head, typeof(*obj), rcu);
head 548 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c struct list_head *head)
head 560 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c list_add_tail(&obj->mm.link, head);
head 72 drivers/gpu/drm/i915/gt/intel_context.c ce->ring->head, ce->ring->tail);
head 302 drivers/gpu/drm/i915/gt/intel_engine.h GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
head 303 drivers/gpu/drm/i915/gt/intel_engine.h tail < ring->head);
head 322 drivers/gpu/drm/i915/gt/intel_engine.h __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
head 330 drivers/gpu/drm/i915/gt/intel_engine.h return (head - tail - CACHELINE_BYTES) & (size - 1);
head 1328 drivers/gpu/drm/i915/gt/intel_engine_cs.c rq->head, rq->postfix, rq->tail,
head 1332 drivers/gpu/drm/i915/gt/intel_engine_cs.c size = rq->tail - rq->head;
head 1333 drivers/gpu/drm/i915/gt/intel_engine_cs.c if (rq->tail < rq->head)
head 1339 drivers/gpu/drm/i915/gt/intel_engine_cs.c unsigned int head = rq->head;
head 1342 drivers/gpu/drm/i915/gt/intel_engine_cs.c if (rq->tail < head) {
head 1343 drivers/gpu/drm/i915/gt/intel_engine_cs.c len = rq->ring->size - head;
head 1344 drivers/gpu/drm/i915/gt/intel_engine_cs.c memcpy(ring, vaddr + head, len);
head 1345 drivers/gpu/drm/i915/gt/intel_engine_cs.c head = 0;
head 1347 drivers/gpu/drm/i915/gt/intel_engine_cs.c memcpy(ring + len, vaddr + head, size - len);
head 1391 drivers/gpu/drm/i915/gt/intel_engine_cs.c rq->ring->head);
head 104 drivers/gpu/drm/i915/gt/intel_engine_types.h u32 head;
head 33 drivers/gpu/drm/i915/gt/intel_hangcheck.c u32 head;
head 136 drivers/gpu/drm/i915/gt/intel_hangcheck.c hc->head = ENGINE_READ(engine, RING_HEAD);
head 144 drivers/gpu/drm/i915/gt/intel_hangcheck.c engine->hangcheck.last_head = hc->head;
head 157 drivers/gpu/drm/i915/gt/intel_hangcheck.c if (engine->hangcheck.last_head != hc->head)
head 1491 drivers/gpu/drm/i915/gt/intel_lrc.c u8 head, tail;
head 1505 drivers/gpu/drm/i915/gt/intel_lrc.c head = execlists->csb_head;
head 1507 drivers/gpu/drm/i915/gt/intel_lrc.c GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
head 1508 drivers/gpu/drm/i915/gt/intel_lrc.c if (unlikely(head == tail))
head 1524 drivers/gpu/drm/i915/gt/intel_lrc.c if (++head == num_entries)
head 1525 drivers/gpu/drm/i915/gt/intel_lrc.c head = 0;
head 1546 drivers/gpu/drm/i915/gt/intel_lrc.c engine->name, head,
head 1547 drivers/gpu/drm/i915/gt/intel_lrc.c buf[2 * head + 0], buf[2 * head + 1]);
head 1550 drivers/gpu/drm/i915/gt/intel_lrc.c csb_step = gen12_csb_parse(execlists, buf + 2 * head);
head 1552 drivers/gpu/drm/i915/gt/intel_lrc.c csb_step = gen8_csb_parse(execlists, buf + 2 * head);
head 1600 drivers/gpu/drm/i915/gt/intel_lrc.c } while (head != tail);
head 1602 drivers/gpu/drm/i915/gt/intel_lrc.c execlists->csb_head = head;
head 1769 drivers/gpu/drm/i915/gt/intel_lrc.c GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
head 1773 drivers/gpu/drm/i915/gt/intel_lrc.c regs[CTX_RING_HEAD + 1] = ring->head;
head 2485 drivers/gpu/drm/i915/gt/intel_lrc.c ce->ring->head = ce->ring->tail;
head 2489 drivers/gpu/drm/i915/gt/intel_lrc.c ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
head 2539 drivers/gpu/drm/i915/gt/intel_lrc.c engine->name, ce->ring->head, ce->ring->tail);
head 3999 drivers/gpu/drm/i915/gt/intel_lrc.c u32 head,
head 4022 drivers/gpu/drm/i915/gt/intel_lrc.c ce->ring->head = head;
head 111 drivers/gpu/drm/i915/gt/intel_lrc.h u32 head,
head 54 drivers/gpu/drm/i915/gt/intel_ringbuffer.c space = __intel_ring_space(ring->head, ring->emit, ring->size);
head 645 drivers/gpu/drm/i915/gt/intel_ringbuffer.c engine->name, ring->head, ring->tail);
head 692 drivers/gpu/drm/i915/gt/intel_ringbuffer.c GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
head 697 drivers/gpu/drm/i915/gt/intel_ringbuffer.c ENGINE_WRITE(engine, RING_HEAD, ring->head);
head 698 drivers/gpu/drm/i915/gt/intel_ringbuffer.c ENGINE_WRITE(engine, RING_TAIL, ring->head);
head 713 drivers/gpu/drm/i915/gt/intel_ringbuffer.c ENGINE_READ(engine, RING_HEAD), ring->head,
head 726 drivers/gpu/drm/i915/gt/intel_ringbuffer.c if (ring->tail != ring->head) {
head 786 drivers/gpu/drm/i915/gt/intel_ringbuffer.c u32 head;
head 838 drivers/gpu/drm/i915/gt/intel_ringbuffer.c head = rq->head;
head 840 drivers/gpu/drm/i915/gt/intel_ringbuffer.c head = engine->legacy.ring->tail;
head 842 drivers/gpu/drm/i915/gt/intel_ringbuffer.c engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
head 1239 drivers/gpu/drm/i915/gt/intel_ringbuffer.c ring->head = tail;
head 184 drivers/gpu/drm/i915/gt/selftest_lrc.c struct i915_request *head;
head 188 drivers/gpu/drm/i915/gt/selftest_lrc.c head = semaphore_queue(outer, vma, n++);
head 189 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(head))
head 190 drivers/gpu/drm/i915/gt/selftest_lrc.c return PTR_ERR(head);
head 192 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_get(head);
head 209 drivers/gpu/drm/i915/gt/selftest_lrc.c if (i915_request_wait(head,
head 220 drivers/gpu/drm/i915/gt/selftest_lrc.c i915_request_put(head);
head 1189 drivers/gpu/drm/i915/gt/selftest_lrc.c ring_size = rq->wa_tail - rq->head;
head 80 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c desc, desc->head, desc->tail);
head 81 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c desc->head = 0;
head 298 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c u32 head = desc->head / 4; /* in dwords */
head 307 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c GEM_BUG_ON(desc->head % 4);
head 315 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c if (tail < head)
head 316 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c used = (size - head) + tail;
head 318 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c used = tail - head;
head 556 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c u32 head = desc->head / 4; /* in dwords */
head 565 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c GEM_BUG_ON(desc->head % 4);
head 568 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c GEM_BUG_ON(head >= size);
head 571 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c available = tail - head;
head 578 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
head 581 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c data[0] = cmds[head];
head 582 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c head = (head + 1) % size;
head 589 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 4 * (head + available - 1 > size ?
head 590 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c size - head : available - 1), &cmds[head],
head 591 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 4 * (head + available - 1 > size ?
head 592 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c available - 1 - size + head : 0), &cmds[0]);
head 597 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c data[i] = cmds[head];
head 598 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c head = (head + 1) % size;
head 602 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c desc->head = head * 4;
head 118 drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h u32 head;
head 251 drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h u32 head; /* offset updated by GuC*/
head 417 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
head 673 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
head 69 drivers/gpu/drm/i915/gt/uc/selftest_guc.c err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10);
head 180 drivers/gpu/drm/i915/gvt/sched_policy.c struct list_head *head = &sched_data->lru_runq_head;
head 184 drivers/gpu/drm/i915/gvt/sched_policy.c list_for_each(pos, head) {
head 811 drivers/gpu/drm/i915/gvt/scheduler.c u32 head, tail;
head 817 drivers/gpu/drm/i915/gvt/scheduler.c head = workload->rb_head;
head 821 drivers/gpu/drm/i915/gvt/scheduler.c if (tail < head) {
head 828 drivers/gpu/drm/i915/gvt/scheduler.c head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
head 832 drivers/gpu/drm/i915/gvt/scheduler.c vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
head 1485 drivers/gpu/drm/i915/gvt/scheduler.c u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
head 1497 drivers/gpu/drm/i915/gvt/scheduler.c RING_CTX_OFF(ring_header.val), &head, 4);
head 1502 drivers/gpu/drm/i915/gvt/scheduler.c guest_head = head;
head 1504 drivers/gpu/drm/i915/gvt/scheduler.c head &= RB_HEAD_OFF_MASK;
head 1512 drivers/gpu/drm/i915/gvt/scheduler.c gvt_dbg_el("ctx head %x real head %lx\n", head,
head 1518 drivers/gpu/drm/i915/gvt/scheduler.c head = last_workload->rb_tail;
head 1546 drivers/gpu/drm/i915/gvt/scheduler.c workload->rb_head = head;
head 1591 drivers/gpu/drm/i915/gvt/scheduler.c workload, ring_id, head, tail, start, ctl);
head 264 drivers/gpu/drm/i915/i915_active.c struct llist_node *head = NULL, *tail = NULL;
head 290 drivers/gpu/drm/i915/i915_active.c pos->next = head;
head 291 drivers/gpu/drm/i915/i915_active.c head = pos;
head 295 drivers/gpu/drm/i915/i915_active.c if (head)
head 296 drivers/gpu/drm/i915/i915_active.c llist_add_batch(head, tail, &engine->barrier_tasks);
head 1562 drivers/gpu/drm/i915/i915_debugfs.c ring->space, ring->head, ring->tail, ring->emit);
head 2604 drivers/gpu/drm/i915/i915_debugfs.c list_for_each_entry(mode, &connector->modes, head)
head 1207 drivers/gpu/drm/i915/i915_drv.h u32 head;
head 466 drivers/gpu/drm/i915/i915_gpu_error.c erq->start, erq->head, erq->tail);
head 487 drivers/gpu/drm/i915/i915_gpu_error.c err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
head 1101 drivers/gpu/drm/i915/i915_gpu_error.c ee->head = ENGINE_READ(engine, RING_HEAD);
head 1180 drivers/gpu/drm/i915/i915_gpu_error.c erq->head = request->head;
head 1414 drivers/gpu/drm/i915/i915_gpu_error.c ee->cpu_ring_head = request->ring->head;
head 1417 drivers/gpu/drm/i915/i915_gpu_error.c ee->rq_head = request->head;
head 102 drivers/gpu/drm/i915/i915_gpu_error.h u32 head;
head 149 drivers/gpu/drm/i915/i915_gpu_error.h u32 head;
head 225 drivers/gpu/drm/i915/i915_perf.c #define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1))
head 463 drivers/gpu/drm/i915/i915_perf.c u32 head, hw_tail, aged_tail, aging_tail;
head 476 drivers/gpu/drm/i915/i915_perf.c head = stream->oa_buffer.head;
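The intel_guc_ct.c hits above show the consumer side of a shared circular command buffer: head and tail are dword indices, a message may wrap past the end of the buffer, and the reader copies it out in at most two memcpy() calls before publishing the new head. Here is a self-contained sketch of that wrap-and-copy step under a fixed SIZE; ct_read() is a hypothetical stand-in, not the driver's function.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define SIZE 8   /* buffer size in dwords */

static uint32_t cmds[SIZE];

/* Copy len dwords starting at *head into data, wrapping at SIZE. */
static void ct_read(uint32_t *head, uint32_t *data, uint32_t len)
{
	uint32_t first = (*head + len > SIZE) ? SIZE - *head : len;

	memcpy(data, &cmds[*head], 4 * first);             /* up to the end */
	memcpy(data + first, &cmds[0], 4 * (len - first)); /* wrapped part */
	*head = (*head + len) % SIZE;                      /* publish new head */
}

int main(void)
{
	uint32_t head = 6, msg[4];

	for (uint32_t i = 0; i < SIZE; i++)
		cmds[i] = i;
	ct_read(&head, msg, 4);   /* reads 6, 7, then wraps to 0, 1 */
	printf("%u %u %u %u\n", msg[0], msg[1], msg[2], msg[3]);
	return 0;
}

In the real protocol only the consumer writes head and only the producer writes tail, which is what lets the two sides share the buffer without a lock.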
head 547 drivers/gpu/drm/i915/i915_perf.c false : OA_TAKEN(aged_tail, head) >= report_size;
head 666 drivers/gpu/drm/i915/i915_perf.c u32 head, tail;
head 675 drivers/gpu/drm/i915/i915_perf.c head = stream->oa_buffer.head;
head 692 drivers/gpu/drm/i915/i915_perf.c head -= gtt_offset;
head 702 drivers/gpu/drm/i915/i915_perf.c if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
head 705 drivers/gpu/drm/i915/i915_perf.c head, tail))
head 710 drivers/gpu/drm/i915/i915_perf.c (taken = OA_TAKEN(tail, head));
head 711 drivers/gpu/drm/i915/i915_perf.c head = (head + report_size) & mask) {
head 712 drivers/gpu/drm/i915/i915_perf.c u8 *report = oa_buf_base + head;
head 726 drivers/gpu/drm/i915/i915_perf.c if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
head 831 drivers/gpu/drm/i915/i915_perf.c head += gtt_offset;
head 833 drivers/gpu/drm/i915/i915_perf.c I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
head 834 drivers/gpu/drm/i915/i915_perf.c stream->oa_buffer.head = head;
head 954 drivers/gpu/drm/i915/i915_perf.c u32 head, tail;
head 963 drivers/gpu/drm/i915/i915_perf.c head = stream->oa_buffer.head;
head 978 drivers/gpu/drm/i915/i915_perf.c head -= gtt_offset;
head 987 drivers/gpu/drm/i915/i915_perf.c if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
head 990 drivers/gpu/drm/i915/i915_perf.c head, tail))
head 995 drivers/gpu/drm/i915/i915_perf.c (taken = OA_TAKEN(tail, head));
head 996 drivers/gpu/drm/i915/i915_perf.c head = (head + report_size) & mask) {
head 997 drivers/gpu/drm/i915/i915_perf.c u8 *report = oa_buf_base + head;
head 1008 drivers/gpu/drm/i915/i915_perf.c if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
head 1044 drivers/gpu/drm/i915/i915_perf.c head += gtt_offset;
head 1047 drivers/gpu/drm/i915/i915_perf.c ((head & GEN7_OASTATUS2_HEAD_MASK) |
head 1049 drivers/gpu/drm/i915/i915_perf.c stream->oa_buffer.head = head;
head 1406 drivers/gpu/drm/i915/i915_perf.c stream->oa_buffer.head = gtt_offset;
head 1453 drivers/gpu/drm/i915/i915_perf.c stream->oa_buffer.head = gtt_offset;
head 244 drivers/gpu/drm/i915/i915_request.c rq->ring->head = rq->postfix;
head 741 drivers/gpu/drm/i915/i915_request.c rq->head = rq->ring->emit;
head 753 drivers/gpu/drm/i915/i915_request.c ce->ring->emit = rq->head;
head 1103 drivers/gpu/drm/i915/i915_request.c u32 head;
head 1116 drivers/gpu/drm/i915/i915_request.c head = rq->infix;
head 1117 drivers/gpu/drm/i915/i915_request.c if (rq->postfix < head) {
head 1118 drivers/gpu/drm/i915/i915_request.c memset(vaddr + head, 0, rq->ring->size - head);
head 1119 drivers/gpu/drm/i915/i915_request.c head = 0;
head 1121 drivers/gpu/drm/i915/i915_request.c memset(vaddr + head, 0, rq->postfix - head);
head 184 drivers/gpu/drm/i915/i915_request.h u32 head;
head 150 drivers/gpu/drm/i915/i915_sw_fence.c list_for_each_entry_safe(pos, next, &x->head, entry) {
head 160 drivers/gpu/drm/i915/i915_sw_fence.c list_for_each_entry_safe(pos, next, &x->head, entry) {
head 169 drivers/gpu/drm/i915/i915_sw_fence.c list_splice_tail_init(&extra, &x->head);
head 259 drivers/gpu/drm/i915/i915_sw_fence.c list_for_each_entry(wq, &fence->wait.head, entry) {
head 277 drivers/gpu/drm/i915/i915_sw_fence.c list_for_each_entry(wq, &fence->wait.head, entry) {
head 241 drivers/gpu/drm/i915/i915_utils.h static inline void __list_del_many(struct list_head *head,
head 244 drivers/gpu/drm/i915/i915_utils.h first->prev = head;
head 245 drivers/gpu/drm/i915/i915_utils.h WRITE_ONCE(head->next, first);
head 1092 drivers/gpu/drm/i915/selftests/i915_request.c sz = rq->ring->emit - rq->head;
head 278 drivers/gpu/drm/imx/ipuv3-crtc.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 106 drivers/gpu/drm/mga/mga_dma.c u32 head, tail;
head 141 drivers/gpu/drm/mga/mga_dma.c head = MGA_READ(MGA_PRIMADDRESS);
head 143 drivers/gpu/drm/mga/mga_dma.c if (head <= tail)
head 146 drivers/gpu/drm/mga/mga_dma.c primary->space = head - tail;
head 148 drivers/gpu/drm/mga/mga_dma.c DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
head 161 drivers/gpu/drm/mga/mga_dma.c u32 head, tail;
head 179 drivers/gpu/drm/mga/mga_dma.c head = MGA_READ(MGA_PRIMADDRESS);
head 181 drivers/gpu/drm/mga/mga_dma.c if (head == dev_priv->primary->offset)
head 184 drivers/gpu/drm/mga/mga_dma.c primary->space = head - dev_priv->primary->offset;
head 186 drivers/gpu/drm/mga/mga_dma.c DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
head 202 drivers/gpu/drm/mga/mga_dma.c u32 head = dev_priv->primary->offset;
head 209 drivers/gpu/drm/mga/mga_dma.c MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);
head 235 drivers/gpu/drm/mga/mga_dma.c for (entry = dev_priv->head->next; entry; entry = entry->next) {
head 237 drivers/gpu/drm/mga/mga_dma.c entry, entry->buf->idx, entry->age.head,
head 238 drivers/gpu/drm/mga/mga_dma.c (unsigned long)(entry->age.head - dev_priv->primary->offset));
head 253 drivers/gpu/drm/mga/mga_dma.c dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
head 254 drivers/gpu/drm/mga/mga_dma.c if (dev_priv->head == NULL)
head 257 drivers/gpu/drm/mga/mga_dma.c SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
head 267 drivers/gpu/drm/mga/mga_dma.c entry->next = dev_priv->head->next;
head 268 drivers/gpu/drm/mga/mga_dma.c entry->prev = dev_priv->head;
head 272 drivers/gpu/drm/mga/mga_dma.c if (dev_priv->head->next != NULL)
head 273 drivers/gpu/drm/mga/mga_dma.c dev_priv->head->next->prev = entry;
head 281 drivers/gpu/drm/mga/mga_dma.c dev_priv->head->next = entry;
head 294 drivers/gpu/drm/mga/mga_dma.c entry = dev_priv->head;
head 301 drivers/gpu/drm/mga/mga_dma.c dev_priv->head = dev_priv->tail = NULL;
head 328 drivers/gpu/drm/mga/mga_dma.c u32 head, wrap;
head 331 drivers/gpu/drm/mga/mga_dma.c head = MGA_READ(MGA_PRIMADDRESS);
head 335 drivers/gpu/drm/mga/mga_dma.c tail->age.head ?
head 336 drivers/gpu/drm/mga/mga_dma.c (unsigned long)(tail->age.head - dev_priv->primary->offset) : 0,
head 339 drivers/gpu/drm/mga/mga_dma.c (unsigned long)(head - dev_priv->primary->offset), wrap);
head 341 drivers/gpu/drm/mga/mga_dma.c if (TEST_AGE(&tail->age, head, wrap)) {
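The mga_dma.c freelist above is not a list_head list at all: dev_priv->head is a permanently allocated sentinel node, and a released buffer is re-inserted immediately behind it, as the prev = head->next; head->next = entry; entry->prev = head lines show. A hypothetical standalone sketch of that sentinel insertion, with simplified node contents:

#include <stdio.h>

struct freelist {
	int id;                       /* 0 marks the sentinel */
	struct freelist *next, *prev;
};

/* Insert entry directly after the sentinel, mirroring the hits above. */
static void freelist_put(struct freelist *head, struct freelist *entry)
{
	struct freelist *prev = head->next;

	head->next = entry;
	entry->prev = head;
	entry->next = prev;
	if (prev)
		prev->prev = entry;
}

int main(void)
{
	struct freelist head = { 0, NULL, NULL };   /* sentinel node */
	struct freelist a = { 1, NULL, NULL }, b = { 2, NULL, NULL };

	freelist_put(&head, &a);
	freelist_put(&head, &b);
	for (struct freelist *e = head.next; e; e = e->next)
		printf("buf %d\n", e->id);          /* prints 2 then 1 */
	return 0;
}

The sentinel means insertion never has to special-case an empty list; the empty state is simply head.next == NULL.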
head 359 drivers/gpu/drm/mga/mga_dma.c drm_mga_freelist_t *head, *entry, *prev;
head 362 drivers/gpu/drm/mga/mga_dma.c (unsigned long)(buf_priv->list_entry->age.head -
head 367 drivers/gpu/drm/mga/mga_dma.c head = dev_priv->head;
head 369 drivers/gpu/drm/mga/mga_dma.c if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
head 376 drivers/gpu/drm/mga/mga_dma.c prev = head->next;
head 377 drivers/gpu/drm/mga/mga_dma.c head->next = entry;
head 379 drivers/gpu/drm/mga/mga_dma.c entry->prev = head;
head 569 drivers/gpu/drm/mga/mga_dma.c list_for_each_entry(_entry, &dev->maplist, head) {
head 930 drivers/gpu/drm/mga/mga_dma.c dev_priv->sarea_priv->last_frame.head = 0;
head 1006 drivers/gpu/drm/mga/mga_dma.c if (dev_priv->head != NULL)
head 96 drivers/gpu/drm/mga/mga_drv.h drm_mga_freelist_t *head;
head 358 drivers/gpu/drm/mga/mga_drv.h (age)->head = h; \
head 364 drivers/gpu/drm/mga/mga_drv.h (age)->head < h))
head 370 drivers/gpu/drm/mga/mga_drv.h entry->age.head = (dev_priv->prim.tail + \
head 374 drivers/gpu/drm/mga/mga_drv.h entry->age.head = 0; \
head 579 drivers/gpu/drm/mga/mga_state.c sarea_priv->last_frame.head = dev_priv->prim.tail;
head 1065 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
head 515 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c list_for_each_entry(cur_mode, &connector->modes, head) {
head 976 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c list_for_each_entry(conn_iter, connector_list, head)
head 382 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 161 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c list_for_each_entry(crtc, &config->crtc_list, head) {
head 92 drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c list_for_each_entry(connector, &dev->mode_config.connector_list, head)
head 126 drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 710 drivers/gpu/drm/msm/dsi/dsi_manager.c list_for_each_entry(connector, connector_list, head) {
head 57 drivers/gpu/drm/msm/edp/edp_bridge.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 148 drivers/gpu/drm/msm/msm_debugfs.c list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
head 71 drivers/gpu/drm/msm/msm_rd.c (CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
head 73 drivers/gpu/drm/msm/msm_rd.c (CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))
head 76 drivers/gpu/drm/msm/msm_rd.c (CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
head 78 drivers/gpu/drm/msm/msm_rd.c (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))
head 107 drivers/gpu/drm/msm/msm_rd.c char *fptr = &fifo->buf[fifo->head];
head 121 drivers/gpu/drm/msm/msm_rd.c smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
head 256 drivers/gpu/drm/nouveau/dispnv04/crtc.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 468 drivers/gpu/drm/nouveau/dispnv04/crtc.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 684 drivers/gpu/drm/nouveau/dispnv04/crtc.c int head = nv_crtc->index;
head 685 drivers/gpu/drm/nouveau/dispnv04/crtc.c uint8_t saved_cr21 = nv04_display(dev)->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
head 688 drivers/gpu/drm/nouveau/dispnv04/crtc.c NVSetOwner(crtc->dev, head);
head 690 drivers/gpu/drm/nouveau/dispnv04/crtc.c nouveau_hw_load_state(crtc->dev, head, &nv04_display(dev)->saved_reg);
head 691 drivers/gpu/drm/nouveau/dispnv04/crtc.c nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
head 1034 drivers/gpu/drm/nouveau/dispnv04/crtc.c struct list_head head;
head 1059 drivers/gpu/drm/nouveau/dispnv04/crtc.c s = list_first_entry(&fctx->flip, struct nv04_page_flip_state, head);
head 1067 drivers/gpu/drm/nouveau/dispnv04/crtc.c list_del(&s->head);
head 1109 drivers/gpu/drm/nouveau/dispnv04/crtc.c list_add_tail(&s->head, &fctx->flip);
head 1133 drivers/gpu/drm/nouveau/dispnv04/crtc.c list_del(&s->head);
head 1153 drivers/gpu/drm/nouveau/dispnv04/crtc.c int head = nouveau_crtc(crtc)->index;
head 1208 drivers/gpu/drm/nouveau/dispnv04/crtc.c OUT_RING (chan, head);
head 1215 drivers/gpu/drm/nouveau/dispnv04/crtc.c nouveau_bo_ref(new_bo, &dispnv04->image[head]);
head 244 drivers/gpu/drm/nouveau/dispnv04/dac.c int head;
head 281 drivers/gpu/drm/nouveau/dispnv04/dac.c head = (saved_routput & 0x100) >> 8;
head 284 drivers/gpu/drm/nouveau/dispnv04/dac.c if (!(NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX) & 0xC0))
head 285 drivers/gpu/drm/nouveau/dispnv04/dac.c head ^= 1;
head 288 drivers/gpu/drm/nouveau/dispnv04/dac.c routput = (saved_routput & 0xfffffece) | head << 8;
head 303 drivers/gpu/drm/nouveau/dispnv04/dac.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA,
head 305 drivers/gpu/drm/nouveau/dispnv04/dac.c temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
head 306 drivers/gpu/drm/nouveau/dispnv04/dac.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
head 314 drivers/gpu/drm/nouveau/dispnv04/dac.c temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
head 315 drivers/gpu/drm/nouveau/dispnv04/dac.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
head 317 drivers/gpu/drm/nouveau/dispnv04/dac.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0);
head 367 drivers/gpu/drm/nouveau/dispnv04/dac.c int head = nouveau_crtc(encoder->crtc)->index;
head 371 drivers/gpu/drm/nouveau/dispnv04/dac.c nv04_dfp_disable(dev, head);
head 380 drivers/gpu/drm/nouveau/dispnv04/dac.c int head = nouveau_crtc(encoder->crtc)->index;
head 390 drivers/gpu/drm/nouveau/dispnv04/dac.c head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK);
head 392 drivers/gpu/drm/nouveau/dispnv04/dac.c list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
head 400 drivers/gpu/drm/nouveau/dispnv04/dac.c (otherdac & ~0x0100) | (head ^ 1) << 8);
head 69 drivers/gpu/drm/nouveau/dispnv04/dfp.c int head, bool dl)
head 81 drivers/gpu/drm/nouveau/dispnv04/dfp.c if (head != ramdac)
head 93 drivers/gpu/drm/nouveau/dispnv04/dfp.c void nv04_dfp_disable(struct drm_device *dev, int head)
head 97 drivers/gpu/drm/nouveau/dispnv04/dfp.c if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
head 103 drivers/gpu/drm/nouveau/dispnv04/dfp.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
head 108 drivers/gpu/drm/nouveau/dispnv04/dfp.c crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
head 109 drivers/gpu/drm/nouveau/dispnv04/dfp.c crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] &=
head 135 drivers/gpu/drm/nouveau/dispnv04/dfp.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 171 drivers/gpu/drm/nouveau/dispnv04/dfp.c list_for_each_entry(slave, &dev->mode_config.encoder_list, head) {
head 204 drivers/gpu/drm/nouveau/dispnv04/dfp.c struct nouveau_encoder *nv_encoder, int head)
head 216 drivers/gpu/drm/nouveau/dispnv04/dfp.c if (head)
head 240 drivers/gpu/drm/nouveau/dispnv04/dfp.c state->sel_clk |= (head ? 0x40 : 0x10) << shift;
head 249 drivers/gpu/drm/nouveau/dispnv04/dfp.c int head = nouveau_crtc(encoder->crtc)->index;
head 251 drivers/gpu/drm/nouveau/dispnv04/dfp.c uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
head 252 drivers/gpu/drm/nouveau/dispnv04/dfp.c uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];
head 256 drivers/gpu/drm/nouveau/dispnv04/dfp.c nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
head 262 drivers/gpu/drm/nouveau/dispnv04/dfp.c *cr_lcd |= head ? 0x0 : 0x8;
head 270 drivers/gpu/drm/nouveau/dispnv04/dfp.c NVWriteVgaCrtc(dev, head ^ 1,
head 453 drivers/gpu/drm/nouveau/dispnv04/dfp.c int head = nouveau_crtc(encoder->crtc)->index;
head 457 drivers/gpu/drm/nouveau/dispnv04/dfp.c run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
head 459 drivers/gpu/drm/nouveau/dispnv04/dfp.c call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
head 463 drivers/gpu/drm/nouveau/dispnv04/dfp.c nv04_display(dev)->mode_reg.crtc_reg[head].fp_control =
head 464 drivers/gpu/drm/nouveau/dispnv04/dfp.c NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
head 534 drivers/gpu/drm/nouveau/dispnv04/dfp.c int head = crtc ? nouveau_crtc(crtc)->index :
head 538 drivers/gpu/drm/nouveau/dispnv04/dfp.c call_lvds_script(dev, nv_encoder->dcb, head,
head 544 drivers/gpu/drm/nouveau/dispnv04/dfp.c call_lvds_script(dev, nv_encoder->dcb, head,
head 582 drivers/gpu/drm/nouveau/dispnv04/dfp.c nv_encoder->restore.head =
head 590 drivers/gpu/drm/nouveau/dispnv04/dfp.c int head = nv_encoder->restore.head;
head 597 drivers/gpu/drm/nouveau/dispnv04/dfp.c call_lvds_script(dev, nv_encoder->dcb, head,
head 603 drivers/gpu/drm/nouveau/dispnv04/dfp.c (&nv04_display(dev)->saved_reg.crtc_reg[head].pllvals);
head 605 drivers/gpu/drm/nouveau/dispnv04/dfp.c run_tmds_table(dev, nv_encoder->dcb, head, clock);
head 54 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 64 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 91 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 96 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
head 106 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 118 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 131 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 147 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 171 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
head 174 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(nv_crtc, &dev->mode_config.crtc_list, base.head)
head 258 drivers/gpu/drm/nouveau/dispnv04/disp.c &dev->mode_config.connector_list, head) {
head 266 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 274 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
head 277 drivers/gpu/drm/nouveau/dispnv04/disp.c list_for_each_entry(nv_encoder, &dev->mode_config.encoder_list, base.base.head)
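In the nouveau dispnv04 hits above and below, head is no longer a list member but the CRTC index (0 or 1): saved state lives in crtc_reg[head], and registers are picked with head ? ... : ... ternaries such as pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF. A minimal sketch of that banked-per-head idiom follows; struct crtc_state, vpll_reg() and the register values are hypothetical stand-ins.

#include <stdio.h>

#define REG_VPLL0 0x680508u   /* illustrative per-head register offsets */
#define REG_VPLL1 0x680520u

struct crtc_state { unsigned int pll; };

static struct crtc_state crtc_reg[2];      /* one saved bank per head */

static unsigned int vpll_reg(int head)
{
	return head ? REG_VPLL1 : REG_VPLL0;   /* pick this head's register */
}

int main(void)
{
	for (int head = 0; head < 2; head++) {
		crtc_reg[head].pll = 0x10000u + head;   /* fake saved value */
		printf("head %d: reg %#x val %#x\n",
		       head, vpll_reg(head), crtc_reg[head].pll);
	}
	return 0;
}

Keeping everything indexed by head is what lets the same save/load routines drive either CRTC of a dual-head card.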
head 70 drivers/gpu/drm/nouveau/dispnv04/disp.h int head;
head 111 drivers/gpu/drm/nouveau/dispnv04/disp.h int head, bool dl);
head 112 drivers/gpu/drm/nouveau/dispnv04/disp.h void nv04_dfp_disable(struct drm_device *dev, int head);
head 172 drivers/gpu/drm/nouveau/dispnv04/disp.h init.head = crtc;
head 38 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
head 40 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
head 41 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
head 45 drivers/gpu/drm/nouveau/dispnv04/hw.c NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
head 47 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
head 48 drivers/gpu/drm/nouveau/dispnv04/hw.c return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
head 52 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
head 54 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
head 55 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
head 59 drivers/gpu/drm/nouveau/dispnv04/hw.c NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
head 61 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
head 62 drivers/gpu/drm/nouveau/dispnv04/hw.c return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
head 109 drivers/gpu/drm/nouveau/dispnv04/hw.c NVBlankScreen(struct drm_device *dev, int head, bool blank)
head 114 drivers/gpu/drm/nouveau/dispnv04/hw.c NVSetOwner(dev, head);
head 116 drivers/gpu/drm/nouveau/dispnv04/hw.c seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
head 118 drivers/gpu/drm/nouveau/dispnv04/hw.c NVVgaSeqReset(dev, head, true);
head 120 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
head 122 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
head 123 drivers/gpu/drm/nouveau/dispnv04/hw.c NVVgaSeqReset(dev, head, false);
head 250 drivers/gpu/drm/nouveau/dispnv04/hw.c nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
head 264 drivers/gpu/drm/nouveau/dispnv04/hw.c enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
head 275 drivers/gpu/drm/nouveau/dispnv04/hw.c NV_WARN(drm, "VPLL %d outwith limits, attempting to fix\n", head + 1);
head 377 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(struct drm_device *dev, int head,
head 380 drivers/gpu/drm/nouveau/dispnv04/hw.c crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
head 384 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(struct drm_device *dev, int head,
head 387 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
head 391 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_save_state_ramdac(struct drm_device *dev, int head,
head 395 drivers/gpu/drm/nouveau/dispnv04/hw.c struct nv04_crtc_reg *regp = &state->crtc_reg[head];
head 399 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
head 401 drivers/gpu/drm/nouveau/dispnv04/hw.c nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
head 406 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
head 408 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
head 411 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
head 413 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
head 415 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
head 416 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
head 417 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
head 418 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
head 419 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
head 420 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
head 421 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
head 422 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);
head 426 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
head 427 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
head 431 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
head 433 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
head 434 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
head 438 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
head 439 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
head 440 drivers/gpu/drm/nouveau/dispnv04/hw.c if (!nv_gf4_disp_arch(dev) && head == 0) {
head 446 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
head 447 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);
head 449 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);
head 452 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
head 455 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
head 456 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
head 457 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
head 460 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
head 466 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_load_state_ramdac(struct drm_device *dev, int head,
head 471 drivers/gpu/drm/nouveau/dispnv04/hw.c struct nv04_crtc_reg *regp = &state->crtc_reg[head];
head 472 drivers/gpu/drm/nouveau/dispnv04/hw.c uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
head 476 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
head 483 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
head 485 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
head 488 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
head 490 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
head 492 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
head 493 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
head 494 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
head 495 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
head 496 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
head 497 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
head 498 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
head 499 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);
head 504 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
head 505 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
head 509 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
head 511 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
head 512 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
head 516 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
head 517 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
head 518 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
head 519 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);
head 521 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);
head 524 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
head 527 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
head 528 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
head 529 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
head 532 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head,
head 538 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_save_state_vga(struct drm_device *dev, int head,
head 541 drivers/gpu/drm/nouveau/dispnv04/hw.c struct nv04_crtc_reg *regp = &state->crtc_reg[head];
head 544 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);
head 547 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, i);
head 549 drivers/gpu/drm/nouveau/dispnv04/hw.c NVSetEnablePalette(dev, head, true);
head 551 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
head 552 drivers/gpu/drm/nouveau/dispnv04/hw.c NVSetEnablePalette(dev, head, false);
head 555 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->Graphics[i] = NVReadVgaGr(dev, head, i);
head 558 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
head 562 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_load_state_vga(struct drm_device *dev, int head,
head 565 drivers/gpu/drm/nouveau/dispnv04/hw.c struct nv04_crtc_reg *regp = &state->crtc_reg[head];
head 568 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);
head 571 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);
head 573 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_lock_vga_crtc_base(dev, head, false);
head 575 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, i);
head 576 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_lock_vga_crtc_base(dev, head, true);
head 579 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteVgaGr(dev, head, i, regp->Graphics[i]);
head 581 drivers/gpu/drm/nouveau/dispnv04/hw.c NVSetEnablePalette(dev, head, true);
head 583 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
head 584 drivers/gpu/drm/nouveau/dispnv04/hw.c NVSetEnablePalette(dev, head, false);
head 588 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_save_state_ext(struct drm_device *dev, int head,
head 592 drivers/gpu/drm/nouveau/dispnv04/hw.c struct nv04_crtc_reg *regp = &state->crtc_reg[head];
head 595 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
head 596 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
head 597 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
head 598 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
head 599 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
head 600 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
head 601 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
head 603 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
head 604 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
head 605 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
head 608 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
head 611 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, 0x9f);
head 613 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
head 614 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
head 615 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
head 616 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
head 617 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
head 620 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
head 621 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
head 624 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
head 627 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
head 630 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
head 631 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
head 634 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);
head 636 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
head 637 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
head 639 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
head 640 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
head 641 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
head 642 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
head 646 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_42);
head 647 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
head 648 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
head 651 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
head 652 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
head 653 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);
head 655 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
head 656 drivers/gpu/drm/nouveau/dispnv04/hw.c rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
head 659 drivers/gpu/drm/nouveau/dispnv04/hw.c regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
head 663 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_load_state_ext(struct drm_device *dev, int head,
head 668 drivers/gpu/drm/nouveau/dispnv04/hw.c struct nv04_crtc_reg *regp = &state->crtc_reg[head];
head 678 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
head 690 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
head 691 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
head 692 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
head 695 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
head 698 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
head 700 drivers/gpu/drm/nouveau/dispnv04/hw.c reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
head 702 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
head 704 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
head 708 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);
head 710 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
head 711 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
head 712
drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX); head 713 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX); head 714 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX); head 715 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX); head 716 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX); head 717 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); head 718 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); head 721 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_47); head 724 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, 0x9f); head 726 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_49); head 727 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); head 728 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); head 729 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); head 731 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_fix_nv40_hw_cursor(dev, head); head 732 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); head 734 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); head 735 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); head 737 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); head 738 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); head 739 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); head 740 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY); head 757 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_42); head 758 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_53); head 759 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_54); head 762 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]); head 763 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_59); head 764 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_5B); head 766 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_85); head 767 drivers/gpu/drm/nouveau/dispnv04/hw.c wr_cio_state(dev, head, regp, NV_CIO_CRE_86); head 770 drivers/gpu/drm/nouveau/dispnv04/hw.c NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); head 774 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_save_state_palette(struct drm_device *dev, int head, head 778 drivers/gpu/drm/nouveau/dispnv04/hw.c int head_offset = head * NV_PRMDIO_SIZE, i; head 785 drivers/gpu/drm/nouveau/dispnv04/hw.c state->crtc_reg[head].DAC[i] = nvif_rd08(device, head 789 drivers/gpu/drm/nouveau/dispnv04/hw.c NVSetEnablePalette(dev, head, false); head 793 drivers/gpu/drm/nouveau/dispnv04/hw.c nouveau_hw_load_state_palette(struct drm_device *dev, int head, head 797 drivers/gpu/drm/nouveau/dispnv04/hw.c int head_offset = head * NV_PRMDIO_SIZE, i; head 805 drivers/gpu/drm/nouveau/dispnv04/hw.c 
state->crtc_reg[head].DAC[i]); head 808 drivers/gpu/drm/nouveau/dispnv04/hw.c NVSetEnablePalette(dev, head, false); head 811 drivers/gpu/drm/nouveau/dispnv04/hw.c void nouveau_hw_save_state(struct drm_device *dev, int head, head 818 drivers/gpu/drm/nouveau/dispnv04/hw.c nouveau_hw_fix_bad_vpll(dev, head); head 819 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_save_state_ramdac(dev, head, state); head 820 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_save_state_vga(dev, head, state); head 821 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_save_state_palette(dev, head, state); head 822 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_save_state_ext(dev, head, state); head 825 drivers/gpu/drm/nouveau/dispnv04/hw.c void nouveau_hw_load_state(struct drm_device *dev, int head, head 828 drivers/gpu/drm/nouveau/dispnv04/hw.c NVVgaProtect(dev, head, true); head 829 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_load_state_ramdac(dev, head, state); head 830 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_load_state_ext(dev, head, state); head 831 drivers/gpu/drm/nouveau/dispnv04/hw.c nouveau_hw_load_state_palette(dev, head, state); head 832 drivers/gpu/drm/nouveau/dispnv04/hw.c nv_load_state_vga(dev, head, state); head 833 drivers/gpu/drm/nouveau/dispnv04/hw.c NVVgaProtect(dev, head, false); head 37 drivers/gpu/drm/nouveau/dispnv04/hw.h void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value); head 38 drivers/gpu/drm/nouveau/dispnv04/hw.h uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index); head 39 drivers/gpu/drm/nouveau/dispnv04/hw.h void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value); head 40 drivers/gpu/drm/nouveau/dispnv04/hw.h uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index); head 42 drivers/gpu/drm/nouveau/dispnv04/hw.h void NVBlankScreen(struct drm_device *, int head, bool blank); head 48 drivers/gpu/drm/nouveau/dispnv04/hw.h void nouveau_hw_save_state(struct drm_device *, int head, head 50 drivers/gpu/drm/nouveau/dispnv04/hw.h void nouveau_hw_load_state(struct drm_device *, int head, head 52 drivers/gpu/drm/nouveau/dispnv04/hw.h void nouveau_hw_load_state_palette(struct drm_device *, int head, head 60 drivers/gpu/drm/nouveau/dispnv04/hw.h int head, uint32_t reg) head 64 drivers/gpu/drm/nouveau/dispnv04/hw.h if (head) head 71 drivers/gpu/drm/nouveau/dispnv04/hw.h int head, uint32_t reg, uint32_t val) head 74 drivers/gpu/drm/nouveau/dispnv04/hw.h if (head) head 80 drivers/gpu/drm/nouveau/dispnv04/hw.h int head, uint32_t reg) head 84 drivers/gpu/drm/nouveau/dispnv04/hw.h if (head) head 91 drivers/gpu/drm/nouveau/dispnv04/hw.h int head, uint32_t reg, uint32_t val) head 94 drivers/gpu/drm/nouveau/dispnv04/hw.h if (head) head 120 drivers/gpu/drm/nouveau/dispnv04/hw.h int head, uint8_t index, uint8_t value) head 123 drivers/gpu/drm/nouveau/dispnv04/hw.h nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); head 124 drivers/gpu/drm/nouveau/dispnv04/hw.h nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); head 128 drivers/gpu/drm/nouveau/dispnv04/hw.h int head, uint8_t index) head 132 drivers/gpu/drm/nouveau/dispnv04/hw.h nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); head 133 drivers/gpu/drm/nouveau/dispnv04/hw.h val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); head 152 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value) head 154 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaCrtc(dev, head, 
NV_CIO_CRE_57, index); head 155 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value); head 158 drivers/gpu/drm/nouveau/dispnv04/hw.h static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index) head 160 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index); head 161 drivers/gpu/drm/nouveau/dispnv04/hw.h return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58); head 165 drivers/gpu/drm/nouveau/dispnv04/hw.h int head, uint32_t reg) head 173 drivers/gpu/drm/nouveau/dispnv04/hw.h if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) head 181 drivers/gpu/drm/nouveau/dispnv04/hw.h int head, uint32_t reg, uint8_t value) head 188 drivers/gpu/drm/nouveau/dispnv04/hw.h if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) head 194 drivers/gpu/drm/nouveau/dispnv04/hw.h static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) head 197 drivers/gpu/drm/nouveau/dispnv04/hw.h nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); head 198 drivers/gpu/drm/nouveau/dispnv04/hw.h nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); head 201 drivers/gpu/drm/nouveau/dispnv04/hw.h static inline bool NVGetEnablePalette(struct drm_device *dev, int head) head 204 drivers/gpu/drm/nouveau/dispnv04/hw.h nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); head 205 drivers/gpu/drm/nouveau/dispnv04/hw.h return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); head 209 drivers/gpu/drm/nouveau/dispnv04/hw.h int head, uint8_t index, uint8_t value) head 212 drivers/gpu/drm/nouveau/dispnv04/hw.h if (NVGetEnablePalette(dev, head)) head 217 drivers/gpu/drm/nouveau/dispnv04/hw.h nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); head 218 drivers/gpu/drm/nouveau/dispnv04/hw.h nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); head 219 drivers/gpu/drm/nouveau/dispnv04/hw.h nvif_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value); head 223 drivers/gpu/drm/nouveau/dispnv04/hw.h int head, uint8_t index) head 227 drivers/gpu/drm/nouveau/dispnv04/hw.h if (NVGetEnablePalette(dev, head)) head 232 drivers/gpu/drm/nouveau/dispnv04/hw.h nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); head 233 drivers/gpu/drm/nouveau/dispnv04/hw.h nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); head 234 drivers/gpu/drm/nouveau/dispnv04/hw.h val = nvif_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); head 238 drivers/gpu/drm/nouveau/dispnv04/hw.h static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start) head 240 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 
0x1 : 0x3); head 243 drivers/gpu/drm/nouveau/dispnv04/hw.h static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect) head 245 drivers/gpu/drm/nouveau/dispnv04/hw.h uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX); head 248 drivers/gpu/drm/nouveau/dispnv04/hw.h NVVgaSeqReset(dev, head, true); head 249 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20); head 252 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); /* reenable display */ head 253 drivers/gpu/drm/nouveau/dispnv04/hw.h NVVgaSeqReset(dev, head, false); head 255 drivers/gpu/drm/nouveau/dispnv04/hw.h NVSetEnablePalette(dev, head, protect); head 272 drivers/gpu/drm/nouveau/dispnv04/hw.h nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock) head 274 drivers/gpu/drm/nouveau/dispnv04/hw.h uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX); head 281 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11); head 287 drivers/gpu/drm/nouveau/dispnv04/hw.h nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock) head 303 drivers/gpu/drm/nouveau/dispnv04/hw.h cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa; head 305 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21); head 341 drivers/gpu/drm/nouveau/dispnv04/hw.h nv_fix_nv40_hw_cursor(struct drm_device *dev, int head) head 348 drivers/gpu/drm/nouveau/dispnv04/hw.h uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS); head 349 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos); head 353 drivers/gpu/drm/nouveau/dispnv04/hw.h nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) head 357 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteCRTC(dev, head, NV_PCRTC_START, offset); head 364 drivers/gpu/drm/nouveau/dispnv04/hw.h int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX); head 366 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX, head 372 drivers/gpu/drm/nouveau/dispnv04/hw.h nv_show_cursor(struct drm_device *dev, int head, bool show) head 376 drivers/gpu/drm/nouveau/dispnv04/hw.h &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX]; head 382 drivers/gpu/drm/nouveau/dispnv04/hw.h NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); head 385 drivers/gpu/drm/nouveau/dispnv04/hw.h nv_fix_nv40_hw_cursor(dev, head); head 546 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c int head = nouveau_crtc(encoder->crtc)->index; head 547 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head]; head 582 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START, head 584 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END, head 586 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START, head 588 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END, head 590 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1); head 88 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c int head = nouveau_crtc(encoder->crtc)->index; head 89 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX); head 91 
drivers/gpu/drm/nouveau/dispnv04/tvnv04.c state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK : head 97 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A); head 105 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c static void nv04_tv_bind(struct drm_device *dev, int head, bool bind) head 107 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c struct nv04_crtc_reg *state = &nv04_display(dev)->mode_reg.crtc_reg[head]; head 116 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX, head 118 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49, head 120 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, head 127 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c int head = nouveau_crtc(encoder->crtc)->index; head 132 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c nv04_dfp_disable(dev, head); head 135 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c nv04_tv_bind(dev, head ^ 1, false); head 137 drivers/gpu/drm/nouveau/dispnv04/tvnv04.c nv04_tv_bind(dev, head, true); head 54 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c int head; head 62 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c head = (dacclk & 0x100) >> 8; head 67 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); head 68 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); head 69 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); head 70 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); head 72 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c); head 73 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c ctv_14 = NVReadRAMDAC(dev, head, 0x680c14); head 74 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); head 80 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); head 81 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); head 82 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183); head 83 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, head 98 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20); head 99 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16); head 102 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff); head 108 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff); head 114 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c); head 115 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, 0x680c14, ctv_14); head 116 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c); head 119 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control); head 120 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); head 121 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); head 122 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); head 402 
drivers/gpu/drm/nouveau/dispnv04/tvnv17.c int head = nouveau_crtc(encoder->crtc)->index; head 403 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c uint8_t *cr_lcd = &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[ head 411 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c nv04_dfp_disable(dev, head); head 418 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { head 424 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c nv04_dfp_get_bound_head(dev, dcb) == head) { head 425 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c nv04_dfp_bind_head(dev, dcb, head ^ 1, head 433 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c *cr_lcd |= 0x1 | (head ? 0x0 : 0x8); head 444 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c if (head) head 463 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c int head = nouveau_crtc(encoder->crtc)->index; head 464 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head]; head 477 drivers/gpu/drm/nouveau/dispnv04/tvnv17.c if (head) head 27 drivers/gpu/drm/nouveau/dispnv50/base.c nv50_base_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw) head 52 drivers/gpu/drm/nouveau/dispnv50/base.c return bases[cid].new(drm, head, bases[cid].oclass, pwndw); head 7 drivers/gpu/drm/nouveau/dispnv50/base.h struct nouveau_drm *, int head, s32 oclass, head 30 drivers/gpu/drm/nouveau/dispnv50/base.h int nv50_base_new(struct nouveau_drm *, int head, struct nv50_wndw **); head 259 drivers/gpu/drm/nouveau/dispnv50/base507c.c struct nouveau_drm *drm, int head, s32 oclass, u32 interlock_data, head 263 drivers/gpu/drm/nouveau/dispnv50/base507c.c .head = head, head 270 drivers/gpu/drm/nouveau/dispnv50/base507c.c "base", head, format, BIT(head), head 276 drivers/gpu/drm/nouveau/dispnv50/base507c.c &oclass, head, &args, sizeof(args), head 299 drivers/gpu/drm/nouveau/dispnv50/base507c.c base507c_new(struct nouveau_drm *drm, int head, s32 oclass, head 302 drivers/gpu/drm/nouveau/dispnv50/base507c.c return base507c_new_(&base507c, base507c_format, drm, head, oclass, head 303 drivers/gpu/drm/nouveau/dispnv50/base507c.c 0x00000002 << (head * 8), pwndw); head 75 drivers/gpu/drm/nouveau/dispnv50/base827c.c base827c_new(struct nouveau_drm *drm, int head, s32 oclass, head 78 drivers/gpu/drm/nouveau/dispnv50/base827c.c return base507c_new_(&base827c, base507c_format, drm, head, oclass, head 79 drivers/gpu/drm/nouveau/dispnv50/base827c.c 0x00000002 << (head * 8), pwndw); head 171 drivers/gpu/drm/nouveau/dispnv50/base907c.c base907c_new(struct nouveau_drm *drm, int head, s32 oclass, head 174 drivers/gpu/drm/nouveau/dispnv50/base907c.c return base507c_new_(&base907c, base507c_format, drm, head, oclass, head 175 drivers/gpu/drm/nouveau/dispnv50/base907c.c 0x00000002 << (head * 4), pwndw); head 45 drivers/gpu/drm/nouveau/dispnv50/base917c.c base917c_new(struct nouveau_drm *drm, int head, s32 oclass, head 48 drivers/gpu/drm/nouveau/dispnv50/base917c.c return base507c_new_(&base907c, base917c_format, drm, head, oclass, head 49 drivers/gpu/drm/nouveau/dispnv50/base917c.c 0x00000002 << (head * 4), pwndw); head 21 drivers/gpu/drm/nouveau/dispnv50/core.h const struct nv50_head_func *head; head 81 drivers/gpu/drm/nouveau/dispnv50/core507d.c .head = &head507d, head 31 drivers/gpu/drm/nouveau/dispnv50/core827d.c .head = &head827d, head 31 drivers/gpu/drm/nouveau/dispnv50/core907d.c .head = &head907d, head 31 drivers/gpu/drm/nouveau/dispnv50/core917d.c .head = &head917d, head 102 drivers/gpu/drm/nouveau/dispnv50/corec37d.c .head = &headc37d, head 53 
drivers/gpu/drm/nouveau/dispnv50/corec57d.c .head = &headc57d, head 27 drivers/gpu/drm/nouveau/dispnv50/curs.c nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw) head 52 drivers/gpu/drm/nouveau/dispnv50/curs.c return curses[cid].new(drm, head, curses[cid].oclass, pwndw); head 7 drivers/gpu/drm/nouveau/dispnv50/curs.h int head, s32 oclass, u32 interlock_data, head 13 drivers/gpu/drm/nouveau/dispnv50/curs.h int nv50_curs_new(struct nouveau_drm *, int head, struct nv50_wndw **); head 74 drivers/gpu/drm/nouveau/dispnv50/curs507a.c struct nv50_head *head = nv50_head(asyw->state.crtc); head 88 drivers/gpu/drm/nouveau/dispnv50/curs507a.c ret = head->func->curs_layout(head, asyw, asyh); head 92 drivers/gpu/drm/nouveau/dispnv50/curs507a.c return head->func->curs_format(head, asyw, asyh); head 110 drivers/gpu/drm/nouveau/dispnv50/curs507a.c int head, s32 oclass, u32 interlock_data, head 114 drivers/gpu/drm/nouveau/dispnv50/curs507a.c .head = head, head 121 drivers/gpu/drm/nouveau/dispnv50/curs507a.c "curs", head, curs507a_format, BIT(head), head 140 drivers/gpu/drm/nouveau/dispnv50/curs507a.c curs507a_new(struct nouveau_drm *drm, int head, s32 oclass, head 143 drivers/gpu/drm/nouveau/dispnv50/curs507a.c return curs507a_new_(&curs507a, drm, head, oclass, head 144 drivers/gpu/drm/nouveau/dispnv50/curs507a.c 0x00000001 << (head * 8), pwndw); head 25 drivers/gpu/drm/nouveau/dispnv50/curs907a.c curs907a_new(struct nouveau_drm *drm, int head, s32 oclass, head 28 drivers/gpu/drm/nouveau/dispnv50/curs907a.c return curs507a_new_(&curs507a, drm, head, oclass, head 29 drivers/gpu/drm/nouveau/dispnv50/curs907a.c 0x00000001 << (head * 4), pwndw); head 45 drivers/gpu/drm/nouveau/dispnv50/cursc37a.c cursc37a_new(struct nouveau_drm *drm, int head, s32 oclass, head 48 drivers/gpu/drm/nouveau/dispnv50/cursc37a.c return curs507a_new_(&cursc37a, drm, head, oclass, head 49 drivers/gpu/drm/nouveau/dispnv50/cursc37a.c 0x00000001 << head, pwndw); head 63 drivers/gpu/drm/nouveau/dispnv50/disp.c struct list_head head; head 82 drivers/gpu/drm/nouveau/dispnv50/disp.c const s32 *oclass, u8 head, void *data, u32 size, head 135 drivers/gpu/drm/nouveau/dispnv50/disp.c const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf, head 165 drivers/gpu/drm/nouveau/dispnv50/disp.c ret = nv50_chan_create(device, disp, oclass, head, data, size, head 682 drivers/gpu/drm/nouveau/dispnv50/disp.c struct nv50_head *head; head 729 drivers/gpu/drm/nouveau/dispnv50/disp.c msto->head = NULL; head 747 drivers/gpu/drm/nouveau/dispnv50/disp.c (0x0100 << msto->head->base.index), head 764 drivers/gpu/drm/nouveau/dispnv50/disp.c msto->encoder.name, msto->head->base.base.name, head 836 drivers/gpu/drm/nouveau/dispnv50/disp.c struct nv50_head *head = nv50_head(encoder->crtc); head 837 drivers/gpu/drm/nouveau/dispnv50/disp.c struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state); head 872 drivers/gpu/drm/nouveau/dispnv50/disp.c mstm->outp->update(mstm->outp, head->base.index, armh, proto, head 875 drivers/gpu/drm/nouveau/dispnv50/disp.c msto->head = head; head 889 drivers/gpu/drm/nouveau/dispnv50/disp.c mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0); head 943 drivers/gpu/drm/nouveau/dispnv50/disp.c struct nv50_head *head = nv50_head(connector_state->crtc); head 946 drivers/gpu/drm/nouveau/dispnv50/disp.c return &mstc->mstm->msto[head->base.index]->encoder; head 1407 drivers/gpu/drm/nouveau/dispnv50/disp.c nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head, head 1414 
drivers/gpu/drm/nouveau/dispnv50/disp.c nv_encoder->ctrl &= ~BIT(head); head 1419 drivers/gpu/drm/nouveau/dispnv50/disp.c nv_encoder->ctrl |= BIT(head); head 1850 drivers/gpu/drm/nouveau/dispnv50/disp.c struct nv50_head *head = nv50_head(crtc); head 1861 drivers/gpu/drm/nouveau/dispnv50/disp.c nv50_head_flush_clr(head, asyh, atom->flush_disable); head 1880 drivers/gpu/drm/nouveau/dispnv50/disp.c list_for_each_entry(outp, &atom->outp, head) { head 1911 drivers/gpu/drm/nouveau/dispnv50/disp.c list_for_each_entry_safe(outp, outt, &atom->outp, head) { head 1926 drivers/gpu/drm/nouveau/dispnv50/disp.c list_del(&outp->head); head 1933 drivers/gpu/drm/nouveau/dispnv50/disp.c struct nv50_head *head = nv50_head(crtc); head 1939 drivers/gpu/drm/nouveau/dispnv50/disp.c nv50_head_flush_set(head, asyh); head 2092 drivers/gpu/drm/nouveau/dispnv50/disp.c list_for_each_entry(outp, &atom->outp, head) { head 2101 drivers/gpu/drm/nouveau/dispnv50/disp.c list_add(&outp->head, &atom->outp); head 2207 drivers/gpu/drm/nouveau/dispnv50/disp.c list_for_each_entry_safe(outp, outt, &atom->outp, head) { head 2208 drivers/gpu/drm/nouveau/dispnv50/disp.c list_del(&outp->head); head 2265 drivers/gpu/drm/nouveau/dispnv50/disp.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { head 2282 drivers/gpu/drm/nouveau/dispnv50/disp.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { head 2414 drivers/gpu/drm/nouveau/dispnv50/disp.c list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { head 72 drivers/gpu/drm/nouveau/dispnv50/disp.h const s32 *oclass, u8 head, void *data, u32 size, head 34 drivers/gpu/drm/nouveau/dispnv50/head.c nv50_head_flush_clr(struct nv50_head *head, head 40 drivers/gpu/drm/nouveau/dispnv50/head.c if (clr.olut) head->func->olut_clr(head); head 41 drivers/gpu/drm/nouveau/dispnv50/head.c if (clr.core) head->func->core_clr(head); head 42 drivers/gpu/drm/nouveau/dispnv50/head.c if (clr.curs) head->func->curs_clr(head); head 46 drivers/gpu/drm/nouveau/dispnv50/head.c nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 48 drivers/gpu/drm/nouveau/dispnv50/head.c if (asyh->set.view ) head->func->view (head, asyh); head 49 drivers/gpu/drm/nouveau/dispnv50/head.c if (asyh->set.mode ) head->func->mode (head, asyh); head 50 drivers/gpu/drm/nouveau/dispnv50/head.c if (asyh->set.core ) head->func->core_set(head, asyh); head 52 drivers/gpu/drm/nouveau/dispnv50/head.c asyh->olut.offset = nv50_lut_load(&head->olut, head 56 drivers/gpu/drm/nouveau/dispnv50/head.c head->func->olut_set(head, asyh); head 58 drivers/gpu/drm/nouveau/dispnv50/head.c if (asyh->set.curs ) head->func->curs_set(head, asyh); head 59 drivers/gpu/drm/nouveau/dispnv50/head.c if (asyh->set.base ) head->func->base (head, asyh); head 60 drivers/gpu/drm/nouveau/dispnv50/head.c if (asyh->set.ovly ) head->func->ovly (head, asyh); head 61 drivers/gpu/drm/nouveau/dispnv50/head.c if (asyh->set.dither ) head->func->dither (head, asyh); head 62 drivers/gpu/drm/nouveau/dispnv50/head.c if (asyh->set.procamp) head->func->procamp (head, asyh); head 63 drivers/gpu/drm/nouveau/dispnv50/head.c if (asyh->set.or ) head->func->or (head, asyh); head 211 drivers/gpu/drm/nouveau/dispnv50/head.c nv50_head_atomic_check_lut(struct nv50_head *head, head 214 drivers/gpu/drm/nouveau/dispnv50/head.c struct nv50_disp *disp = nv50_disp(head->base.base.dev); head 232 drivers/gpu/drm/nouveau/dispnv50/head.c if (!olut && !head->func->olut_identity) { head 239 drivers/gpu/drm/nouveau/dispnv50/head.c 
head->func->olut(head, asyh); head 244 drivers/gpu/drm/nouveau/dispnv50/head.c nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh) head 289 drivers/gpu/drm/nouveau/dispnv50/head.c asyh->set.or = head->func->or != NULL; head 297 drivers/gpu/drm/nouveau/dispnv50/head.c struct nv50_head *head = nv50_head(crtc); head 325 drivers/gpu/drm/nouveau/dispnv50/head.c asyh->set.or = head->func->or != NULL; head 329 drivers/gpu/drm/nouveau/dispnv50/head.c nv50_head_atomic_check_mode(head, asyh); head 333 drivers/gpu/drm/nouveau/dispnv50/head.c int ret = nv50_head_atomic_check_lut(head, asyh); head 349 drivers/gpu/drm/nouveau/dispnv50/head.c if (head->func->core_calc) { head 350 drivers/gpu/drm/nouveau/dispnv50/head.c head->func->core_calc(head, asyh); head 459 drivers/gpu/drm/nouveau/dispnv50/head.c struct nv50_head *head = nv50_head(crtc); head 460 drivers/gpu/drm/nouveau/dispnv50/head.c nv50_lut_fini(&head->olut); head 462 drivers/gpu/drm/nouveau/dispnv50/head.c kfree(head); head 481 drivers/gpu/drm/nouveau/dispnv50/head.c struct nv50_head *head; head 486 drivers/gpu/drm/nouveau/dispnv50/head.c head = kzalloc(sizeof(*head), GFP_KERNEL); head 487 drivers/gpu/drm/nouveau/dispnv50/head.c if (!head) head 490 drivers/gpu/drm/nouveau/dispnv50/head.c head->func = disp->core->func->head; head 491 drivers/gpu/drm/nouveau/dispnv50/head.c head->base.index = index; head 494 drivers/gpu/drm/nouveau/dispnv50/head.c ret = nv50_base_new(drm, head->base.index, &base); head 495 drivers/gpu/drm/nouveau/dispnv50/head.c ret = nv50_ovly_new(drm, head->base.index, &ovly); head 498 drivers/gpu/drm/nouveau/dispnv50/head.c head->base.index * 2 + 0, &base); head 500 drivers/gpu/drm/nouveau/dispnv50/head.c head->base.index * 2 + 1, &ovly); head 503 drivers/gpu/drm/nouveau/dispnv50/head.c ret = nv50_curs_new(drm, head->base.index, &curs); head 505 drivers/gpu/drm/nouveau/dispnv50/head.c kfree(head); head 509 drivers/gpu/drm/nouveau/dispnv50/head.c crtc = &head->base.base; head 511 drivers/gpu/drm/nouveau/dispnv50/head.c &nv50_head_func, "head-%d", head->base.index); head 519 drivers/gpu/drm/nouveau/dispnv50/head.c if (head->func->olut_set) { head 520 drivers/gpu/drm/nouveau/dispnv50/head.c ret = nv50_lut_init(disp, &drm->client.mmu, &head->olut); head 26 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh) head 28 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 31 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1); head 39 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_dither(struct nv50_head *head, struct nv50_head_atom *asyh) head 41 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 44 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1); head 53 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh) head 55 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 73 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x0904 + head->base.index * 0x400, 1); head 80 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_base(struct nv50_head *head, struct nv50_head_atom *asyh) head 82 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = 
&nv50_disp(head->base.base.dev)->core->chan; head 100 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x0900 + head->base.index * 0x400, 1); head 107 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_curs_clr(struct nv50_head *head) head 109 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 112 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x0880 + head->base.index * 0x400, 1); head 119 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 121 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 124 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x0880 + head->base.index * 0x400, 2); head 133 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw, head 146 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw, head 159 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_core_clr(struct nv50_head *head) head 161 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 164 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x0874 + head->base.index * 0x400, 1); head 171 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 173 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 176 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x0860 + head->base.index * 0x400, 1); head 178 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x0868 + head->base.index * 0x400, 4); head 187 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1); head 201 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_core_calc(struct nv50_head *head, struct nv50_head_atom *asyh) head 203 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_disp *disp = nv50_disp(head->base.base.dev); head 233 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_olut_clr(struct nv50_head *head) head 235 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 238 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1); head 245 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 247 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 250 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2); head 275 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) head 286 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_mode(struct nv50_head *head, struct nv50_head_atom *asyh) head 288 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 292 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2); head 295 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7); head 303 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x082c + 
(head->base.index * 0x400), 1); head 310 drivers/gpu/drm/nouveau/dispnv50/head507d.c head507d_view(struct nv50_head *head, struct nv50_head_atom *asyh) head 312 drivers/gpu/drm/nouveau/dispnv50/head507d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 315 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1); head 317 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1); head 319 drivers/gpu/drm/nouveau/dispnv50/head507d.c evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2); head 26 drivers/gpu/drm/nouveau/dispnv50/head827d.c head827d_curs_clr(struct nv50_head *head) head 28 drivers/gpu/drm/nouveau/dispnv50/head827d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 31 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_mthd(push, 0x0880 + head->base.index * 0x400, 1); head 33 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_mthd(push, 0x089c + head->base.index * 0x400, 1); head 40 drivers/gpu/drm/nouveau/dispnv50/head827d.c head827d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 42 drivers/gpu/drm/nouveau/dispnv50/head827d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 45 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_mthd(push, 0x0880 + head->base.index * 0x400, 2); head 49 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_mthd(push, 0x089c + head->base.index * 0x400, 1); head 56 drivers/gpu/drm/nouveau/dispnv50/head827d.c head827d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 58 drivers/gpu/drm/nouveau/dispnv50/head827d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 61 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_mthd(push, 0x0860 + head->base.index * 0x400, 1); head 63 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_mthd(push, 0x0868 + head->base.index * 0x400, 4); head 71 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1); head 78 drivers/gpu/drm/nouveau/dispnv50/head827d.c head827d_olut_clr(struct nv50_head *head) head 80 drivers/gpu/drm/nouveau/dispnv50/head827d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 83 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1); head 85 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_mthd(push, 0x085c + (head->base.index * 0x400), 1); head 92 drivers/gpu/drm/nouveau/dispnv50/head827d.c head827d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 94 drivers/gpu/drm/nouveau/dispnv50/head827d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 97 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2); head 100 drivers/gpu/drm/nouveau/dispnv50/head827d.c evo_mthd(push, 0x085c + (head->base.index * 0x400), 1); head 26 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_or(struct nv50_head *head, struct nv50_head_atom *asyh) head 28 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 31 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0404 + (head->base.index * 0x300), 2); head 35 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_data(push, 0x31ec6000 | head->base.index << 25 | head 42 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh) head 44 
drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 47 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1); head 55 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_dither(struct nv50_head *head, struct nv50_head_atom *asyh) head 57 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 60 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1); head 69 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_ovly(struct nv50_head *head, struct nv50_head_atom *asyh) head 71 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 90 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1); head 97 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_base(struct nv50_head *head, struct nv50_head_atom *asyh) head 99 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 117 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1); head 124 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_curs_clr(struct nv50_head *head) head 126 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 129 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0480 + head->base.index * 0x300, 1); head 131 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x048c + head->base.index * 0x300, 1); head 138 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 140 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 143 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0480 + head->base.index * 0x300, 2); head 147 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x048c + head->base.index * 0x300, 1); head 154 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_core_clr(struct nv50_head *head) head 156 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 159 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0474 + head->base.index * 0x300, 1); head 166 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_core_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 168 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 171 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0460 + head->base.index * 0x300, 1); head 173 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0468 + head->base.index * 0x300, 4); head 181 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1); head 188 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_olut_clr(struct nv50_head *head) head 190 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 193 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0448 + (head->base.index * 0x300), 1); head 195 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x045c + (head->base.index * 0x300), 1); head 202 drivers/gpu/drm/nouveau/dispnv50/head907d.c 
head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 204 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 207 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0448 + (head->base.index * 0x300), 2); head 210 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x045c + (head->base.index * 0x300), 1); head 234 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) head 241 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_mode(struct nv50_head *head, struct nv50_head_atom *asyh) head 243 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 247 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6); head 254 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x042c + (head->base.index * 0x300), 2); head 257 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3); head 266 drivers/gpu/drm/nouveau/dispnv50/head907d.c head907d_view(struct nv50_head *head, struct nv50_head_atom *asyh) head 268 drivers/gpu/drm/nouveau/dispnv50/head907d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 271 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1); head 273 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1); head 275 drivers/gpu/drm/nouveau/dispnv50/head907d.c evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3); head 26 drivers/gpu/drm/nouveau/dispnv50/head917d.c head917d_dither(struct nv50_head *head, struct nv50_head_atom *asyh) head 28 drivers/gpu/drm/nouveau/dispnv50/head917d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 31 drivers/gpu/drm/nouveau/dispnv50/head917d.c evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1); head 40 drivers/gpu/drm/nouveau/dispnv50/head917d.c head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh) head 42 drivers/gpu/drm/nouveau/dispnv50/head917d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 60 drivers/gpu/drm/nouveau/dispnv50/head917d.c evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1); head 67 drivers/gpu/drm/nouveau/dispnv50/head917d.c head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw, head 27 drivers/gpu/drm/nouveau/dispnv50/headc37d.c headc37d_or(struct nv50_head *head, struct nv50_head_atom *asyh) head 29 drivers/gpu/drm/nouveau/dispnv50/headc37d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 45 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1); head 55 drivers/gpu/drm/nouveau/dispnv50/headc37d.c headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh) head 57 drivers/gpu/drm/nouveau/dispnv50/headc37d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 60 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1); head 69 drivers/gpu/drm/nouveau/dispnv50/headc37d.c headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh) head 71 drivers/gpu/drm/nouveau/dispnv50/headc37d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 74 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x2018 + (head->base.index * 0x0400), 1); head 
83 drivers/gpu/drm/nouveau/dispnv50/headc37d.c headc37d_curs_clr(struct nv50_head *head) head 85 drivers/gpu/drm/nouveau/dispnv50/headc37d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 88 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x209c + head->base.index * 0x400, 1); head 90 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x2088 + head->base.index * 0x400, 1); head 97 drivers/gpu/drm/nouveau/dispnv50/headc37d.c headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 99 drivers/gpu/drm/nouveau/dispnv50/headc37d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 102 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x209c + head->base.index * 0x400, 2); head 107 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x2088 + head->base.index * 0x400, 1); head 109 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x2090 + head->base.index * 0x400, 1); head 116 drivers/gpu/drm/nouveau/dispnv50/headc37d.c headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw, head 124 drivers/gpu/drm/nouveau/dispnv50/headc37d.c headc37d_olut_clr(struct nv50_head *head) head 126 drivers/gpu/drm/nouveau/dispnv50/headc37d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 129 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x20ac + (head->base.index * 0x400), 1); head 136 drivers/gpu/drm/nouveau/dispnv50/headc37d.c headc37d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh) head 138 drivers/gpu/drm/nouveau/dispnv50/headc37d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 141 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x20a4 + (head->base.index * 0x400), 3); head 152 drivers/gpu/drm/nouveau/dispnv50/headc37d.c headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh) head 162 drivers/gpu/drm/nouveau/dispnv50/headc37d.c headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh) head 164 drivers/gpu/drm/nouveau/dispnv50/headc37d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 168 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5); head 174 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x200c + (head->base.index * 0x400), 1); head 176 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1); head 179 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1); head 186 drivers/gpu/drm/nouveau/dispnv50/headc37d.c headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh) head 188 drivers/gpu/drm/nouveau/dispnv50/headc37d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 191 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x204c + (head->base.index * 0x400), 1); head 193 drivers/gpu/drm/nouveau/dispnv50/headc37d.c evo_mthd(push, 0x2058 + (head->base.index * 0x400), 1); head 27 drivers/gpu/drm/nouveau/dispnv50/headc57d.c headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh) head 29 drivers/gpu/drm/nouveau/dispnv50/headc57d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan; head 45 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1); head 55 drivers/gpu/drm/nouveau/dispnv50/headc57d.c headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh) head 57 
head 57 drivers/gpu/drm/nouveau/dispnv50/headc57d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
head 60 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
head 73 drivers/gpu/drm/nouveau/dispnv50/headc57d.c headc57d_olut_clr(struct nv50_head *head)
head 75 drivers/gpu/drm/nouveau/dispnv50/headc57d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
head 78 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_mthd(push, 0x2288 + (head->base.index * 0x400), 1);
head 85 drivers/gpu/drm/nouveau/dispnv50/headc57d.c headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
head 87 drivers/gpu/drm/nouveau/dispnv50/headc57d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
head 90 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_mthd(push, 0x2280 + (head->base.index * 0x400), 4);
head 155 drivers/gpu/drm/nouveau/dispnv50/headc57d.c headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
head 168 drivers/gpu/drm/nouveau/dispnv50/headc57d.c headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
head 170 drivers/gpu/drm/nouveau/dispnv50/headc57d.c struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
head 174 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
head 180 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
head 182 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
head 185 drivers/gpu/drm/nouveau/dispnv50/headc57d.c evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
head 31 drivers/gpu/drm/nouveau/dispnv50/oimm507b.c .head = wndw->id,
head 28 drivers/gpu/drm/nouveau/dispnv50/ovly.c nv50_ovly_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
head 52 drivers/gpu/drm/nouveau/dispnv50/ovly.c ret = ovlys[cid].new(drm, head, ovlys[cid].oclass, pwndw);
head 7 drivers/gpu/drm/nouveau/dispnv50/ovly.h struct nouveau_drm *, int head, s32 oclass,
head 29 drivers/gpu/drm/nouveau/dispnv50/ovly.h int nv50_ovly_new(struct nouveau_drm *, int head, struct nv50_wndw **);
head 170 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c struct nouveau_drm *drm, int head, s32 oclass, u32 interlock_data,
head 174 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c .head = head,
head 181 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c "ovly", head, format, BIT(head),
head 211 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c ovly507e_new(struct nouveau_drm *drm, int head, s32 oclass,
head 214 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c return ovly507e_new_(&ovly507e, ovly507e_format, drm, head, oclass,
head 215 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c 0x00000004 << (head * 8), pwndw);
head 99 drivers/gpu/drm/nouveau/dispnv50/ovly827e.c ovly827e_new(struct nouveau_drm *drm, int head, s32 oclass,
head 102 drivers/gpu/drm/nouveau/dispnv50/ovly827e.c return ovly507e_new_(&ovly827e, ovly827e_format, drm, head, oclass,
head 103 drivers/gpu/drm/nouveau/dispnv50/ovly827e.c 0x00000004 << (head * 8), pwndw);
head 76 drivers/gpu/drm/nouveau/dispnv50/ovly907e.c ovly907e_new(struct nouveau_drm *drm, int head, s32 oclass,
head 79 drivers/gpu/drm/nouveau/dispnv50/ovly907e.c return ovly507e_new_(&ovly907e, ovly907e_format, drm, head, oclass,
head 80 drivers/gpu/drm/nouveau/dispnv50/ovly907e.c 0x00000004 << (head * 4), pwndw);
head 37 drivers/gpu/drm/nouveau/dispnv50/ovly917e.c ovly917e_new(struct nouveau_drm *drm, int head, s32 oclass,
head 40 drivers/gpu/drm/nouveau/dispnv50/ovly917e.c return ovly507e_new_(&ovly907e, ovly917e_format, drm, head, oclass,
head 41 drivers/gpu/drm/nouveau/dispnv50/ovly917e.c 0x00000004 << (head * 4), pwndw);
head 37 drivers/gpu/drm/nouveau/dispnv50/wndw.c list_del(&ctxdma->head);
head 59 drivers/gpu/drm/nouveau/dispnv50/wndw.c list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
head 66 drivers/gpu/drm/nouveau/dispnv50/wndw.c list_add(&ctxdma->head, &wndw->ctxdma.list);
head 585 drivers/gpu/drm/nouveau/dispnv50/wndw.c list_for_each_entry_safe(ctxdma, ctxtmp, &wndw->ctxdma.list, head) {
head 11 drivers/gpu/drm/nouveau/dispnv50/wndw.h struct list_head head;
head 12 drivers/gpu/drm/nouveau/include/nvif/cl0046.h __u8 head;
head 11 drivers/gpu/drm/nouveau/include/nvif/cl5070.h __u8 head;
head 7 drivers/gpu/drm/nouveau/include/nvif/cl507a.h __u8 head;
head 7 drivers/gpu/drm/nouveau/include/nvif/cl507b.h __u8 head;
head 7 drivers/gpu/drm/nouveau/include/nvif/cl507c.h __u8 head;
head 7 drivers/gpu/drm/nouveau/include/nvif/cl507e.h __u8 head;
head 26 drivers/gpu/drm/nouveau/include/nvif/event.h __u8 head;
head 159 drivers/gpu/drm/nouveau/include/nvif/list.h list_add(struct list_head *entry, struct list_head *head)
head 161 drivers/gpu/drm/nouveau/include/nvif/list.h __list_add(entry, head, head->next);
head 180 drivers/gpu/drm/nouveau/include/nvif/list.h list_add_tail(struct list_head *entry, struct list_head *head)
head 182 drivers/gpu/drm/nouveau/include/nvif/list.h __list_add(entry, head->prev, head);
head 220 drivers/gpu/drm/nouveau/include/nvif/list.h struct list_head *head)
head 223 drivers/gpu/drm/nouveau/include/nvif/list.h list_add_tail(list, head);
head 235 drivers/gpu/drm/nouveau/include/nvif/list.h list_empty(struct list_head *head)
head 237 drivers/gpu/drm/nouveau/include/nvif/list.h return head->next == head;
head 314 drivers/gpu/drm/nouveau/include/nvif/list.h #define list_for_each_entry(pos, head, member) \
head 315 drivers/gpu/drm/nouveau/include/nvif/list.h for (pos = __container_of((head)->next, pos, member); \
head 316 drivers/gpu/drm/nouveau/include/nvif/list.h &pos->member != (head); \
head 326 drivers/gpu/drm/nouveau/include/nvif/list.h #define list_for_each_entry_safe(pos, tmp, head, member) \
head 327 drivers/gpu/drm/nouveau/include/nvif/list.h for (pos = __container_of((head)->next, pos, member), \
head 329 drivers/gpu/drm/nouveau/include/nvif/list.h &pos->member != (head); \
head 333 drivers/gpu/drm/nouveau/include/nvif/list.h #define list_for_each_entry_reverse(pos, head, member) \
head 334 drivers/gpu/drm/nouveau/include/nvif/list.h for (pos = __container_of((head)->prev, pos, member); \
head 335 drivers/gpu/drm/nouveau/include/nvif/list.h &pos->member != (head); \
head 338 drivers/gpu/drm/nouveau/include/nvif/list.h #define list_for_each_entry_continue(pos, head, member) \
head 340 drivers/gpu/drm/nouveau/include/nvif/list.h &pos->member != (head); \
head 343 drivers/gpu/drm/nouveau/include/nvif/list.h #define list_for_each_entry_continue_reverse(pos, head, member) \
head 345 drivers/gpu/drm/nouveau/include/nvif/list.h &pos->member != (head); \
head 348 drivers/gpu/drm/nouveau/include/nvif/list.h #define list_for_each_entry_from(pos, head, member) \
head 350 drivers/gpu/drm/nouveau/include/nvif/list.h &pos->member != (head); \
head 97 drivers/gpu/drm/nouveau/include/nvkm/core/device.h struct list_head head;
head 9 drivers/gpu/drm/nouveau/include/nvkm/core/notify.h struct list_head head;
head 15 drivers/gpu/drm/nouveau/include/nvkm/core/object.h struct list_head head;
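The nvif/list.h entries above spell out the intrusive doubly-linked list the rest of this listing leans on: an empty list is a sentinel whose next pointer aims back at itself (the list_empty() line), list_add()/list_add_tail() splice at either end, and list_for_each_entry() recovers the containing object from the embedded member. A from-scratch sketch of the idea follows; the four-argument iterator and the struct item payload are illustrative, not the nvif originals (which derive the type from pos).

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *prev, *next; };

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* four-argument variant of the nvif iterator, for illustration */
	#define list_for_each_entry(pos, head, type, member)            \
		for (pos = container_of((head)->next, type, member);    \
		     &pos->member != (head);                            \
		     pos = container_of(pos->member.next, type, member))

	static void list_add_tail(struct list_head *entry, struct list_head *head)
	{
		entry->prev = head->prev;
		entry->next = head;
		head->prev->next = entry;
		head->prev = entry;
	}

	static int list_empty(struct list_head *head)
	{
		return head->next == head;	/* sentinel points at itself */
	}

	/* hypothetical payload; nouveau objects embed the member as "head" */
	struct item { int id; struct list_head head; };

	int main(void)
	{
		struct list_head list = LIST_HEAD_INIT(list);
		struct item a = { .id = 1 }, b = { .id = 2 };
		struct item *pos;

		printf("empty? %d\n", list_empty(&list));
		list_add_tail(&a.head, &list);
		list_add_tail(&b.head, &list);
		list_for_each_entry(pos, &list, struct item, head)
			printf("item %d\n", pos->id);
		return 0;
	}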
head 12 drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h struct list_head head;
head 23 drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h struct list_head head;
head 12 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h int head;
head 28 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h .head = -1, \
head 57 drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h struct list_head head;
head 64 drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h struct list_head head;
head 92 drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h struct list_head head;
head 39 drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h struct list_head head;
head 59 drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h struct list_head head;
head 7 drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h struct list_head head;
head 7 drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h struct list_head head;
head 16 drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h INIT_LIST_HEAD(&alarm->head);
head 7 drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h u8 nvkm_rdport(struct nvkm_device *, int head, u16 port);
head 8 drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h void nvkm_wrport(struct nvkm_device *, int head, u16 port, u8 value);
head 11 drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h u8 nvkm_rdvgas(struct nvkm_device *, int head, u8 index);
head 12 drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h void nvkm_wrvgas(struct nvkm_device *, int head, u8 index, u8 value);
head 15 drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h u8 nvkm_rdvgag(struct nvkm_device *, int head, u8 index);
head 16 drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h void nvkm_wrvgag(struct nvkm_device *, int head, u8 index, u8 value);
head 19 drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h u8 nvkm_rdvgac(struct nvkm_device *, int head, u8 index);
head 20 drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h void nvkm_wrvgac(struct nvkm_device *, int head, u8 index, u8 value);
head 23 drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h u8 nvkm_rdvgai(struct nvkm_device *, int head, u16 port, u8 index);
head 24 drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h void nvkm_wrvgai(struct nvkm_device *, int head, u16 port, u8 index, u8 value);
head 119 drivers/gpu/drm/nouveau/nouveau_abi16.c list_del(&ntfy->head);
head 135 drivers/gpu/drm/nouveau/nouveau_abi16.c list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
head 154 drivers/gpu/drm/nouveau/nouveau_abi16.c list_del(&chan->head);
head 165 drivers/gpu/drm/nouveau/nouveau_abi16.c list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
head 300 drivers/gpu/drm/nouveau/nouveau_abi16.c list_add(&chan->head, &abi16->channels);
head 359 drivers/gpu/drm/nouveau/nouveau_abi16.c list_for_each_entry(chan, &abi16->channels, head) {
head 502 drivers/gpu/drm/nouveau/nouveau_abi16.c list_add(&ntfy->head, &chan->notifiers);
head 543 drivers/gpu/drm/nouveau/nouveau_abi16.c list_add(&ntfy->head, &chan->notifiers);
head 606 drivers/gpu/drm/nouveau/nouveau_abi16.c list_for_each_entry(ntfy, &chan->notifiers, head) {
head 17 drivers/gpu/drm/nouveau/nouveau_abi16.h struct list_head head;
head 22 drivers/gpu/drm/nouveau/nouveau_abi16.h struct list_head head;
head 94 drivers/gpu/drm/nouveau/nouveau_bios.c struct dcb_output *dcbent, int head, bool dl)
head 100 drivers/gpu/drm/nouveau/nouveau_bios.c NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, head ? NV_CIO_CRE_44_HEADB :
head 102 drivers/gpu/drm/nouveau/nouveau_bios.c nouveau_bios_run_init_table(dev, scriptptr, dcbent, head);
head 104 drivers/gpu/drm/nouveau/nouveau_bios.c nv04_dfp_bind_head(dev, dcbent, head, dl);
head 107 drivers/gpu/drm/nouveau/nouveau_bios.c static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script)
head 117 drivers/gpu/drm/nouveau/nouveau_bios.c run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link);
head 134 drivers/gpu/drm/nouveau/nouveau_bios.c static int run_lvds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
head 202 drivers/gpu/drm/nouveau/nouveau_bios.c run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);
head 207 drivers/gpu/drm/nouveau/nouveau_bios.c int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
head 222 drivers/gpu/drm/nouveau/nouveau_bios.c if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver ||
head 228 drivers/gpu/drm/nouveau/nouveau_bios.c call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk);
head 232 drivers/gpu/drm/nouveau/nouveau_bios.c call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk);
head 234 drivers/gpu/drm/nouveau/nouveau_bios.c call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);
head 242 drivers/gpu/drm/nouveau/nouveau_bios.c ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
head 244 drivers/gpu/drm/nouveau/nouveau_bios.c ret = run_lvds_table(dev, dcbent, head, script, pxclk);
head 246 drivers/gpu/drm/nouveau/nouveau_bios.c bios->fp.last_script_invoc = (script << 1 | head);
head 624 drivers/gpu/drm/nouveau/nouveau_bios.c int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
head 671 drivers/gpu/drm/nouveau/nouveau_bios.c run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
head 173 drivers/gpu/drm/nouveau/nouveau_bios.h int head, int pxclk);
head 174 drivers/gpu/drm/nouveau/nouveau_bios.h int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
head 212 drivers/gpu/drm/nouveau/nouveau_bo.c INIT_LIST_HEAD(&nvbo->head);
head 1317 drivers/gpu/drm/nouveau/nouveau_bo.c list_for_each_entry(vma, &nvbo->vma_list, head) {
head 1321 drivers/gpu/drm/nouveau/nouveau_bo.c list_for_each_entry(vma, &nvbo->vma_list, head) {
head 19 drivers/gpu/drm/nouveau/nouveau_bo.h struct list_head head;
head 61 drivers/gpu/drm/nouveau/nouveau_connector.c list_for_each_entry(mode, &connector->probed_modes, head) {
head 401 drivers/gpu/drm/nouveau/nouveau_connector.c list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
head 109 drivers/gpu/drm/nouveau/nouveau_display.c .base.head = nouveau_crtc(crtc)->index,
head 146 drivers/gpu/drm/nouveau/nouveau_display.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 161 drivers/gpu/drm/nouveau/nouveau_display.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 174 drivers/gpu/drm/nouveau/nouveau_display.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 180 drivers/gpu/drm/nouveau/nouveau_display.c .head = nv_crtc->index,
head 138 drivers/gpu/drm/nouveau/nouveau_drm.c list_for_each_entry_safe(work, wtmp, &cli->worker, head) {
head 140 drivers/gpu/drm/nouveau/nouveau_drm.c list_del(&work->head);
head 161 drivers/gpu/drm/nouveau/nouveau_drm.c list_add_tail(&work->head, &cli->worker);
head 1075 drivers/gpu/drm/nouveau/nouveau_drm.c list_add(&cli->head, &drm->clients);
head 1103 drivers/gpu/drm/nouveau/nouveau_drm.c list_del(&cli->head);
head 103 drivers/gpu/drm/nouveau/nouveau_drv.h struct list_head head;
head 117 drivers/gpu/drm/nouveau/nouveau_drv.h struct list_head head;
head 71 drivers/gpu/drm/nouveau/nouveau_encoder.h void (*update)(struct nouveau_encoder *, u8 head,
head 61 drivers/gpu/drm/nouveau/nouveau_fence.c list_del(&fence->head);
head 96 drivers/gpu/drm/nouveau/nouveau_fence.c fence = list_entry(fctx->pending.next, typeof(*fence), head);
head 133 drivers/gpu/drm/nouveau/nouveau_fence.c fence = list_entry(fctx->pending.next, typeof(*fence), head);
head 157 drivers/gpu/drm/nouveau/nouveau_fence.c fence = list_entry(fctx->pending.next, typeof(*fence), head);
head 227 drivers/gpu/drm/nouveau/nouveau_fence.c list_add_tail(&fence->head, &fctx->pending);
head 478 drivers/gpu/drm/nouveau/nouveau_fence.c list_del(&fence->head);
head 14 drivers/gpu/drm/nouveau/nouveau_fence.h struct list_head head;
head 117 drivers/gpu/drm/nouveau/nouveau_gem.c list_del_init(&vma->head);
head 705 drivers/gpu/drm/nouveau/nouveau_gem.c list_for_each_entry(temp, &abi16->channels, head) {
head 76 drivers/gpu/drm/nouveau/nouveau_svm.c struct list_head head;
head 83 drivers/gpu/drm/nouveau/nouveau_svm.c list_for_each_entry(ivmm, &svm->inst, head) {
head 218 drivers/gpu/drm/nouveau/nouveau_svm.c list_del(&ivmm->head);
head 237 drivers/gpu/drm/nouveau/nouveau_svm.c list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
head 44 drivers/gpu/drm/nouveau/nouveau_usif.c struct list_head head;
head 58 drivers/gpu/drm/nouveau/nouveau_usif.c list_for_each_entry(ntfy, &cli->notifys, head) {
head 68 drivers/gpu/drm/nouveau/nouveau_usif.c list_del(&ntfy->head);
head 160 drivers/gpu/drm/nouveau/nouveau_usif.c list_add(&ntfy->head, &cli->notifys);
head 250 drivers/gpu/drm/nouveau/nouveau_usif.c struct list_head head;
head 259 drivers/gpu/drm/nouveau/nouveau_usif.c list_del(&object->head);
head 276 drivers/gpu/drm/nouveau/nouveau_usif.c list_add(&object->head, &cli->objects);
head 361 drivers/gpu/drm/nouveau/nouveau_usif.c list_del(&object->head);
head 384 drivers/gpu/drm/nouveau/nouveau_usif.c list_for_each_entry_safe(notify, ntemp, &cli->notifys, head) {
head 388 drivers/gpu/drm/nouveau/nouveau_usif.c list_for_each_entry_safe(object, otemp, &cli->objects, head) {
head 53 drivers/gpu/drm/nouveau/nouveau_vmm.c list_for_each_entry(vma, &nvbo->vma_list, head) {
head 70 drivers/gpu/drm/nouveau/nouveau_vmm.c list_del(&vma->head);
head 97 drivers/gpu/drm/nouveau/nouveau_vmm.c list_add_tail(&vma->head, &nvbo->vma_list);
head 10 drivers/gpu/drm/nouveau/nouveau_vmm.h struct list_head head;
head 62 drivers/gpu/drm/nouveau/nvkm/core/event.c list_for_each_entry(notify, &event->list, head) {
head 131 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c list_add(&object->head, &parent->tree);
head 124 drivers/gpu/drm/nouveau/nvkm/core/notify.c list_del(&notify->head);
head 156 drivers/gpu/drm/nouveau/nvkm/core/notify.c list_add_tail(&notify->head, &event->list);
head 188 drivers/gpu/drm/nouveau/nvkm/core/object.c list_for_each_entry(child, &object->tree, head) {
head 215 drivers/gpu/drm/nouveau/nvkm/core/object.c list_for_each_entry_continue_reverse(child, &object->tree, head) {
head 237 drivers/gpu/drm/nouveau/nvkm/core/object.c list_for_each_entry(child, &object->tree, head) {
head 248 drivers/gpu/drm/nouveau/nvkm/core/object.c list_for_each_entry_continue_reverse(child, &object->tree, head)
head 266 drivers/gpu/drm/nouveau/nvkm/core/object.c list_for_each_entry_safe(child, ctemp, &object->tree, head) {
head 287 drivers/gpu/drm/nouveau/nvkm/core/object.c list_del(&object->head);
head 305 drivers/gpu/drm/nouveau/nvkm/core/object.c INIT_LIST_HEAD(&object->head);
head 40 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c list_for_each_entry(device, &nv_devices, head) {
head 63 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c list_for_each_entry(device, &nv_devices, head) {
head 2874 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c list_del(&device->head);
head 2912 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c list_add_tail(&device->head, &nv_devices);
head 106 drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c list_for_each_entry(pstate, &clk->states, head) {
head 113 drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c list_for_each_entry(cstate, &pstate->list, head) {
head 46 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c struct nvkm_head *head = nvkm_head_find(disp, id);
head 47 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c if (head)
head 48 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c head->func->vblank_put(head);
head 55 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c struct nvkm_head *head = nvkm_head_find(disp, id);
head 56 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c if (head)
head 57 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c head->func->vblank_get(head);
head 73 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
head 75 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c notify->index = req->v0.head;
head 91 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c nvkm_disp_vblank(struct nvkm_disp *disp, int head)
head 94 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
head 111 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_for_each_entry(outp, &disp->outp, head) {
head 226 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_for_each_entry(outp, &disp->outp, head) {
head 230 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_for_each_entry(conn, &disp->conn, head) {
head 245 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_for_each_entry(conn, &disp->conn, head) {
head 249 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_for_each_entry(outp, &disp->outp, head) {
head 262 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_for_each_entry(ior, &disp->ior, head) {
head 277 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c struct nvkm_head *head;
head 329 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_add_tail(&outp->head, &disp->outp);
head 334 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_for_each_entry_safe(outp, outt, &disp->outp, head) {
head 348 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_for_each_entry(pair, &disp->outp, head) {
head 368 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_for_each_entry(conn, &disp->conn, head) {
head 385 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_del(&outp->head);
head 390 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_add_tail(&outp->conn->head, &disp->conn);
head 406 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_for_each_entry(outp, &disp->outp, head) {
head 417 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_for_each_entry(head, &disp->head, head)
head 418 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c i = max(i, head->id + 1);
head 438 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c conn = list_first_entry(&disp->conn, typeof(*conn), head);
head 439 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_del(&conn->head);
head 444 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c outp = list_first_entry(&disp->outp, typeof(*outp), head);
head 445 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_del(&outp->head);
head 451 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_first_entry(&disp->ior, typeof(*ior), head);
head 455 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c while (!list_empty(&disp->head)) {
head 456 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c struct nvkm_head *head =
head 457 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c list_first_entry(&disp->head, typeof(*head), head);
head 458 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c nvkm_head_del(&head);
head 479 drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c INIT_LIST_HEAD(&disp->head);
head 43 drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c int head, ret = -ENOSYS;
head 50 drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c args->v0.version, args->v0.pushbuf, args->v0.head);
head 51 drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c if (!nvkm_head_find(&disp->base, args->v0.head))
head 54 drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c head = args->v0.head;
head 58 drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c return nv50_disp_dmac_new_(func, mthd, disp, chid + head,
head 59 drivers/gpu/drm/nouveau/nvkm/engine/disp/basenv50.c head, push, oclass, pobject);
head 81 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c u32 base = chan->head * mthd->addr;
head 340 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c struct nv50_disp *disp, int ctrl, int user, int head,
head 356 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c chan->head = head;
head 18 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h int head;
head 36 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h struct nv50_disp *, int ctrl, int user, int head,
head 40 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h struct nv50_disp *, int chid, int head, u64 push,
head 17 drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h struct list_head head;
head 43 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgv100.c const u32 mask = 0x00010000 << chan->head;
head 42 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c int head, ret = -ENOSYS;
head 47 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c args->v0.version, args->v0.head);
head 48 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c if (!nvkm_head_find(&disp->base, args->v0.head))
head 50 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c head = args->v0.head;
head 54 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c return nv50_disp_chan_new_(func, NULL, disp, ctrl + head, user + head,
head 55 drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c head, oclass, pobject);
head 47 drivers/gpu/drm/nouveau/nvkm/engine/disp/dacgf119.c state->head = ctrl & 0x0000000f;
head 98 drivers/gpu/drm/nouveau/nvkm/engine/disp/dacnv50.c state->head = ctrl & 0x00000003;
head 36 drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c struct nv50_disp *disp, int chid, int head, u64 push,
head 44 drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c ret = nv50_disp_chan_new_(func, mthd, disp, chid, chid, head, oclass,
head 450 drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c struct nvkm_head *head;
head 461 drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c list_for_each_entry(head, &outp->disp->head, head) {
head 462 drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c if (ior->asy.head & (1 << head->id)) {
head 463 drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
head 464 drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c datakbps += khz * head->asy.or.depth;
head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c .head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c .head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
head 40 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c struct nvkm_head *head;
head 44 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c list_for_each_entry(head, &disp->base.head, head) {
head 45 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
head 46 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c HEAD_DBG(head, "%08x", mask[head->id]);
head 52 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c list_for_each_entry(head, &disp->base.head, head) {
head 53 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c if (!(mask[head->id] & 0x00001000))
head 55 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c nv50_disp_super_1_0(disp, head);
head 59 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c list_for_each_entry(head, &disp->base.head, head) {
head 60 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c if (!(mask[head->id] & 0x00001000))
head 62 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c nv50_disp_super_2_0(disp, head);
head 65 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c list_for_each_entry(head, &disp->base.head, head) {
head 66 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c if (!(mask[head->id] & 0x00010000))
head 68 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c nv50_disp_super_2_1(disp, head);
head 70 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c list_for_each_entry(head, &disp->base.head, head) {
head 71 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c if (!(mask[head->id] & 0x00001000))
head 73 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c nv50_disp_super_2_2(disp, head);
head 77 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c list_for_each_entry(head, &disp->base.head, head) {
head 78 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c if (!(mask[head->id] & 0x00001000))
head 80 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c nv50_disp_super_3_0(disp, head);
head 84 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c list_for_each_entry(head, &disp->base.head, head)
head 85 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c nvkm_wr32(device, 0x6101d4 + (head->id * 0x800), 0x00000000);
head 126 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c struct nvkm_head *head;
head 164 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c list_for_each_entry(head, &disp->base.head, head) {
head 165 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c const u32 hoff = head->id * 0x800;
head 166 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c u32 mask = 0x01000000 << head->id;
head 170 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c nvkm_disp_vblank(&disp->base, head->id);
head 189 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c struct nvkm_head *head;
head 199 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c list_for_each_entry(head, &disp->base.head, head) {
head 200 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c const u32 hoff = head->id * 0x800;
head 246 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c list_for_each_entry(head, &disp->base.head, head) {
head 247 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c const u32 hoff = head->id * 0x800;
head 263 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
head 38 drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
head 38 drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
head 38 drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
head 38 drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
head 38 drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
head 65 drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c .head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/gt200.c .head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c .head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
head 46 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c struct nvkm_head *head;
head 51 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c list_for_each_entry(head, &disp->base.head, head) {
head 52 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c mask[head->id] = nvkm_rd32(device, 0x6107ac + (head->id * 4));
head 53 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c HEAD_DBG(head, "%08x", mask[head->id]);
head 59 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c list_for_each_entry(head, &disp->base.head, head) {
head 60 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c if (!(mask[head->id] & 0x00001000))
head 62 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nv50_disp_super_1_0(disp, head);
head 66 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c list_for_each_entry(head, &disp->base.head, head) {
head 67 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c if (!(mask[head->id] & 0x00001000))
head 69 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nv50_disp_super_2_0(disp, head);
head 72 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c list_for_each_entry(head, &disp->base.head, head) {
head 73 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c if (!(mask[head->id] & 0x00010000))
head 75 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nv50_disp_super_2_1(disp, head);
head 77 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c list_for_each_entry(head, &disp->base.head, head) {
head 78 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c if (!(mask[head->id] & 0x00001000))
head 80 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nv50_disp_super_2_2(disp, head);
head 84 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c list_for_each_entry(head, &disp->base.head, head) {
head 85 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c if (!(mask[head->id] & 0x00001000))
head 87 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nv50_disp_super_3_0(disp, head);
head 91 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c list_for_each_entry(head, &disp->base.head, head)
head 92 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nvkm_wr32(device, 0x6107ac + (head->id * 4), 0x00000000);
head 176 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c int head;
head 185 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c for_each_set_bit(head, &mask, disp->wndw.nr) {
head 186 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nvkm_wr32(device, 0x611854, 0x00010000 << head);
head 187 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c gv100_disp_exception(disp, 73 + head);
head 188 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c stat &= ~(0x00010000 << head);
head 239 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
head 243 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c u32 stat = nvkm_rd32(device, 0x611800 + (head * 0x04));
head 247 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nvkm_wr32(device, 0x611800 + (head * 0x04), stat & 0x00000003);
head 252 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nvkm_disp_vblank(&disp->base, head);
head 253 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000004);
head 259 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nvkm_wr32(device, 0x611800 + (head * 0x04), stat);
head 270 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c int head;
head 273 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c for_each_set_bit(head, &mask, 8) {
head 274 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c gv100_disp_intr_head_timing(disp, head);
head 275 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c stat &= ~BIT(head);
head 314 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c struct nvkm_head *head;
head 340 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c list_for_each_entry(head, &disp->base.head, head) {
head 341 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c const int id = head->id;
head 387 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
head 400 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c list_for_each_entry(head, &disp->base.head, head) {
head 401 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c const u32 hoff = head->id * 4;
head 421 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
head 41 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c gf119_hda_hpd(struct nvkm_ior *ior, int head, bool present)
head 44 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c const u32 hoff = 0x800 * head;
head 41 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c gt215_hda_hpd(struct nvkm_ior *ior, int head, bool present)
head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c g84_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
head 35 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmig84.c const u32 hoff = head * 0x800;
head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c gf119_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
head 34 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigf119.c const u32 hoff = head * 0x800;
head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c gk104_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
head 34 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c const u32 hoff = head * 0x800;
head 35 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigk104.c const u32 hdmi = head * 0x400;
head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c gm200_hdmi_scdc(struct nvkm_ior *ior, int head, u8 scdc)
head 30 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c const u32 hoff = head * 0x800;
head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigt215.c gt215_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
head 25 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
head 32 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c const u32 hoff = head * 0x800;
head 33 drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c const u32 hdmi = head * 0x400;
head 34 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c struct nvkm_head *head;
head 35 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c list_for_each_entry(head, &disp->head, head) {
head 36 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c if (head->id == id)
head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c return head;
head 44 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c struct nvkm_head *head, void *data, u32 size)
head 56 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c head->func->state(head, &head->arm);
head 57 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c args->v0.vtotal = head->arm.vtotal;
head 58 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c args->v0.vblanks = head->arm.vblanks;
head 59 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c args->v0.vblanke = head->arm.vblanke;
head 60 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c args->v0.htotal = head->arm.htotal;
head 61 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c args->v0.hblanks = head->arm.hblanks;
head 62 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c args->v0.hblanke = head->arm.hblanke;
head 72 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c head->func->rgpos(head, &args->v0.hline, &args->v0.vline);
head 83 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c struct nvkm_head *head = *phead;
head 84 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c if (head) {
head 85 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c HEAD_DBG(head, "dtor");
head 86 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c list_del(&head->head);
head 96 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c struct nvkm_head *head;
head 97 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c if (!(head = kzalloc(sizeof(*head), GFP_KERNEL)))
head 99 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c head->func = func;
head 100 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c head->disp = disp;
head 101 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c head->id = id;
head 102 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c list_add_tail(&head->head, &disp->head);
head 103 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.c HEAD_DBG(head, "ctor");
head 11 drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h struct list_head head;
head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c gf119_head_vblank_put(struct nvkm_head *head)
head 29 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c struct nvkm_device *device = head->disp->engine.subdev.device;
head 30 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c const u32 hoff = head->id * 0x800;
head 35 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c gf119_head_vblank_get(struct nvkm_head *head)
head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c struct nvkm_device *device = head->disp->engine.subdev.device;
head 38 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c const u32 hoff = head->id * 0x800;
head 43 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c gf119_head_rgclk(struct nvkm_head *head, int div)
head 45 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c struct nvkm_device *device = head->disp->engine.subdev.device;
head 46 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c nvkm_mask(device, 0x612200 + (head->id * 0x800), 0x0000000f, div);
head 50 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c gf119_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
head 52 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c struct nvkm_device *device = head->disp->engine.subdev.device;
head 53 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgf119.c const u32 hoff = (state == &head->asy) * 0x20000 + head->id * 0x300;
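The headgf119.c entries above show the usual nvkm register idiom: each head owns a register block at a fixed stride (0x800 here), and nvkm_mask() performs a read-modify-write so only the targeted field changes, e.g. the RG clock divider in bits 3:0 of 0x612200 in gf119_head_rgclk(). A standalone sketch with the MMIO bus reduced to an array; the address and stride come from the listing, the rest is invented.

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t regs[8];	/* one fake register per head */

	static uint32_t rd32(int head) { return regs[head]; }

	static void wr32(int head, uint32_t data)
	{
		regs[head] = data;
		printf("wr 0x%06x <- 0x%08x\n",
		       (unsigned)(0x612200 + head * 0x800), (unsigned)data);
	}

	/* nvkm_mask-style read-modify-write: clear `mask` bits, then set `data` */
	static uint32_t mask32(int head, uint32_t mask, uint32_t data)
	{
		uint32_t temp = rd32(head);
		wr32(head, (temp & ~mask) | data);
		return temp;
	}

	int main(void)
	{
		int head = 2, div = 1;		/* gf119_head_rgclk(head, div) shape */
		regs[head] = 0xdeadbe30;	/* pretend other fields are in use */
		mask32(head, 0x0000000f, div);	/* only the low nibble changes */
		return 0;
	}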
drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c gv100_head_vblank_put(struct nvkm_head *head) head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c struct nvkm_device *device = head->disp->engine.subdev.device; head 28 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000004, 0x00000000); head 32 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c gv100_head_vblank_get(struct nvkm_head *head) head 34 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c struct nvkm_device *device = head->disp->engine.subdev.device; head 35 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000004, 0x00000004); head 39 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c gv100_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline) head 41 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c struct nvkm_device *device = head->disp->engine.subdev.device; head 42 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c const u32 hoff = head->id * 0x800; head 49 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c gv100_head_state(struct nvkm_head *head, struct nvkm_head_state *state) head 51 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c struct nvkm_device *device = head->disp->engine.subdev.device; head 52 drivers/gpu/drm/nouveau/nvkm/engine/disp/headgv100.c const u32 hoff = (state == &head->arm) * 0x8000 + head->id * 0x400; head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c nv04_head_vblank_put(struct nvkm_head *head) head 29 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c struct nvkm_device *device = head->disp->engine.subdev.device; head 30 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c nvkm_wr32(device, 0x600140 + (head->id * 0x2000) , 0x00000000); head 34 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c nv04_head_vblank_get(struct nvkm_head *head) head 36 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c struct nvkm_device *device = head->disp->engine.subdev.device; head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c nvkm_wr32(device, 0x600140 + (head->id * 0x2000) , 0x00000001); head 41 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c nv04_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline) head 43 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c struct nvkm_device *device = head->disp->engine.subdev.device; head 44 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c u32 data = nvkm_rd32(device, 0x600868 + (head->id * 0x2000)); head 50 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c nv04_head_state(struct nvkm_head *head, struct nvkm_head_state *state) head 52 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c struct nvkm_device *device = head->disp->engine.subdev.device; head 53 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv04.c const u32 hoff = head->id * 0x0200; head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c nv50_head_vblank_put(struct nvkm_head *head) head 29 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c struct nvkm_device *device = head->disp->engine.subdev.device; head 30 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c nvkm_mask(device, 0x61002c, (4 << head->id), 0); head 34 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c nv50_head_vblank_get(struct nvkm_head *head) head 36 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c struct nvkm_device *device = head->disp->engine.subdev.device; head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c nvkm_mask(device, 0x61002c, (4 << head->id), (4 << 
head->id)); head 41 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c nv50_head_rgclk(struct nvkm_head *head, int div) head 43 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c struct nvkm_device *device = head->disp->engine.subdev.device; head 44 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c nvkm_mask(device, 0x614200 + (head->id * 0x800), 0x0000000f, div); head 48 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c nv50_head_rgpos(struct nvkm_head *head, u16 *hline, u16 *vline) head 50 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c struct nvkm_device *device = head->disp->engine.subdev.device; head 51 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c const u32 hoff = head->id * 0x800; head 58 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c nv50_head_state(struct nvkm_head *head, struct nvkm_head_state *state) head 60 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c struct nvkm_device *device = head->disp->engine.subdev.device; head 61 drivers/gpu/drm/nouveau/nvkm/engine/disp/headnv50.c const u32 hoff = head->id * 0x540 + (state == &head->arm) * 4; head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.c list_for_each_entry(ior, &disp->ior, head) { head 50 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.c list_del(&ior->head); head 69 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.c list_add_tail(&ior->head, &disp->ior); head 18 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h struct list_head head; head 34 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h unsigned head:8; head 66 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h void (*ctrl)(struct nvkm_ior *, int head, bool enable, head 69 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h void (*scdc)(struct nvkm_ior *, int head, u8 scdc); head 79 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h void (*vcpi)(struct nvkm_ior *, int head, u8 slot, head 81 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h void (*audio)(struct nvkm_ior *, int head, bool enable); head 82 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h void (*audio_sym)(struct nvkm_ior *, int head, u16 h, u32 v); head 83 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h void (*activesym)(struct nvkm_ior *, int head, head 85 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h void (*watermark)(struct nvkm_ior *, int head, u8 watermark); head 89 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h void (*hpd)(struct nvkm_ior *, int head, bool present); head 35 drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp77.c .head = { .cnt = nv50_head_cnt, .new = nv50_head_new }, head 35 drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c .head = { .cnt = nv50_head_cnt, .new = nv50_head_new }, head 96 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c disp->head.nr = func->head.cnt(&disp->base, &disp->head.mask); head 98 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c disp->head.nr, disp->head.mask); head 99 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c for_each_set_bit(i, &disp->head.mask, disp->head.nr) { head 100 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c ret = func->head.new(&disp->base, i); head 182 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_iedt(struct nvkm_head *head, struct nvkm_outp *outp, head 186 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c struct nvkm_bios *bios = head->disp->engine.subdev.device->bios; head 189 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c const u16 m = (0x0100 << head->id) | (l << 6) | outp->info.or; head 197 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_ied_on(struct nvkm_head *head, head 200 
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c struct nvkm_subdev *subdev = &head->disp->engine.subdev; head 214 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt); head 221 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (head->asy.or.depth == 24) head 248 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c init.head = head->id; head 253 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_ied_off(struct nvkm_head *head, struct nvkm_ior *ior, int id) head 265 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt); head 269 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nvbios_init(&head->disp->engine.subdev, iedt.script[id], head 273 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c init.head = head->id; head 278 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_ior_asy(struct nvkm_head *head) head 281 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c list_for_each_entry(ior, &head->disp->ior, head) { head 282 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (ior->asy.head & (1 << head->id)) { head 283 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c HEAD_DBG(head, "to %s", ior->name); head 287 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c HEAD_DBG(head, "nothing to attach"); head 292 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_ior_arm(struct nvkm_head *head) head 295 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c list_for_each_entry(ior, &head->disp->ior, head) { head 296 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (ior->arm.head & (1 << head->id)) { head 297 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c HEAD_DBG(head, "on %s", ior->name); head 301 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c HEAD_DBG(head, "nothing attached"); head 306 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_3_0(struct nv50_disp *disp, struct nvkm_head *head) head 311 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c HEAD_DBG(head, "supervisor 3.0"); head 312 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c ior = nv50_disp_super_ior_asy(head); head 317 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_ied_on(head, ior, 1, head->asy.hz / 1000); head 325 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_2_2_dp(struct nvkm_head *head, struct nvkm_ior *ior) head 327 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c struct nvkm_subdev *subdev = &head->disp->engine.subdev; head 328 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c const u32 khz = head->asy.hz / 1000; head 338 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c h = head->asy.hblanke + head->asy.htotal - head->asy.hblanks - 7; head 344 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c v = head->asy.vblanks - head->asy.vblanke - 25; head 349 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c ior->func->dp.audio_sym(ior, head->id, h, v); head 352 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c link_data_rate = (khz * head->asy.or.depth / 8) / ior->dp.nr; head 414 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c ior->func->dp.activesym(ior, head->id, bestTU, head 427 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c ior->func->dp.watermark(ior, head->id, unk); head 431 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_2_2(struct nv50_disp *disp, struct nvkm_head *head) head 433 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c const u32 khz = head->asy.hz / 1000; head 438 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c HEAD_DBG(head, 
"supervisor 2.2"); head 439 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c ior = nv50_disp_super_ior_asy(head); head 453 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c head->asy.or.depth = (disp->sor.lvdsconf & 0x0200) ? 24 : 18; head 462 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_ied_on(head, ior, 0, khz); head 465 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c head->func->rgclk(head, ior->asy.rgdiv); head 469 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_2_2_dp(head, ior); head 478 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_2_1(struct nv50_disp *disp, struct nvkm_head *head) head 481 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c const u32 khz = head->asy.hz / 1000; head 482 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c HEAD_DBG(head, "supervisor 2.1 - %d khz", khz); head 484 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head->id, khz); head 488 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head) head 494 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c HEAD_DBG(head, "supervisor 2.0"); head 495 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c ior = nv50_disp_super_ior_arm(head); head 500 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_ied_off(head, ior, 2); head 505 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (ior->arm.head == (1 << head->id)) { head 512 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_1_0(struct nv50_disp *disp, struct nvkm_head *head) head 517 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c HEAD_DBG(head, "supervisor 1.0"); head 518 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c ior = nv50_disp_super_ior_arm(head); head 523 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_ied_off(head, ior, 1); head 529 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c struct nvkm_head *head; head 532 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c list_for_each_entry(head, &disp->base.head, head) { head 533 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c head->func->state(head, &head->arm); head 534 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c head->func->state(head, &head->asy); head 537 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c list_for_each_entry(ior, &disp->base.ior, head) { head 550 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c struct nvkm_head *head; head 558 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c list_for_each_entry(head, &disp->base.head, head) { head 559 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (!(super & (0x00000020 << head->id))) head 561 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (!(super & (0x00000080 << head->id))) head 563 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_1_0(disp, head); head 567 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c list_for_each_entry(head, &disp->base.head, head) { head 568 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (!(super & (0x00000080 << head->id))) head 570 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_2_0(disp, head); head 573 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c list_for_each_entry(head, &disp->base.head, head) { head 574 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (!(super & (0x00000200 << head->id))) head 576 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_2_1(disp, head); head 578 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c list_for_each_entry(head, &disp->base.head, head) { head 579 
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (!(super & (0x00000080 << head->id))) head 581 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_2_2(disp, head); head 585 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c list_for_each_entry(head, &disp->base.head, head) { head 586 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (!(super & (0x00000080 << head->id))) head 588 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nv50_disp_super_3_0(disp, head); head 696 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c struct nvkm_head *head; head 708 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c list_for_each_entry(head, &disp->base.head, head) { head 709 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800)); head 710 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp); head 711 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800)); head 712 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nvkm_wr32(device, 0x610194 + (head->id * 0x10), tmp); head 713 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c tmp = nvkm_rd32(device, 0x616108 + (head->id * 0x800)); head 714 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nvkm_wr32(device, 0x610198 + (head->id * 0x10), tmp); head 715 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c tmp = nvkm_rd32(device, 0x61610c + (head->id * 0x800)); head 716 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c nvkm_wr32(device, 0x61019c + (head->id * 0x10), tmp); head 765 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c .head = { .cnt = nv50_head_cnt, .new = nv50_head_new }, head 23 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h } wndw, head, dac; head 67 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h } wndw, head, dac, sor, pior; head 42 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c int head, ret = -ENOSYS; head 47 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c args->v0.version, args->v0.head); head 48 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c if (!nvkm_head_find(&disp->base, args->v0.head)) head 50 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c head = args->v0.head; head 54 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c return nv50_disp_chan_new_(func, NULL, disp, ctrl + head, user + head, head 55 drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c head, oclass, pobject); head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c list_for_each_entry(ior, &disp->ior, head) { head 46 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c list_for_each_entry(ior, &disp->ior, head) { head 143 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c list_for_each_entry(ior, &outp->disp->ior, head) { head 149 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c list_for_each_entry(ior, &outp->disp->ior, head) { head 159 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c list_for_each_entry(ior, &outp->disp->ior, head) { head 216 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c if (!ior->arm.head || ior->arm.proto != proto) { head 217 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c OUTP_DBG(outp, "no heads (%x %d %d)", ior->arm.head, head 17 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h struct list_head head; head 43 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c int head, ret = -ENOSYS; head 50 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c args->v0.version, args->v0.pushbuf, args->v0.head); head 51 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c if (!nvkm_head_find(&disp->base, args->v0.head)) head 
54 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c head = args->v0.head;
head 58 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c return nv50_disp_dmac_new_(func, mthd, disp, chid + head,
head 59 drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlynv50.c head, push, oclass, pobject);
head 78 drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c if (state->head && state == &ior->asy) {
head 79 drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c struct nvkm_head *head =
head 80 drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c nvkm_head_find(ior->disp, __ffs(state->head));
head 81 drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c if (!WARN_ON(!head)) {
head 82 drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c struct nvkm_head_state *state = &head->asy;
head 113 drivers/gpu/drm/nouveau/nvkm/engine/disp/piornv50.c state->head = ctrl & 0x00000003;
head 11 drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h void nvkm_disp_vblank(struct nvkm_disp *, int head);
head 46 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c struct nvkm_head *head;
head 52 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c args->v0.version, args->v0.method, args->v0.head);
head 54 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c id = args->v0.head;
head 58 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c if (!(head = nvkm_head_find(root->disp, id)))
head 63 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv04.c return nvkm_head_mthd_scanoutpos(object, head, data, size);
head 46 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c struct nvkm_head *head;
head 56 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c args->v0.version, args->v0.method, args->v0.head);
head 58 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c hidx = args->v0.head;
head 72 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c if (!(head = nvkm_head_find(&disp->base, hidx)))
head 76 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c list_for_each_entry(temp, &disp->base.outp, head) {
head 89 drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c return nvkm_head_mthd_scanoutpos(object, head, data, size);
head 29 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c g94_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c g94_sor_dp_activesym(struct nvkm_ior *sor, int head,
head 49 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c g94_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
head 147 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c list_for_each_entry(ior, &disp->ior, head) {
head 256 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorg94.c state->head = ctrl & 0x00000003;
head 29 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c gf119_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
head 32 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c const u32 hoff = head * 0x800;
head 37 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c gf119_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
head 40 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c const u32 hoff = head * 0x800;
head 46 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c gf119_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
head 49 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c const u32 hoff = 0x800 * head;
head 60 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c gf119_sor_dp_vcpi(struct nvkm_ior *sor, int head,
head 64 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c const u32 hoff = head * 0x800;
head 155 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c state->head = ctrl & 0x0000000f;
head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgt215.c gt215_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
head 30 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c const u32 hoff = head * 0x800;
head 35 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
head 38 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c const u32 hoff = head * 0x800;
head 44 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
head 47 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c const u32 hoff = 0x800 * head;
head 77 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c state->head = ctrl & 0x000000ff;
head 84 drivers/gpu/drm/nouveau/nvkm/engine/disp/sornv50.c state->head = ctrl & 0x00000003;
head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c tu102_sor_dp_vcpi(struct nvkm_ior *sor, int head,
head 31 drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c const u32 hoff = head * 0x800;
head 35 drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c struct nvkm_head *head;
head 61 drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c list_for_each_entry(head, &disp->base.head, head) {
head 62 drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c const int id = head->id;
head 109 drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
head 122 drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c list_for_each_entry(head, &disp->base.head, head) {
head 123 drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c const u32 hoff = head->id * 4;
head 143 drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
head 27 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_rdport(struct nvkm_device *device, int head, u16 port)
head 35 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c return nvkm_rd08(device, 0x601000 + (head * 0x2000) + port);
head 41 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c head = 0; /* CR44 selects head */
head 42 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c return nvkm_rd08(device, 0x0c0000 + (head * 0x2000) + port);
head 49 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrport(struct nvkm_device *device, int head, u16 port, u8 data)
head 57 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wr08(device, 0x601000 + (head * 0x2000) + port, data);
head 63 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c head = 0; /* CR44 selects head */
head 64 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wr08(device, 0x0c0000 + (head * 0x2000) + port, data);
head 69 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_rdvgas(struct nvkm_device *device, int head, u8 index)
head 71 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrport(device, head, 0x03c4, index);
head 72 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c return nvkm_rdport(device, head, 0x03c5);
head 76 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrvgas(struct nvkm_device *device, int head, u8 index, u8 value)
head 78 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrport(device, head, 0x03c4, index);
head 79 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrport(device, head, 0x03c5, value);
head 83 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_rdvgag(struct nvkm_device *device, int head, u8 index)
head 85 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrport(device, head, 0x03ce, index);
head 86 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c return nvkm_rdport(device, head, 0x03cf);
head 90 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrvgag(struct nvkm_device *device, int head, u8 index, u8 value)
head 92 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrport(device, head, 0x03ce, index);
head 93 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrport(device, head, 0x03cf, value);
head 97 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_rdvgac(struct nvkm_device *device, int head, u8 index)
head 99 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrport(device, head, 0x03d4, index);
head 100 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c return nvkm_rdport(device, head, 0x03d5);
head 104 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrvgac(struct nvkm_device *device, int head, u8 index, u8 value)
head 106 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrport(device, head, 0x03d4, index);
head 107 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrport(device, head, 0x03d5, value);
head 111 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_rdvgai(struct nvkm_device *device, int head, u16 port, u8 index)
head 113 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c if (port == 0x03c4) return nvkm_rdvgas(device, head, index);
head 114 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c if (port == 0x03ce) return nvkm_rdvgag(device, head, index);
head 115 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c if (port == 0x03d4) return nvkm_rdvgac(device, head, index);
head 120 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c nvkm_wrvgai(struct nvkm_device *device, int head, u16 port, u8 index, u8 value)
head 122 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c if (port == 0x03c4) nvkm_wrvgas(device, head, index, value);
head 123 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c else if (port == 0x03ce) nvkm_wrvgag(device, head, index, value);
head 124 drivers/gpu/drm/nouveau/nvkm/engine/disp/vga.c else if (port == 0x03d4) nvkm_wrvgac(device, head, index, value);
head 33 drivers/gpu/drm/nouveau/nvkm/engine/disp/wimmgv100.c const u32 mask = 0x00000001 << chan->head;
head 134 drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c const u32 mask = 0x00000001 << chan->head;
head 80 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c list_for_each_entry(chan, &fifo->chan, head) {
head 82 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c list_del(&chan->head);
head 83 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c list_add(&chan->head, &fifo->chan);
head 110 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c list_for_each_entry(chan, &fifo->chan, head) {
head 112 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c list_del(&chan->head);
head 113 drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c list_add(&chan->head, &fifo->chan);
head 7 drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.h struct list_head head;
head 321 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c if (!list_empty(&chan->head)) {
head 323 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c list_del(&chan->head);
head 369 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c INIT_LIST_HEAD(&chan->head);
head 411 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c list_add(&chan->head, &fifo->chan);
head 12 drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h struct list_head head;
head 14 drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h struct list_head head;
head 65 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c list_for_each_entry(chan, &fifo->chan, head) {
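The vga.c entries above all reduce to one idiom: legacy VGA registers are indexed, so an access first writes the register index to an index port (0x03c4 for the sequencer, 0x03ce for graphics, 0x03d4 for the CRTC) and then transfers data through the port one byte higher, while the head argument selects which per-head copy of the register block is touched via a 0x2000 stride from 0x601000. A minimal self-contained sketch of that sequence follows; mmio_rd08/mmio_wr08 and the fake register window are hypothetical stand-ins for nvkm_rd08/nvkm_wr08 on a mapped device, and the window does not model per-index storage, only the access order.

#include <stdint.h>
#include <stdio.h>

/* Fake register window standing in for real MMIO (hypothetical;
 * nvkm would use nvkm_rd08()/nvkm_wr08() on the mapped device). */
static uint8_t mmio[0x8000]; /* covers 0x600000..0x607fff */
static uint8_t mmio_rd08(uint32_t a) { return mmio[a - 0x600000]; }
static void mmio_wr08(uint32_t a, uint8_t v) { mmio[a - 0x600000] = v; }

/* Each head has its own copy of the VGA registers, 0x2000 apart,
 * based at 0x601000 (the layout used by nvkm_rdport/nvkm_wrport). */
static uint32_t vga_port(int head, uint16_t port)
{
	return 0x601000 + (uint32_t)head * 0x2000 + port;
}

/* Indexed access: write the index to the index port, then move the
 * data through the data port at index port + 1 (0x03c5/0x03cf/0x03d5). */
static uint8_t rdvga(int head, uint16_t iport, uint8_t index)
{
	mmio_wr08(vga_port(head, iport), index);
	return mmio_rd08(vga_port(head, iport + 1));
}

static void wrvga(int head, uint16_t iport, uint8_t index, uint8_t value)
{
	mmio_wr08(vga_port(head, iport), index);
	mmio_wr08(vga_port(head, iport + 1), value);
}

int main(void)
{
	wrvga(1, 0x03d4, 0x28, 0x5a); /* CRTC register 0x28 on head 1 */
	printf("%02x\n", rdvga(1, 0x03d4, 0x28)); /* prints 5a */
	return 0;
}

The nvkm_rdvgai()/nvkm_wrvgai() lines in the listing are then just this helper dispatched on the three index ports, which is all their if/else chains do.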
head 96 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c list_del_init(&chan->head);
head 104 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c list_add_tail(&chan->head, &fifo->chan);
head 188 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c list_del_init(&chan->head);
head 336 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c list_for_each_entry(chan, &fifo->chan, head) {
head 193 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
head 197 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
head 199 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_for_each_entry(chan, &cgrp->chan, head) {
head 214 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c if (!list_empty(&chan->head)) {
head 215 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_del_init(&chan->head);
head 217 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_del_init(&cgrp->head);
head 229 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
head 230 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_add_tail(&chan->head, &cgrp->chan);
head 232 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
head 335 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
head 337 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_del_init(&chan->head);
head 342 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
head 344 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
head 345 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_del_init(&chan->head);
head 347 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c list_del_init(&cgrp->head);
head 166 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c if (!list_empty(&chan->head) && !chan->killed) {
head 188 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c if (list_empty(&chan->head) && !chan->killed) {
head 242 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c INIT_LIST_HEAD(&chan->head);
head 191 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c if (!list_empty(&chan->head)) {
head 213 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c if (list_empty(&chan->head) && !chan->killed) {
head 273 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c INIT_LIST_HEAD(&chan->head);
head 292 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c INIT_LIST_HEAD(&chan->cgrp->head);
head 153 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c INIT_LIST_HEAD(&chan->head);
head 172 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c INIT_LIST_HEAD(&chan->cgrp->head);
head 1664 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c u32 head = init->addr - base;
head 1665 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c u32 tail = head + init->count * init->pitch;
head 1666 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c while (head < tail) {
head 1667 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c if (head != prev + 4 || xfer >= 32) {
head 1673 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c addr = head;
head 1676 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c prev = head;
head 1678 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c head = head + init->pitch;
head 292 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h #define pack_for_each_init(init, pack, head) \
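Most of the fifo/, gr/, mpeg/, pm/ and sw/ entries in this stretch are uses of the kernel's intrusive struct list_head, where the link lives inside the object and container_of() recovers the object from the link. The fifo/base.c lines additionally show a small trick: after a successful lookup the channel is unlinked and re-added at the front, so the hottest channel is found first next time. Below is a self-contained userspace re-creation of both, with kernel-style names but deliberately simplified semantics (no locking, open-coded traversal instead of the list_for_each_entry macro).

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	/* insert n right after h (the kernel's list_add) */
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct chan { int chid; struct list_head head; };

/* Lookup-and-move-to-front, as in nvkm fifo/base.c: the channel just
 * found is kept at the head of the list as a one-entry cache. */
static struct chan *chan_find(struct list_head *chans, int chid)
{
	struct list_head *p;
	for (p = chans->next; p != chans; p = p->next) {
		struct chan *c = container_of(p, struct chan, head);
		if (c->chid == chid) {
			list_del(&c->head);
			list_add(&c->head, chans);
			return c;
		}
	}
	return NULL;
}

int main(void)
{
	struct list_head chans;
	struct chan a = { .chid = 3 }, b = { .chid = 7 };
	list_init(&chans);
	list_add(&a.head, &chans);
	list_add(&b.head, &chans);
	printf("found chid %d\n", chan_find(&chans, 3)->chid);
	return 0;
}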
head 293 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h for (pack = head; pack && pack->init; pack++) \
head 135 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c list_del(&chan->head);
head 163 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c list_add(&chan->head, &gr->chan);
head 252 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c list_for_each_entry(temp, &gr->chan, head) {
head 255 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c list_del(&chan->head);
head 256 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c list_add(&chan->head, &gr->chan);
head 27 drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h struct list_head head;
head 47 drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c struct list_head head;
head 90 drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c list_del(&chan->head);
head 119 drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c list_add(&chan->head, &mpeg->chan);
head 158 drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c list_for_each_entry(temp, &mpeg->chan, head) {
head 161 drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c list_del(&chan->head);
head 162 drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c list_add(&chan->head, &mpeg->chan);
head 41 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_for_each_entry(dom, &pm->domains, head)
head 67 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_for_each_entry(dom, &pm->domains, head) {
head 119 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_for_each_entry(src, &pm->sources, head) {
head 255 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_for_each_entry(dom, &pm->domains, head)
head 320 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c if (ctr->head.next)
head 321 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_del(&ctr->head);
head 354 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_add_tail(&ctr->head, &dom->list);
head 724 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_for_each_entry(src, &pm->sources, head) {
head 753 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_add_tail(&src->head, &pm->sources);
head 796 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_add_tail(&dom->head, &pm->domains);
head 838 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_for_each_entry_safe(dom, next_dom, &pm->domains, head) {
head 839 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_del(&dom->head);
head 843 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_for_each_entry_safe(src, next_src, &pm->sources, head) {
head 844 drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c list_del(&src->head);
head 15 drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h struct list_head head;
head 38 drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h struct list_head head;
head 76 drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h struct list_head head;
head 37 drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c list_for_each_entry(chan, &sw->chan, head) {
head 40 drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c list_del(&chan->head);
head 41 drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c list_add(&chan->head, &sw->chan);
head 85 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c list_del(&chan->head);
head 107 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c list_add(&chan->head, &sw->chan);
head 15 drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h struct list_head head;
head 126 drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c .head = i,
head 119 drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c .head = i,
head 63 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c u32 head, tail;
head 65 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c head = nvkm_falcon_rd32(falcon, queue->head_reg);
head 68 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c return head == tail;
head 77 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c u32 head, tail, available;
head 79 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c head = nvkm_falcon_rd32(falcon, queue->head_reg);
head 81 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c if (head < queue->position)
head 86 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c available = head - tail;
head 159 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c u32 head, tail, free;
head 163 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c head = nvkm_falcon_rd32(falcon, queue->head_reg);
head 166 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c if (head >= tail) {
head 167 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c free = queue->offset + queue->size - head;
head 172 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c head = queue->offset;
head 176 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c if (head < tail)
head 177 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c free = tail - head - 1;
head 114 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c if (init->head >= 0)
head 115 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c return init->head;
head 217 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c return nvkm_rdport(init->subdev->device, init->head, port);
head 225 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c nvkm_wrport(init->subdev->device, init->head, port, value);
head 233 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c int head = init->head < 0 ? 0 : init->head;
head 234 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c return nvkm_rdvgai(subdev->device, head, port, index);
head 247 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c init->head = 0;
head 251 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c int head = init->head < 0 ? 0 : init->head;
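The falcon/msgqueue.c entries compute the writable space in a circular command queue occupying [offset, offset + size) of falcon memory, with head as the producer pointer and tail as the consumer pointer, both read back from hardware registers. When head >= tail the free run extends to the end of the ring and the producer may rewind to offset; when head < tail one byte is deliberately left unused so that head == tail always means empty, never full. A sketch of that computation follows; the queue geometry is inferred from the lines above rather than taken from the driver's headers, so treat the struct as illustrative.

#include <stdint.h>
#include <stdio.h>

struct queue {
	uint32_t offset; /* first byte of the ring */
	uint32_t size;   /* ring size in bytes */
	uint32_t head;   /* producer position (absolute) */
	uint32_t tail;   /* consumer position (absolute) */
};

/* Contiguous bytes writable at head for a command of `need` bytes.
 * If the run to the end of the ring is too short, the producer
 * rewinds to ->offset and retries; in the wrapped case one byte
 * stays unused so head == tail is unambiguously "empty". */
static uint32_t queue_free(const struct queue *q, uint32_t need)
{
	uint32_t head = q->head, tail = q->tail, free;

	if (head >= tail) {
		free = q->offset + q->size - head;
		if (free < need) {
			head = q->offset; /* wrap and retry */
			free = (head < tail) ? tail - head - 1 : 0;
		}
	} else {
		free = tail - head - 1;
	}
	return free;
}

int main(void)
{
	struct queue q = { .offset = 0, .size = 64, .head = 60, .tail = 8 };
	printf("%u writable bytes\n", queue_free(&q, 16)); /* wraps: prints 7 */
	return 0;
}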
head 252 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c nvkm_wrvgai(device, head, port, index, value);
head 258 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c init->head = 1;
head 136 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_for_each_entry_from_reverse(cstate, &pstate->list, head) {
head 149 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c return list_last_entry(&pstate->list, typeof(*cstate), head);
head 151 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_for_each_entry(cstate, &pstate->list, head) {
head 218 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_del(&cstate->head);
head 257 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_add(&cstate->head, &pstate->list);
head 273 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_for_each_entry(pstate, &clk->states, head) {
head 364 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_for_each_entry(cstate, &pstate->list, head) {
head 392 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_for_each_entry_safe(cstate, temp, &pstate->list, head) {
head 396 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_del(&pstate->head);
head 463 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_add_tail(&pstate->head, &clk->states);
head 481 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_for_each_entry(pstate, &clk->states, head) {
head 636 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_for_each_entry_safe(pstate, temp, &clk->states, head) {
head 689 drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c list_add_tail(&func->pstates[idx].head, &clk->states);
head 34 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c int head = type - PLL_VPLL0;
head 51 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c nvkm_wr32(device, 0x00ef10 + (head * 0x40), fN << 16);
head 52 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gv100.c nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) |
head 34 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c int head = type - PLL_VPLL0;
head 51 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c nvkm_wr32(device, 0x00ef10 + (head * 0x40), fN << 16);
head 52 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) |
head 56 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c nvkm_wr32(device, 0x00ef0c + (head * 0x40), 0x00000900);
head 57 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02000014);
head 968 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c list_for_each_entry(cfg, &ram->cfg, head) {
head 1162 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c list_for_each_entry(cfg, &ram->cfg, head) {
head 1168 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c if (&cfg->head == &ram->cfg)
head 1451 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c p = &list_last_entry(&ram->cfg, typeof(*cfg), head)->bios;
head 1480 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c list_add_tail(&cfg->head, &ram->cfg);
head 1513 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c list_for_each_entry_safe(cfg, tmp, &ram->cfg, head) {
head 157 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c list_del(&aux->head);
head 193 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c list_add_tail(&aux->head, &pad->i2c->aux);
head 40 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(pad, &i2c->pad, head) {
head 68 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(bus, &i2c->bus, head) {
head 81 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(aux, &i2c->aux, head) {
head 142 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(aux, &i2c->aux, head) {
head 167 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(aux, &i2c->aux, head) {
head 171 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(bus, &i2c->bus, head) {
head 180 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(pad, &i2c->pad, head) {
head 198 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(pad, &i2c->pad, head)
head 200 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(bus, &i2c->bus, head)
head 214 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(pad, &i2c->pad, head) {
head 218 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(bus, &i2c->bus, head) {
head 222 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_for_each_entry(aux, &i2c->aux, head) {
head 238 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_first_entry(&i2c->aux, typeof(*aux), head);
head 244 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_first_entry(&i2c->bus, typeof(*bus), head);
head 250 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c list_first_entry(&i2c->pad, typeof(*pad), head);
head 199 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c list_del(&bus->head);
head 225 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c list_add_tail(&bus->head, &pad->i2c->bus);
head 89 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c list_del(&pad->head);
head 104 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.c list_add_tail(&pad->head, &i2c->pad);
head 20 drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h struct list_head head;
head 117 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c list_for_each_entry(rail, &iccsense->rails, head) {
head 137 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c list_for_each_entry_safe(sensor, tmps, &iccsense->sensors, head) {
head 138 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c list_del(&sensor->head);
head 141 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c list_for_each_entry_safe(rail, tmpr, &iccsense->rails, head) {
head 142 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c list_del(&rail->head);
head 195 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c list_add_tail(&sensor->head, &iccsense->sensors);
head 208 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c list_for_each_entry(sensor, &iccsense->sensors, head) {
head 291 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c list_add_tail(&rail->head, &iccsense->rails);
head 302 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c list_for_each_entry(sensor, &iccsense->sensors, head)
head 9 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h struct list_head head;
head 18 drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h struct list_head head;
head 77 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c list_del(&iobj->head);
head 88 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c list_add_tail(&iobj->head, &imem->list);
head 153 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c list_for_each_entry_safe(iobj, itmp, &imem->list, head) {
head 154 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c list_move_tail(&iobj->head, &imem->boot);
head 166 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c list_for_each_entry(iobj, &imem->list, head) {
head 174 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c list_for_each_entry(iobj, &imem->boot, head) {
head 193 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c list_for_each_entry(iobj, &imem->boot, head) {
head 200 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c list_for_each_entry(iobj, &imem->list, head) {
head 26 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h struct list_head head;
head 35 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c struct list_head head;
head 51 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_add(&ptp->head, &mmu->ptp.list);
head 57 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_del(&ptp->head);
head 74 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head);
head 93 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_add(&ptp->head, &mmu->ptp.list);
head 104 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_del(&ptp->head);
head 113 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c struct list_head head;
head 124 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_for_each_entry(ptc, &mmu->ptc.list, head) {
head 134 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_add(&ptc->head, &mmu->ptc.list);
head 156 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_add_tail(&pt->head, &pt->ptc->item);
head 190 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c pt = list_first_entry_or_null(&ptc->item, typeof(*pt), head);
head 194 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_del(&pt->head);
head 223 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_for_each_entry(ptc, &mmu->ptc.list, head) {
head 225 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_for_each_entry_safe(pt, tt, &ptc->item, head) {
head 227 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_del(&pt->head);
head 238 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) {
head 240 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c list_del(&ptc->head);
head 59 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h struct list_head head;
head 46 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c list_for_each_entry(umem, &master->umem, head) {
head 129 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c list_del_init(&umem->head);
head 171 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c INIT_LIST_HEAD(&umem->head);
head 185 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c list_add(&umem->head, &umem->object.client->umem);
head 16 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h struct list_head head;
head 780 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c list_add(&new->head, &vma->head);
head 794 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c list_del(&vma->head);
head 836 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c list_del(&vma->head);
head 879 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c #define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL : \
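The mmu/base.c entries above implement a small object cache: freed page-table objects are pushed onto a list with list_add(), and allocation first tries list_first_entry_or_null() before falling back to a fresh allocation. The sketch below reproduces that pop-or-allocate shape with a plain singly-linked free list standing in for list_head; the struct contents and names are illustrative, not the driver's.

#include <stdlib.h>

struct pt {
	struct pt *next; /* free-list linkage */
	/* ...page-table payload would live here... */
};

struct pt_cache { struct pt *free; };

static struct pt *pt_get(struct pt_cache *c)
{
	struct pt *pt = c->free;
	if (pt) { /* cache hit: unlink and reuse */
		c->free = pt->next;
		return pt;
	}
	return calloc(1, sizeof(*pt)); /* miss: allocate fresh */
}

static void pt_put(struct pt_cache *c, struct pt *pt)
{
	pt->next = c->free; /* push back for later reuse */
	c->free = pt;
}

int main(void)
{
	struct pt_cache c = { 0 };
	struct pt *a = pt_get(&c); /* miss: fresh allocation */
	pt_put(&c, a);
	return pt_get(&c) == a ? 0 : 1; /* hit: same object comes back */
}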
head 880 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c list_entry((root)->head.dir, struct nvkm_vma, head))
head 972 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c list_for_each_entry(vma, &vmm->list, head) {
head 1002 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c vma = list_first_entry(&vmm->list, typeof(*vma), head);
head 1003 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c list_del(&vma->head);
head 1029 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c list_add_tail(&vma->head, &vmm->list);
head 1112 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c list_add_tail(&vma->head, &vmm->list);
head 1133 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c list_add(&vma->head, &vmm->list);
head 159 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h struct list_head head;
head 154 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c list_for_each_entry(join, &vmm->join, head) {
head 329 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c list_for_each_entry(join, &vmm->join, head) {
head 331 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c list_del(&join->head);
head 350 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c list_add_tail(&join->head, &vmm->join);
head 28 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c #define pack_for_each_init(init, pack, head) \
head 29 drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c for (pack = head; pack && pack->init; pack++) \
head 77 drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
head 89 drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c list_del_init(&alarm->head);
head 117 drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c list_del_init(&alarm->head);
head 122 drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c list_for_each_entry(list, &tmr->alarms, head) {
head 127 drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c list_add_tail(&alarm->head, &list->head);
head 130 drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c list = list_first_entry(&tmr->alarms, typeof(*list), head);
head 38 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c list_add_tail(&info->head, &top->device);
head 50 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c list_for_each_entry(info, &top->device, head) {
head 66 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c list_for_each_entry(info, &top->device, head) {
head 82 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c list_for_each_entry(info, &top->device, head) {
head 100 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c list_for_each_entry(info, &top->device, head) {
head 120 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c list_for_each_entry(info, &top->device, head) {
head 134 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c list_for_each_entry(info, &top->device, head) {
head 149 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c list_for_each_entry(info, &top->device, head) {
head 173 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c list_for_each_entry_safe(info, temp, &top->device, head) {
head 174 drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c list_del(&info->head);
head 22 drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h struct list_head head;
head 56 drivers/gpu/drm/omapdrm/omap_debugfs.c list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
head 109 drivers/gpu/drm/omapdrm/omap_encoder.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 39 drivers/gpu/drm/qxl/qxl_display.c static bool qxl_head_enabled(struct qxl_head *head)
head 41 drivers/gpu/drm/qxl/qxl_display.c return head->width && head->height;
head 148 drivers/gpu/drm/qxl/qxl_display.c struct qxl_head *head;
head 150 drivers/gpu/drm/qxl/qxl_display.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 153 drivers/gpu/drm/qxl/qxl_display.c head = &qdev->client_monitors_config->heads[output->index];
head 156 drivers/gpu/drm/qxl/qxl_display.c dev->mode_config.suggested_x_property, head->x);
head 158 drivers/gpu/drm/qxl/qxl_display.c dev->mode_config.suggested_y_property, head->y);
head 248 drivers/gpu/drm/qxl/qxl_display.c struct qxl_head *head;
head 259 drivers/gpu/drm/qxl/qxl_display.c head = &qdev->client_monitors_config->heads[h];
head 260 drivers/gpu/drm/qxl/qxl_display.c DRM_DEBUG_KMS("head %d is %dx%d\n", h, head->width, head->height);
head 262 drivers/gpu/drm/qxl/qxl_display.c return qxl_add_mode(connector, head->width, head->height, true);
head 296 drivers/gpu/drm/qxl/qxl_display.c struct qxl_head *head = &qdev->monitors_config->heads[i];
head 298 drivers/gpu/drm/qxl/qxl_display.c if (head->y > 8192 || head->x > 8192 ||
head 299 drivers/gpu/drm/qxl/qxl_display.c head->width > 8192 || head->height > 8192) {
head 301 drivers/gpu/drm/qxl/qxl_display.c i, head->width, head->height,
head 302 drivers/gpu/drm/qxl/qxl_display.c head->x, head->y);
head 315 drivers/gpu/drm/qxl/qxl_display.c struct qxl_head head;
head 326 drivers/gpu/drm/qxl/qxl_display.c head.id = i;
head 327 drivers/gpu/drm/qxl/qxl_display.c head.flags = 0;
head 332 drivers/gpu/drm/qxl/qxl_display.c head.width = mode->hdisplay;
head 333 drivers/gpu/drm/qxl/qxl_display.c head.height = mode->vdisplay;
head 334 drivers/gpu/drm/qxl/qxl_display.c head.x = crtc->x;
head 335 drivers/gpu/drm/qxl/qxl_display.c head.y = crtc->y;
head 339 drivers/gpu/drm/qxl/qxl_display.c head.x += qdev->dumb_heads[i].x;
head 341 drivers/gpu/drm/qxl/qxl_display.c head.width = 0;
head 342 drivers/gpu/drm/qxl/qxl_display.c head.height = 0;
head 343 drivers/gpu/drm/qxl/qxl_display.c head.x = 0;
head 344 drivers/gpu/drm/qxl/qxl_display.c head.y = 0;
head 352 drivers/gpu/drm/qxl/qxl_display.c if (head.width == qdev->monitors_config->heads[i].width &&
head 353 drivers/gpu/drm/qxl/qxl_display.c head.height == qdev->monitors_config->heads[i].height &&
head 354 drivers/gpu/drm/qxl/qxl_display.c head.x == qdev->monitors_config->heads[i].x &&
head 355 drivers/gpu/drm/qxl/qxl_display.c head.y == qdev->monitors_config->heads[i].y &&
head 360 drivers/gpu/drm/qxl/qxl_display.c i, head.width, head.height, head.x, head.y,
head 367 drivers/gpu/drm/qxl/qxl_display.c qdev->monitors_config->heads[i] = head;
head 751 drivers/gpu/drm/qxl/qxl_display.c struct qxl_head *head;
head 756 drivers/gpu/drm/qxl/qxl_display.c head = qdev->dumb_heads + i;
head 757 drivers/gpu/drm/qxl/qxl_display.c head->x = surf->width;
head 758 drivers/gpu/drm/qxl/qxl_display.c surf->width += head->width;
head 759 drivers/gpu/drm/qxl/qxl_display.c if (surf->height < head->height)
head 760 drivers/gpu/drm/qxl/qxl_display.c surf->height = head->height;
head 988 drivers/gpu/drm/qxl/qxl_display.c struct qxl_head *head;
head 989 drivers/gpu/drm/qxl/qxl_display.c head = &qdev->client_monitors_config->heads[output->index];
head 990 drivers/gpu/drm/qxl/qxl_display.c if (head->width)
head 991 drivers/gpu/drm/qxl/qxl_display.c pwidth = head->width;
head 992 drivers/gpu/drm/qxl/qxl_display.c if (head->height)
head 993 drivers/gpu/drm/qxl/qxl_display.c pheight = head->height;
head 161 drivers/gpu/drm/qxl/qxl_drv.h struct list_head head;
head 51 drivers/gpu/drm/qxl/qxl_image.c list_add_tail(&chunk->head, &image->chunk_list);
head 90 drivers/gpu/drm/qxl/qxl_image.c list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
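The qxl_display.c entries above describe a per-head config record (id, flags, width, height, x, y) that is sanity-checked against an 8192-pixel coordinate limit and only written into monitors_config->heads[i] when it actually differs, so the host is not told about no-op updates. Below is a hedged sketch of that compare-before-commit step; the field layout is inferred from the lines above, not taken from the driver's actual struct qxl_head.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Illustrative layout only, inferred from the listing above. */
struct head_cfg {
	uint32_t id, flags;
	uint32_t width, height;
	int32_t x, y;
};

#define HEAD_COORD_MAX 8192 /* limit checked in qxl_display.c */

static bool head_cfg_valid(const struct head_cfg *h)
{
	return h->x <= HEAD_COORD_MAX && h->y <= HEAD_COORD_MAX &&
	       h->width <= HEAD_COORD_MAX && h->height <= HEAD_COORD_MAX;
}

/* Commit a new per-head config only if it differs from what the
 * device already has; returns true when an update was applied, so
 * the caller knows whether the host needs notifying. */
static bool head_cfg_update(struct head_cfg *cur, const struct head_cfg *next)
{
	if (!head_cfg_valid(next))
		return false;
	if (memcmp(cur, next, sizeof(*cur)) == 0)
		return false; /* unchanged: skip the update */
	*cur = *next;
	return true;
}

int main(void)
{
	struct head_cfg cur = { 0 };
	struct head_cfg next = { .width = 1024, .height = 768 };
	return head_cfg_update(&cur, &next) ? 0 : 1; /* first update applies */
}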
head 120 drivers/gpu/drm/qxl/qxl_image.c drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
head 167 drivers/gpu/drm/qxl/qxl_release.c struct qxl_bo_list, tv.head);
head 170 drivers/gpu/drm/qxl/qxl_release.c list_del(&entry->tv.head);
head 213 drivers/gpu/drm/qxl/qxl_release.c list_for_each_entry(entry, &release->bos, tv.head) {
head 225 drivers/gpu/drm/qxl/qxl_release.c list_add_tail(&entry->tv.head, &release->bos);
head 267 drivers/gpu/drm/qxl/qxl_release.c list_for_each_entry(entry, &release->bos, tv.head) {
head 442 drivers/gpu/drm/qxl/qxl_release.c bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
head 458 drivers/gpu/drm/qxl/qxl_release.c list_for_each_entry(entry, &release->bos, head) {
head 782 drivers/gpu/drm/r128/r128_cce.c dev_priv->head = kzalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL);
head 783 drivers/gpu/drm/r128/r128_cce.c if (dev_priv->head == NULL)
head 786 drivers/gpu/drm/r128/r128_cce.c dev_priv->head->age = R128_BUFFER_USED;
head 798 drivers/gpu/drm/r128/r128_cce.c entry->prev = dev_priv->head;
head 799 drivers/gpu/drm/r128/r128_cce.c entry->next = dev_priv->head->next;
head 807 drivers/gpu/drm/r128/r128_cce.c dev_priv->head->next = entry;
head 809 drivers/gpu/drm/r128/r128_cce.c if (dev_priv->head->next)
head 810 drivers/gpu/drm/r128/r128_cce.c dev_priv->head->next->prev = entry;
head 95 drivers/gpu/drm/r128/r128_drv.h drm_r128_freelist_t *head;
head 1742 drivers/gpu/drm/radeon/atombios_crtc.c list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
head 1769 drivers/gpu/drm/radeon/atombios_crtc.c list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
head 1810 drivers/gpu/drm/radeon/atombios_crtc.c list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
head 2106 drivers/gpu/drm/radeon/atombios_crtc.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 2207 drivers/gpu/drm/radeon/atombios_encoders.c list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
head 2251 drivers/gpu/drm/radeon/atombios_encoders.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 2562 drivers/gpu/drm/radeon/atombios_encoders.c list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
head 2735 drivers/gpu/drm/radeon/atombios_encoders.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 92 drivers/gpu/drm/radeon/dce6_afmt.c list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
head 1679 drivers/gpu/drm/radeon/evergreen.c list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
head 1704 drivers/gpu/drm/radeon/evergreen.c list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
head 1769 drivers/gpu/drm/radeon/evergreen.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 1808 drivers/gpu/drm/radeon/evergreen.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 81 drivers/gpu/drm/radeon/mkregtable.c static inline void list_add_tail(struct list_head *new, struct list_head *head)
head 83 drivers/gpu/drm/radeon/mkregtable.c __list_add(new, head->prev, head);
head 101 drivers/gpu/drm/radeon/mkregtable.c #define list_for_each_entry(pos, head, member) \
head 102 drivers/gpu/drm/radeon/mkregtable.c for (pos = list_entry((head)->next, typeof(*pos), member); \
head 103 drivers/gpu/drm/radeon/mkregtable.c &pos->member != (head); \
head 457 drivers/gpu/drm/radeon/r100.c list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
head 488 drivers/gpu/drm/radeon/r100.c list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
head 599 drivers/gpu/drm/radeon/r100.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 622 drivers/gpu/drm/radeon/r100.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 957 drivers/gpu/drm/radeon/r600.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 1024 drivers/gpu/drm/radeon/r600.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 165 drivers/gpu/drm/radeon/r600_dpm.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 191 drivers/gpu/drm/radeon/r600_dpm.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 132 drivers/gpu/drm/radeon/r600_hdmi.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 2836 drivers/gpu/drm/radeon/radeon.h struct list_head *head);
head 743 drivers/gpu/drm/radeon/radeon_acpi.c head) {
head 257 drivers/gpu/drm/radeon/radeon_audio.c list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
head 405 drivers/gpu/drm/radeon/radeon_connectors.c struct drm_display_mode, head);
head 429 drivers/gpu/drm/radeon/radeon_connectors.c list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
head 780 drivers/gpu/drm/radeon/radeon_connectors.c list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
head 790 drivers/gpu/drm/radeon/radeon_connectors.c list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
head 1325 drivers/gpu/drm/radeon/radeon_connectors.c list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
head 1889 drivers/gpu/drm/radeon/radeon_connectors.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 1909 drivers/gpu/drm/radeon/radeon_connectors.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 2360 drivers/gpu/drm/radeon/radeon_connectors.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 2501 drivers/gpu/drm/radeon/radeon_connectors.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 189 drivers/gpu/drm/radeon/radeon_cs.c radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
head 257 drivers/gpu/drm/radeon/radeon_cs.c list_for_each_entry(reloc, &p->validated, tv.head) {
head 400 drivers/gpu/drm/radeon/radeon_cs.c struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
head 401 drivers/gpu/drm/radeon/radeon_cs.c struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
head 185 drivers/gpu/drm/radeon/radeon_cursor.c list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
head 1584 drivers/gpu/drm/radeon/radeon_device.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 1590 drivers/gpu/drm/radeon/radeon_device.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 1713 drivers/gpu/drm/radeon/radeon_device.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 1752 drivers/gpu/drm/radeon/radeon_device.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 639 drivers/gpu/drm/radeon/radeon_display.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
head 778 drivers/gpu/drm/radeon/radeon_display.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 812 drivers/gpu/drm/radeon/radeon_display.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 1700 drivers/gpu/drm/radeon/radeon_display.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 124 drivers/gpu/drm/radeon/radeon_dp_mst.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 337 drivers/gpu/drm/radeon/radeon_dp_mst.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 759 drivers/gpu/drm/radeon/radeon_dp_mst.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 537 drivers/gpu/drm/radeon/radeon_drv.c list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
head 63 drivers/gpu/drm/radeon/radeon_encoders.c list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
head 83 drivers/gpu/drm/radeon/radeon_encoders.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 212 drivers/gpu/drm/radeon/radeon_encoders.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 214 drivers/gpu/drm/radeon/radeon_encoders.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 231 drivers/gpu/drm/radeon/radeon_encoders.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 250 drivers/gpu/drm/radeon/radeon_encoders.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 275 drivers/gpu/drm/radeon/radeon_encoders.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 293 drivers/gpu/drm/radeon/radeon_encoders.c list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
head 563 drivers/gpu/drm/radeon/radeon_gem.c list_add(&tv.head, &list);
head 573 drivers/gpu/drm/radeon/radeon_gem.c list_for_each_entry(entry, &list, head) {
head 95 drivers/gpu/drm/radeon/radeon_irq_kms.c list_for_each_entry(connector, &mode_config->connector_list, head)
head 111 drivers/gpu/drm/radeon/radeon_irq_kms.c list_for_each_entry(connector, &mode_config->connector_list, head)
head 591 drivers/gpu/drm/radeon/radeon_legacy_crtc.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 785 drivers/gpu/drm/radeon/radeon_legacy_crtc.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 1070 drivers/gpu/drm/radeon/radeon_legacy_crtc.c list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
head 1082 drivers/gpu/drm/radeon/radeon_legacy_crtc.c list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
head 1546 drivers/gpu/drm/radeon/radeon_legacy_encoders.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 1752 drivers/gpu/drm/radeon/radeon_legacy_encoders.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 235 drivers/gpu/drm/radeon/radeon_mn.c struct list_head *head;
head 242 drivers/gpu/drm/radeon/radeon_mn.c head = bo->mn_list.next;
head 246 drivers/gpu/drm/radeon/radeon_mn.c if (list_empty(head)) {
head 248 drivers/gpu/drm/radeon/radeon_mn.c node = container_of(head, struct radeon_mn_node, bos);
head 535 drivers/gpu/drm/radeon/radeon_object.c struct list_head *head, int ring)
head 545 drivers/gpu/drm/radeon/radeon_object.c r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
head 550 drivers/gpu/drm/radeon/radeon_object.c list_for_each_entry(lobj, head, tv.head) {
head 589 drivers/gpu/drm/radeon/radeon_object.c ttm_eu_backoff_reservation(ticket, head);
head 597 drivers/gpu/drm/radeon/radeon_object.c list_for_each_entry(lobj, &duplicates, tv.head) {
head 145 drivers/gpu/drm/radeon/radeon_object.h struct list_head *head, int ring);
head 1660 drivers/gpu/drm/radeon/radeon_pm.c &ddev->mode_config.crtc_list, head) {
head 1734 drivers/gpu/drm/radeon/radeon_pm.c &ddev->mode_config.crtc_list, head) {
head 130 drivers/gpu/drm/radeon/radeon_vm.c struct list_head *head)
head 147 drivers/gpu/drm/radeon/radeon_vm.c list_add(&list[0].tv.head, head);
head 159 drivers/gpu/drm/radeon/radeon_vm.c list_add(&list[idx++].tv.head, head);
head 324 drivers/gpu/drm/radeon/rs600.c list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
head 342 drivers/gpu/drm/radeon/rs600.c list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
head 409 drivers/gpu/drm/radeon/rs600.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 436 drivers/gpu/drm/radeon/rs600.c list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
head 739 drivers/gpu/drm/rcar-du/rcar_du_kms.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 115 drivers/gpu/drm/rcar-du/rcar_lvds.c struct drm_display_mode, head);
head 1607 drivers/gpu/drm/rockchip/rockchip_drm_vop.c head)
head 1631 drivers/gpu/drm/rockchip/rockchip_drm_vop.c head)
head 224 drivers/gpu/drm/savage/savage_bci.c dev_priv->head.next = &dev_priv->tail;
head 225 drivers/gpu/drm/savage/savage_bci.c dev_priv->head.prev = NULL;
head 226 drivers/gpu/drm/savage/savage_bci.c dev_priv->head.buf = NULL;
head 229 drivers/gpu/drm/savage/savage_bci.c dev_priv->tail.prev = &dev_priv->head;
head 239 drivers/gpu/drm/savage/savage_bci.c entry->next = dev_priv->head.next;
head 240 drivers/gpu/drm/savage/savage_bci.c entry->prev = &dev_priv->head;
head 241 drivers/gpu/drm/savage/savage_bci.c dev_priv->head.next->prev = entry;
head 242 drivers/gpu/drm/savage/savage_bci.c dev_priv->head.next = entry;
head 293 drivers/gpu/drm/savage/savage_bci.c prev = &dev_priv->head;
head 136 drivers/gpu/drm/savage/savage_drv.h drm_savage_buf_priv_t head, tail;
head 530 drivers/gpu/drm/shmobile/shmob_drm_crtc.c struct drm_display_mode, head);
head 230 drivers/gpu/drm/sis/sis_mm.c list_for_each_entry(entry, &dev->maplist, head) {
head 148 drivers/gpu/drm/sti/sti_crtc.c list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
head 269 drivers/gpu/drm/sti/sti_crtc.c head) {
head 44 drivers/gpu/drm/sti/sti_drv.c list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
head 60 drivers/gpu/drm/sti/sti_drv.c list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
head 81 drivers/gpu/drm/sti/sti_drv.c list_for_each_entry(p, &dev->mode_config.plane_list, head) {
head 429 drivers/gpu/drm/sti/sti_dvo.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 666 drivers/gpu/drm/sti/sti_hda.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 1139 drivers/gpu/drm/sti/sti_hdmi.c list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
head 987 drivers/gpu/drm/stm/ltdc.c &ddev->mode_config.plane_list, head)
head 972 drivers/gpu/drm/tegra/drm.c list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
head 47 drivers/gpu/drm/tilcdc/tilcdc_external.c list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
head 63 drivers/gpu/drm/tilcdc/tilcdc_external.c list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head)
head 39 drivers/gpu/drm/ttm/ttm_execbuf_util.c list_for_each_entry_continue_reverse(entry, list, head) {
head 50 drivers/gpu/drm/ttm/ttm_execbuf_util.c list_for_each_entry(entry, list, head) {
head 65 drivers/gpu/drm/ttm/ttm_execbuf_util.c entry = list_first_entry(list, struct ttm_validate_buffer, head);
head 69 drivers/gpu/drm/ttm/ttm_execbuf_util.c list_for_each_entry(entry, list, head) {
head 106 drivers/gpu/drm/ttm/ttm_execbuf_util.c entry = list_first_entry(list, struct ttm_validate_buffer, head);
head 112 drivers/gpu/drm/ttm/ttm_execbuf_util.c list_for_each_entry(entry, list, head) {
head 123 drivers/gpu/drm/ttm/ttm_execbuf_util.c entry = list_prev_entry(entry, head);
head 124 drivers/gpu/drm/ttm/ttm_execbuf_util.c list_del(&safe->head);
head 125 drivers/gpu/drm/ttm/ttm_execbuf_util.c list_add(&safe->head, dups);
head 172 drivers/gpu/drm/ttm/ttm_execbuf_util.c list_del(&entry->head);
head 173 drivers/gpu/drm/ttm/ttm_execbuf_util.c list_add(&entry->head, list);
head 196 drivers/gpu/drm/ttm/ttm_execbuf_util.c bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
head 201 drivers/gpu/drm/ttm/ttm_execbuf_util.c list_for_each_entry(entry, list, head) {
head 53 drivers/gpu/drm/vboxvideo/vbox_main.c list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
head 129 drivers/gpu/drm/vboxvideo/vbox_mode.c list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) {
head 153 drivers/gpu/drm/vboxvideo/vbox_mode.c head) {
head 197 drivers/gpu/drm/vboxvideo/vbox_mode.c head) {
head 450 drivers/gpu/drm/vboxvideo/vbox_mode.c list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) {
head 806 drivers/gpu/drm/vboxvideo/vbox_mode.c list_for_each_entry_safe(mode, iterator, &connector->modes, head) {
head 807 drivers/gpu/drm/vboxvideo/vbox_mode.c list_del(&mode->head);
head 1233 drivers/gpu/drm/vc4/vc4_crtc.c &drm->mode_config.plane_list, head) {
head 516 drivers/gpu/drm/vc4/vc4_drv.h struct list_head head;
head 619 drivers/gpu/drm/vc4/vc4_drv.h struct vc4_exec_info, head);
head 626 drivers/gpu/drm/vc4/vc4_drv.h struct vc4_exec_info, head);
head 635 drivers/gpu/drm/vc4/vc4_drv.h struct vc4_exec_info, head);
head 531 drivers/gpu/drm/vc4/vc4_gem.c list_move_tail(&exec->head, &vc4->render_job_list);
head 697 drivers/gpu/drm/vc4/vc4_gem.c list_add_tail(&exec->head, &vc4->bin_job_list);
head 1000 drivers/gpu/drm/vc4/vc4_gem.c struct vc4_exec_info, head);
head 1001 drivers/gpu/drm/vc4/vc4_gem.c list_del(&exec->head);
head 146 drivers/gpu/drm/vc4/vc4_irq.c list_move_tail(&exec->head, &vc4->bin_job_list);
head 161 drivers/gpu/drm/vc4/vc4_irq.c list_move_tail(&exec->head, &vc4->job_done_list);
head 552 drivers/gpu/drm/via/via_dmablit.c blitq->head = 0;
head 750 drivers/gpu/drm/via/via_dmablit.c blitq->blits[blitq->head++] = vsg;
head 751 drivers/gpu/drm/via/via_dmablit.c if (blitq->head >= VIA_NUM_BLIT_SLOTS)
head 752 drivers/gpu/drm/via/via_dmablit.c blitq->head = 0;
head 66 drivers/gpu/drm/via/via_dmablit.h unsigned head;
head 266 drivers/gpu/drm/via/via_verifier.c list_for_each_entry(r_list, &dev->maplist, head) {
head 168 drivers/gpu/drm/virtio/virtgpu_drv.h struct list_head head;
head 221 drivers/gpu/drm/virtio/virtgpu_drv.h struct list_head *head);
head 222 drivers/gpu/drm/virtio/virtgpu_drv.h void virtio_gpu_unref_list(struct list_head *head);
head 60 drivers/gpu/drm/virtio/virtgpu_ioctl.c struct list_head *head)
head 68 drivers/gpu/drm/virtio/virtgpu_ioctl.c ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
head 72 drivers/gpu/drm/virtio/virtgpu_ioctl.c list_for_each_entry(buf, head, head) {
head 77 drivers/gpu/drm/virtio/virtgpu_ioctl.c ttm_eu_backoff_reservation(ticket, head);
head 84 drivers/gpu/drm/virtio/virtgpu_ioctl.c void virtio_gpu_unref_list(struct list_head *head)
head 90 drivers/gpu/drm/virtio/virtgpu_ioctl.c list_for_each_entry(buf, head, head) {
head 190 drivers/gpu/drm/virtio/virtgpu_ioctl.c list_add(&buflist[i].head, &validate_list);
head 526 drivers/gpu/drm/virtio/virtgpu_ioctl.c list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
head 227 drivers/gpu/drm/virtio/virtgpu_kms.c list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) {
head 161 drivers/gpu/drm/virtio/virtgpu_object.c list_add(&mainbuf.head, &validate_list);
head 592 drivers/gpu/drm/virtio/virtgpu_vq.c list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
head 736 drivers/gpu/drm/virtio/virtgpu_vq.c list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
head 744 drivers/gpu/drm/virtio/virtgpu_vq.c list_add_tail(&cache_ent->head, &vgdev->cap_cache);
head 124 drivers/gpu/drm/vmwgfx/ttm_object.c struct list_head head;
head 401 drivers/gpu/drm/vmwgfx/ttm_object.c list_add_tail(&ref->head, &tfile->ref_list);
head 431 drivers/gpu/drm/vmwgfx/ttm_object.c list_del(&ref->head);
head 480 drivers/gpu/drm/vmwgfx/ttm_object.c ref = list_entry(list, struct ttm_ref_object, head);
head 393 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c void vmw_binding_res_list_kill(struct list_head *head)
head 397 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c vmw_binding_res_list_scrub(head);
head 398 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c list_for_each_entry_safe(entry, next, head, res_list)
head 411 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c void vmw_binding_res_list_scrub(struct list_head *head)
head 415 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c list_for_each_entry(entry, head, res_list) {
head 423 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c list_for_each_entry(entry, head, res_list) {
head 198 drivers/gpu/drm/vmwgfx/vmwgfx_binding.h extern void vmw_binding_res_list_kill(struct list_head *head);
head 199 drivers/gpu/drm/vmwgfx/vmwgfx_binding.h extern void vmw_binding_res_list_scrub(struct list_head *head);
head 46 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c struct list_head head;
head 107 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c list_del(&entry->head);
head 127 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c list_for_each_entry_safe(entry, next, list, head) {
head 128 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c list_del(&entry->head);
head 135 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c list_add_tail(&entry->head, &entry->man->list);
head 165 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c list_for_each_entry_safe(entry, next, list, head) {
head 173 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c list_del(&entry->head);
head 174 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c list_add_tail(&entry->head, &entry->man->list);
head 220 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c list_add_tail(&cres->head, list);
head 266 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c list_del(&entry->head);
head 268 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c list_add_tail(&entry->head, list);
head 322 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c list_for_each_entry_safe(entry, next, &man->list, head)
head 655 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
head 660 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c list_add_tail(head, &vcotbl->resource_list);
head 238 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h struct list_head head;
head 1309 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h struct list_head *head);
head 66 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c struct list_head head;
head 101 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c struct list_head head;
head 116 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c struct list_head head;
head 174 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c list_for_each_entry(entry, &sw_context->ctx_list, head) {
head 242 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c list_add_tail(&node->head, &sw_context->ctx_list);
head 531 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c list_add_tail(&rel->head, &sw_context->res_relocations);
head 564 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c list_for_each_entry(rel, list, head) {
head 751 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c list_for_each_entry(val, &sw_context->ctx_list, head) {
head 1184 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c list_add_tail(&reloc->head, &sw_context->bo_relocations);
head 1238 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c list_add_tail(&reloc->head, &sw_context->bo_relocations);
head 3304 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
head 120 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_del_init(&fence->head);
head 292 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_for_each_entry_safe(action, next_action, &list, head) {
head 293 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_del_init(&action->head);
head 355 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_add_tail(&fence->head, &fman->fence_list);
head 369 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_for_each_entry_safe(action, next_action, list, head) {
head 370 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_del_init(&action->head);
head 380 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_add_tail(&action->head, &fman->cleanup_list);
head 416 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_for_each_entry(fence, &fman->fence_list, head) {
head 475 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
head 477 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_del_init(&fence->head);
head 734 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c head);
head 742 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_del_init(&fence->head);
head 750 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c BUG_ON(!list_empty(&fence->head));
head 975 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_add_tail(&action->head, &action_list);
head 978 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c list_add_tail(&action->head, &fence->seq_passed_actions);
head 52 drivers/gpu/drm/vmwgfx/vmwgfx_fence.h struct list_head head;
head 61 drivers/gpu/drm/vmwgfx/vmwgfx_fence.h struct list_head head;
head 261 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 1851 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
head 2066 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c list_for_each_entry(con, &dev->mode_config.connector_list, head) {
head 2282 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c list_del_init(&du->pref_mode->head);
head 2459 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c head) {
head 2662 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c head) {
head 2688 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c list_for_each_entry(mode, &con->modes, head) {
head 2699 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c head);
head 237 drivers/gpu/drm/vmwgfx/vmwgfx_kms.h struct list_head head;
head 32 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c struct list_head head;
head 39 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c INIT_LIST_HEAD(&queue->head);
head 50 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c list_for_each_entry_safe(marker, next, &queue->head, head) {
head 67 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c list_add_tail(&marker->head, &queue->head);
head 83 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c if (list_empty(&queue->head)) {
head 90 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c list_for_each_entry_safe(marker, next, &queue->head, head) {
drivers/gpu/drm/vmwgfx/vmwgfx_marker.c list_for_each_entry_safe(marker, next, &queue->head, head) { head 97 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c list_del(&marker->head); head 137 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c if (list_empty(&queue->head)) head 140 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c marker = list_first_entry(&queue->head, head 141 drivers/gpu/drm/vmwgfx/vmwgfx_marker.c struct vmw_marker, head); head 494 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c list_add_tail(&val_buf->head, &val_list); head 575 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c list_add_tail(&val_buf->head, &val_list); head 70 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c struct list_head head; head 190 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(entry, &ctx->bo_list, base.head) { head 227 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(entry, &ctx->resource_ctx_list, head) { head 234 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(entry, &ctx->resource_list, head) { head 291 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_add_tail(&val_buf->head, &ctx->bo_list); head 347 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_add_tail(&node->head, &ctx->resource_list); head 352 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_add(&node->head, &ctx->resource_ctx_list); head 355 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_add_tail(&node->head, &ctx->resource_ctx_list); head 358 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_add_tail(&node->head, &ctx->resource_list); head 445 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(val, &ctx->resource_list, head) { head 485 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(val, &ctx->resource_list, head) { head 492 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(val, &ctx->resource_list, head) { head 564 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(entry, &ctx->bo_list, base.head) { head 600 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(val, &ctx->resource_list, head) { head 645 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(entry, &ctx->bo_list, base.head) head 648 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(val, &ctx->resource_list, head) head 651 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(val, &ctx->resource_ctx_list, head) head 670 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(entry, &ctx->bo_list, base.head) { head 676 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c list_for_each_entry(val, &ctx->resource_list, head) head 56 drivers/gpu/host1x/intr.c static void remove_completed_waiters(struct list_head *head, u32 sync, head 62 drivers/gpu/host1x/intr.c list_for_each_entry_safe(waiter, next, head, list) { head 89 drivers/gpu/host1x/intr.c struct list_head *head, head 93 drivers/gpu/host1x/intr.c list_first_entry(head, struct host1x_waitlist, list)->thresh; head 135 drivers/gpu/host1x/intr.c struct list_head *head = completed; head 138 drivers/gpu/host1x/intr.c for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) { head 142 drivers/gpu/host1x/intr.c list_for_each_entry_safe(waiter, next, head, list) { head 394 drivers/gpu/vga/vga_switcheroo.c find_client_from_pci(struct list_head *head, struct pci_dev *pdev) head 398 drivers/gpu/vga/vga_switcheroo.c list_for_each_entry(client, head, list) head 405 drivers/gpu/vga/vga_switcheroo.c find_client_from_id(struct list_head *head, head 410 drivers/gpu/vga/vga_switcheroo.c list_for_each_entry(client, head, list) head 417 
drivers/gpu/vga/vga_switcheroo.c find_active_client(struct list_head *head) head 421 drivers/gpu/vga/vga_switcheroo.c list_for_each_entry(client, head, list) head 60 drivers/greybus/manifest.c static void release_cport_descriptors(struct list_head *head, u8 bundle_id) head 65 drivers/greybus/manifest.c list_for_each_entry_safe(desc, tmp, head, links) { head 668 drivers/hid/hid-kye.c struct list_head *head; head 673 drivers/hid/hid-kye.c list_for_each(head, list) { head 674 drivers/hid/hid-kye.c report = list_entry(head, struct hid_report, list); head 679 drivers/hid/hid-kye.c if (head == list) { head 841 drivers/hid/hid-uclogic-rdesc.c static const __u8 head[] = {UCLOGIC_RDESC_PH_HEAD}; head 850 drivers/hid/hid-uclogic-rdesc.c for (p = rdesc_ptr; p + sizeof(head) < rdesc_ptr + template_size;) { head 851 drivers/hid/hid-uclogic-rdesc.c if (memcmp(p, head, sizeof(head)) == 0 && head 852 drivers/hid/hid-uclogic-rdesc.c p[sizeof(head)] < param_num) { head 853 drivers/hid/hid-uclogic-rdesc.c v = param_list[p[sizeof(head)]]; head 855 drivers/hid/hid-uclogic-rdesc.c p += sizeof(head) + 1; head 52 drivers/hid/hid-wiimote-core.c while (wdata->queue.head != wdata->queue.tail) { head 95 drivers/hid/hid-wiimote-core.c memcpy(wdata->queue.outq[wdata->queue.head].data, buffer, count); head 96 drivers/hid/hid-wiimote-core.c wdata->queue.outq[wdata->queue.head].size = count; head 97 drivers/hid/hid-wiimote-core.c newhead = (wdata->queue.head + 1) % WIIMOTE_BUFSIZE; head 99 drivers/hid/hid-wiimote-core.c if (wdata->queue.head == wdata->queue.tail) { head 100 drivers/hid/hid-wiimote-core.c wdata->queue.head = newhead; head 103 drivers/hid/hid-wiimote-core.c wdata->queue.head = newhead; head 110 drivers/hid/hid-wiimote.h __u8 head; head 48 drivers/hid/hidraw.c if (list->head == list->tail) { head 52 drivers/hid/hidraw.c while (list->head == list->tail) { head 255 drivers/hid/hidraw.c if (list->head != list->tail) head 487 drivers/hid/hidraw.c int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1); head 492 drivers/hid/hidraw.c if (!(list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC))) { head 496 drivers/hid/hidraw.c list->buffer[list->head].len = len; head 497 drivers/hid/hidraw.c list->head = new_head; head 41 drivers/hid/uhid.c __u8 head; head 76 drivers/hid/uhid.c newhead = (uhid->head + 1) % UHID_BUFSIZE; head 79 drivers/hid/uhid.c uhid->outq[uhid->head] = ev; head 80 drivers/hid/uhid.c uhid->head = newhead; head 666 drivers/hid/uhid.c if (uhid->head == uhid->tail) head 670 drivers/hid/uhid.c uhid->head != uhid->tail); head 679 drivers/hid/uhid.c if (uhid->head == uhid->tail) { head 773 drivers/hid/uhid.c if (uhid->head != uhid->tail) head 526 drivers/hid/usbhid/hid-core.c int head; head 534 drivers/hid/usbhid/hid-core.c if ((head = (usbhid->outhead + 1) & (HID_OUTPUT_FIFO_SIZE - 1)) == usbhid->outtail) { head 546 drivers/hid/usbhid/hid-core.c usbhid->outhead = head; head 582 drivers/hid/usbhid/hid-core.c if ((head = (usbhid->ctrlhead + 1) & (HID_CONTROL_FIFO_SIZE - 1)) == usbhid->ctrltail) { head 597 drivers/hid/usbhid/hid-core.c usbhid->ctrlhead = head; head 40 drivers/hid/usbhid/hiddev.c int head; head 151 drivers/hid/usbhid/hiddev.c list->buffer[list->head] = *uref; head 152 drivers/hid/usbhid/hiddev.c list->head = (list->head + 1) & head 338 drivers/hid/usbhid/hiddev.c if (list->head == list->tail) { head 341 drivers/hid/usbhid/hiddev.c while (list->head == list->tail) { head 374 drivers/hid/usbhid/hiddev.c while (list->head != list->tail && head 417 drivers/hid/usbhid/hiddev.c if (list->head != list->tail)
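
The hidraw, uhid, and usbhid hits just above all implement the same single-producer ring: a power-of-two buffer where head is the next write slot, tail the next read slot, head == tail means empty, and one slot is sacrificed so that (head + 1) & (SIZE - 1) == tail means full. A self-contained, single-threaded sketch of that scheme follows; the kernel sites additionally serialize the two index updates with a spinlock or memory barriers.

    #include <stdio.h>

    /* Power-of-two ring, as hidraw/uhid use: one slot is left empty so
     * "head == tail" means empty and "(head + 1) & mask == tail" full. */
    #define BUFSIZE 8   /* must be a power of two */

    struct ring {
        int buf[BUFSIZE];
        unsigned head;  /* next slot the producer writes */
        unsigned tail;  /* next slot the consumer reads  */
    };

    static int ring_put(struct ring *r, int v)
    {
        unsigned newhead = (r->head + 1) & (BUFSIZE - 1);

        if (newhead == r->tail)
            return -1;              /* full: caller drops or waits */
        r->buf[r->head] = v;
        r->head = newhead;
        return 0;
    }

    static int ring_get(struct ring *r, int *v)
    {
        if (r->head == r->tail)
            return -1;              /* empty */
        *v = r->buf[r->tail];
        r->tail = (r->tail + 1) & (BUFSIZE - 1);
        return 0;
    }

    int main(void)
    {
        struct ring r = { .head = 0, .tail = 0 };
        int v;

        for (int i = 0; i < 10; i++)
            if (ring_put(&r, i))
                printf("dropped %d\n", i);
        while (!ring_get(&r, &v))
            printf("got %d\n", v);
        return 0;
    }

hidraw masks with HIDRAW_BUFFER_SIZE - 1 and usbhid with HID_OUTPUT_FIFO_SIZE - 1 in exactly this way; the mask trick only works when the size is a power of two.
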
head 131 drivers/hsi/clients/cmt_speech.c static void cs_notify(u32 message, struct list_head *head) head 151 drivers/hsi/clients/cmt_speech.c list_add_tail(&entry->list, head); head 162 drivers/hsi/clients/cmt_speech.c static u32 cs_pop_entry(struct list_head *head) head 167 drivers/hsi/clients/cmt_speech.c entry = list_entry(head->next, struct char_queue, list); head 1320 drivers/hsi/clients/cmt_speech.c static void cs_free_char_queue(struct list_head *head) head 1325 drivers/hsi/clients/cmt_speech.c if (!list_empty(head)) { head 1326 drivers/hsi/clients/cmt_speech.c list_for_each_safe(cursor, next, head) { head 390 drivers/hsi/clients/ssi_protocol.c struct list_head *head, *tmp; head 412 drivers/hsi/clients/ssi_protocol.c list_for_each_safe(head, tmp, &ssi->txqueue) { head 413 drivers/hsi/clients/ssi_protocol.c msg = list_entry(head, struct hsi_msg, link); head 415 drivers/hsi/clients/ssi_protocol.c list_del(head); head 177 drivers/hsi/controllers/omap_ssi_port.c struct list_head *head, *tmp; head 182 drivers/hsi/controllers/omap_ssi_port.c list_for_each_safe(head, tmp, &omap_port->errqueue) { head 183 drivers/hsi/controllers/omap_ssi_port.c msg = list_entry(head, struct hsi_msg, link); head 185 drivers/hsi/controllers/omap_ssi_port.c list_del(head); head 404 drivers/hwtracing/coresight/coresight-etb10.c unsigned long head; head 411 drivers/hwtracing/coresight/coresight-etb10.c head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); head 414 drivers/hwtracing/coresight/coresight-etb10.c buf->cur = head / PAGE_SIZE; head 417 drivers/hwtracing/coresight/coresight-etb10.c buf->offset = head % PAGE_SIZE; head 562 drivers/hwtracing/coresight/coresight-etb10.c handle->head += to_read; head 451 drivers/hwtracing/coresight/coresight-etm-perf.c struct perf_addr_filters_head *head = perf_event_addr_filters(event); head 459 drivers/hwtracing/coresight/coresight-etm-perf.c list_for_each_entry(filter, &head->list, entry) { head 420 drivers/hwtracing/coresight/coresight-tmc-etf.c unsigned long head; head 427 drivers/hwtracing/coresight/coresight-tmc-etf.c head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1); head 430 drivers/hwtracing/coresight/coresight-tmc-etf.c buf->cur = head / PAGE_SIZE; head 433 drivers/hwtracing/coresight/coresight-tmc-etf.c buf->offset = head % PAGE_SIZE; head 550 drivers/hwtracing/coresight/coresight-tmc-etf.c handle->head += to_read; head 44 drivers/hwtracing/coresight/coresight-tmc-etr.c unsigned long head; head 1426 drivers/hwtracing/coresight/coresight-tmc-etr.c unsigned long head = etr_perf->head; head 1430 drivers/hwtracing/coresight/coresight-tmc-etr.c head = etr_perf->head; head 1431 drivers/hwtracing/coresight/coresight-tmc-etr.c pg_idx = head >> PAGE_SHIFT; head 1432 drivers/hwtracing/coresight/coresight-tmc-etr.c pg_offset = head & (PAGE_SIZE - 1); head 1546 drivers/hwtracing/coresight/coresight-tmc-etr.c handle->head += size; head 1589 drivers/hwtracing/coresight/coresight-tmc-etr.c etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf); head 499 drivers/hwtracing/stm/policy.c struct list_head *head = &policy->group.cg_children; head 503 drivers/hwtracing/stm/policy.c if (list_empty(head)) head 515 drivers/hwtracing/stm/policy.c list_for_each_entry(item, head, ci_entry) { head 525 drivers/hwtracing/stm/policy.c head = &policy_node->group.cg_children; head 174 drivers/i2c/busses/i2c-ismt.c u8 head; /* ring buffer head pointer */ head 223 drivers/i2c/busses/i2c-ismt.c struct ismt_desc *desc = &priv->hw[priv->head]; head 225
drivers/i2c/busses/i2c-ismt.c dev_dbg(dev, "Dump of the descriptor struct: 0x%X\n", priv->head); head 301 drivers/i2c/busses/i2c-ismt.c fmhp = ((priv->head + 1) % ISMT_DESC_ENTRIES) << 16; head 402 drivers/i2c/busses/i2c-ismt.c desc = &priv->hw[priv->head]; head 609 drivers/i2c/busses/i2c-ismt.c priv->head++; head 610 drivers/i2c/busses/i2c-ismt.c priv->head %= ISMT_DESC_ENTRIES; head 784 drivers/i2c/busses/i2c-ismt.c priv->head = 0; head 134 drivers/ide/ide-disk.c unsigned int sect, head, cyl, track; head 138 drivers/ide/ide-disk.c head = track % drive->head; head 139 drivers/ide/ide-disk.c cyl = track / drive->head; head 141 drivers/ide/ide-disk.c pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect); head 147 drivers/ide/ide-disk.c tf->device = head; head 371 drivers/ide/ide-disk.c drive->capacity64 = drive->cyl * drive->head * drive->sect; head 742 drivers/ide/ide-disk.c (drive->head == 0 || drive->head > 16)) { head 744 drivers/ide/ide-disk.c drive->name, drive->head); head 171 drivers/ide/ide-io.c tf->device = (drive->head - 1) | drive->select; head 57 drivers/ide/ide-probe.c id[ATA_ID_CUR_HEADS] = id[ATA_ID_HEADS] = drive->head; head 66 drivers/ide/ide-probe.c if (!drive->cyl || !drive->head || !drive->sect) { head 68 drivers/ide/ide-probe.c drive->head = drive->bios_head = id[ATA_ID_HEADS]; head 75 drivers/ide/ide-probe.c drive->head = id[ATA_ID_CUR_HEADS]; head 80 drivers/ide/ide-probe.c if (drive->head > 16 && id[ATA_ID_HEADS] && id[ATA_ID_HEADS] <= 16) { head 82 drivers/ide/ide-probe.c drive->head = id[ATA_ID_HEADS]; head 505 drivers/ide/ide-probe.c drive->head, drive->sect); head 405 drivers/ide/ide-proc.c drive->cyl, drive->head, drive->sect); head 241 drivers/ide/ide.c u8 head; head 272 drivers/ide/ide.c ide_disks_chs[i].head = h; head 317 drivers/ide/ide.c drive->head = drive->bios_head = ide_disks_chs[i].head; head 322 drivers/ide/ide.c drive->cyl, drive->head, drive->sect); head 134 drivers/iio/buffer/industrialio-buffer-dma.c list_for_each_entry_safe(block, _block, &block_list, head) head 147 drivers/iio/buffer/industrialio-buffer-dma.c list_add_tail(&block->head, &iio_dma_buffer_dead_blocks); head 185 drivers/iio/buffer/industrialio-buffer-dma.c INIT_LIST_HEAD(&block->head); head 203 drivers/iio/buffer/industrialio-buffer-dma.c list_add_tail(&block->head, &queue->outgoing); head 245 drivers/iio/buffer/industrialio-buffer-dma.c list_for_each_entry_safe(block, _block, list, head) { head 246 drivers/iio/buffer/industrialio-buffer-dma.c list_del(&block->head); head 350 drivers/iio/buffer/industrialio-buffer-dma.c list_add_tail(&block->head, &queue->incoming); head 409 drivers/iio/buffer/industrialio-buffer-dma.c list_for_each_entry_safe(block, _block, &queue->incoming, head) { head 410 drivers/iio/buffer/industrialio-buffer-dma.c list_del(&block->head); head 452 drivers/iio/buffer/industrialio-buffer-dma.c list_add_tail(&block->head, &queue->incoming); head 463 drivers/iio/buffer/industrialio-buffer-dma.c iio_dma_buffer_block, head); head 465 drivers/iio/buffer/industrialio-buffer-dma.c list_del(&block->head); head 556 drivers/iio/buffer/industrialio-buffer-dma.c list_for_each_entry(block, &queue->outgoing, head) head 52 drivers/iio/buffer/industrialio-buffer-dmaengine.c list_del(&block->head); head 83 drivers/iio/buffer/industrialio-buffer-dmaengine.c list_add_tail(&block->head, &dmaengine_buffer->active); head 28 drivers/iio/buffer/industrialio-hw-consumer.c struct list_head head; head 58 drivers/iio/buffer/industrialio-hw-consumer.c list_for_each_entry(buf, 
&hwc->buffers, head) { head 72 drivers/iio/buffer/industrialio-hw-consumer.c list_add_tail(&buf->head, &hwc->buffers); head 116 drivers/iio/buffer/industrialio-hw-consumer.c list_for_each_entry(buf, &hwc->buffers, head) head 134 drivers/iio/buffer/industrialio-hw-consumer.c list_for_each_entry_safe(buf, n, &hwc->buffers, head) head 217 drivers/iio/buffer/industrialio-hw-consumer.c list_for_each_entry(buf, &hwc->buffers, head) { head 226 drivers/iio/buffer/industrialio-hw-consumer.c list_for_each_entry_continue_reverse(buf, &hwc->buffers, head) head 240 drivers/iio/buffer/industrialio-hw-consumer.c list_for_each_entry(buf, &hwc->buffers, head) head 100 drivers/infiniband/core/addr.c const struct nlattr *head, *curr; head 106 drivers/infiniband/core/addr.c head = (const struct nlattr *)nlmsg_data(nlh); head 109 drivers/infiniband/core/addr.c nla_for_each_attr(curr, head, len, rem) { head 219 drivers/infiniband/core/cache.c static void put_gid_ndev(struct rcu_head *head) head 222 drivers/infiniband/core/cache.c container_of(head, struct roce_gid_ndev_storage, rcu_head); head 928 drivers/infiniband/core/sa_query.c const struct nlattr *head, *curr; head 935 drivers/infiniband/core/sa_query.c head = (const struct nlattr *) nlmsg_data(nlh); head 949 drivers/infiniband/core/sa_query.c nla_for_each_attr(curr, head, len, rem) { head 905 drivers/infiniband/hw/cxgb4/qp.c static void add_to_fc_list(struct list_head *head, struct list_head *entry) head 908 drivers/infiniband/hw/cxgb4/qp.c list_add_tail(entry, head); head 8414 drivers/infiniband/hw/hfi1/chip.c present = (rcd->head != get_rcvhdrtail(rcd)); head 8421 drivers/infiniband/hw/hfi1/chip.c return rcd->head != tail; head 11830 drivers/infiniband/hw/hfi1/chip.c u32 head, tail; head 11832 drivers/infiniband/hw/hfi1/chip.c head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) head 11840 drivers/infiniband/hw/hfi1/chip.c return head == tail; head 11906 drivers/infiniband/hw/hfi1/chip.c rcd->head = 0; head 421 drivers/infiniband/hw/hfi1/driver.c packet->rhqoff = rcd->head; head 827 drivers/infiniband/hw/hfi1/driver.c update_usrhead(packet->rcd, packet->rcd->head, packet->updegr, head 859 drivers/infiniband/hw/hfi1/driver.c rcd->head = packet.rhqoff; head 888 drivers/infiniband/hw/hfi1/driver.c rcd->head = packet.rhqoff; head 1118 drivers/infiniband/hw/hfi1/driver.c rcd->head = packet.rhqoff; head 235 drivers/infiniband/hw/hfi1/hfi.h u32 head; head 1518 drivers/infiniband/hw/hfi1/hfi.h return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset; head 1458 drivers/infiniband/hw/hfi1/pio.c u32 head, next; head 1494 drivers/infiniband/hw/hfi1/pio.c head = sc->sr_head; head 1510 drivers/infiniband/hw/hfi1/pio.c pbuf = &sc->sr[head].pbuf; head 1518 drivers/infiniband/hw/hfi1/pio.c next = head + 1; head 1700 drivers/infiniband/hw/hfi1/pio.c u32 head, tail; head 1722 drivers/infiniband/hw/hfi1/pio.c head = READ_ONCE(sc->sr_head); /* snapshot the head */ head 1724 drivers/infiniband/hw/hfi1/pio.c while (head != tail) { head 1741 drivers/infiniband/hw/hfi1/rc.c u32 opcode, head, tail; head 1773 drivers/infiniband/hw/hfi1/rc.c head = priv->s_tid_head; head 1785 drivers/infiniband/hw/hfi1/rc.c if (head == tail && req->comp_seg < req->total_segs) { head 1792 drivers/infiniband/hw/hfi1/rc.c head = qp->s_tail; head 1800 drivers/infiniband/hw/hfi1/rc.c if ((psn & IB_BTH_REQ_ACK) && tail != head && head 579 drivers/infiniband/hw/hfi1/sdma.c u16 head, tail; head 588 drivers/infiniband/hw/hfi1/sdma.c head = sde->descq_head & sde->sdma_mask; head 590 
drivers/infiniband/hw/hfi1/sdma.c while (head != tail) { head 592 drivers/infiniband/hw/hfi1/sdma.c head = ++sde->descq_head & sde->sdma_mask; head 594 drivers/infiniband/hw/hfi1/sdma.c if (txp && txp->next_descq_idx == head) { head 598 drivers/infiniband/hw/hfi1/sdma.c trace_hfi1_sdma_progress(sde, head, tail, txp); head 2150 drivers/infiniband/hw/hfi1/sdma.c u16 head, tail, cnt; head 2152 drivers/infiniband/hw/hfi1/sdma.c head = sde->descq_head & sde->sdma_mask; head 2158 drivers/infiniband/hw/hfi1/sdma.c sde->this_idx, head, tail, cnt, head 2162 drivers/infiniband/hw/hfi1/sdma.c while (head != tail) { head 2165 drivers/infiniband/hw/hfi1/sdma.c descqp = &sde->descq[head]; head 2181 drivers/infiniband/hw/hfi1/sdma.c head, flags, addr, gen, len); head 2197 drivers/infiniband/hw/hfi1/sdma.c head++; head 2198 drivers/infiniband/hw/hfi1/sdma.c head &= sde->sdma_mask; head 2213 drivers/infiniband/hw/hfi1/sdma.c u16 head, tail; head 2220 drivers/infiniband/hw/hfi1/sdma.c head = sde->descq_head & sde->sdma_mask; head 2229 drivers/infiniband/hw/hfi1/sdma.c (unsigned long long)read_sde_csr(sde, SD(HEAD)), head, head 2245 drivers/infiniband/hw/hfi1/sdma.c while (head != tail) { head 2248 drivers/infiniband/hw/hfi1/sdma.c descqp = &sde->descq[head]; head 2264 drivers/infiniband/hw/hfi1/sdma.c head, flags, addr, gen, len); head 2273 drivers/infiniband/hw/hfi1/sdma.c head = (head + 1) & sde->sdma_mask; head 1677 drivers/infiniband/hw/hfi1/tid_rdma.c u16 head, tail; head 1680 drivers/infiniband/hw/hfi1/tid_rdma.c head = req->setup_head; head 1682 drivers/infiniband/hw/hfi1/tid_rdma.c for ( ; CIRC_CNT(head, tail, MAX_FLOWS); head 433 drivers/infiniband/hw/hns/hns_roce_device.h u32 head; head 540 drivers/infiniband/hw/hns/hns_roce_device.h int head; head 1168 drivers/infiniband/hw/hns/hns_roce_hem.c struct list_head *head, bool exist_bt) head 1172 drivers/infiniband/hw/hns/hns_roce_hem.c list_for_each_entry_safe(hem, temp_hem, head, list) { head 1553 drivers/infiniband/hw/hns/hns_roce_hem.c struct list_head *head = &hem_list->btm_bt; head 1559 drivers/infiniband/hw/hns/hns_roce_hem.c list_for_each_entry_safe(hem, temp_hem, head, sibling) { head 99 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1); head 317 drivers/infiniband/hw/hns/hns_roce_hw_v1.c qp->sq.head += nreq; head 325 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1))); head 373 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); head 400 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->rq.head += nreq; head 415 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->rq.head); head 426 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->rq.head); head 2614 drivers/infiniband/hw/hns/hns_roce_hw_v1.c QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head); head 2628 drivers/infiniband/hw/hns/hns_roce_hw_v1.c QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head); head 2698 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->rq.head = 0; head 2700 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->sq.head = 0; head 2996 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->rq.head); head 3279 drivers/infiniband/hw/hns/hns_roce_hw_v1.c RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head); head 3311 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->rq.head = 0; head 3313 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->sq.head = 0; head 286 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1); head 299 
drivers/infiniband/hw/hns/hns_roce_hw_v2.c ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); head 577 drivers/infiniband/hw/hns/hns_roce_hw_v2.c qp->sq.head += nreq; head 590 drivers/infiniband/hw/hns/hns_roce_hw_v2.c qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)); head 651 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); head 692 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->rq.head += nreq; head 696 drivers/infiniband/hw/hns/hns_roce_hw_v2.c *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff; head 973 drivers/infiniband/hw/hns/hns_roce_hw_v2.c u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG); head 975 drivers/infiniband/hw/hns/hns_roce_hw_v2.c return head == priv->cmq.csq.next_to_use; head 984 drivers/infiniband/hw/hns/hns_roce_hw_v2.c u32 head; head 988 drivers/infiniband/hw/hns/hns_roce_hw_v2.c head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG); head 989 drivers/infiniband/hw/hns/hns_roce_hw_v2.c while (head != ntc) { head 3877 drivers/infiniband/hw/hns/hns_roce_hw_v2.c V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head); head 4416 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->sq.head); head 4425 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->rq.head); head 4467 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->rq.head = 0; head 4469 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->sq.head = 0; head 4849 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); head 4851 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); head 6247 drivers/infiniband/hw/hns/hns_roce_hw_v2.c ind = srq->head & (srq->max - 1); head 6256 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (unlikely(srq->head == srq->tail)) { head 6290 drivers/infiniband/hw/hns/hns_roce_hw_v2.c srq->head += nreq; head 6301 drivers/infiniband/hw/hns/hns_roce_hw_v2.c srq_db.parameter = cpu_to_le32(srq->head); head 1588 drivers/infiniband/hw/hns/hns_roce_hw_v2.h u32 head; head 1191 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); head 1194 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); head 1293 drivers/infiniband/hw/hns/hns_roce_qp.c cur = hr_wq->head - hr_wq->tail; head 1299 drivers/infiniband/hw/hns/hns_roce_qp.c cur = hr_wq->head - hr_wq->tail; head 283 drivers/infiniband/hw/hns/hns_roce_srq.c srq->head = 0; head 319 drivers/infiniband/hw/i40iw/i40iw_ctrl.c static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp) head 324 drivers/infiniband/hw/i40iw/i40iw_ctrl.c if (list_empty(head)) head 328 drivers/infiniband/hw/i40iw/i40iw_ctrl.c entry = head->next; head 331 drivers/infiniband/hw/i40iw/i40iw_ctrl.c entry = (lastentry != head) ? 
lastentry->next : NULL; head 593 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cqp->sq_ring.head, head 615 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cqp->sq_ring.head, head 1547 drivers/infiniband/hw/i40iw/i40iw_d.h (_ring).head = 0; \ head 1552 drivers/infiniband/hw/i40iw/i40iw_d.h #define I40IW_RING_GETCURRENT_HEAD(_ring) ((_ring).head) head 1560 drivers/infiniband/hw/i40iw/i40iw_d.h (_ring).head = ((_ring).head + 1) % size; \ head 1572 drivers/infiniband/hw/i40iw/i40iw_d.h (_ring).head = ((_ring).head + (_count)) % size; \ head 1583 drivers/infiniband/hw/i40iw/i40iw_d.h (_ring).head = ((_ring).head + 1) % (_ring).size head 1613 drivers/infiniband/hw/i40iw/i40iw_d.h (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \ head 54 drivers/infiniband/hw/i40iw/i40iw_uk.c if (!qp->sq_ring.head) head 62 drivers/infiniband/hw/i40iw/i40iw_uk.c peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size; head 101 drivers/infiniband/hw/i40iw/i40iw_uk.c if (sw_sq_head > qp->initial_ring.head) { head 102 drivers/infiniband/hw/i40iw/i40iw_uk.c if ((hw_sq_tail >= qp->initial_ring.head) && head 106 drivers/infiniband/hw/i40iw/i40iw_uk.c } else if (sw_sq_head != qp->initial_ring.head) { head 107 drivers/infiniband/hw/i40iw/i40iw_uk.c if ((hw_sq_tail >= qp->initial_ring.head) || head 114 drivers/infiniband/hw/i40iw/i40iw_uk.c qp->initial_ring.head = qp->sq_ring.head; head 125 drivers/infiniband/hw/i40iw/i40iw_uk.c qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); head 826 drivers/infiniband/hw/i40iw/i40iw_uk.c if (!wqe_idx && (qp->sq_ring.head == qp->sq_ring.tail)) { head 1097 drivers/infiniband/hw/i40iw/i40iw_uk.c cq_head = cq->cq_ring.head; head 118 drivers/infiniband/hw/i40iw/i40iw_user.h u32 head; head 499 drivers/infiniband/hw/mlx4/alias_GUID.c struct list_head *head = head 541 drivers/infiniband/hw/mlx4/alias_GUID.c list_add_tail(&callback_context->list, head); head 620 drivers/infiniband/hw/mlx4/cq.c cur = wq->head - wq->tail; head 3121 drivers/infiniband/hw/mlx4/main.c if (mqp->sq.tail != mqp->sq.head) { head 3139 drivers/infiniband/hw/mlx4/main.c if (mqp->rq.tail != mqp->rq.head) { head 175 drivers/infiniband/hw/mlx4/mlx4_ib.h unsigned head; head 361 drivers/infiniband/hw/mlx4/mlx4_ib.h int head; head 2606 drivers/infiniband/hw/mlx4/qp.c qp->rq.head = 0; head 2608 drivers/infiniband/hw/mlx4/qp.c qp->sq.head = 0; head 3294 drivers/infiniband/hw/mlx4/qp.c cur = wq->head - wq->tail; head 3300 drivers/infiniband/hw/mlx4/qp.c cur = wq->head - wq->tail; head 3600 drivers/infiniband/hw/mlx4/qp.c qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; head 3828 drivers/infiniband/hw/mlx4/qp.c qp->sq.head += nreq; head 3879 drivers/infiniband/hw/mlx4/qp.c ind = qp->rq.head & (qp->rq.wqe_cnt - 1); head 3927 drivers/infiniband/hw/mlx4/qp.c qp->rq.head += nreq; head 3935 drivers/infiniband/hw/mlx4/qp.c *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); head 142 drivers/infiniband/hw/mlx4/srq.c srq->head = 0; head 327 drivers/infiniband/hw/mlx4/srq.c if (unlikely(srq->head == srq->tail)) { head 333 drivers/infiniband/hw/mlx4/srq.c srq->wrid[srq->head] = wr->wr_id; head 335 drivers/infiniband/hw/mlx4/srq.c next = get_wqe(srq, srq->head); head 336 drivers/infiniband/hw/mlx4/srq.c srq->head = be16_to_cpu(next->next_wqe_index); head 334 drivers/infiniband/hw/mlx5/cq.c u16 tail, u16 head) head 340 drivers/infiniband/hw/mlx5/cq.c if (idx == head) head 395 drivers/infiniband/hw/mlx5/cq.c cur = wq->head - wq->tail; head 4483 drivers/infiniband/hw/mlx5/main.c if (mqp->sq.tail != mqp->sq.head) { 
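
The I40IW_RING_* macros above show the modulo variant of the same head/tail accounting, which also works for ring sizes that are not powers of two: the producer advances head on post, the consumer advances tail on completion, and occupancy is (head + size - tail) % size. A small sketch under those definitions:

    #include <stdio.h>

    /* Modulo-based ring accounting in the style of the I40IW_RING_*
     * macros above; valid for any size, not just powers of two. */
    struct ring { unsigned head, tail, size; };

    /* Entries currently outstanding (posted but not completed). */
    static unsigned ring_used(const struct ring *r)
    {
        return (r->head + r->size - r->tail) % r->size;
    }

    /* Free slots; one slot stays unused to tell full from empty. */
    static unsigned ring_free(const struct ring *r)
    {
        return r->size - ring_used(r) - 1;
    }

    static int ring_post(struct ring *r)        /* producer moves head */
    {
        if (!ring_free(r))
            return -1;
        r->head = (r->head + 1) % r->size;
        return 0;
    }

    static void ring_complete(struct ring *r)   /* consumer moves tail */
    {
        r->tail = (r->tail + 1) % r->size;
    }

    int main(void)
    {
        struct ring r = { .head = 0, .tail = 0, .size = 5 };

        while (!ring_post(&r))
            ;
        printf("used=%u free=%u\n", ring_used(&r), ring_free(&r)); /* 4 0 */
        ring_complete(&r);
        printf("used=%u free=%u\n", ring_used(&r), ring_free(&r)); /* 3 1 */
        return 0;
    }
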
head 4500 drivers/infiniband/hw/mlx5/main.c if (mqp->rq.tail != mqp->rq.head) { head 283 drivers/infiniband/hw/mlx5/mlx5_ib.h unsigned head; head 543 drivers/infiniband/hw/mlx5/mlx5_ib.h int head; head 667 drivers/infiniband/hw/mlx5/mlx5_ib.h struct list_head head; head 120 drivers/infiniband/hw/mlx5/mr.c list_add_tail(&mr->list, &ent->head); head 209 drivers/infiniband/hw/mlx5/mr.c if (list_empty(&ent->head)) { head 213 drivers/infiniband/hw/mlx5/mr.c mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); head 437 drivers/infiniband/hw/mlx5/mr.c if (list_empty(&ent->head)) { head 446 drivers/infiniband/hw/mlx5/mr.c mr = list_first_entry(&ent->head, struct mlx5_ib_mr, head 480 drivers/infiniband/hw/mlx5/mr.c if (!list_empty(&ent->head)) { head 481 drivers/infiniband/hw/mlx5/mr.c mr = list_first_entry(&ent->head, struct mlx5_ib_mr, head 525 drivers/infiniband/hw/mlx5/mr.c list_add_tail(&mr->list, &ent->head); head 546 drivers/infiniband/hw/mlx5/mr.c if (list_empty(&ent->head)) { head 550 drivers/infiniband/hw/mlx5/mr.c mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); head 624 drivers/infiniband/hw/mlx5/mr.c INIT_LIST_HEAD(&ent->head); head 782 drivers/infiniband/hw/mlx5/odp.c struct pf_frame *head = NULL, *frame; head 896 drivers/infiniband/hw/mlx5/odp.c frame->next = head; head 897 drivers/infiniband/hw/mlx5/odp.c head = frame; head 910 drivers/infiniband/hw/mlx5/odp.c if (head) { head 911 drivers/infiniband/hw/mlx5/odp.c frame = head; head 912 drivers/infiniband/hw/mlx5/odp.c head = frame->next; head 924 drivers/infiniband/hw/mlx5/odp.c while (head) { head 925 drivers/infiniband/hw/mlx5/odp.c frame = head; head 926 drivers/infiniband/hw/mlx5/odp.c head = frame->next; head 3721 drivers/infiniband/hw/mlx5/qp.c qp->rq.head = 0; head 3723 drivers/infiniband/hw/mlx5/qp.c qp->sq.head = 0; head 4075 drivers/infiniband/hw/mlx5/qp.c cur = wq->head - wq->tail; head 4081 drivers/infiniband/hw/mlx5/qp.c cur = wq->head - wq->tail; head 4948 drivers/infiniband/hw/mlx5/qp.c qp->sq.wqe_head[idx] = qp->sq.head + nreq; head 5318 drivers/infiniband/hw/mlx5/qp.c qp->sq.head += nreq; head 5380 drivers/infiniband/hw/mlx5/qp.c ind = qp->rq.head & (qp->rq.wqe_cnt - 1); head 5420 drivers/infiniband/hw/mlx5/qp.c qp->rq.head += nreq; head 5427 drivers/infiniband/hw/mlx5/qp.c *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); head 154 drivers/infiniband/hw/mlx5/srq.c srq->head = 0; head 448 drivers/infiniband/hw/mlx5/srq.c if (unlikely(srq->head == srq->tail)) { head 454 drivers/infiniband/hw/mlx5/srq.c srq->wrid[srq->head] = wr->wr_id; head 456 drivers/infiniband/hw/mlx5/srq.c next = get_wqe(srq, srq->head); head 457 drivers/infiniband/hw/mlx5/srq.c srq->head = be16_to_cpu(next->next_wqe_index); head 251 drivers/infiniband/hw/mthca/mthca_provider.h unsigned head; head 234 drivers/infiniband/hw/mthca/mthca_qp.c wq->head = 0; head 1572 drivers/infiniband/hw/mthca/mthca_qp.c cur = wq->head - wq->tail; head 1578 drivers/infiniband/hw/mthca/mthca_qp.c cur = wq->head - wq->tail; head 1657 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head, qp->sq.tail, head 1815 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head += nreq; head 1853 drivers/infiniband/hw/mthca/mthca_qp.c qp->rq.head, qp->rq.tail, head 1906 drivers/infiniband/hw/mthca/mthca_qp.c qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; head 1920 drivers/infiniband/hw/mthca/mthca_qp.c qp->rq.head += nreq; head 1955 drivers/infiniband/hw/mthca/mthca_qp.c ind = qp->sq.head & (qp->sq.max - 1); head 1962 drivers/infiniband/hw/mthca/mthca_qp.c ((qp->sq.head & 
0xffff) << 8) | f0 | op0; head 1964 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; head 1971 drivers/infiniband/hw/mthca/mthca_qp.c *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); head 1987 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head, qp->sq.tail, head 2135 drivers/infiniband/hw/mthca/mthca_qp.c dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; head 2137 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head += nreq; head 2144 drivers/infiniband/hw/mthca/mthca_qp.c *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); head 2176 drivers/infiniband/hw/mthca/mthca_qp.c ind = qp->rq.head & (qp->rq.max - 1); head 2182 drivers/infiniband/hw/mthca/mthca_qp.c qp->rq.head, qp->rq.tail, head 2217 drivers/infiniband/hw/mthca/mthca_qp.c qp->rq.head += nreq; head 2224 drivers/infiniband/hw/mthca/mthca_qp.c *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); head 138 drivers/infiniband/hw/ocrdma/ocrdma.h u16 head, tail; head 366 drivers/infiniband/hw/ocrdma/ocrdma.h u32 head, tail; head 138 drivers/infiniband/hw/ocrdma/ocrdma_hw.c return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe)); head 143 drivers/infiniband/hw/ocrdma/ocrdma_hw.c dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1); head 908 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head); head 910 drivers/infiniband/hw/ocrdma/ocrdma_hw.c list_for_each(cur, head) { head 1043 drivers/infiniband/hw/ocrdma/ocrdma_hw.c dev->mqe_ctx.tag = dev->mq.sq.head; head 1046 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->hdr.tag_lo = dev->mq.sq.head; head 2128 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq.head = 0; head 2130 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->rq.head = 0; head 1566 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt; head 1571 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c return (qp->sq.tail == qp->sq.head); head 1576 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c return (qp->rq.tail == qp->rq.head); head 1581 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c return q->va + (q->head * q->entry_size); head 1592 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c q->head = (q->head + 1) & q->max_wqe_idx; head 2187 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1; head 2189 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0; head 2190 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id; head 2258 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id; head 222 drivers/infiniband/hw/qib/qib.h u32 head; head 1377 drivers/infiniband/hw/qib/qib.h rcd->head + dd->rhf_offset; head 1379 drivers/infiniband/hw/qib/qib.h hdrqtail = rcd->head; head 455 drivers/infiniband/hw/qib/qib_driver.c l = rcd->head; head 547 drivers/infiniband/hw/qib/qib_driver.c rcd->head = l; head 581 drivers/infiniband/hw/qib/qib_driver.c lval = (u64)rcd->head | dd->rhdrhead_intr_off; head 2059 drivers/infiniband/hw/qib/qib_iba6120.c u32 head, tail; head 2061 drivers/infiniband/hw/qib/qib_iba6120.c head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); head 2066 drivers/infiniband/hw/qib/qib_iba6120.c return head == tail; head 2153 drivers/infiniband/hw/qib/qib_iba6120.c dd->rcd[ctxt]->head = val; head 2711 drivers/infiniband/hw/qib/qib_iba7220.c u32 head, tail; head 2713 drivers/infiniband/hw/qib/qib_iba7220.c head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); head 
2718 drivers/infiniband/hw/qib/qib_iba7220.c return head == tail; head 2784 drivers/infiniband/hw/qib/qib_iba7220.c dd->rcd[ctxt]->head = val; head 4448 drivers/infiniband/hw/qib/qib_iba7322.c u32 head, tail; head 4450 drivers/infiniband/hw/qib/qib_iba7322.c head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt); head 4455 drivers/infiniband/hw/qib/qib_iba7322.c return head == tail; head 4557 drivers/infiniband/hw/qib/qib_iba7322.c dd->rcd[ctxt]->head = val; head 4565 drivers/infiniband/hw/qib/qib_iba7322.c val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off; head 229 drivers/infiniband/hw/qib/qib_sdma.c static void unmap_desc(struct qib_pportdata *ppd, unsigned head) head 231 drivers/infiniband/hw/qib/qib_sdma.c __le64 *descqp = &ppd->sdma_descq[head].qw[0]; head 688 drivers/infiniband/hw/qib/qib_sdma.c u16 head, tail, cnt; head 690 drivers/infiniband/hw/qib/qib_sdma.c head = ppd->sdma_descq_head; head 696 drivers/infiniband/hw/qib/qib_sdma.c "SDMA ppd->sdma_descq_head: %u\n", head); head 703 drivers/infiniband/hw/qib/qib_sdma.c while (head != tail) { head 706 drivers/infiniband/hw/qib/qib_sdma.c descqp = &descq[head].qw[0]; head 720 drivers/infiniband/hw/qib/qib_sdma.c head, flags, addr, gen, dwlen, dwoffset); head 721 drivers/infiniband/hw/qib/qib_sdma.c if (++head == ppd->sdma_descq_cnt) head 722 drivers/infiniband/hw/qib/qib_sdma.c head = 0; head 80 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c unsigned int head; head 83 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ibcq.cqe, &head); head 276 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c unsigned int head; head 284 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ibcq.cqe, &head); head 293 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c items = (tail > head) ? (tail - head) : head 294 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c (cq->ibcq.cqe - head + tail); head 323 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c unsigned int head; head 329 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ibcq.cqe, &head); head 343 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cqe = get_cqe(cq, head); head 428 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c unsigned int head; head 439 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) { head 442 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c eqe = get_eqe(dev, head); head 504 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c unsigned int head; head 509 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) { head 513 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c cqne = get_cqne(dev, head); head 90 drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h const __u32 head = atomic_read(&r->cons_head); head 93 drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h pvrdma_idx_valid(head, max_elems)) { head 95 drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h return tail != (head ^ max_elems); head 104 drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h const __u32 head = atomic_read(&r->cons_head); head 107 drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h pvrdma_idx_valid(head, max_elems)) { head 108 drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h *out_head = head & (max_elems - 1); head 109 drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h return tail != head; head 74 drivers/infiniband/sw/rdmavt/cq.c u32 head; head 83 drivers/infiniband/sw/rdmavt/cq.c head = RDMA_READ_UAPI_ATOMIC(u_wc->head); head 88 drivers/infiniband/sw/rdmavt/cq.c head = k_wc->head; head 96 drivers/infiniband/sw/rdmavt/cq.c if (head >= (unsigned)cq->ibcq.cqe) { 
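
pvrdma_ring.h above uses a third scheme: head and tail are free-running indices taken modulo twice the ring size, so every slot is usable and the extra bit acts as a lap marker, distinguishing a full ring (tail == head ^ max_elems, same slot on different laps) from an empty one (tail == head). A sketch of that wrap-bit trick, assuming a power-of-two max_elems as the driver does:

    #include <stdio.h>

    /* The pvrdma_ring scheme: indices count through twice the ring
     * size, so all slots are usable and the extra bit marks the lap.
     * MAX_ELEMS must be a power of two. */
    #define MAX_ELEMS 8u

    static unsigned slot(unsigned idx) { return idx & (MAX_ELEMS - 1); }
    static int ring_empty(unsigned head, unsigned tail) { return tail == head; }
    static int ring_full(unsigned head, unsigned tail)
    {
        return tail == (head ^ MAX_ELEMS);  /* same slot, other lap */
    }
    static unsigned idx_inc(unsigned idx)
    {
        return (idx + 1) & (2 * MAX_ELEMS - 1);  /* wrap at 2 * size */
    }

    int main(void)
    {
        unsigned head = 0, tail = 0;        /* consumer / producer */
        int stored = 0;

        while (!ring_full(head, tail)) {    /* fill all 8 slots */
            tail = idx_inc(tail);
            stored++;
        }
        printf("stored %d entries, first slot %u\n", stored, slot(head));
        while (!ring_empty(head, tail))
            head = idx_inc(head);
        printf("drained, empty=%d\n", ring_empty(head, tail));
        return 0;
    }
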
head 97 drivers/infiniband/sw/rdmavt/cq.c head = cq->ibcq.cqe; head 100 drivers/infiniband/sw/rdmavt/cq.c next = head + 1; head 120 drivers/infiniband/sw/rdmavt/cq.c trace_rvt_cq_enter(cq, entry, head); head 122 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].wr_id = entry->wr_id; head 123 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].status = entry->status; head 124 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].opcode = entry->opcode; head 125 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].vendor_err = entry->vendor_err; head 126 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].byte_len = entry->byte_len; head 127 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].ex.imm_data = entry->ex.imm_data; head 128 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].qp_num = entry->qp->qp_num; head 129 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].src_qp = entry->src_qp; head 130 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].wc_flags = entry->wc_flags; head 131 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].pkey_index = entry->pkey_index; head 132 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].slid = ib_lid_cpu16(entry->slid); head 133 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].sl = entry->sl; head 134 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].dlid_path_bits = entry->dlid_path_bits; head 135 drivers/infiniband/sw/rdmavt/cq.c uqueue[head].port_num = entry->port_num; head 137 drivers/infiniband/sw/rdmavt/cq.c RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next); head 139 drivers/infiniband/sw/rdmavt/cq.c kqueue[head] = *entry; head 140 drivers/infiniband/sw/rdmavt/cq.c k_wc->head = next; head 359 drivers/infiniband/sw/rdmavt/cq.c if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) != head 363 drivers/infiniband/sw/rdmavt/cq.c if (cq->kqueue->head != cq->kqueue->tail) head 382 drivers/infiniband/sw/rdmavt/cq.c u32 head, tail, n; head 426 drivers/infiniband/sw/rdmavt/cq.c head = RDMA_READ_UAPI_ATOMIC(old_u_wc->head); head 430 drivers/infiniband/sw/rdmavt/cq.c head = old_k_wc->head; head 434 drivers/infiniband/sw/rdmavt/cq.c if (head > (u32)cq->ibcq.cqe) head 435 drivers/infiniband/sw/rdmavt/cq.c head = (u32)cq->ibcq.cqe; head 438 drivers/infiniband/sw/rdmavt/cq.c if (head < tail) head 439 drivers/infiniband/sw/rdmavt/cq.c n = cq->ibcq.cqe + 1 + head - tail; head 441 drivers/infiniband/sw/rdmavt/cq.c n = head - tail; head 446 drivers/infiniband/sw/rdmavt/cq.c for (n = 0; tail != head; n++) { head 458 drivers/infiniband/sw/rdmavt/cq.c RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n); head 462 drivers/infiniband/sw/rdmavt/cq.c k_wc->head = n; head 536 drivers/infiniband/sw/rdmavt/cq.c if (tail == wc->head) head 1371 drivers/infiniband/sw/rdmavt/qp.c u32 head; head 1380 drivers/infiniband/sw/rdmavt/qp.c head = RDMA_READ_UAPI_ATOMIC(wq->head); head 1384 drivers/infiniband/sw/rdmavt/qp.c head = kwq->head; head 1388 drivers/infiniband/sw/rdmavt/qp.c if (head >= qp->r_rq.size) head 1389 drivers/infiniband/sw/rdmavt/qp.c head = 0; head 1392 drivers/infiniband/sw/rdmavt/qp.c while (tail != head) { head 1847 drivers/infiniband/sw/rdmavt/qp.c next = wq->head + 1; head 1865 drivers/infiniband/sw/rdmavt/qp.c wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head); head 1877 drivers/infiniband/sw/rdmavt/qp.c smp_store_release(&wq->head, next); head 2262 drivers/infiniband/sw/rdmavt/qp.c next = wq->head + 1; head 2271 drivers/infiniband/sw/rdmavt/qp.c wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head); head 2280 drivers/infiniband/sw/rdmavt/qp.c smp_store_release(&wq->head, next); head 2362 drivers/infiniband/sw/rdmavt/qp.c static u32 get_count(struct rvt_rq *rq, u32 
tail, u32 head) head 2366 drivers/infiniband/sw/rdmavt/qp.c count = head; head 2387 drivers/infiniband/sw/rdmavt/qp.c u32 head; head 2390 drivers/infiniband/sw/rdmavt/qp.c head = RDMA_READ_UAPI_ATOMIC(rq->wq->head); head 2392 drivers/infiniband/sw/rdmavt/qp.c head = rq->kwq->head; head 2394 drivers/infiniband/sw/rdmavt/qp.c return head; head 2417 drivers/infiniband/sw/rdmavt/qp.c u32 head; head 2451 drivers/infiniband/sw/rdmavt/qp.c head = get_rvt_head(rq, ip); head 2452 drivers/infiniband/sw/rdmavt/qp.c kwq->count = get_count(rq, tail, head); head 107 drivers/infiniband/sw/rdmavt/rc.c u32 head; head 114 drivers/infiniband/sw/rdmavt/rc.c head = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->head); head 117 drivers/infiniband/sw/rdmavt/rc.c head = READ_ONCE(qp->r_rq.kwq->head); head 120 drivers/infiniband/sw/rdmavt/rc.c if (head >= qp->r_rq.size) head 121 drivers/infiniband/sw/rdmavt/rc.c head = 0; head 130 drivers/infiniband/sw/rdmavt/rc.c credits = head - tail; head 179 drivers/infiniband/sw/rdmavt/srq.c u32 sz, size, n, head, tail; head 216 drivers/infiniband/sw/rdmavt/srq.c head = RDMA_READ_UAPI_ATOMIC(owq->head); head 220 drivers/infiniband/sw/rdmavt/srq.c head = okwq->head; head 223 drivers/infiniband/sw/rdmavt/srq.c if (head >= srq->rq.size || tail >= srq->rq.size) { head 227 drivers/infiniband/sw/rdmavt/srq.c n = head; head 238 drivers/infiniband/sw/rdmavt/srq.c while (tail != head) { head 255 drivers/infiniband/sw/rdmavt/srq.c RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n); head 258 drivers/infiniband/sw/rdmavt/srq.c tmp_rq.kwq->head = n; head 105 drivers/infiniband/sw/rdmavt/trace_tx.h __field(u32, head) head 125 drivers/infiniband/sw/rdmavt/trace_tx.h __entry->head = qp->s_head; head 148 drivers/infiniband/sw/rdmavt/trace_tx.h __entry->head, head 545 drivers/infiniband/ulp/ipoib/ipoib_ib.c void *head, int hlen) head 555 drivers/infiniband/ulp/ipoib/ipoib_ib.c if (head) { head 557 drivers/infiniband/ulp/ipoib/ipoib_ib.c priv->tx_wr.header = head; head 1542 drivers/infiniband/ulp/ipoib/ipoib_main.c static void neigh_hash_free_rcu(struct rcu_head *head) head 1544 drivers/infiniband/ulp/ipoib/ipoib_main.c struct ipoib_neigh_hash *htbl = container_of(head, head 2555 drivers/infiniband/ulp/ipoib/ipoib_main.c LIST_HEAD(head); head 2562 drivers/infiniband/ulp/ipoib/ipoib_main.c unregister_netdevice_queue(cpriv->dev, &head); head 2563 drivers/infiniband/ulp/ipoib/ipoib_main.c unregister_netdevice_queue(priv->dev, &head); head 2564 drivers/infiniband/ulp/ipoib/ipoib_main.c unregister_netdevice_many(&head); head 42 drivers/input/evdev.c unsigned int head; head 103 drivers/input/evdev.c unsigned int i, head, num; head 110 drivers/input/evdev.c head = client->tail; head 116 drivers/input/evdev.c for (i = client->tail; i != client->head; i = (i + 1) & mask) { head 126 drivers/input/evdev.c } else if (head != i) { head 128 drivers/input/evdev.c client->buffer[head] = *ev; head 132 drivers/input/evdev.c head = (head + 1) & mask; head 136 drivers/input/evdev.c client->packet_head = head; head 140 drivers/input/evdev.c client->head = head; head 155 drivers/input/evdev.c client->buffer[client->head++] = ev; head 156 drivers/input/evdev.c client->head &= client->bufsize - 1; head 158 drivers/input/evdev.c if (unlikely(client->head == client->tail)) { head 160 drivers/input/evdev.c client->tail = (client->head - 1) & (client->bufsize - 1); head 203 drivers/input/evdev.c if (client->head != client->tail) { head 204 drivers/input/evdev.c client->packet_head = client->head = client->tail; head 217 drivers/input/evdev.c 
client->buffer[client->head++] = *event; head 218 drivers/input/evdev.c client->head &= client->bufsize - 1; head 220 drivers/input/evdev.c if (unlikely(client->head == client->tail)) { head 225 drivers/input/evdev.c client->tail = (client->head - 2) & (client->bufsize - 1); head 239 drivers/input/evdev.c client->packet_head = client->head; head 270 drivers/input/evdev.c if (client->packet_head == client->head) head 60 drivers/input/joydev.c int head; head 99 drivers/input/joydev.c client->buffer[client->head] = *event; head 102 drivers/input/joydev.c client->head++; head 103 drivers/input/joydev.c client->head &= JOYDEV_BUFFER_SIZE - 1; head 104 drivers/input/joydev.c if (client->tail == client->head) head 327 drivers/input/joydev.c have_event = client->head != client->tail; head 365 drivers/input/joydev.c client->tail = client->head; head 381 drivers/input/joydev.c client->head != client->tail; head 33 drivers/input/joystick/iforce/iforce-packets.c int head, tail; head 41 drivers/input/joystick/iforce/iforce-packets.c head = iforce->xmit.head; head 45 drivers/input/joystick/iforce/iforce-packets.c if (CIRC_SPACE(head, tail, XMIT_SIZE) < n+2) { head 52 drivers/input/joystick/iforce/iforce-packets.c empty = head == tail; head 53 drivers/input/joystick/iforce/iforce-packets.c XMIT_INC(iforce->xmit.head, n+2); head 58 drivers/input/joystick/iforce/iforce-packets.c iforce->xmit.buf[head] = HI(cmd); head 59 drivers/input/joystick/iforce/iforce-packets.c XMIT_INC(head, 1); head 60 drivers/input/joystick/iforce/iforce-packets.c iforce->xmit.buf[head] = LO(cmd); head 61 drivers/input/joystick/iforce/iforce-packets.c XMIT_INC(head, 1); head 63 drivers/input/joystick/iforce/iforce-packets.c c = CIRC_SPACE_TO_END(head, tail, XMIT_SIZE); head 66 drivers/input/joystick/iforce/iforce-packets.c memcpy(&iforce->xmit.buf[head], head 74 drivers/input/joystick/iforce/iforce-packets.c XMIT_INC(head, n); head 41 drivers/input/joystick/iforce/iforce-serio.c if (iforce->xmit.head == iforce->xmit.tail) { head 32 drivers/input/joystick/iforce/iforce-usb.c if (iforce->xmit.head == iforce->xmit.tail) { head 47 drivers/input/joystick/iforce/iforce-usb.c c = CIRC_CNT_TO_END(iforce->xmit.head, iforce->xmit.tail, XMIT_SIZE); head 61 drivers/input/misc/uinput.c unsigned char head; head 79 drivers/input/misc/uinput.c udev->buff[udev->head] = (struct input_event) { head 87 drivers/input/misc/uinput.c udev->head = (udev->head + 1) % UINPUT_BUFFER_SIZE; head 628 drivers/input/misc/uinput.c have_event = udev->head != udev->tail; head 673 drivers/input/misc/uinput.c else if (udev->head == udev->tail && head 686 drivers/input/misc/uinput.c udev->head != udev->tail || head 699 drivers/input/misc/uinput.c if (udev->head != udev->tail) head 565 drivers/input/mouse/cyapa_gen6.c struct pip_app_cmd_head head; head 575 drivers/input/mouse/cyapa_gen6.c put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr); head 576 drivers/input/mouse/cyapa_gen6.c put_unaligned_le16(sizeof(cmd), &cmd.head.length - 2); head 577 drivers/input/mouse/cyapa_gen6.c cmd.head.report_id = PIP_APP_CMD_REPORT_ID; head 578 drivers/input/mouse/cyapa_gen6.c cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE; head 100 drivers/input/mousedev.c unsigned int head, tail; head 275 drivers/input/mousedev.c p = &client->packets[client->head]; head 277 drivers/input/mousedev.c new_head = (client->head + 1) % PACKET_QUEUE_LEN; head 279 drivers/input/mousedev.c p = &client->packets[client->head = new_head]; head 620 drivers/input/mousedev.c if (client->tail == client->head) 
{ head 51 drivers/input/serio/sa1111ps2.c unsigned int head; head 97 drivers/input/serio/sa1111ps2.c if (ps2if->head == ps2if->tail) { head 117 drivers/input/serio/sa1111ps2.c unsigned int head; head 127 drivers/input/serio/sa1111ps2.c if (ps2if->head == ps2if->tail) head 129 drivers/input/serio/sa1111ps2.c head = (ps2if->head + 1) & (sizeof(ps2if->buf) - 1); head 130 drivers/input/serio/sa1111ps2.c if (head != ps2if->tail) { head 131 drivers/input/serio/sa1111ps2.c ps2if->buf[ps2if->head] = val; head 132 drivers/input/serio/sa1111ps2.c ps2if->head = head; head 30 drivers/input/serio/serio_raw.c unsigned int tail, head; head 146 drivers/input/serio/serio_raw.c empty = serio_raw->head == serio_raw->tail; head 170 drivers/input/serio/serio_raw.c if (serio_raw->head == serio_raw->tail && head 188 drivers/input/serio/serio_raw.c serio_raw->head != serio_raw->tail || head 248 drivers/input/serio/serio_raw.c if (serio_raw->head != serio_raw->tail) head 275 drivers/input/serio/serio_raw.c unsigned int head = serio_raw->head; head 278 drivers/input/serio/serio_raw.c serio_raw->queue[head] = data; head 279 drivers/input/serio/serio_raw.c head = (head + 1) % SERIO_RAW_QUEUE_LEN; head 280 drivers/input/serio/serio_raw.c if (likely(head != serio_raw->tail)) { head 281 drivers/input/serio/serio_raw.c serio_raw->head = head; head 41 drivers/input/serio/userio.c u8 head; head 62 drivers/input/serio/userio.c userio->buf[userio->head] = val; head 63 drivers/input/serio/userio.c userio->head = (userio->head + 1) % USERIO_BUFSIZE; head 65 drivers/input/serio/userio.c if (userio->head == userio->tail) head 140 drivers/input/serio/userio.c nonwrap_len = CIRC_CNT_TO_END(userio->head, head 167 drivers/input/serio/userio.c userio->head != userio->tail); head 257 drivers/input/serio/userio.c if (userio->head != userio->tail) head 643 drivers/iommu/amd_iommu.c u32 head, tail; head 645 drivers/iommu/amd_iommu.c head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); head 648 drivers/iommu/amd_iommu.c while (head != tail) { head 649 drivers/iommu/amd_iommu.c iommu_print_event(iommu, iommu->evt_buf + head); head 650 drivers/iommu/amd_iommu.c head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE; head 653 drivers/iommu/amd_iommu.c writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); head 676 drivers/iommu/amd_iommu.c u32 head, tail; head 681 drivers/iommu/amd_iommu.c head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); head 684 drivers/iommu/amd_iommu.c while (head != tail) { head 689 drivers/iommu/amd_iommu.c raw = (u64 *)(iommu->ppr_log + head); head 713 drivers/iommu/amd_iommu.c head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; head 714 drivers/iommu/amd_iommu.c writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); head 720 drivers/iommu/amd_iommu.c head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); head 738 drivers/iommu/amd_iommu.c u32 head, tail, cnt = 0; head 743 drivers/iommu/amd_iommu.c head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET); head 746 drivers/iommu/amd_iommu.c while (head != tail) { head 750 drivers/iommu/amd_iommu.c raw = (u64 *)(iommu->ga_log + head); head 757 drivers/iommu/amd_iommu.c head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE; head 758 drivers/iommu/amd_iommu.c writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); head 3177 drivers/iommu/amd_iommu.c struct list_head *head) head 3210 drivers/iommu/amd_iommu.c list_add_tail(&region->list, head); head 3218 drivers/iommu/amd_iommu.c list_add_tail(&region->list, head); head 3225 drivers/iommu/amd_iommu.c list_add_tail(&region->list, head); head 3229 drivers/iommu/amd_iommu.c struct list_head *head)
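
The amd_iommu event-, PPR-, and GA-log loops above show the hardware-producer flavor of the ring: the IOMMU appends fixed-size records and advances a tail register, while the driver consumes from head and writes the new head back over MMIO. In the sketch below, plain variables stand in for the readl()/writel() register accessors, and the sizes are illustrative, not the real register layout:

    #include <stdio.h>

    /* Sketch of the amd_iommu event-log pattern above: hardware appends
     * fixed-size records and bumps a tail register; the driver walks
     * from head to tail and writes the consumed head back.  Plain
     * variables stand in for the readl()/writel() MMIO accessors. */
    #define ENTRY_SIZE 16u
    #define LOG_SIZE   (8u * ENTRY_SIZE)

    static unsigned mmio_head, mmio_tail;       /* fake MMIO registers */
    static unsigned char log_buf[LOG_SIZE];

    static void handle_event(const unsigned char *evt)
    {
        printf("event at offset %ld\n", (long)(evt - log_buf));
    }

    static void drain_event_log(void)
    {
        unsigned head = mmio_head;              /* readl(EVT_HEAD) */
        unsigned tail = mmio_tail;              /* readl(EVT_TAIL) */

        while (head != tail) {
            handle_event(log_buf + head);
            head = (head + ENTRY_SIZE) % LOG_SIZE;
        }
        mmio_head = head;                       /* writel(head, EVT_HEAD) */
    }

    int main(void)
    {
        mmio_tail = 3 * ENTRY_SIZE;             /* pretend hw logged 3 */
        drain_event_log();
        printf("head now %u\n", mmio_head);
        return 0;
    }

The dmar.c and intel-svm.c hits below are the same shape with different register names (DMAR_IQH_REG, DMAR_PQH_REG).
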
head 3233 drivers/iommu/amd_iommu.c list_for_each_entry_safe(entry, next, head, list) head 1317 drivers/iommu/arm-smmu-v3.c }, head = llq; head 1333 drivers/iommu/arm-smmu-v3.c head.cons = llq.cons; head 1334 drivers/iommu/arm-smmu-v3.c head.prod = queue_inc_prod_n(&llq, n + sync) | head 1337 drivers/iommu/arm-smmu-v3.c old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); head 1344 drivers/iommu/arm-smmu-v3.c head.prod &= ~CMDQ_PROD_OWNED_FLAG; head 1368 drivers/iommu/arm-smmu-v3.c arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod); head 2700 drivers/iommu/arm-smmu-v3.c struct list_head *head) head 2710 drivers/iommu/arm-smmu-v3.c list_add_tail(&region->list, head); head 2712 drivers/iommu/arm-smmu-v3.c iommu_dma_get_resv_regions(dev, head); head 2716 drivers/iommu/arm-smmu-v3.c struct list_head *head) head 2720 drivers/iommu/arm-smmu-v3.c list_for_each_entry_safe(entry, next, head, list) head 1531 drivers/iommu/arm-smmu.c struct list_head *head) head 1541 drivers/iommu/arm-smmu.c list_add_tail(&region->list, head); head 1543 drivers/iommu/arm-smmu.c iommu_dma_get_resv_regions(dev, head); head 1547 drivers/iommu/arm-smmu.c struct list_head *head) head 1551 drivers/iommu/arm-smmu.c list_for_each_entry_safe(entry, next, head, list) head 1159 drivers/iommu/dmar.c int head, tail; head 1175 drivers/iommu/dmar.c head = readl(iommu->reg + DMAR_IQH_REG); head 1176 drivers/iommu/dmar.c if ((head >> shift) == index) { head 1177 drivers/iommu/dmar.c struct qi_desc *desc = qi->desc + head; head 1199 drivers/iommu/dmar.c head = readl(iommu->reg + DMAR_IQH_REG); head 1200 drivers/iommu/dmar.c head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH; head 1201 drivers/iommu/dmar.c head |= 1; head 1208 drivers/iommu/dmar.c if (qi->desc_status[head] == QI_IN_USE) head 1209 drivers/iommu/dmar.c qi->desc_status[head] = QI_ABORT; head 1210 drivers/iommu/dmar.c head = (head - 2 + QI_LENGTH) % QI_LENGTH; head 1211 drivers/iommu/dmar.c } while (head != tail); head 5667 drivers/iommu/intel-iommu.c struct list_head *head) head 5697 drivers/iommu/intel-iommu.c list_add_tail(&resv->list, head); head 5710 drivers/iommu/intel-iommu.c list_add_tail(&reg->list, head); head 5720 drivers/iommu/intel-iommu.c list_add_tail(&reg->list, head); head 5724 drivers/iommu/intel-iommu.c struct list_head *head) head 5728 drivers/iommu/intel-iommu.c list_for_each_entry_safe(entry, next, head, list) head 535 drivers/iommu/intel-svm.c int head, tail, handled = 0; head 542 drivers/iommu/intel-svm.c head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; head 543 drivers/iommu/intel-svm.c while (head != tail) { head 554 drivers/iommu/intel-svm.c req = &iommu->prq[head / sizeof(*req)]; head 662 drivers/iommu/intel-svm.c head = (head + sizeof(*req)) & PRQ_RING_MASK; head 360 drivers/iommu/iommu.c struct list_head *head) head 371 drivers/iommu/iommu.c ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); head 99 drivers/iommu/iova.c fq->head = 0; head 460 drivers/iommu/iova.c for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE) head 465 drivers/iommu/iova.c return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head); head 498 drivers/iommu/iova.c fq->head = (fq->head + 1) % IOVA_FQ_SIZE; head 95 drivers/iommu/virtio-iommu.c u32 head; head 400 drivers/iommu/virtio-iommu.c .head.type = VIRTIO_IOMMU_T_MAP, head 481 drivers/iommu/virtio-iommu.c probe->head.type = VIRTIO_IOMMU_T_PROBE; head 572 drivers/iommu/virtio-iommu.c } else if (!(evt->head &
VIOMMU_FAULT_RESV_MASK)) { head 689 drivers/iommu/virtio-iommu.c .head.type = VIRTIO_IOMMU_T_ATTACH, head 737 drivers/iommu/virtio-iommu.c .head.type = VIRTIO_IOMMU_T_MAP, head 772 drivers/iommu/virtio-iommu.c .head.type = VIRTIO_IOMMU_T_UNMAP, head 810 drivers/iommu/virtio-iommu.c static void viommu_get_resv_regions(struct device *dev, struct list_head *head) head 824 drivers/iommu/virtio-iommu.c list_add_tail(&new_entry->list, head); head 837 drivers/iommu/virtio-iommu.c list_add_tail(&msi->list, head); head 840 drivers/iommu/virtio-iommu.c iommu_dma_get_resv_regions(dev, head); head 843 drivers/iommu/virtio-iommu.c static void viommu_put_resv_regions(struct device *dev, struct list_head *head) head 847 drivers/iommu/virtio-iommu.c list_for_each_entry_safe(entry, next, head, list) head 81 drivers/isdn/capi/capilib.c void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize) head 99 drivers/isdn/capi/capilib.c list_add_tail(&np->list, head); head 105 drivers/isdn/capi/capilib.c void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci) head 110 drivers/isdn/capi/capilib.c list_for_each(l, head) { head 126 drivers/isdn/capi/capilib.c void capilib_release_appl(struct list_head *head, u16 applid) head 131 drivers/isdn/capi/capilib.c list_for_each_safe(l, n, head) { head 143 drivers/isdn/capi/capilib.c void capilib_release(struct list_head *head) head 148 drivers/isdn/capi/capilib.c list_for_each_safe(l, n, head) { head 158 drivers/isdn/capi/capilib.c u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid) head 163 drivers/isdn/capi/capilib.c list_for_each(l, head) { head 181 drivers/isdn/capi/capilib.c void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid) head 186 drivers/isdn/capi/capilib.c list_for_each(l, head) { head 145 drivers/isdn/mISDN/dsp_cmx.c count_list_member(struct list_head *head) head 150 drivers/isdn/mISDN/dsp_cmx.c list_for_each(m, head) head 49 drivers/isdn/mISDN/socket.c sk_add_node(sk, &l->head); head 493 drivers/isdn/mISDN/socket.c sk_for_each(csk, &data_sockets.head) { head 65 drivers/isdn/mISDN/stack.c sk_for_each(sk, &sl->head) { head 151 drivers/isdn/mISDN/stack.c if (!hlist_empty(&st->l1sock.head)) { head 157 drivers/isdn/mISDN/stack.c if (!hlist_empty(&st->l1sock.head)) head 376 drivers/isdn/mISDN/stack.c INIT_HLIST_HEAD(&newst->l1sock.head); head 445 drivers/isdn/mISDN/stack.c sk_add_node(&msk->sk, &dev->D.st->l1sock.head); head 644 drivers/isdn/mISDN/stack.c if (!hlist_empty(&st->l1sock.head)) head 184 drivers/lightnvm/pblk-rb.c #define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size) head 185 drivers/lightnvm/pblk-rb.c #define pblk_rb_ring_space(rb, head, tail, size) \ head 186 drivers/lightnvm/pblk-rb.c (CIRC_SPACE(head, tail, size)) head 531 drivers/lightnvm/pblk-recovery.c static void pblk_recov_line_add_ordered(struct list_head *head, head 536 drivers/lightnvm/pblk-recovery.c list_for_each_entry(t, head, list) head 352 drivers/md/dm-bio-prison-v1.c static void __sweep(struct dm_deferred_set *ds, struct list_head *head) head 356 drivers/md/dm-bio-prison-v1.c list_splice_init(&ds->entries[ds->sweeper].work_items, head); head 361 drivers/md/dm-bio-prison-v1.c list_splice_init(&ds->entries[ds->sweeper].work_items, head); head 364 drivers/md/dm-bio-prison-v1.c void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head) head 371 drivers/md/dm-bio-prison-v1.c __sweep(entry->ds, head); head 133 drivers/md/dm-bio-prison-v1.h void 
dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head); head 113 drivers/md/dm-cache-policy-smq.c unsigned head, tail; head 119 drivers/md/dm-cache-policy-smq.c l->head = l->tail = INDEXER_NULL; head 124 drivers/md/dm-cache-policy-smq.c return to_entry(es, l->head); head 144 drivers/md/dm-cache-policy-smq.c return l->head == INDEXER_NULL; head 149 drivers/md/dm-cache-policy-smq.c struct entry *head = l_head(es, l); head 151 drivers/md/dm-cache-policy-smq.c e->next = l->head; head 154 drivers/md/dm-cache-policy-smq.c if (head) head 155 drivers/md/dm-cache-policy-smq.c head->prev = l->head = to_index(es, e); head 157 drivers/md/dm-cache-policy-smq.c l->head = l->tail = to_index(es, e); head 173 drivers/md/dm-cache-policy-smq.c l->head = l->tail = to_index(es, e); head 205 drivers/md/dm-cache-policy-smq.c l->head = e->next; head 565 drivers/md/dm-clone-target.c struct hlist_head head; head 591 drivers/md/dm-clone-target.c INIT_HLIST_HEAD(&bucket->head); head 619 drivers/md/dm-clone-target.c hlist_for_each_entry(hd, &bucket->head, h) { head 635 drivers/md/dm-clone-target.c hlist_add_head(&hd->h, &bucket->head); head 976 drivers/md/dm-clone-target.c struct dm_clone_region_hydration *head; head 986 drivers/md/dm-clone-target.c if (batch->head) { head 989 drivers/md/dm-clone-target.c (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) { head 990 drivers/md/dm-clone-target.c list_add_tail(&hd->list, &batch->head->list); head 997 drivers/md/dm-clone-target.c hydration_copy(batch->head, batch->nr_batched_regions); head 998 drivers/md/dm-clone-target.c batch->head = NULL; head 1014 drivers/md/dm-clone-target.c batch->head = hd; head 1070 drivers/md/dm-clone-target.c .head = NULL, head 1109 drivers/md/dm-clone-target.c if (batch.head) head 1110 drivers/md/dm-clone-target.c hydration_copy(batch.head, batch.nr_batched_regions); head 129 drivers/md/dm-raid1.c should_wake = !(bl->head); head 638 drivers/md/dm-raid1.c if (!ms->failures.head) head 688 drivers/md/dm-raid1.c if (!writes->head) head 737 drivers/md/dm-raid1.c if (unlikely(requeue.head)) { head 791 drivers/md/dm-raid1.c if (likely(!failures->head)) head 530 drivers/md/dm-region-hash.c for (bio = bios->head; bio; bio = bio->bi_next) { head 169 drivers/md/dm-stats.c static void dm_stat_free(struct rcu_head *head) head 172 drivers/md/dm-stats.c struct dm_stat *s = container_of(head, struct dm_stat, rcu_head); head 1228 drivers/md/dm-thin.c static void process_prepared(struct pool *pool, struct list_head *head, head 1237 drivers/md/dm-thin.c list_splice_init(head, &maps); head 320 drivers/md/md-multipath.c struct list_head *head = &conf->retry_list; head 326 drivers/md/md-multipath.c if (list_empty(head)) head 328 drivers/md/md-multipath.c mp_bh = list_entry(head->prev, struct multipath_bh, retry_list); head 329 drivers/md/md-multipath.c list_del(head->prev); head 650 drivers/md/md.h #define rdev_for_each_list(rdev, tmp, head) \ head 651 drivers/md/md.h list_for_each_entry_safe(rdev, tmp, head, same_set) head 832 drivers/md/raid1.c if (conf->pending_bio_list.head) { head 2527 drivers/md/raid1.c struct list_head *head = &conf->retry_list; head 2560 drivers/md/raid1.c if (list_empty(head)) { head 2564 drivers/md/raid1.c r1_bio = list_entry(head->prev, struct r1bio, retry_list); head 2565 drivers/md/raid1.c list_del(head->prev); head 883 drivers/md/raid10.c if (conf->pending_bio_list.head) { head 2717 drivers/md/raid10.c struct list_head *head = &conf->retry_list; head 2753 drivers/md/raid10.c if (list_empty(head)) {
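
dm-cache-policy-smq above links entries by array index rather than by pointer: l->head and l->tail are small integer indices into a preallocated entry pool, with INDEXER_NULL as the sentinel, which shrinks the link overhead and lets the whole pool be relocated or checkpointed at once. A compact sketch of such an index-linked list (the names mirror the smq code, but the implementation here is illustrative):

    #include <stdio.h>

    /* Index-linked list in the style of dm-cache smq: entries live in
     * one array and link by index, with 0xFFFF as INDEXER_NULL. */
    #define INDEXER_NULL 0xFFFFu
    #define POOL_SIZE    8

    struct entry { unsigned short next, prev; int value; };
    struct ilist { unsigned short head, tail; };

    static struct entry pool[POOL_SIZE];

    static void l_init(struct ilist *l) { l->head = l->tail = INDEXER_NULL; }

    static void l_add_head(struct ilist *l, unsigned short i)
    {
        pool[i].prev = INDEXER_NULL;
        pool[i].next = l->head;
        if (l->head != INDEXER_NULL)
            pool[l->head].prev = i;
        else
            l->tail = i;             /* first entry is also the tail */
        l->head = i;
    }

    static unsigned short l_pop_head(struct ilist *l)
    {
        unsigned short i = l->head;

        if (i == INDEXER_NULL)
            return i;
        l->head = pool[i].next;
        if (l->head != INDEXER_NULL)
            pool[l->head].prev = INDEXER_NULL;
        else
            l->tail = INDEXER_NULL;
        return i;
    }

    int main(void)
    {
        struct ilist l;
        unsigned short i;

        l_init(&l);
        for (i = 0; i < 3; i++) {
            pool[i].value = 10 * i;
            l_add_head(&l, i);
        }
        while ((i = l_pop_head(&l)) != INDEXER_NULL)
            printf("entry %u: %d\n", i, pool[i].value); /* 2, 1, 0 */
        return 0;
    }
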
(list_empty(head)) { head 2757 drivers/md/raid10.c r10_bio = list_entry(head->prev, struct r10bio, retry_list); head 2758 drivers/md/raid10.c list_del(head->prev); head 359 drivers/md/raid5.c struct llist_node *head; head 361 drivers/md/raid5.c head = llist_del_all(&conf->released_stripes); head 362 drivers/md/raid5.c head = llist_reverse_order(head); head 363 drivers/md/raid5.c llist_for_each_entry_safe(sh, t, head, release_list) { head 741 drivers/md/raid5.c struct stripe_head *head; head 755 drivers/md/raid5.c head = __find_stripe(conf, head_sector, conf->generation); head 756 drivers/md/raid5.c if (head && !atomic_inc_not_zero(&head->count)) { head 758 drivers/md/raid5.c if (!atomic_read(&head->count)) { head 759 drivers/md/raid5.c if (!test_bit(STRIPE_HANDLE, &head->state)) head 761 drivers/md/raid5.c BUG_ON(list_empty(&head->lru) && head 762 drivers/md/raid5.c !test_bit(STRIPE_EXPANDING, &head->state)); head 766 drivers/md/raid5.c list_del_init(&head->lru); head 769 drivers/md/raid5.c if (head->group) { head 770 drivers/md/raid5.c head->group->stripes_cnt--; head 771 drivers/md/raid5.c head->group = NULL; head 774 drivers/md/raid5.c atomic_inc(&head->count); head 779 drivers/md/raid5.c if (!head) head 781 drivers/md/raid5.c if (!stripe_can_batch(head)) head 784 drivers/md/raid5.c lock_two_stripes(head, sh); head 786 drivers/md/raid5.c if (!stripe_can_batch(head) || !stripe_can_batch(sh)) head 795 drivers/md/raid5.c if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || head 796 drivers/md/raid5.c bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) head 799 drivers/md/raid5.c if (head->batch_head) { head 800 drivers/md/raid5.c spin_lock(&head->batch_head->batch_lock); head 802 drivers/md/raid5.c if (!stripe_can_batch(head)) { head 803 drivers/md/raid5.c spin_unlock(&head->batch_head->batch_lock); head 813 drivers/md/raid5.c sh->batch_head = head->batch_head; head 819 drivers/md/raid5.c list_add(&sh->batch_list, &head->batch_list); head 820 drivers/md/raid5.c spin_unlock(&head->batch_head->batch_lock); head 822 drivers/md/raid5.c head->batch_head = head; head 823 drivers/md/raid5.c sh->batch_head = head->batch_head; head 824 drivers/md/raid5.c spin_lock(&head->batch_lock); head 825 drivers/md/raid5.c list_add_tail(&sh->batch_list, &head->batch_list); head 826 drivers/md/raid5.c spin_unlock(&head->batch_lock); head 845 drivers/md/raid5.c unlock_two_stripes(head, sh); head 847 drivers/md/raid5.c raid5_release_stripe(head); head 5082 drivers/md/raid5.c struct list_head head; head 5083 drivers/md/raid5.c list_add(&head, &conf->bitmap_list); head 5085 drivers/md/raid5.c while (!list_empty(&head)) { head 5086 drivers/md/raid5.c struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); head 22 drivers/media/cec/cec-notifier.c struct list_head head; head 42 drivers/media/cec/cec-notifier.c list_for_each_entry(n, &cec_notifiers, head) { head 67 drivers/media/cec/cec-notifier.c list_add_tail(&n->head, &cec_notifiers); head 79 drivers/media/cec/cec-notifier.c list_del(&n->head); head 504 drivers/media/common/siano/smscoreapi.c static void list_add_locked(struct list_head *new, struct list_head *head, head 511 drivers/media/common/siano/smscoreapi.c list_add(new, head); head 114 drivers/media/dvb-core/dmxdev.c struct list_head *head, *pos; head 116 drivers/media/dvb-core/dmxdev.c head = demux->get_frontends(demux); head 117 drivers/media/dvb-core/dmxdev.c if (!head) head 119 drivers/media/dvb-core/dmxdev.c list_for_each(pos, head) head 1176 
drivers/media/dvb-core/dvb_demux.c struct list_head *head = &dvbdemux->frontend_list; head 1178 drivers/media/dvb-core/dvb_demux.c list_add(&(frontend->connectivity_list), head); head 1187 drivers/media/dvb-core/dvb_demux.c struct list_head *pos, *n, *head = &dvbdemux->frontend_list; head 1189 drivers/media/dvb-core/dvb_demux.c list_for_each_safe(pos, n, head) { head 588 drivers/media/mc/mc-entity.c static struct media_link *media_add_link(struct list_head *head) head 596 drivers/media/mc/mc-entity.c list_add_tail(&link->list, head); head 284 drivers/media/pci/cx18/cx18-driver.h #define list_entry_is_past_end(pos, head, member) \ head 285 drivers/media/pci/cx18/cx18-driver.h (&pos->member == (head)) head 733 drivers/media/platform/vsp1/vsp1_dl.c int vsp1_dl_list_add_chain(struct vsp1_dl_list *head, head 736 drivers/media/platform/vsp1/vsp1_dl.c head->has_chain = true; head 737 drivers/media/platform/vsp1/vsp1_dl.c list_add_tail(&dl->chain, &head->chain); head 77 drivers/media/platform/vsp1/vsp1_dl.h int vsp1_dl_list_add_chain(struct vsp1_dl_list *head, struct vsp1_dl_list *dl); head 773 drivers/media/usb/go7007/go7007-fw.c const unsigned char head[] = { 0x00, 0x00, 0x01, 0xb0, go->pali, head 779 drivers/media/usb/go7007/go7007-fw.c CODE_GEN(c, buf + 2 + sizeof(head)); head 793 drivers/media/usb/go7007/go7007-fw.c memcpy(buf + 2, head, sizeof(head)); head 811 drivers/media/usb/go7007/go7007-fw.c i = CODE_LENGTH(c) + sizeof(head) * 8; head 519 drivers/media/usb/uvc/uvc_video.c sample = &stream->clock.samples[stream->clock.head]; head 526 drivers/media/usb/uvc/uvc_video.c stream->clock.head = (stream->clock.head + 1) % stream->clock.size; head 538 drivers/media/usb/uvc/uvc_video.c clock->head = 0; head 687 drivers/media/usb/uvc/uvc_video.c first = &clock->samples[clock->head]; head 688 drivers/media/usb/uvc/uvc_video.c last = &clock->samples[(clock->head - 1) % clock->size]; head 606 drivers/media/usb/uvc/uvcvideo.h unsigned int head; head 1748 drivers/memstick/core/ms_block.c msb->geometry.heads = chs_table[i].head; head 269 drivers/memstick/core/ms_block.h unsigned char head; head 1161 drivers/message/fusion/mptsas.c struct list_head *head = &hd->target_reset_list; head 1168 drivers/message/fusion/mptsas.c if (list_empty(head)) head 1171 drivers/message/fusion/mptsas.c target_reset_list = list_entry(head->next, head 1196 drivers/message/fusion/mptsas.c struct list_head *head = &hd->target_reset_list; head 1241 drivers/message/fusion/mptsas.c if (list_empty(head)) head 1244 drivers/message/fusion/mptsas.c target_reset_list = list_entry(head->next, head 243 drivers/mfd/ezx-pcap.c u8 head; head 246 drivers/mfd/ezx-pcap.c head = pcap->adc_head; head 247 drivers/mfd/ezx-pcap.c if (!pcap->adc_queue[head]) { head 256 drivers/mfd/ezx-pcap.c tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN; head 258 drivers/mfd/ezx-pcap.c if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1) head 70 drivers/mfd/pcf50633-adc.c int head; head 72 drivers/mfd/pcf50633-adc.c head = adc->queue_head; head 74 drivers/mfd/pcf50633-adc.c if (!adc->queue[head]) head 77 drivers/mfd/pcf50633-adc.c adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg); head 84 drivers/mfd/pcf50633-adc.c int head, tail; head 88 drivers/mfd/pcf50633-adc.c head = adc->queue_head; head 98 drivers/mfd/pcf50633-adc.c if (head == tail) head 173 drivers/mfd/pcf50633-adc.c int head, res; head 176 drivers/mfd/pcf50633-adc.c head = adc->queue_head; head 178 drivers/mfd/pcf50633-adc.c req = adc->queue[head]; head 184 drivers/mfd/pcf50633-adc.c 
adc->queue[head] = NULL; head 185 drivers/mfd/pcf50633-adc.c adc->queue_head = (head + 1) & head 219 drivers/mfd/pcf50633-adc.c int i, head; head 224 drivers/mfd/pcf50633-adc.c head = adc->queue_head; head 226 drivers/mfd/pcf50633-adc.c if (WARN_ON(adc->queue[head])) head 98 drivers/misc/hpilo.c c = fifo_q->fifobar[fifo_q->head & fifo_q->imask]; head 103 drivers/misc/hpilo.c fifo_q->fifobar[fifo_q->head & fifo_q->imask] = head 105 drivers/misc/hpilo.c fifo_q->head += 1; head 121 drivers/misc/hpilo.c c = fifo_q->fifobar[fifo_q->head & fifo_q->imask]; head 219 drivers/misc/hpilo.c fifo_q->head = 0; head 169 drivers/misc/hpilo.h u64 head; head 336 drivers/misc/mei/client.c struct list_head *head) head 338 drivers/misc/mei/client.c list_add_tail(&cb->list, head); head 390 drivers/misc/mei/client.c static void mei_io_list_flush_cl(struct list_head *head, head 395 drivers/misc/mei/client.c list_for_each_entry_safe(cb, next, head, list) { head 410 drivers/misc/mei/client.c static void mei_io_tx_list_free_cl(struct list_head *head, head 415 drivers/misc/mei/client.c list_for_each_entry_safe(cb, next, head, list) { head 427 drivers/misc/mei/client.c static void mei_io_list_free_fp(struct list_head *head, const struct file *fp) head 431 drivers/misc/mei/client.c list_for_each_entry_safe(cb, next, head, list) head 71 drivers/misc/mic/scif/scif_debugfs.c static void scif_display_all_windows(struct list_head *head, struct seq_file *s) head 76 drivers/misc/mic/scif/scif_debugfs.c list_for_each(item, head) { head 1681 drivers/misc/mic/scif/scif_dma.c remote_req.head = &ep->rma_info.remote_reg_list; head 1712 drivers/misc/mic/scif/scif_dma.c req.head = &mmn->tc_reg_list; head 1749 drivers/misc/mic/scif/scif_dma.c req.head = &ep->rma_info.reg_list; head 309 drivers/misc/mic/scif/scif_fence.c req.head = &ep->rma_info.reg_list; head 311 drivers/misc/mic/scif/scif_fence.c req.head = &ep->rma_info.remote_reg_list; head 36 drivers/misc/mic/scif/scif_mmap.c req.head = &ep->rma_info.reg_list; head 90 drivers/misc/mic/scif/scif_mmap.c static void _scif_zap_mmaps(int node, struct list_head *head) head 96 drivers/misc/mic/scif/scif_mmap.c list_for_each(item, head) { head 240 drivers/misc/mic/scif/scif_mmap.c req.head = &ep->rma_info.remote_reg_list; head 379 drivers/misc/mic/scif/scif_mmap.c struct list_head *head = &ep->rma_info.remote_reg_list; head 386 drivers/misc/mic/scif/scif_mmap.c list_for_each_entry_from(window, head, list) { head 421 drivers/misc/mic/scif/scif_mmap.c head = &ep->rma_info.remote_reg_list; head 422 drivers/misc/mic/scif/scif_mmap.c list_for_each_entry_from(window, head, list) { head 456 drivers/misc/mic/scif/scif_mmap.c struct list_head *head = &ep->rma_info.remote_reg_list; head 463 drivers/misc/mic/scif/scif_mmap.c list_for_each_entry_safe_from(window, _window, head, list) { head 567 drivers/misc/mic/scif/scif_mmap.c req.head = &ep->rma_info.remote_reg_list; head 639 drivers/misc/mic/scif/scif_mmap.c req.head = &ep->rma_info.remote_reg_list; head 16 drivers/misc/mic/scif/scif_rb.c #define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size) head 17 drivers/misc/mic/scif/scif_rb.c #define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size) head 1189 drivers/misc/mic/scif/scif_rma.c req.head = &ep->rma_info.remote_reg_list; head 1727 drivers/misc/mic/scif/scif_rma.c req.head = &ep->rma_info.reg_list; head 19 drivers/misc/mic/scif/scif_rma_list.c void scif_insert_tcw(struct scif_window *window, struct list_head *head) head 22 drivers/misc/mic/scif/scif_rma_list.c struct 
scif_window *prev = list_entry(head, struct scif_window, list); head 27 drivers/misc/mic/scif/scif_rma_list.c if (!list_empty(head)) { head 28 drivers/misc/mic/scif/scif_rma_list.c curr = list_entry(head->prev, struct scif_window, list); head 30 drivers/misc/mic/scif/scif_rma_list.c list_add_tail(&window->list, head); head 34 drivers/misc/mic/scif/scif_rma_list.c list_for_each(item, head) { head 49 drivers/misc/mic/scif/scif_rma_list.c void scif_insert_window(struct scif_window *window, struct list_head *head) head 55 drivers/misc/mic/scif/scif_rma_list.c list_for_each(item, head) { head 62 drivers/misc/mic/scif/scif_rma_list.c list_add(&window->list, head); head 78 drivers/misc/mic/scif/scif_rma_list.c struct list_head *item, *temp, *head = req->head; head 89 drivers/misc/mic/scif/scif_rma_list.c if (!list_empty(head)) { head 90 drivers/misc/mic/scif/scif_rma_list.c window = list_last_entry(head, struct scif_window, list); head 96 drivers/misc/mic/scif/scif_rma_list.c list_for_each_safe(item, temp, head) { head 145 drivers/misc/mic/scif/scif_rma_list.c list_for_each(item, req->head) { head 198 drivers/misc/mic/scif/scif_rma_list.c struct list_head *head = &ep->rma_info.reg_list; head 204 drivers/misc/mic/scif/scif_rma_list.c list_for_each_entry_safe_from(window, _window, head, list) { head 230 drivers/misc/mic/scif/scif_rma_list.c struct list_head *head = &ep->rma_info.reg_list; head 233 drivers/misc/mic/scif/scif_rma_list.c list_for_each_safe(item, tmp, head) { head 252 drivers/misc/mic/scif/scif_rma_list.c struct list_head *head = &ep->rma_info.reg_list; head 259 drivers/misc/mic/scif/scif_rma_list.c list_for_each_safe(item, tmp, head) { head 32 drivers/misc/mic/scif/scif_rma_list.h struct list_head *head; head 36 drivers/misc/mic/scif/scif_rma_list.h void scif_insert_window(struct scif_window *window, struct list_head *head); head 38 drivers/misc/mic/scif/scif_rma_list.h struct list_head *head); head 86 drivers/misc/mic/vop/vop_main.h u16 head; head 336 drivers/misc/mic/vop/vop_vringh.c vvr->head = USHRT_MAX; head 755 drivers/misc/mic/vop/vop_vringh.c u16 *head = &vvr->head; head 763 drivers/misc/mic/vop/vop_vringh.c head, GFP_KERNEL); head 816 drivers/misc/mic/vop/vop_vringh.c if (*head != USHRT_MAX && copy->out_len && copy->update_used) { head 822 drivers/misc/mic/vop/vop_vringh.c vringh_complete_kern(vrh, *head, total); head 823 drivers/misc/mic/vop/vop_vringh.c *head = USHRT_MAX; head 13 drivers/misc/ocxl/pasid.c static void dump_list(struct list_head *head, char *type_str) head 18 drivers/misc/ocxl/pasid.c list_for_each_entry(cur, head, list) { head 24 drivers/misc/ocxl/pasid.c static int range_alloc(struct list_head *head, u32 size, int max_id, head 35 drivers/misc/ocxl/pasid.c pos = head; head 37 drivers/misc/ocxl/pasid.c list_for_each_entry(cur, head, list) { head 56 drivers/misc/ocxl/pasid.c dump_list(head, type_str); head 61 drivers/misc/ocxl/pasid.c static void range_free(struct list_head *head, u32 start, u32 size, head 67 drivers/misc/ocxl/pasid.c list_for_each_entry_safe(cur, tmp, head, list) { head 77 drivers/misc/ocxl/pasid.c dump_list(head, type_str); head 305 drivers/misc/sgi-gru/gru_instructions.h unsigned int head; head 603 drivers/misc/sgi-gru/gru_instructions.h static inline union gru_mesqhead gru_mesq_head(int head, int limit) head 607 drivers/misc/sgi-gru/gru_instructions.h mqh.head = head; head 115 drivers/misc/sgi-gru/grukservices.c union gru_mesqhead head __gru_cacheline_aligned__; /* CL 0 */ head 559 drivers/misc/sgi-gru/grukservices.c mq->head = 
gru_mesq_head(2, qlines / 2 + 1); head 640 drivers/misc/sgi-gru/grukservices.c unsigned int limit, head; head 646 drivers/misc/sgi-gru/grukservices.c head = gru_get_amo_value_head(cb); head 667 drivers/misc/sgi-gru/grukservices.c if (head != limit) { head 959 drivers/misc/sgi-xp/xpc_uv.c xpc_init_fifo_uv(struct xpc_fifo_head_uv *head) head 961 drivers/misc/sgi-xp/xpc_uv.c head->first = NULL; head 962 drivers/misc/sgi-xp/xpc_uv.c head->last = NULL; head 963 drivers/misc/sgi-xp/xpc_uv.c spin_lock_init(&head->lock); head 964 drivers/misc/sgi-xp/xpc_uv.c head->n_entries = 0; head 968 drivers/misc/sgi-xp/xpc_uv.c xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head) head 973 drivers/misc/sgi-xp/xpc_uv.c spin_lock_irqsave(&head->lock, irq_flags); head 974 drivers/misc/sgi-xp/xpc_uv.c first = head->first; head 975 drivers/misc/sgi-xp/xpc_uv.c if (head->first != NULL) { head 976 drivers/misc/sgi-xp/xpc_uv.c head->first = first->next; head 977 drivers/misc/sgi-xp/xpc_uv.c if (head->first == NULL) head 978 drivers/misc/sgi-xp/xpc_uv.c head->last = NULL; head 980 drivers/misc/sgi-xp/xpc_uv.c head->n_entries--; head 981 drivers/misc/sgi-xp/xpc_uv.c BUG_ON(head->n_entries < 0); head 985 drivers/misc/sgi-xp/xpc_uv.c spin_unlock_irqrestore(&head->lock, irq_flags); head 990 drivers/misc/sgi-xp/xpc_uv.c xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head, head 996 drivers/misc/sgi-xp/xpc_uv.c spin_lock_irqsave(&head->lock, irq_flags); head 997 drivers/misc/sgi-xp/xpc_uv.c if (head->last != NULL) head 998 drivers/misc/sgi-xp/xpc_uv.c head->last->next = last; head 1000 drivers/misc/sgi-xp/xpc_uv.c head->first = last; head 1001 drivers/misc/sgi-xp/xpc_uv.c head->last = last; head 1002 drivers/misc/sgi-xp/xpc_uv.c head->n_entries++; head 1003 drivers/misc/sgi-xp/xpc_uv.c spin_unlock_irqrestore(&head->lock, irq_flags); head 1007 drivers/misc/sgi-xp/xpc_uv.c xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head) head 1009 drivers/misc/sgi-xp/xpc_uv.c return head->n_entries; head 233 drivers/misc/sgi-xp/xpnet.c "skb->end=0x%p skb->len=%d\n", (void *)skb->head, head 243 drivers/misc/sgi-xp/xpnet.c (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), head 354 drivers/misc/sgi-xp/xpnet.c (void *)queued_msg->skb->head); head 419 drivers/misc/sgi-xp/xpnet.c "skb->end=0x%p skb->len=%d\n", (void *)skb->head, head 34 drivers/misc/vmw_vmci/vmci_context.c struct list_head head; head 37 drivers/misc/vmw_vmci/vmci_context.c .head = LIST_HEAD_INIT(ctx_list.head), head 170 drivers/misc/vmw_vmci/vmci_context.c list_add_tail_rcu(&context->list_item, &ctx_list.head); head 222 drivers/misc/vmw_vmci/vmci_context.c list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) { head 377 drivers/misc/vmw_vmci/vmci_context.c list_for_each_entry_rcu(context, &ctx_list.head, list_item) { head 399 drivers/misc/vmw_vmci/vmci_context.c list_for_each_entry_rcu(c, &ctx_list.head, list_item) { head 222 drivers/misc/vmw_vmci/vmci_queue_pair.c struct list_head head; head 227 drivers/misc/vmw_vmci/vmci_queue_pair.c .head = LIST_HEAD_INIT(qp_broker_list.head), head 232 drivers/misc/vmw_vmci/vmci_queue_pair.c .head = LIST_HEAD_INIT(qp_guest_endpoints.head), head 805 drivers/misc/vmw_vmci/vmci_queue_pair.c list_for_each_entry(entry, &qp_list->head, list_item) { head 998 drivers/misc/vmw_vmci/vmci_queue_pair.c list_add(&entry->list_item, &qp_list->head); head 1887 drivers/misc/vmw_vmci/vmci_queue_pair.c if (!list_empty(&qp_list->head)) { head 1889 drivers/misc/vmw_vmci/vmci_queue_pair.c list_first_entry(&qp_list->head, struct qp_entry, head 
2594 drivers/misc/vmw_vmci/vmci_queue_pair.c u64 head; head 2612 drivers/misc/vmw_vmci/vmci_queue_pair.c head = vmci_q_header_consumer_head(produce_q->q_header); head 2613 drivers/misc/vmw_vmci/vmci_queue_pair.c if (likely(head + read < consume_q_size)) { head 2614 drivers/misc/vmw_vmci/vmci_queue_pair.c result = qp_memcpy_from_queue_iter(to, consume_q, head, read); head 2618 drivers/misc/vmw_vmci/vmci_queue_pair.c const size_t tmp = (size_t) (consume_q_size - head); head 2620 drivers/misc/vmw_vmci/vmci_queue_pair.c result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp); head 358 drivers/mmc/host/usdhi6rol0.c size_t head = PAGE_SIZE - sg->offset; head 359 drivers/mmc/host/usdhi6rol0.c size_t blk_head = head % data->blksz; head 377 drivers/mmc/host/usdhi6rol0.c if (head < data->blksz) head 164 drivers/mmc/host/vub300.c struct sd_command_header head; head 1066 drivers/mmc/host/vub300.c vub300->cmnd.head.header_size = 20; head 1067 drivers/mmc/host/vub300.c vub300->cmnd.head.header_type = 0x00; head 1068 drivers/mmc/host/vub300.c vub300->cmnd.head.port_number = 0; /* "0" means port 1 */ head 1069 drivers/mmc/host/vub300.c vub300->cmnd.head.command_type = 0x00; /* standard read command */ head 1070 drivers/mmc/host/vub300.c vub300->cmnd.head.response_type = response_type; head 1071 drivers/mmc/host/vub300.c vub300->cmnd.head.command_index = cmd->opcode; head 1072 drivers/mmc/host/vub300.c vub300->cmnd.head.arguments[0] = cmd->arg >> 24; head 1073 drivers/mmc/host/vub300.c vub300->cmnd.head.arguments[1] = cmd->arg >> 16; head 1074 drivers/mmc/host/vub300.c vub300->cmnd.head.arguments[2] = cmd->arg >> 8; head 1075 drivers/mmc/host/vub300.c vub300->cmnd.head.arguments[3] = cmd->arg >> 0; head 1078 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[0] = 0; head 1079 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[1] = 0; head 1080 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF; head 1081 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF; head 1082 drivers/mmc/host/vub300.c vub300->cmnd.head.command_type = 0x00; head 1083 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[0] = 0; head 1084 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[1] = 0; head 1085 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[2] = 0; head 1086 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[3] = 0; head 1088 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[0] = 0; head 1089 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[1] = 0; head 1090 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF; head 1091 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF; head 1092 drivers/mmc/host/vub300.c vub300->cmnd.head.command_type = 0x00; head 1093 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[0] = 0; head 1094 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[1] = 0; head 1095 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[2] = 0; head 1096 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[3] = 0; head 1099 drivers/mmc/host/vub300.c if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */ head 1100 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[0] = head 1102 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[1] = head 1104 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[0] = head 1106 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[1] = 
head 1109 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[0] = 0; head 1110 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[1] = 0; head 1111 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[0] = head 1113 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[1] = head 1116 drivers/mmc/host/vub300.c vub300->cmnd.head.command_type = head 1118 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[0] = head 1120 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[1] = head 1122 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[2] = head 1124 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[3] = head 1127 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[0] = 0; head 1128 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[1] = 0; head 1131 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF; head 1132 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF; head 1133 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF; head 1134 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF; head 1135 drivers/mmc/host/vub300.c vub300->cmnd.head.command_type = head 1137 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[0] = head 1139 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[1] = head 1141 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[2] = head 1143 drivers/mmc/host/vub300.c vub300->cmnd.head.transfer_size[3] = head 1146 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[0] = 0; head 1147 drivers/mmc/host/vub300.c vub300->cmnd.head.block_count[1] = 0; head 1150 drivers/mmc/host/vub300.c if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) { head 1151 drivers/mmc/host/vub300.c u16 block_size = vub300->cmnd.head.block_size[1] | head 1152 drivers/mmc/host/vub300.c (vub300->cmnd.head.block_size[0] << 8); head 1155 drivers/mmc/host/vub300.c vub300->cmnd.head.block_boundary[0] = head 1157 drivers/mmc/host/vub300.c vub300->cmnd.head.block_boundary[1] = head 1160 drivers/mmc/host/vub300.c vub300->cmnd.head.block_boundary[0] = 0; head 1161 drivers/mmc/host/vub300.c vub300->cmnd.head.block_boundary[1] = 0; head 2181 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[0] = 0x00; head 2182 drivers/mmc/host/vub300.c vub300->cmnd.head.block_size[1] = 0x00; head 638 drivers/mtd/sm_ftl.c ftl->heads = chs_table[i].head; head 64 drivers/mtd/sm_ftl.h unsigned char head; head 57 drivers/mtd/ssfdc.c unsigned char head; head 74 drivers/mtd/ssfdc.c static int get_chs(unsigned long size, unsigned short *cyl, unsigned char *head, head 87 drivers/mtd/ssfdc.c if (head) head 88 drivers/mtd/ssfdc.c *head = chs_table[k].head; head 90 drivers/net/bonding/bond_alb.c SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX; head 104 drivers/net/bonding/bond_alb.c index = SLAVE_TLB_INFO(slave).head; head 205 drivers/net/bonding/bond_alb.c u32 next_index = slave_info->head; head 214 drivers/net/bonding/bond_alb.c slave_info->head = hash_index; head 69 drivers/net/caif/caif_serial.c struct sk_buff_head head; head 223 drivers/net/caif/caif_serial.c while ((skb = skb_peek(&ser->head)) != NULL) { head 252 drivers/net/caif/caif_serial.c struct sk_buff *tmp = skb_dequeue(&ser->head); head 258 drivers/net/caif/caif_serial.c if (ser->head.qlen <= SEND_QUEUE_LOW && head 277 drivers/net/caif/caif_serial.c if (ser->head.qlen > SEND_QUEUE_HIGH && head 283 drivers/net/caif/caif_serial.c 
skb_queue_tail(&ser->head, skb); head 421 drivers/net/caif/caif_serial.c skb_queue_head_init(&serdev->head); head 50 drivers/net/caif/caif_virtio.c unsigned short head; head 267 drivers/net/caif/caif_virtio.c if (cfv->ctx.head != USHRT_MAX) { head 269 drivers/net/caif/caif_virtio.c cfv->ctx.head, head 271 drivers/net/caif/caif_virtio.c cfv->ctx.head = USHRT_MAX; head 278 drivers/net/caif/caif_virtio.c &cfv->ctx.head, head 716 drivers/net/caif/caif_virtio.c cfv->ctx.head = USHRT_MAX; head 1215 drivers/net/can/dev.c static void can_dellink(struct net_device *dev, struct list_head *head) head 68 drivers/net/can/rx-offload.c static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, head 73 drivers/net/can/rx-offload.c skb_queue_reverse_walk(head, pos) { head 84 drivers/net/can/rx-offload.c skb_queue_len(head)); head 92 drivers/net/can/rx-offload.c __skb_queue_head(head, new); head 94 drivers/net/can/rx-offload.c __skb_queue_after(head, insert, new); head 250 drivers/net/can/vxcan.c static void vxcan_dellink(struct net_device *dev, struct list_head *head) head 263 drivers/net/can/vxcan.c unregister_netdevice_queue(dev, head); head 268 drivers/net/can/vxcan.c unregister_netdevice_queue(peer, head); head 145 drivers/net/eql.c struct list_head *this, *tmp, *head; head 148 drivers/net/eql.c head = &eql->queue.all_slaves; head 149 drivers/net/eql.c list_for_each_safe(this, tmp, head) { head 232 drivers/net/eql.c struct list_head *head, *tmp, *this; head 236 drivers/net/eql.c head = &queue->all_slaves; head 237 drivers/net/eql.c list_for_each_safe(this, tmp, head) { head 299 drivers/net/eql.c struct list_head *this, *tmp, *head; head 305 drivers/net/eql.c head = &queue->all_slaves; head 306 drivers/net/eql.c list_for_each_safe(this, tmp, head) { head 364 drivers/net/eql.c struct list_head *this, *head; head 366 drivers/net/eql.c head = &queue->all_slaves; head 367 drivers/net/eql.c list_for_each(this, head) { head 170 drivers/net/ethernet/3com/3c509.c int head, size; head 1388 drivers/net/ethernet/3com/3c515.c skb->head, temp); head 122 drivers/net/ethernet/amazon/ena/ena_com.c sq->head = 0; head 144 drivers/net/ethernet/amazon/ena/ena_com.c cq->head = 0; head 167 drivers/net/ethernet/amazon/ena/ena_com.c aenq->head = aenq->q_depth; head 453 drivers/net/ethernet/amazon/ena/ena_com.c io_cq->head = 0; head 491 drivers/net/ethernet/amazon/ena/ena_com.c head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); head 515 drivers/net/ethernet/amazon/ena/ena_com.c admin_queue->cq.head += comp_num; head 517 drivers/net/ethernet/amazon/ena/ena_com.c admin_queue->sq.head += comp_num; head 1511 drivers/net/ethernet/amazon/ena/ena_com.c WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n"); head 2021 drivers/net/ethernet/amazon/ena/ena_com.c masked_head = aenq->head & (aenq->q_depth - 1); head 2057 drivers/net/ethernet/amazon/ena/ena_com.c aenq->head += processed; head 2066 drivers/net/ethernet/amazon/ena/ena_com.c writel_relaxed((u32)aenq->head, head 160 drivers/net/ethernet/amazon/ena/ena_com.h u16 head; head 214 drivers/net/ethernet/amazon/ena/ena_com.h u16 head; head 224 drivers/net/ethernet/amazon/ena/ena_com.h u16 head; head 270 drivers/net/ethernet/amazon/ena/ena_com.h u16 head; head 42 drivers/net/ethernet/amazon/ena/ena_eth_com.c head_masked = io_cq->head & (io_cq->q_depth - 1); head 273 drivers/net/ethernet/amazon/ena/ena_eth_com.c head_masked = io_cq->head & (io_cq->q_depth - 1); head 195 drivers/net/ethernet/amazon/ena/ena_eth_com.h u16 unreported_comp, head; 
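The surrounding amazon/ena entries (ena_com.c, ena_eth_com.h) all lean on one idiom: `head` is a free-running 16-bit counter that is masked with `q_depth - 1` only at the moment a descriptor is indexed, so differences such as `head - io_cq->last_head_update` remain correct across wraparound as long as the depth is a power of two. A minimal user-space sketch of that pattern follows; it is an illustration only, assuming a power-of-two depth, and the names `ring_*`, `DEPTH`, and the demo `main()` are invented here, not taken from any driver listed above.

    #include <stdint.h>
    #include <stdio.h>

    #define DEPTH 8u                    /* must be a power of two */

    struct ring {                       /* illustrative type, not a driver's */
        uint16_t head;                  /* free-running; wraps naturally at 65536 */
        uint16_t last_head_update;      /* head value at the last doorbell write */
        uint32_t slot[DEPTH];
    };

    /* Consume one entry: mask only when indexing, never when advancing. */
    static uint32_t ring_pop(struct ring *r)
    {
        uint32_t v = r->slot[r->head & (DEPTH - 1)];
        r->head++;                      /* left unmasked on purpose */
        return v;
    }

    /* Unsigned subtraction is wrap-safe, mirroring head - last_head_update. */
    static uint16_t ring_unreported(const struct ring *r)
    {
        return (uint16_t)(r->head - r->last_head_update);
    }

    int main(void)
    {
        struct ring r = { .head = 65530 }; /* start near the u16 wrap on purpose */
        r.last_head_update = r.head;
        for (int i = 0; i < 12; i++)
            ring_pop(&r);
        printf("unreported completions: %u\n", ring_unreported(&r)); /* prints 12 */
        return 0;
    }

The same shape explains the am79c961a and xgene entries below, which keep the index pre-wrapped instead (`head = (head + 1) & (XGENE_ENET_NUM_DESC - 1)`); both variants rely on the power-of-two mask.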
head 199 drivers/net/ethernet/amazon/ena/ena_eth_com.h head = io_cq->head; head 200 drivers/net/ethernet/amazon/ena/ena_eth_com.h unreported_comp = head - io_cq->last_head_update; head 205 drivers/net/ethernet/amazon/ena/ena_eth_com.h io_cq->qid, head); head 206 drivers/net/ethernet/amazon/ena/ena_eth_com.h writel(head, io_cq->cq_head_db_reg); head 207 drivers/net/ethernet/amazon/ena/ena_eth_com.h io_cq->last_head_update = head; head 235 drivers/net/ethernet/amazon/ena/ena_eth_com.h io_cq->head++; head 238 drivers/net/ethernet/amazon/ena/ena_eth_com.h if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) head 249 drivers/net/ethernet/amazon/ena/ena_eth_com.h masked_head = io_cq->head & (io_cq->q_depth - 1); head 445 drivers/net/ethernet/amd/am79c961a.c unsigned int head; head 448 drivers/net/ethernet/amd/am79c961a.c head = priv->txhead; head 449 drivers/net/ethernet/amd/am79c961a.c hdraddr = priv->txhdr + (head << 3); head 450 drivers/net/ethernet/amd/am79c961a.c bufaddr = priv->txbuffer[head]; head 451 drivers/net/ethernet/amd/am79c961a.c head += 1; head 452 drivers/net/ethernet/amd/am79c961a.c if (head >= TX_BUFFERS) head 453 drivers/net/ethernet/amd/am79c961a.c head = 0; head 458 drivers/net/ethernet/amd/am79c961a.c priv->txhead = head; head 241 drivers/net/ethernet/amd/atarilance.c #define PKTBUF_ADDR(head) (((unsigned char *)(MEM)) + (head)->base) head 779 drivers/net/ethernet/amd/atarilance.c struct lance_tx_head *head; head 816 drivers/net/ethernet/amd/atarilance.c head = &(MEM->tx_head[entry]); head 823 drivers/net/ethernet/amd/atarilance.c head->length = -len; head 824 drivers/net/ethernet/amd/atarilance.c head->misc = 0; head 825 drivers/net/ethernet/amd/atarilance.c lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); head 826 drivers/net/ethernet/amd/atarilance.c head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; head 973 drivers/net/ethernet/amd/atarilance.c struct lance_rx_head *head = &(MEM->rx_head[entry]); head 974 drivers/net/ethernet/amd/atarilance.c int status = head->flag; head 987 drivers/net/ethernet/amd/atarilance.c head->flag &= (RMD1_ENP|RMD1_STP); head 990 drivers/net/ethernet/amd/atarilance.c short pkt_len = head->msg_length & 0xfff; head 1007 drivers/net/ethernet/amd/atarilance.c head->flag |= RMD1_OWN_CHIP; head 1014 drivers/net/ethernet/amd/atarilance.c u_char *data = PKTBUF_ADDR(head); head 1024 drivers/net/ethernet/amd/atarilance.c lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len ); head 1032 drivers/net/ethernet/amd/atarilance.c head->flag |= RMD1_OWN_CHIP; head 106 drivers/net/ethernet/amd/sun3lance.c #define PKTBUF_ADDR(head) (void *)((unsigned long)(MEM) | (head)->base) head 520 drivers/net/ethernet/amd/sun3lance.c struct lance_tx_head *head; head 614 drivers/net/ethernet/amd/sun3lance.c head = &(MEM->tx_head[entry]); head 625 drivers/net/ethernet/amd/sun3lance.c head->length = (-len) | 0xf000; head 626 drivers/net/ethernet/amd/sun3lance.c head->misc = 0; head 628 drivers/net/ethernet/amd/sun3lance.c skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len); head 630 drivers/net/ethernet/amd/sun3lance.c memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); head 632 drivers/net/ethernet/amd/sun3lance.c head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; head 702 drivers/net/ethernet/amd/sun3lance.c struct lance_tx_head *head = &(MEM->tx_head[old_tx]); head 706 drivers/net/ethernet/amd/sun3lance.c if (head->flag & TMD1_OWN_CHIP) head 709 drivers/net/ethernet/amd/sun3lance.c if (head->flag & TMD1_ERR) { head 710 
drivers/net/ethernet/amd/sun3lance.c int status = head->misc; head 725 drivers/net/ethernet/amd/sun3lance.c } else if(head->flag & (TMD1_ENP | TMD1_STP)) { head 727 drivers/net/ethernet/amd/sun3lance.c head->flag &= ~(TMD1_ENP | TMD1_STP); head 728 drivers/net/ethernet/amd/sun3lance.c if(head->flag & (TMD1_ONE | TMD1_MORE)) head 789 drivers/net/ethernet/amd/sun3lance.c struct lance_rx_head *head = &(MEM->rx_head[entry]); head 790 drivers/net/ethernet/amd/sun3lance.c int status = head->flag; head 803 drivers/net/ethernet/amd/sun3lance.c head->flag &= (RMD1_ENP|RMD1_STP); head 807 drivers/net/ethernet/amd/sun3lance.c short pkt_len = (head->msg_length & 0xfff) - 4; head 818 drivers/net/ethernet/amd/sun3lance.c head->msg_length = 0; head 819 drivers/net/ethernet/amd/sun3lance.c head->flag |= RMD1_OWN_CHIP; head 826 drivers/net/ethernet/amd/sun3lance.c u_char *data = PKTBUF_ADDR(head); head 840 drivers/net/ethernet/amd/sun3lance.c u_char *data = PKTBUF_ADDR(head); head 848 drivers/net/ethernet/amd/sun3lance.c PKTBUF_ADDR(head), head 859 drivers/net/ethernet/amd/sun3lance.c head->msg_length = 0; head 860 drivers/net/ethernet/amd/sun3lance.c head->flag = RMD1_OWN_CHIP; head 246 drivers/net/ethernet/apm/xgene-v2/main.c u8 head; head 249 drivers/net/ethernet/apm/xgene-v2/main.c head = tx_ring->head; head 256 drivers/net/ethernet/apm/xgene-v2/main.c raw_desc = &tx_ring->raw_desc[head]; head 263 drivers/net/ethernet/apm/xgene-v2/main.c skb = tx_ring->pkt_info[head].skb; head 264 drivers/net/ethernet/apm/xgene-v2/main.c dma_addr = tx_ring->pkt_info[head].dma_addr; head 265 drivers/net/ethernet/apm/xgene-v2/main.c pkt_buf = tx_ring->pkt_info[head].pkt_buf; head 276 drivers/net/ethernet/apm/xgene-v2/main.c head = (head + 1) & (XGENE_ENET_NUM_DESC - 1); head 282 drivers/net/ethernet/apm/xgene-v2/main.c tx_ring->head = head; head 294 drivers/net/ethernet/apm/xgene-v2/main.c u8 head, rx_error; head 300 drivers/net/ethernet/apm/xgene-v2/main.c head = rx_ring->head; head 307 drivers/net/ethernet/apm/xgene-v2/main.c raw_desc = &rx_ring->raw_desc[head]; head 314 drivers/net/ethernet/apm/xgene-v2/main.c skb = rx_ring->pkt_info[head].skb; head 315 drivers/net/ethernet/apm/xgene-v2/main.c rx_ring->pkt_info[head].skb = NULL; head 316 drivers/net/ethernet/apm/xgene-v2/main.c dma_addr = rx_ring->pkt_info[head].dma_addr; head 342 drivers/net/ethernet/apm/xgene-v2/main.c head = (head + 1) & (XGENE_ENET_NUM_DESC - 1); head 346 drivers/net/ethernet/apm/xgene-v2/main.c rx_ring->head = head; head 42 drivers/net/ethernet/apm/xgene-v2/ring.c ring->head = 0; head 54 drivers/net/ethernet/apm/xgene-v2/ring.c ring->head = 0; head 76 drivers/net/ethernet/apm/xgene-v2/ring.h u8 head; head 616 drivers/net/ethernet/apm/xgene/xgene_enet_main.c u16 slots, head; head 626 drivers/net/ethernet/apm/xgene/xgene_enet_main.c head = buf_pool->head; head 636 drivers/net/ethernet/apm/xgene/xgene_enet_main.c page = buf_pool->frag_page[head]; head 639 drivers/net/ethernet/apm/xgene/xgene_enet_main.c buf_pool->frag_page[head] = NULL; head 640 drivers/net/ethernet/apm/xgene/xgene_enet_main.c head = (head + 1) & slots; head 642 drivers/net/ethernet/apm/xgene/xgene_enet_main.c buf_pool->head = head; head 680 drivers/net/ethernet/apm/xgene/xgene_enet_main.c u16 slots, head; head 728 drivers/net/ethernet/apm/xgene/xgene_enet_main.c head = page_pool->head; head 739 drivers/net/ethernet/apm/xgene/xgene_enet_main.c page = page_pool->frag_page[head]; head 745 drivers/net/ethernet/apm/xgene/xgene_enet_main.c page_pool->frag_page[head] = NULL; head 746 
drivers/net/ethernet/apm/xgene/xgene_enet_main.c head = (head + 1) & slots; head 749 drivers/net/ethernet/apm/xgene/xgene_enet_main.c page_pool->head = head; head 787 drivers/net/ethernet/apm/xgene/xgene_enet_main.c u16 head = ring->head; head 793 drivers/net/ethernet/apm/xgene/xgene_enet_main.c raw_desc = &ring->raw_desc[head]; head 803 drivers/net/ethernet/apm/xgene/xgene_enet_main.c head = (head + 1) & slots; head 804 drivers/net/ethernet/apm/xgene/xgene_enet_main.c exp_desc = &ring->raw_desc[head]; head 807 drivers/net/ethernet/apm/xgene/xgene_enet_main.c head = (head - 1) & slots; head 824 drivers/net/ethernet/apm/xgene/xgene_enet_main.c head = (head + 1) & slots; head 837 drivers/net/ethernet/apm/xgene/xgene_enet_main.c ring->head = head; head 95 drivers/net/ethernet/apm/xgene/xgene_enet_main.h u16 head; head 676 drivers/net/ethernet/apple/macmace.c int left, head; head 697 drivers/net/ethernet/apple/macmace.c head = N_RX_RING - left; head 701 drivers/net/ethernet/apple/macmace.c while (mp->rx_tail < head) { head 620 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c list_for_each_entry(pos, &o->head, link) { head 651 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c list_for_each_entry(pos, &o->head, link) head 667 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c list_for_each_entry(pos, &o->head, link) head 683 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c list_for_each_entry(pos, &o->head, link) head 704 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c list_for_each_entry(pos, &o->head, link) head 721 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c list_for_each_entry(pos, &o->head, link) head 738 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c list_for_each_entry(pos, &o->head, link) head 1232 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c if (list_empty(&o->head)) { head 1239 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c *ppos = list_first_entry(&o->head, head 1248 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c if (list_is_last(&pos->link, &o->head)) head 1823 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c list_add(®_elem->link, &cam_obj->head); head 2070 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c list_for_each_entry(pos, &o->head, link) { head 2118 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c INIT_LIST_HEAD(&o->head); head 298 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h struct list_head head; head 346 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c list_for_each(pos, &obj->head) head 3893 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct hlist_head *head; head 3897 drivers/net/ethernet/broadcom/bnxt/bnxt.c head = &bp->ntp_fltr_hash_tbl[i]; head 3898 drivers/net/ethernet/broadcom/bnxt/bnxt.c hlist_for_each_entry_safe(fltr, tmp, head, hash) { head 11042 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct hlist_head *head; head 11092 drivers/net/ethernet/broadcom/bnxt/bnxt.c head = &bp->ntp_fltr_hash_tbl[idx]; head 11094 drivers/net/ethernet/broadcom/bnxt/bnxt.c hlist_for_each_entry_rcu(fltr, head, hash) { head 11116 drivers/net/ethernet/broadcom/bnxt/bnxt.c hlist_add_head_rcu(&new_fltr->hash, head); head 11135 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct hlist_head *head; head 11140 drivers/net/ethernet/broadcom/bnxt/bnxt.c head = &bp->ntp_fltr_hash_tbl[i]; head 11141 drivers/net/ethernet/broadcom/bnxt/bnxt.c hlist_for_each_entry_safe(fltr, tmp, head, hash) { head 906 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c struct hlist_head *head; head 909 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c head = &bp->ntp_fltr_hash_tbl[i]; head 911 
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c hlist_for_each_entry_rcu(fltr, head, hash) { head 936 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c struct hlist_head *head; head 938 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c head = &bp->ntp_fltr_hash_tbl[i]; head 940 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c hlist_for_each_entry_rcu(fltr, head, hash) { head 270 drivers/net/ethernet/brocade/bna/bna.h struct bna_mac *bna_cam_mod_mac_get(struct list_head *head); head 2050 drivers/net/ethernet/brocade/bna/bna_enet.c bna_cam_mod_mac_get(struct list_head *head) head 2054 drivers/net/ethernet/brocade/bna/bna_enet.c mac = list_first_entry_or_null(head, struct bna_mac, qe); head 838 drivers/net/ethernet/cadence/macb_main.c unsigned int head; head 852 drivers/net/ethernet/cadence/macb_main.c head = queue->tx_head; head 853 drivers/net/ethernet/cadence/macb_main.c for (tail = queue->tx_tail; tail != head; tail++) { head 1355 drivers/net/ethernet/cadence/macb_main.c unsigned int head = queue->tx_head; head 1362 drivers/net/ethernet/cadence/macb_main.c if (head == tail) head 1749 drivers/net/ethernet/cadence/macb_main.c (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); head 1819 drivers/net/ethernet/cadence/macb_main.c queue_index, skb->len, skb->head, skb->data, head 2675 drivers/net/ethernet/cadence/macb_main.c unsigned int tail, head; head 2682 drivers/net/ethernet/cadence/macb_main.c head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); head 2694 drivers/net/ethernet/cadence/macb_main.c regs_buff[9] = head; head 2696 drivers/net/ethernet/cadence/macb_main.c regs_buff[11] = macb_tx_dma(&bp->queues[0], head); head 300 drivers/net/ethernet/cadence/macb_ptp.c unsigned long head = queue->tx_ts_head; head 307 drivers/net/ethernet/cadence/macb_ptp.c if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0) head 312 drivers/net/ethernet/cadence/macb_ptp.c tx_timestamp = &queue->tx_timestamps[head]; head 320 drivers/net/ethernet/cadence/macb_ptp.c (head + 1) & (PTP_TS_BUFFER_SIZE - 1)); head 330 drivers/net/ethernet/cadence/macb_ptp.c unsigned long head, tail; head 334 drivers/net/ethernet/cadence/macb_ptp.c head = smp_load_acquire(&queue->tx_ts_head); head 337 drivers/net/ethernet/cadence/macb_ptp.c while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) { head 316 drivers/net/ethernet/cavium/liquidio/octeon_iq.h struct list_head head; head 414 drivers/net/ethernet/cavium/liquidio/request_manager.c [OCTEON_ORDERED_SC_LIST].head); head 749 drivers/net/ethernet/cavium/liquidio/request_manager.c INIT_LIST_HEAD(&oct->sc_buf_pool.head); head 766 drivers/net/ethernet/cavium/liquidio/request_manager.c list_add_tail(&sc->node, &oct->sc_buf_pool.head); head 789 drivers/net/ethernet/cavium/liquidio/request_manager.c list_for_each_safe(tmp, tmp2, &done_sc_list->head) { head 798 drivers/net/ethernet/cavium/liquidio/request_manager.c list_add_tail(&sc->node, &zombie_sc_list->head); head 823 drivers/net/ethernet/cavium/liquidio/request_manager.c list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) { head 844 drivers/net/ethernet/cavium/liquidio/request_manager.c list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) { head 852 drivers/net/ethernet/cavium/liquidio/request_manager.c INIT_LIST_HEAD(&oct->sc_buf_pool.head); head 878 drivers/net/ethernet/cavium/liquidio/request_manager.c if (list_empty(&oct->sc_buf_pool.head)) { head 883 drivers/net/ethernet/cavium/liquidio/request_manager.c list_for_each(tmp, &oct->sc_buf_pool.head) head 935 drivers/net/ethernet/cavium/liquidio/request_manager.c 
list_add_tail(&sc->node, &oct->sc_buf_pool.head); head 36 drivers/net/ethernet/cavium/liquidio/response_manager.c INIT_LIST_HEAD(&oct->response_list[i].head); head 79 drivers/net/ethernet/cavium/liquidio/response_manager.c if (list_empty(&ordered_sc_list->head)) { head 84 drivers/net/ethernet/cavium/liquidio/response_manager.c sc = list_first_entry(&ordered_sc_list->head, head 148 drivers/net/ethernet/cavium/liquidio/response_manager.c [OCTEON_DONE_SC_LIST].head); head 189 drivers/net/ethernet/cavium/liquidio/response_manager.c head); head 42 drivers/net/ethernet/cavium/liquidio/response_manager.h struct list_head head; head 271 drivers/net/ethernet/cavium/thunder/nicvf_queues.c rbdr->head = 0; head 320 drivers/net/ethernet/cavium/thunder/nicvf_queues.c int head, tail; head 332 drivers/net/ethernet/cavium/thunder/nicvf_queues.c head = rbdr->head; head 336 drivers/net/ethernet/cavium/thunder/nicvf_queues.c while (head != tail) { head 337 drivers/net/ethernet/cavium/thunder/nicvf_queues.c desc = GET_RBDR_DESC(rbdr, head); head 344 drivers/net/ethernet/cavium/thunder/nicvf_queues.c head++; head 345 drivers/net/ethernet/cavium/thunder/nicvf_queues.c head &= (rbdr->dmem.q_len - 1); head 360 drivers/net/ethernet/cavium/thunder/nicvf_queues.c head = 0; head 361 drivers/net/ethernet/cavium/thunder/nicvf_queues.c while (head < rbdr->pgcnt) { head 362 drivers/net/ethernet/cavium/thunder/nicvf_queues.c pgcache = &rbdr->pgcache[head]; head 370 drivers/net/ethernet/cavium/thunder/nicvf_queues.c head++; head 518 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->head = 0; head 592 drivers/net/ethernet/cavium/thunder/nicvf_queues.c while (sq->head != sq->tail) { head 593 drivers/net/ethernet/cavium/thunder/nicvf_queues.c skb = (struct sk_buff *)sq->skbuff[sq->head]; head 597 drivers/net/ethernet/cavium/thunder/nicvf_queues.c page = (struct page *)sq->xdp_page[sq->head]; head 603 drivers/net/ethernet/cavium/thunder/nicvf_queues.c hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); head 612 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_unmap_sndq_buffers(nic, sq, sq->head, head 618 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->head++; head 619 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->head &= (sq->dmem.q_len - 1); head 666 drivers/net/ethernet/cavium/thunder/nicvf_queues.c rbdr->head = nicvf_queue_reg_read(nic, head 1148 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->head += desc_cnt; head 1149 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->head &= (sq->dmem.q_len - 1); head 1182 drivers/net/ethernet/cavium/thunder/nicvf_queues.c u64 head, tail; head 1187 drivers/net/ethernet/cavium/thunder/nicvf_queues.c head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; head 1189 drivers/net/ethernet/cavium/thunder/nicvf_queues.c while (sq->head != head) { head 1190 drivers/net/ethernet/cavium/thunder/nicvf_queues.c hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); head 1195 drivers/net/ethernet/cavium/thunder/nicvf_queues.c skb = (struct sk_buff *)sq->skbuff[sq->head]; head 227 drivers/net/ethernet/cavium/thunder/nicvf_queues.h u32 head; head 270 drivers/net/ethernet/cavium/thunder/nicvf_queues.h u32 head; head 1281 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c static void clean_l2_data(struct rcu_head *head) head 1283 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); head 1588 drivers/net/ethernet/chelsio/cxgb3/sge.c dui = (struct deferred_unmap_info *)skb->head; 
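The liquidio request_manager.c/response_manager.c entries around this point are a textbook use of the intrusive `list_head` pattern that dominates this listing: the pool is nothing but a `struct list_head`, each buffer embeds its own node, and allocation is `list_first_entry()` guarded by `list_empty()`. Below is a self-contained sketch of that shape, with a minimal stand-in for the `<linux/list.h>` primitives; `sc_pool_get()`, `struct soft_cmd`, and the demo are illustrative names, not the driver's.

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal stand-ins for the <linux/list.h> primitives the drivers use. */
    struct list_head { struct list_head *prev, *next; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
    static int  list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
        n->prev = n->next = n;
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* A buffer embeds its node, as octeon's soft command embeds ->node. */
    struct soft_cmd { int id; struct list_head node; };

    /* Take the oldest free buffer, or NULL: empty check, first entry, unlink. */
    static struct soft_cmd *sc_pool_get(struct list_head *pool)
    {
        if (list_empty(pool))
            return NULL;
        struct soft_cmd *sc = container_of(pool->next, struct soft_cmd, node);
        list_del(&sc->node);
        return sc;
    }

    int main(void)
    {
        struct list_head pool;
        struct soft_cmd bufs[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

        INIT_LIST_HEAD(&pool);
        for (int i = 0; i < 3; i++)
            list_add_tail(&bufs[i].node, &pool);   /* populate the free pool */

        struct soft_cmd *sc;
        while ((sc = sc_pool_get(&pool)))
            printf("got buffer %d\n", sc->id);     /* 0, 1, 2: FIFO order */
        return 0;
    }

Because the node is embedded rather than heap-allocated alongside the payload, freeing a buffer back to the pool is a single `list_add_tail()` with no allocation, which is why the same pattern recurs in ocxl/pasid.c, scif_rma_list.c, and the bnx2x objects indexed above.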
head 1607 drivers/net/ethernet/chelsio/cxgb3/sge.c dui = (struct deferred_unmap_info *)skb->head; head 1714 drivers/net/ethernet/chelsio/cxgb3/sge.c map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { head 1729 drivers/net/ethernet/chelsio/cxgb3/sge.c write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); head 1768 drivers/net/ethernet/chelsio/cxgb3/sge.c map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) head 1784 drivers/net/ethernet/chelsio/cxgb3/sge.c (dma_addr_t *)skb->head); head 297 drivers/net/ethernet/chelsio/cxgb4/sge.c unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); head 2289 drivers/net/ethernet/chelsio/cxgb4/sge.c (dma_addr_t *)skb->head)) { head 2322 drivers/net/ethernet/chelsio/cxgb4/sge.c (dma_addr_t *)skb->head); head 235 drivers/net/ethernet/cisco/enic/enic_clsf.c struct hlist_head *head; head 237 drivers/net/ethernet/cisco/enic/enic_clsf.c head = &enic->rfs_h.ht_head[tbl_idx]; head 242 drivers/net/ethernet/cisco/enic/enic_clsf.c hlist_add_head(&d->node, head); head 239 drivers/net/ethernet/dec/tulip/interrupt.c skb->head, temp); head 465 drivers/net/ethernet/dec/tulip/interrupt.c skb->head, temp); head 1076 drivers/net/ethernet/dlink/sundance.c unsigned head = np->cur_task % TX_RING_SIZE; head 1094 drivers/net/ethernet/dlink/sundance.c iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc), head 518 drivers/net/ethernet/dnet.c skb->len, skb->head, skb->data); head 129 drivers/net/ethernet/emulex/benet/be.h u32 tail, head; head 154 drivers/net/ethernet/emulex/benet/be.h return q->dma_mem.va + q->head * q->entry_size; head 169 drivers/net/ethernet/emulex/benet/be.h index_inc(&q->head, q->len); head 604 drivers/net/ethernet/emulex/benet/be_cmds.c u32 index = mcc_obj->q.head; head 921 drivers/net/ethernet/emulex/benet/be_main.c u32 head = txo->q.head; head 924 drivers/net/ethernet/emulex/benet/be_main.c return head; head 931 drivers/net/ethernet/emulex/benet/be_main.c struct sk_buff *skb, u16 head) head 935 drivers/net/ethernet/emulex/benet/be_main.c struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head); head 940 drivers/net/ethernet/emulex/benet/be_main.c BUG_ON(txo->sent_skb_list[head]); head 941 drivers/net/ethernet/emulex/benet/be_main.c txo->sent_skb_list[head] = skb; head 942 drivers/net/ethernet/emulex/benet/be_main.c txo->last_req_hdr = head; head 965 drivers/net/ethernet/emulex/benet/be_main.c struct be_tx_obj *txo, u32 head, bool map_single, head 973 drivers/net/ethernet/emulex/benet/be_main.c txq->head = head; head 985 drivers/net/ethernet/emulex/benet/be_main.c txq->head = head; head 999 drivers/net/ethernet/emulex/benet/be_main.c u32 head; head 1003 drivers/net/ethernet/emulex/benet/be_main.c head = be_tx_get_wrb_hdr(txo); head 1027 drivers/net/ethernet/emulex/benet/be_main.c be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head); head 1034 drivers/net/ethernet/emulex/benet/be_main.c be_xmit_restore(adapter, txo, head, map_single, copied); head 1434 drivers/net/ethernet/emulex/benet/be_main.c i, txo->q.head, txo->q.tail, head 1449 drivers/net/ethernet/emulex/benet/be_main.c i, txo->cq.head, txo->cq.tail, head 2600 drivers/net/ethernet/emulex/benet/be_main.c page_info = &rxo->page_info_tbl[rxq->head]; head 2642 drivers/net/ethernet/emulex/benet/be_main.c page_info = &rxo->page_info_tbl[rxq->head]; head 2824 drivers/net/ethernet/emulex/benet/be_main.c rxq->head = 0; head 2928 drivers/net/ethernet/emulex/benet/be_main.c txq->head = notified_idx; head 636 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c if (aligned_start 
>= skb->head) head 378 drivers/net/ethernet/freescale/fec_main.c *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; head 107 drivers/net/ethernet/google/gve/gve.h u32 head; /* offset to write at */ head 41 drivers/net/ethernet/google/gve/gve_tx.c fifo->head = 0; head 56 drivers/net/ethernet/google/gve/gve_tx.c return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head; head 95 drivers/net/ethernet/google/gve/gve_tx.c iov[0].iov_offset = fifo->head; head 97 drivers/net/ethernet/google/gve/gve_tx.c fifo->head += bytes; head 99 drivers/net/ethernet/google/gve/gve_tx.c if (fifo->head > fifo->size) { head 104 drivers/net/ethernet/google/gve/gve_tx.c overflow = fifo->head - fifo->size; head 109 drivers/net/ethernet/google/gve/gve_tx.c fifo->head = overflow; head 113 drivers/net/ethernet/google/gve/gve_tx.c aligned_head = L1_CACHE_ALIGN(fifo->head); head 114 drivers/net/ethernet/google/gve/gve_tx.c padding = aligned_head - fifo->head; head 117 drivers/net/ethernet/google/gve/gve_tx.c fifo->head = aligned_head; head 119 drivers/net/ethernet/google/gve/gve_tx.c if (fifo->head == fifo->size) head 120 drivers/net/ethernet/google/gve/gve_tx.c fifo->head = 0; head 251 drivers/net/ethernet/hisilicon/hip04_eth.c static inline unsigned int tx_count(unsigned int head, unsigned int tail) head 253 drivers/net/ethernet/hisilicon/hip04_eth.c return (head - tail) % TX_DESC_NUM; head 104 drivers/net/ethernet/hisilicon/hisi_femac.c unsigned int head; head 218 drivers/net/ethernet/hisilicon/hisi_femac.c pos = rxq->head; head 242 drivers/net/ethernet/hisilicon/hisi_femac.c rxq->head = pos; head 362 drivers/net/ethernet/hisilicon/hisi_femac.c queue->head = 0; head 394 drivers/net/ethernet/hisilicon/hisi_femac.c while (pos != rxq->head) { head 398 drivers/net/ethernet/hisilicon/hisi_femac.c pos, rxq->head); head 413 drivers/net/ethernet/hisilicon/hisi_femac.c while (pos != txq->head) { head 417 drivers/net/ethernet/hisilicon/hisi_femac.c pos, txq->head); head 517 drivers/net/ethernet/hisilicon/hisi_femac.c if (unlikely(!CIRC_SPACE(txq->head, txq->tail, head 533 drivers/net/ethernet/hisilicon/hisi_femac.c txq->dma_phys[txq->head] = addr; head 535 drivers/net/ethernet/hisilicon/hisi_femac.c txq->skb[txq->head] = skb; head 536 drivers/net/ethernet/hisilicon/hisi_femac.c txq->head = (txq->head + 1) % txq->num; head 18 drivers/net/ethernet/hisilicon/hns/hnae.c hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head) head 23 drivers/net/ethernet/hisilicon/hns/hnae.c list_add_tail_rcu(node, head); head 67 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c u32 head, tail; head 73 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD); head 74 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c if (tail == head) head 955 drivers/net/ethernet/hisilicon/hns/hns_enet.c int head; head 958 drivers/net/ethernet/hisilicon/hns/hns_enet.c head = readl_relaxed(ring->io_base + RCB_REG_HEAD); head 961 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (is_ring_empty(ring) || head == ring->next_to_clean) head 964 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (!is_valid_clean_head(ring, head)) { head 965 drivers/net/ethernet/hisilicon/hns/hns_enet.c netdev_err(ndev, "wrong head (%d, %d-%d)\n", head, head 973 drivers/net/ethernet/hisilicon/hns/hns_enet.c while (head != ring->next_to_clean) { head 1006 drivers/net/ethernet/hisilicon/hns/hns_enet.c int head; head 1010 drivers/net/ethernet/hisilicon/hns/hns_enet.c head = readl_relaxed(ring->io_base + 
RCB_REG_HEAD); head 1012 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (head != ring->next_to_clean) { head 1025 drivers/net/ethernet/hisilicon/hns/hns_enet.c int head = readl_relaxed(ring->io_base + RCB_REG_HEAD); head 1027 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (head == ring->next_to_clean) head 1038 drivers/net/ethernet/hisilicon/hns/hns_enet.c int head; head 1041 drivers/net/ethernet/hisilicon/hns/hns_enet.c head = ring->next_to_use; /* ntu :soft setted ring position*/ head 1044 drivers/net/ethernet/hisilicon/hns/hns_enet.c while (head != ring->next_to_clean) head 1625 drivers/net/ethernet/hisilicon/hns/hns_enet.c int head, tail; head 1656 drivers/net/ethernet/hisilicon/hns/hns_enet.c head = readl_relaxed(ring->io_base + RCB_REG_HEAD); head 1659 drivers/net/ethernet/hisilicon/hns/hns_enet.c fetch_num = ring_dist(ring, head, tail); head 1661 drivers/net/ethernet/hisilicon/hns/hns_enet.c while (head != tail) { head 1662 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (ring->desc_cb[head].page_offset != 0) { head 1667 drivers/net/ethernet/hisilicon/hns/hns_enet.c head++; head 1668 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (head == ring->desc_num) head 1669 drivers/net/ethernet/hisilicon/hns/hns_enet.c head = 0; head 117 drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h u32 head; head 128 drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) head 2293 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head, head 2299 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c while (head != ntc) { head 2336 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c int head; head 2338 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); head 2341 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (is_ring_empty(ring) || head == ring->next_to_clean) head 2344 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (unlikely(!is_valid_clean_head(ring, head))) { head 2345 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, head 2356 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_nic_reclaim_desc(ring, head, &bytes, &pkts); head 2513 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c skb->csum_start = (unsigned char *)th - skb->head; head 3190 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_ring_chain_node *head) head 3193 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_ring_chain_node *cur_chain = head; head 3261 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c cur_chain = head->next; head 3267 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c head->next = NULL; head 3273 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_ring_chain_node *head) head 3278 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c chain = head->next; head 635 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h #define hns3_for_each_ring(pos, head) \ head 636 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h for (pos = (head).ring; pos; pos = pos->next) head 27 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head) head 33 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c return head >= ntc && head <= ntu; head 35 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c return head >= ntc || head <= ntu; head 141 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c u32 head; head 144 
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); head 147 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c if (!is_valid_csq_clean_head(csq, head)) { head 148 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, head 158 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; head 159 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c csq->next_to_clean = head; head 165 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); head 166 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c return head == hw->cmq.csq.next_to_use; head 28 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h u32 head; head 109 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head) head 113 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c chain = head->next; head 31 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c int head) head 37 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c return head >= ntc && head <= ntu; head 39 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c return head >= ntc || head <= ntu; head 47 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c u32 head; head 49 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); head 52 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c if (!hclgevf_is_valid_csq_clean_head(csq, head)) { head 53 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, head 61 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; head 62 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c csq->next_to_clean = head; head 68 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c u32 head; head 70 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); head 72 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c return head == hw->cmq.csq.next_to_use; head 378 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c hdev->arq.head = 0; head 36 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h u32 head; head 269 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c while (tail != hdev->arq.head) { head 276 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c msg_q = hdev->arq.msg_q[hdev->arq.head]; head 325 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c msg_q = hdev->arq.msg_q[hdev->arq.head]; head 159 drivers/net/ethernet/intel/e1000e/e1000.h void __iomem *head; head 1145 drivers/net/ethernet/intel/e1000e/netdev.c readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, head 2926 drivers/net/ethernet/intel/e1000e/netdev.c tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); head 2929 drivers/net/ethernet/intel/e1000e/netdev.c writel(0, tx_ring->head); head 3250 drivers/net/ethernet/intel/e1000e/netdev.c rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); head 3253 drivers/net/ethernet/intel/e1000e/netdev.c writel(0, rx_ring->head); head 173 drivers/net/ethernet/intel/fm10k/fm10k.h #define fm10k_for_each_ring(pos, head) \ head 174 drivers/net/ethernet/intel/fm10k/fm10k.h for (pos = &(head).ring[(head).count]; (--pos) >= 
(head).ring;) head 1127 drivers/net/ethernet/intel/fm10k/fm10k_main.c u32 head, tail; head 1130 drivers/net/ethernet/intel/fm10k/fm10k_main.c head = ring->next_to_clean; head 1133 drivers/net/ethernet/intel/fm10k/fm10k_main.c head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx)); head 1137 drivers/net/ethernet/intel/fm10k/fm10k_main.c return ((head <= tail) ? tail : tail + ring->count) - head; head 16 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c fifo->head = 0; head 28 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c return fifo->tail - fifo->head; head 39 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c return fifo->size + fifo->head - fifo->tail; head 50 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c return fifo->head == fifo->tail; head 62 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c return (fifo->head + offset) & (fifo->size - 1); head 85 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u32 *head = fifo->buffer + fm10k_fifo_head_offset(fifo, 0); head 92 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c return FM10K_TLV_DWORD_LEN(*head); head 106 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c fifo->head += len; head 120 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c fifo->head = fifo->tail; head 132 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) head 134 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 len = tail - head; head 185 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 head = (mbx->head + offset + 1) & ((mbx->mbmem_len << 1) - 1); head 188 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c return (head > mbx->head) ? --head : ++head; head 201 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 head = (mbx->head - offset - 1) & ((mbx->mbmem_len << 1) - 1); head 204 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c return (head < mbx->head) ? 
++head : --head; head 335 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u32 *head = fifo->buffer; head 350 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head += end; head 356 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c for (end = fifo->size - end; len; head = fifo->buffer) { head 366 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c fm10k_write_reg(hw, mbmem + tail++, *(head++)); head 383 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c struct fm10k_mbx_info *mbx, u16 head) head 385 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 mbmem_len, len, ack = fm10k_mbx_index_len(mbx, head, mbx->tail); head 429 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 end, len, head; head 433 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head = fm10k_mbx_head_sub(mbx, len); head 434 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (head >= mbx->mbmem_len) head 435 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head++; head 445 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head &= mbx->mbmem_len - 1; head 446 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (!head) head 447 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head++; head 452 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c *(tail++) = fm10k_read_reg(hw, mbmem + head++); head 476 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 len, seq = fm10k_mbx_index_len(mbx, mbx->head, tail); head 484 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c mbx->head = fm10k_mbx_head_add(mbx, len); head 617 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c static void fm10k_mbx_update_local_crc(struct fm10k_mbx_info *mbx, u16 head) head 619 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 len = mbx->tail_len - fm10k_mbx_index_len(mbx, head, mbx->tail); head 622 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head = fm10k_fifo_head_offset(&mbx->tx, mbx->pulled); head 625 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c mbx->local = fm10k_fifo_crc(&mbx->tx, head, len, mbx->local); head 711 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c err = fm10k_tlv_msg_parse(hw, fifo->buffer + fifo->head, head 723 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c fifo->tail -= fifo->head; head 724 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c fifo->head = 0; head 841 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD) | head 855 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); head 881 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); head 901 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c FM10K_MSG_HDR_FIELD_SET(mbx->head, TAIL) | head 939 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c FM10K_MSG_HDR_FIELD_SET(mbx->head, HEAD); head 952 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 type, rsvd0, head, tail, size; head 958 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); head 967 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (tail != mbx->head) head 973 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) head 975 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) head 981 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) head 992 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) head 1018 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c struct fm10k_mbx_info *mbx, u16 head) head 1024 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c fm10k_mbx_update_local_crc(mbx, head); head 1027 
drivers/net/ethernet/intel/fm10k/fm10k_mbx.c fm10k_mbx_pull_head(hw, mbx, head); head 1059 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 len, head, ack; head 1065 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head = FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, HEAD); head 1066 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c ack = fm10k_mbx_index_len(mbx, head, mbx->tail); head 1085 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c mbx->rx.head = 0; head 1151 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 size, head; head 1155 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); head 1179 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c mbx->tail = head; head 1181 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c return fm10k_mbx_create_reply(hw, mbx, head); head 1197 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 head, tail; head 1201 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); head 1206 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c mbx->tail = head; head 1223 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c return fm10k_mbx_create_reply(hw, mbx, head); head 1240 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 head; head 1244 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); head 1266 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (head != mbx->tail) head 1276 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c return fm10k_mbx_create_reply(hw, mbx, head); head 1292 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 head; head 1295 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head = FM10K_MSG_HDR_FIELD_GET(*hdr, HEAD); head 1308 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c mbx->tail = head; head 1596 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c mbx->head = 1; head 1637 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD); head 1654 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c FM10K_MSG_HDR_FIELD_SET(mbx->head, SM_HEAD) | head 1675 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c mbx->head = 1; head 1774 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 tail, head, ver; head 1778 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); head 1784 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (!head || head > FM10K_SM_MBX_FIFO_LEN) head 1788 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (mbx->tail < head) head 1789 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head += mbx->mbmem_len - 1; head 1790 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (tail < mbx->head) head 1792 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) head 1794 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) head 1886 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (tail < mbx->head) head 1898 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c mbx->head = fm10k_mbx_head_sub(mbx, mbx->pushed); head 1902 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (mbx->head > mbmem_len) head 1903 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c mbx->head -= mbmem_len; head 1918 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c struct fm10k_mbx_info *mbx, u16 head) head 1926 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c if (mbx->tail < head) head 1927 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head += mbmem_len; head 1929 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c fm10k_mbx_pull_head(hw, mbx, head); head 1963 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c struct fm10k_mbx_info 
*mbx, u16 head) head 1969 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c fm10k_sm_mbx_transmit(hw, mbx, head); head 2041 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c u16 head, tail; head 2046 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c head = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_HEAD); head 2068 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c fm10k_sm_mbx_create_reply(hw, mbx, head); head 231 drivers/net/ethernet/intel/fm10k/fm10k_mbx.h u16 head; head 267 drivers/net/ethernet/intel/fm10k/fm10k_mbx.h u16 head, head_len, pushed; head 23 drivers/net/ethernet/intel/i40e/i40e_adminq.c hw->aq.asq.head = I40E_VF_ATQH1; head 28 drivers/net/ethernet/intel/i40e/i40e_adminq.c hw->aq.arq.head = I40E_VF_ARQH1; head 34 drivers/net/ethernet/intel/i40e/i40e_adminq.c hw->aq.asq.head = I40E_PF_ATQH; head 39 drivers/net/ethernet/intel/i40e/i40e_adminq.c hw->aq.arq.head = I40E_PF_ARQH; head 275 drivers/net/ethernet/intel/i40e/i40e_adminq.c wr32(hw, hw->aq.asq.head, 0); head 304 drivers/net/ethernet/intel/i40e/i40e_adminq.c wr32(hw, hw->aq.arq.head, 0); head 460 drivers/net/ethernet/intel/i40e/i40e_adminq.c wr32(hw, hw->aq.asq.head, 0); head 494 drivers/net/ethernet/intel/i40e/i40e_adminq.c wr32(hw, hw->aq.arq.head, 0); head 679 drivers/net/ethernet/intel/i40e/i40e_adminq.c while (rd32(hw, hw->aq.asq.head) != ntc) { head 681 drivers/net/ethernet/intel/i40e/i40e_adminq.c "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); head 715 drivers/net/ethernet/intel/i40e/i40e_adminq.c return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; head 755 drivers/net/ethernet/intel/i40e/i40e_adminq.c val = rd32(hw, hw->aq.asq.head); head 971 drivers/net/ethernet/intel/i40e/i40e_adminq.c ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK; head 34 drivers/net/ethernet/intel/i40e/i40e_adminq.h u32 head; head 674 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 }, head 700 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 }, head 20 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h u16 head; head 53 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h u16 head; head 311 drivers/net/ethernet/intel/i40e/i40e_main.c u32 head, val; head 356 drivers/net/ethernet/intel/i40e/i40e_main.c head = i40e_get_head(tx_ring); head 367 drivers/net/ethernet/intel/i40e/i40e_main.c head, tx_ring->next_to_use, head 696 drivers/net/ethernet/intel/i40e/i40e_txrx.c u32 head, tail; head 699 drivers/net/ethernet/intel/i40e/i40e_txrx.c head = i40e_get_head(ring); head 702 drivers/net/ethernet/intel/i40e/i40e_txrx.c head = ring->next_to_clean; head 706 drivers/net/ethernet/intel/i40e/i40e_txrx.c if (head != tail) head 707 drivers/net/ethernet/intel/i40e/i40e_txrx.c return (head < tail) ? 
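The fm10k_mbx.c FIFO helpers listed above (fm10k_fifo_used/unused/empty and the head-offset computation around lines 16-62) all rest on one idiom: head and tail advance as free-running unsigned counters over a power-of-two-sized buffer, and wrapping is deferred to the moment a physical index is formed. A minimal standalone sketch of that arithmetic, using illustrative demo_* names rather than the driver's own types:

#include <stdint.h>

struct demo_fifo {
	uint32_t *buffer;
	uint16_t size;   /* power of two, so size - 1 is a valid mask */
	uint16_t head;   /* consumer counter, wraps naturally */
	uint16_t tail;   /* producer counter, wraps naturally */
};

/* Entries currently queued: unsigned subtraction stays exact even
 * after head and tail have wrapped past the top of the counter range. */
static uint16_t demo_fifo_used(const struct demo_fifo *fifo)
{
	return fifo->tail - fifo->head;
}

static uint16_t demo_fifo_unused(const struct demo_fifo *fifo)
{
	return fifo->size + fifo->head - fifo->tail;
}

static int demo_fifo_empty(const struct demo_fifo *fifo)
{
	return fifo->head == fifo->tail;
}

/* Translate a logical offset from head into a physical buffer index. */
static uint16_t demo_fifo_head_offset(const struct demo_fifo *fifo, uint16_t offset)
{
	return (fifo->head + offset) & (fifo->size - 1);
}

The scheme only holds while the used count never exceeds size, which the producer guarantees by checking the unused count before writing.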
head 708 drivers/net/ethernet/intel/i40e/i40e_txrx.c tail - head : (tail + ring->count - head); head 468 drivers/net/ethernet/intel/i40e/i40e_txrx.h #define i40e_for_each_ring(pos, head) \ head 469 drivers/net/ethernet/intel/i40e/i40e_txrx.h for (pos = (head).ring; pos != NULL; pos = pos->next) head 508 drivers/net/ethernet/intel/i40e/i40e_txrx.h void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; head 510 drivers/net/ethernet/intel/i40e/i40e_txrx.h return le32_to_cpu(*(volatile __le32 *)head); head 20 drivers/net/ethernet/intel/iavf/iavf_adminq.c hw->aq.asq.head = IAVF_VF_ATQH1; head 25 drivers/net/ethernet/intel/iavf/iavf_adminq.c hw->aq.arq.head = IAVF_VF_ARQH1; head 262 drivers/net/ethernet/intel/iavf/iavf_adminq.c wr32(hw, hw->aq.asq.head, 0); head 291 drivers/net/ethernet/intel/iavf/iavf_adminq.c wr32(hw, hw->aq.arq.head, 0); head 447 drivers/net/ethernet/intel/iavf/iavf_adminq.c wr32(hw, hw->aq.asq.head, 0); head 481 drivers/net/ethernet/intel/iavf/iavf_adminq.c wr32(hw, hw->aq.arq.head, 0); head 581 drivers/net/ethernet/intel/iavf/iavf_adminq.c while (rd32(hw, hw->aq.asq.head) != ntc) { head 583 drivers/net/ethernet/intel/iavf/iavf_adminq.c "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); head 618 drivers/net/ethernet/intel/iavf/iavf_adminq.c return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use; head 657 drivers/net/ethernet/intel/iavf/iavf_adminq.c val = rd32(hw, hw->aq.asq.head); head 872 drivers/net/ethernet/intel/iavf/iavf_adminq.c ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK; head 34 drivers/net/ethernet/intel/iavf/iavf_adminq.h u32 head; head 115 drivers/net/ethernet/intel/iavf/iavf_txrx.c u32 head, tail; head 117 drivers/net/ethernet/intel/iavf/iavf_txrx.c head = ring->next_to_clean; head 120 drivers/net/ethernet/intel/iavf/iavf_txrx.c if (head != tail) head 121 drivers/net/ethernet/intel/iavf/iavf_txrx.c return (head < tail) ? 
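The i40e_txrx.h entries just above (lines 508-510) show the "head writeback" trick: rather than exposing a head register, the device DMA-writes the consumed index into a dword placed directly after the last descriptor, and i40e_txrx.c lines 696-708 turn that into a pending-work count. A rough sketch of both, assuming a simplified descriptor layout (the demo_* names are invented for illustration):

#include <stdint.h>

struct demo_tx_desc { uint64_t qw[2]; };

struct demo_tx_ring {
	struct demo_tx_desc *desc; /* count descriptors + one writeback dword */
	uint32_t count;
	uint32_t next_to_use;      /* software tail */
};

/* Hardware writes the consumed head index just past the final
 * descriptor; the volatile read forces a fresh load each time
 * (the kernel code additionally converts from little endian). */
static uint32_t demo_get_head(const struct demo_tx_ring *ring)
{
	const volatile uint32_t *head =
		(const volatile uint32_t *)(ring->desc + ring->count);
	return *head;
}

/* Descriptors posted to the device but not yet consumed. */
static uint32_t demo_pending(const struct demo_tx_ring *ring)
{
	uint32_t head = demo_get_head(ring);
	uint32_t tail = ring->next_to_use;

	if (head == tail)
		return 0;
	return head < tail ? tail - head : tail + ring->count - head;
}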
head 122 drivers/net/ethernet/intel/iavf/iavf_txrx.c tail - head : (tail + ring->count - head); head 425 drivers/net/ethernet/intel/iavf/iavf_txrx.h #define iavf_for_each_ring(pos, head) \ head 426 drivers/net/ethernet/intel/iavf/iavf_txrx.h for (pos = (head).ring; pos != NULL; pos = pos->next) head 1110 drivers/net/ethernet/intel/ice/ice_common.c ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), head 8 drivers/net/ethernet/intel/ice/ice_controlq.c (qinfo)->sq.head = prefix##_ATQH; \ head 16 drivers/net/ethernet/intel/ice/ice_controlq.c (qinfo)->rq.head = prefix##_ARQH; \ head 257 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, ring->head, 0); head 466 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.head, 0); head 533 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->rq.head, 0); head 795 drivers/net/ethernet/intel/ice/ice_controlq.c while (rd32(hw, cq->sq.head) != ntc) { head 797 drivers/net/ethernet/intel/ice/ice_controlq.c "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); head 825 drivers/net/ethernet/intel/ice/ice_controlq.c return rd32(hw, cq->sq.head) == cq->sq.next_to_use; head 887 drivers/net/ethernet/intel/ice/ice_controlq.c val = rd32(hw, cq->sq.head); head 1061 drivers/net/ethernet/intel/ice/ice_controlq.c ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); head 1118 drivers/net/ethernet/intel/ice/ice_controlq.c ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); head 55 drivers/net/ethernet/intel/ice/ice_controlq.h u32 head; head 271 drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h u16 head; head 56 drivers/net/ethernet/intel/ice/ice_main.c u16 head, tail; head 58 drivers/net/ethernet/intel/ice/ice_main.c head = ring->next_to_clean; head 61 drivers/net/ethernet/intel/ice/ice_main.c if (head != tail) head 62 drivers/net/ethernet/intel/ice/ice_main.c return (head < tail) ? 
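The i40e/iavf admin-queue entries above, and the ice control-queue ones that follow, share the same completion test: the send queue is drained exactly when the hardware-reported head has caught up with the software next_to_use index. A hedged sketch of that polling shape — read_head_reg() and relax_cpu() are stand-ins for the drivers' rd32()/udelay(), not real API:

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for register access and busy-wait; the real drivers read
 * the queue's head register and delay briefly between polls. */
extern uint32_t read_head_reg(void);
extern void relax_cpu(void);

/* The queue is fully consumed once hardware's head equals the index
 * software will use for the next descriptor. */
static bool demo_sq_done(uint32_t next_to_use)
{
	return read_head_reg() == next_to_use;
}

static bool demo_sq_drain(uint32_t next_to_use, unsigned int max_polls)
{
	while (max_polls--) {
		if (demo_sq_done(next_to_use))
			return true;
		relax_cpu();
	}
	return false; /* timeout: caller treats the command as failed */
}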
head 63 drivers/net/ethernet/intel/ice/ice_main.c tail - head : (tail + ring->count - head); head 1097 drivers/net/ethernet/intel/ice/ice_main.c ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); head 4688 drivers/net/ethernet/intel/ice/ice_main.c u32 head, val = 0; head 4690 drivers/net/ethernet/intel/ice/ice_main.c head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) & head 4697 drivers/net/ethernet/intel/ice/ice_main.c head, tx_ring->next_to_use, val); head 2783 drivers/net/ethernet/intel/ice/ice_switch.c struct list_head *head; head 2785 drivers/net/ethernet/intel/ice/ice_switch.c head = &sw->recp_list[i].filt_replay_rules; head 2786 drivers/net/ethernet/intel/ice/ice_switch.c status = ice_replay_vsi_fltr(hw, vsi_handle, i, head); head 230 drivers/net/ethernet/intel/ice/ice_txrx.h #define ice_for_each_ring(pos, head) \ head 231 drivers/net/ethernet/intel/ice/ice_txrx.h for (pos = (head).ring; pos; pos = pos->next) head 1169 drivers/net/ethernet/intel/igb/igb_main.c struct igb_ring_container *head) head 1171 drivers/net/ethernet/intel/igb/igb_main.c head->ring = ring; head 1172 drivers/net/ethernet/intel/igb/igb_main.c head->count++; head 135 drivers/net/ethernet/intel/igbvf/igbvf.h u16 head; head 518 drivers/net/ethernet/intel/igbvf/netdev.c writel(0, adapter->hw.hw_addr + tx_ring->head); head 601 drivers/net/ethernet/intel/igbvf/netdev.c writel(0, adapter->hw.hw_addr + rx_ring->head); head 1301 drivers/net/ethernet/intel/igbvf/netdev.c tx_ring->head = E1000_TDH(0); head 1382 drivers/net/ethernet/intel/igbvf/netdev.c rx_ring->head = E1000_RDH(0); head 3487 drivers/net/ethernet/intel/igc/igc_main.c struct igc_ring_container *head) head 3489 drivers/net/ethernet/intel/igc/igc_main.c head->ring = ring; head 3490 drivers/net/ethernet/intel/igc/igc_main.c head->count++; head 437 drivers/net/ethernet/intel/ixgbe/ixgbe.h #define ixgbe_for_each_ring(pos, head) \ head 438 drivers/net/ethernet/intel/ixgbe/ixgbe.h for (pos = (head).ring; pos != NULL; pos = pos->next) head 817 drivers/net/ethernet/intel/ixgbe/ixgbe.h struct sk_buff *head; head 807 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c struct ixgbe_ring_container *head) head 809 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c ring->next = head->ring; head 810 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c head->ring = ring; head 811 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c head->count++; head 812 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c head->next_update = jiffies + 1; head 1018 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int head, tail; head 1020 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c head = ring->next_to_clean; head 1023 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c return ((head <= tail) ? tail : tail + ring->count) - head; head 225 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h #define ixgbevf_for_each_ring(pos, head) \ head 226 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h for (pos = (head).ring; pos != NULL; pos = pos->next) head 203 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); head 206 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (head != tail) head 207 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c return (head < tail) ? 
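The igb/igc/ixgbe entries above show how these drivers group rings under one interrupt vector: each ring is pushed onto a singly linked list headed in a ring-container struct, and the *_for_each_ring() macros walk that list. A compact sketch with invented demo_* names:

#include <stddef.h>

struct demo_ring {
	struct demo_ring *next; /* next ring in the container's list */
	/* ... per-ring state ... */
};

struct demo_ring_container {
	struct demo_ring *ring; /* list head */
	unsigned int count;     /* rings on the list */
};

/* Push-front insertion, as in the ixgbe/ixgbevf add_ring helpers. */
static void demo_add_ring(struct demo_ring *ring,
			  struct demo_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/* Equivalent shape to ixgbe_for_each_ring(pos, head). */
#define demo_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)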
head 208 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c tail - head : (tail + ring->count - head); head 2674 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c struct ixgbevf_ring_container *head) head 2676 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ring->next = head->ring; head 2677 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c head->ring = ring; head 2678 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c head->count++; head 75 drivers/net/ethernet/marvell/octeontx2/af/rvu.h struct hlist_head head; head 96 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c INIT_HLIST_HEAD(&list->head); head 419 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c u64 reg, head; head 425 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c head = (reg >> 4) & AQ_PTR_MASK; head 427 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), head 1659 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c hlist_for_each_entry(mce, &mce_list->head, node) { head 1685 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c hlist_add_head(&mce->node, &mce_list->head); head 1742 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c hlist_for_each_entry(mce, &mce_list->head, node) { head 24 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c u64 reg, head; head 30 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c head = (reg >> 4) & AQ_PTR_MASK; head 32 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)), head 458 drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h u64 head : 20; head 462 drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h u64 head : 20; head 1215 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_mailbox *tmp, *head = NULL; head 1238 drivers/net/ethernet/mellanox/mlx5/core/cmd.c tmp->next = head; head 1242 drivers/net/ethernet/mellanox/mlx5/core/cmd.c head = tmp; head 1244 drivers/net/ethernet/mellanox/mlx5/core/cmd.c msg->next = head; head 1248 drivers/net/ethernet/mellanox/mlx5/core/cmd.c while (head) { head 1249 drivers/net/ethernet/mellanox/mlx5/core/cmd.c tmp = head->next; head 1250 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_cmd_box(dev, head); head 1251 drivers/net/ethernet/mellanox/mlx5/core/cmd.c head = tmp; head 1261 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_mailbox *head = msg->next; head 1264 drivers/net/ethernet/mellanox/mlx5/core/cmd.c while (head) { head 1265 drivers/net/ethernet/mellanox/mlx5/core/cmd.c next = head->next; head 1266 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_cmd_box(dev, head); head 1267 drivers/net/ethernet/mellanox/mlx5/core/cmd.c head = next; head 1458 drivers/net/ethernet/mellanox/mlx5/core/cmd.c list_add_tail(&msg->list, &msg->parent->head); head 1646 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (list_empty(&ch->head)) { head 1650 drivers/net/ethernet/mellanox/mlx5/core/cmd.c msg = list_entry(ch->head.next, typeof(*msg), list); head 1827 drivers/net/ethernet/mellanox/mlx5/core/cmd.c list_for_each_entry_safe(msg, n, &ch->head, list) { head 1858 drivers/net/ethernet/mellanox/mlx5/core/cmd.c INIT_LIST_HEAD(&ch->head); head 1867 drivers/net/ethernet/mellanox/mlx5/core/cmd.c list_add_tail(&msg->list, &ch->head); head 362 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c struct hlist_head *head = head 370 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c hlist_add_head(&cur_string->hlist, head); head 432 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c static struct 
tracer_string_format *mlx5_tracer_message_find(struct hlist_head *head, head 437 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c hlist_for_each_entry(message, head, hlist) head 447 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c struct hlist_head *head = head 450 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c return mlx5_tracer_message_find(head, tracer_event->event_id, tracer_event->string_event.tmsn); head 596 drivers/net/ethernet/mellanox/mlx5/core/en.h u32 head; head 258 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c rq->mpwqe.wq.head : mlx5_wq_cyc_get_head(&rq->wqe.wq); head 660 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c struct hlist_head *head; head 662 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst); head 663 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c hlist_for_each_entry(arfs_rule, head, hlist) { head 375 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c struct list_head *head = &priv->fs.ethtool.rules; head 380 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c head = &iter->list; head 383 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c list_add(&rule->list, head); head 615 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->page_cache.head = 0; head 658 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (i = rq->page_cache.head; i != rq->page_cache.tail; head 830 drivers/net/ethernet/mellanox/mlx5/core/en_main.c u16 head; head 837 drivers/net/ethernet/mellanox/mlx5/core/en_main.c head = wq->head; head 841 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->dealloc_wqe(rq, head); head 842 drivers/net/ethernet/mellanox/mlx5/core/en_main.c head = mlx5_wq_ll_get_wqe_next_ix(wq, head); head 845 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->mpwqe.actual_wq_head = wq->head; head 701 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; head 703 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c list_for_each_entry_safe(cb_priv, temp, head, list) { head 199 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c if (tail_next == cache->head) { head 220 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c if (unlikely(cache->head == cache->tail)) { head 225 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c if (page_ref_count(cache->page_cache[cache->head].page) != 1) { head 230 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c *dma_info = cache->page_cache[cache->head]; head 231 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1); head 458 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head); head 571 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u16 head = mlx5_wq_cyc_get_head(wq); head 573 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk); head 660 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u16 head; head 680 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c head = rq->mpwqe.actual_wq_head; head 683 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c alloc_err = mlx5e_alloc_rx_mpwqe(rq, head); head 687 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c head = mlx5_wq_ll_get_wqe_next_ix(wq, head); head 697 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c rq->mpwqe.actual_wq_head = head; head 750 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c #define list_for_each_advance_continue(pos, head, reverse) \ head 752 
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c &pos->list != (head); \ head 1558 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c static void free_match_list(struct match_list_head *head, bool ft_locked) head 1560 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c if (!list_empty(&head->list)) { head 1563 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c list_del(&head->first.list); head 1564 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c tree_put_node(&head->first.g->node, ft_locked); head 1565 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c list_for_each_entry_safe(iter, match_tmp, &head->list, head 211 drivers/net/ethernet/mellanox/mlx5/core/uar.c struct list_head *head; head 219 drivers/net/ethernet/mellanox/mlx5/core/uar.c head = &bfregs->wc_head.list; head 222 drivers/net/ethernet/mellanox/mlx5/core/uar.c head = &bfregs->reg_head.list; head 226 drivers/net/ethernet/mellanox/mlx5/core/uar.c if (list_empty(head)) { head 232 drivers/net/ethernet/mellanox/mlx5/core/uar.c list_add(&up->list, head); head 234 drivers/net/ethernet/mellanox/mlx5/core/uar.c up = list_entry(head->next, struct mlx5_uars_page, list); head 300 drivers/net/ethernet/mellanox/mlx5/core/uar.c struct list_head *head; head 304 drivers/net/ethernet/mellanox/mlx5/core/uar.c head = &bfregs->wc_head.list; head 307 drivers/net/ethernet/mellanox/mlx5/core/uar.c head = &bfregs->reg_head.list; head 324 drivers/net/ethernet/mellanox/mlx5/core/uar.c list_add_tail(&up->list, head); head 256 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->head = 0; head 74 drivers/net/ethernet/mellanox/mlx5/core/wq.h u16 head; head 258 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->head = head_next; head 471 drivers/net/ethernet/micrel/ks8842.c __func__, skb->len, skb->head, skb->data, head 631 drivers/net/ethernet/mscc/ocelot.c list_add_tail(&oskb->head, &port->skbs); head 2227 drivers/net/ethernet/mscc/ocelot.c entry = list_entry(pos, struct ocelot_skb, head); head 509 drivers/net/ethernet/mscc/ocelot.h struct list_head head; head 224 drivers/net/ethernet/mscc/ocelot_board.c entry = list_entry(pos, struct ocelot_skb, head); head 502 drivers/net/ethernet/neterion/vxge/vxge-main.c u32 ns = *(u32 *)(skb->head + pkt_length); head 354 drivers/net/ethernet/netronome/nfp/crypto/tls.c if (!WARN_ON_ONCE((u8 *)back < skb->head || head 38 drivers/net/ethernet/netronome/nfp/flower/action.c push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS; head 39 drivers/net/ethernet/netronome/nfp/flower/action.c push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; head 68 drivers/net/ethernet/netronome/nfp/flower/action.c pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS; head 69 drivers/net/ethernet/netronome/nfp/flower/action.c pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; head 80 drivers/net/ethernet/netronome/nfp/flower/action.c set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS; head 81 drivers/net/ethernet/netronome/nfp/flower/action.c set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ; head 108 drivers/net/ethernet/netronome/nfp/flower/action.c pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN; head 109 drivers/net/ethernet/netronome/nfp/flower/action.c pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ; head 120 drivers/net/ethernet/netronome/nfp/flower/action.c push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN; head 121 drivers/net/ethernet/netronome/nfp/flower/action.c push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ; head 162 drivers/net/ethernet/netronome/nfp/flower/action.c pre_lag->head.jump_id = 
NFP_FL_ACTION_OPCODE_PRE_LAG; head 163 drivers/net/ethernet/netronome/nfp/flower/action.c pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ; head 183 drivers/net/ethernet/netronome/nfp/flower/action.c output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT; head 184 drivers/net/ethernet/netronome/nfp/flower/action.c output->head.len_lw = act_size >> NFP_FL_LW_SIZ; head 320 drivers/net/ethernet/netronome/nfp/flower/action.c pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL; head 321 drivers/net/ethernet/netronome/nfp/flower/action.c pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ; head 382 drivers/net/ethernet/netronome/nfp/flower/action.c push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE; head 383 drivers/net/ethernet/netronome/nfp/flower/action.c push->head.len_lw = act_size >> NFP_FL_LW_SIZ; head 420 drivers/net/ethernet/netronome/nfp/flower/action.c set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; head 421 drivers/net/ethernet/netronome/nfp/flower/action.c set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ; head 510 drivers/net/ethernet/netronome/nfp/flower/action.c set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET; head 511 drivers/net/ethernet/netronome/nfp/flower/action.c set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ; head 548 drivers/net/ethernet/netronome/nfp/flower/action.c set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; head 549 drivers/net/ethernet/netronome/nfp/flower/action.c set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> head 556 drivers/net/ethernet/netronome/nfp/flower/action.c set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; head 557 drivers/net/ethernet/netronome/nfp/flower/action.c set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> head 572 drivers/net/ethernet/netronome/nfp/flower/action.c set_ip_ttl_tos->head.jump_id = head 574 drivers/net/ethernet/netronome/nfp/flower/action.c set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >> head 590 drivers/net/ethernet/netronome/nfp/flower/action.c set_ip_ttl_tos->head.jump_id = head 592 drivers/net/ethernet/netronome/nfp/flower/action.c set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >> head 612 drivers/net/ethernet/netronome/nfp/flower/action.c ip6->head.jump_id = opcode_tag; head 613 drivers/net/ethernet/netronome/nfp/flower/action.c ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ; head 658 drivers/net/ethernet/netronome/nfp/flower/action.c ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL; head 659 drivers/net/ethernet/netronome/nfp/flower/action.c ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ; head 728 drivers/net/ethernet/netronome/nfp/flower/action.c set_tport->head.jump_id = opcode; head 729 drivers/net/ethernet/netronome/nfp/flower/action.c set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ; head 777 drivers/net/ethernet/netronome/nfp/flower/action.c if (set_act->set_eth.head.len_lw) { head 783 drivers/net/ethernet/netronome/nfp/flower/action.c if (set_act->set_ip_ttl_tos.head.len_lw) { head 794 drivers/net/ethernet/netronome/nfp/flower/action.c if (set_act->set_ip_addr.head.len_lw) { head 805 drivers/net/ethernet/netronome/nfp/flower/action.c if (set_act->set_ip6_tc_hl_fl.head.len_lw) { head 815 drivers/net/ethernet/netronome/nfp/flower/action.c if (set_act->set_ip6_dst.head.len_lw && head 816 drivers/net/ethernet/netronome/nfp/flower/action.c set_act->set_ip6_src.head.len_lw) { head 832 drivers/net/ethernet/netronome/nfp/flower/action.c } else if (set_act->set_ip6_dst.head.len_lw) { head 840 
drivers/net/ethernet/netronome/nfp/flower/action.c } else if (set_act->set_ip6_src.head.len_lw) { head 849 drivers/net/ethernet/netronome/nfp/flower/action.c if (set_act->set_tport.head.len_lw) { head 129 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 136 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 145 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 154 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 165 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 174 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 181 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 187 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 194 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 199 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 208 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 216 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 231 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 240 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 246 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 251 drivers/net/ethernet/netronome/nfp/flower/cmsg.h struct nfp_fl_act_head head; head 46 drivers/net/ethernet/netronome/nfp/flower/metadata.c if (!CIRC_SPACE(ring->head, ring->tail, head 51 drivers/net/ethernet/netronome/nfp/flower/metadata.c memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS); head 52 drivers/net/ethernet/netronome/nfp/flower/metadata.c ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) % head 83 drivers/net/ethernet/netronome/nfp/flower/metadata.c if (ring->head == ring->tail) { head 141 drivers/net/ethernet/netronome/nfp/flower/metadata.c if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0) head 144 drivers/net/ethernet/netronome/nfp/flower/metadata.c memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS); head 145 drivers/net/ethernet/netronome/nfp/flower/metadata.c ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) % head 170 drivers/net/ethernet/netronome/nfp/flower/metadata.c if (ring->head == ring->tail) head 43 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c struct nfp_police_cfg_head head; head 53 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c struct nfp_police_cfg_head head; head 118 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c config->head.port = cpu_to_be32(netdev_port_id); head 175 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c config->head.port = cpu_to_be32(netdev_port_id); head 193 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c netdev_port_id = be32_to_cpu(msg->head.port); head 226 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c struct nfp_police_cfg_head *head; head 236 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c head = nfp_flower_cmsg_get_data(skb); head 237 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c memset(head, 0, sizeof(struct nfp_police_cfg_head)); head 238 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c head->port = cpu_to_be32(netdev_port_id); head 1728 drivers/net/ethernet/netronome/nfp/nfp_net_common.c if (skb && rxbuf && skb->head == rxbuf->frag) head 116 
drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c skb->head, skb->data); head 107 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res) head 112 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c list_for_each(pos, head) { head 89 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c seq_printf(seq, "%d\n", q->head->index); head 396 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->head = q->tail; head 446 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->head->cb = cb; head 447 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->head->cb_arg = cb_arg; head 448 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->head = q->head->next; head 452 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->head->index, ring_doorbell); head 456 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->dbval | q->head->index); head 461 drivers/net/ethernet/pensando/ionic/ionic_dev.c unsigned int mask, tail, head; head 465 drivers/net/ethernet/pensando/ionic/ionic_dev.c head = q->head->index; head 467 drivers/net/ethernet/pensando/ionic/ionic_dev.c return ((pos - tail) & mask) < ((head - tail) & mask); head 478 drivers/net/ethernet/pensando/ionic/ionic_dev.c if (q->tail->index == q->head->index) head 485 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->name, stop_index, q->tail->index, q->head->index); head 174 drivers/net/ethernet/pensando/ionic/ionic_dev.h struct ionic_desc_info *head; head 232 drivers/net/ethernet/pensando/ionic/ionic_dev.h if (q->head->index >= avail) head 233 drivers/net/ethernet/pensando/ionic/ionic_dev.h avail += q->head->left - 1; head 235 drivers/net/ethernet/pensando/ionic/ionic_dev.h avail -= q->head->index + 1; head 179 drivers/net/ethernet/pensando/ionic/ionic_main.c while (adminq->tail != adminq->head) { head 250 drivers/net/ethernet/pensando/ionic/ionic_main.c memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd)); head 50 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c struct hlist_head *head; head 55 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c head = &lif->rx_filters.by_id[i]; head 56 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c hlist_for_each_entry_safe(f, tmp, head, by_id) head 67 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c struct hlist_head *head; head 101 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c head = &lif->rx_filters.by_hash[key]; head 102 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c hlist_add_head(&f->by_hash, head); head 105 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c head = &lif->rx_filters.by_id[key]; head 106 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c hlist_add_head(&f->by_id, head); head 116 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c struct hlist_head *head; head 120 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c head = &lif->rx_filters.by_hash[key]; head 122 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c hlist_for_each_entry(f, head, by_hash) { head 136 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c struct hlist_head *head; head 140 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c head = &lif->rx_filters.by_hash[key]; head 142 drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c hlist_for_each_entry(f, head, by_hash) { head 19 drivers/net/ethernet/pensando/ionic/ionic_txrx.c DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell); head 41 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_rxq_desc *new = q->head->desc; head 169 
drivers/net/ethernet/pensando/ionic/ionic_txrx.c if (q->tail->index == q->head->index) head 267 drivers/net/ethernet/pensando/ionic/ionic_txrx.c desc = q->head->desc; head 272 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ring_doorbell = ((q->head->index + 1) & head 290 drivers/net/ethernet/pensando/ionic/ionic_txrx.c for (cur = q->tail; cur != q->head; cur = cur->next) { head 538 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; head 539 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_txq_desc *desc = q->head->desc; head 548 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_desc_info *abort = q->head; head 705 drivers/net/ethernet/pensando/ionic/ionic_txrx.c while (rewind->desc != q->head->desc) { head 709 drivers/net/ethernet/pensando/ionic/ionic_txrx.c q->head = abort; head 717 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_txq_desc *desc = q->head->desc; head 754 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_txq_desc *desc = q->head->desc; head 785 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; head 624 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c struct list_head *head; head 628 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c list_for_each(head, del_list) { head 629 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c cur = list_entry(head, nx_mac_list_t, list); head 632 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c list_move_tail(head, &adapter->mac_list); head 656 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c struct list_head *head; head 685 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c head = &del_list; head 686 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c while (!list_empty(head)) { head 687 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c cur = list_entry(head->next, nx_mac_list_t, list); head 718 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c struct list_head *head = &adapter->mac_list; head 720 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c while (!list_empty(head)) { head 721 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c cur = list_entry(head->next, nx_mac_list_t, list); head 1643 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c #define netxen_merge_rx_buffers(list, head) \ head 1644 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c do { list_splice_tail_init(list, head); } while (0); head 1815 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c struct list_head *head; head 1819 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c head = &rds_ring->free_list; head 1820 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c while (!list_empty(head)) { head 1822 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c buffer = list_entry(head->next, struct netxen_rx_buffer, list); head 1872 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c struct list_head *head; head 1879 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c head = &rds_ring->free_list; head 1880 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c while (!list_empty(head)) { head 1882 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c buffer = list_entry(head->next, struct netxen_rx_buffer, list); head 3201 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c struct list_head *head; head 3211 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c list_for_each(head, &adapter->ip_list) { head 3212 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c cur = list_entry(head, struct nx_ip_list, list); 
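The ionic_dev.c entry a little above (line 467) condenses "is this descriptor still in flight?" into two masked subtractions: with a power-of-two ring, (x - tail) & mask is the distance of x ahead of the consumer, so a position has been posted but not yet serviced exactly when its distance is smaller than head's. A standalone restatement with an illustrative name:

#include <stdbool.h>

/* mask is num_descs - 1 with num_descs a power of two; head is the
 * producer index, tail the consumer index, pos the slot being tested. */
static bool demo_desc_in_flight(unsigned int mask, unsigned int head,
				unsigned int tail, unsigned int pos)
{
	return ((pos - tail) & mask) < ((head - tail) & mask);
}

When head == tail the right-hand distance is zero, so nothing tests as in flight, which matches the empty-ring case.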
head 749 drivers/net/ethernet/qlogic/qed/qed_spq.c struct list_head *head, u32 keep_reserve) head 755 drivers/net/ethernet/qlogic/qed/qed_spq.c !list_empty(head)) { head 757 drivers/net/ethernet/qlogic/qed/qed_spq.c list_first_entry(head, struct qed_spq_entry, list); head 262 drivers/net/ethernet/qlogic/qede/qede_filter.c struct hlist_head *head; head 265 drivers/net/ethernet/qlogic/qede/qede_filter.c head = &edev->arfs->arfs_hl_head[i]; head 267 drivers/net/ethernet/qlogic/qede/qede_filter.c hlist_for_each_entry_safe(fltr, temp, head, node) { head 1354 drivers/net/ethernet/qlogic/qede/qede_filter.c qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location) head 1358 drivers/net/ethernet/qlogic/qede/qede_filter.c hlist_for_each_entry(fltr, head, node) head 1369 drivers/net/ethernet/qlogic/qede/qede_filter.c struct hlist_head *head; head 1381 drivers/net/ethernet/qlogic/qede/qede_filter.c head = QEDE_ARFS_BUCKET_HEAD(edev, 0); head 1383 drivers/net/ethernet/qlogic/qede/qede_filter.c hlist_for_each_entry(fltr, head, node) { head 1674 drivers/net/ethernet/qlogic/qede/qede_filter.c struct hlist_head *head; head 1676 drivers/net/ethernet/qlogic/qede/qede_filter.c head = QEDE_ARFS_BUCKET_HEAD(edev, 0); head 1678 drivers/net/ethernet/qlogic/qede/qede_filter.c hlist_for_each_entry_safe(fltr, temp, head, node) { head 76 drivers/net/ethernet/qlogic/qede/qede_rdma.c struct list_head *head = &edev->rdma_info.rdma_event_list; head 80 drivers/net/ethernet/qlogic/qede/qede_rdma.c while (!list_empty(head)) { head 81 drivers/net/ethernet/qlogic/qede/qede_rdma.c event_node = list_entry(head->next, struct qede_rdma_event_work, head 3895 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c struct list_head *head = &mbx->cmd_q; head 3900 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c while (!list_empty(head)) { head 3901 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c cmd = list_entry(head->next, struct qlcnic_cmd_args, list); head 4102 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c struct list_head *head = &mbx->cmd_q; head 4121 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c if (list_empty(head)) { head 4125 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c cmd = list_entry(head->next, struct qlcnic_cmd_args, list); head 464 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c struct list_head *head; head 468 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c list_for_each(head, &adapter->mac_list) { head 469 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c cur = list_entry(head, struct qlcnic_mac_vlan_list, list); head 487 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c struct list_head *head; head 490 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c list_for_each(head, &adapter->mac_list) { head 491 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c cur = list_entry(head, struct qlcnic_mac_vlan_list, list); head 519 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c struct list_head *head, *tmp; head 521 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c list_for_each_safe(head, tmp, &adapter->mac_list) { head 522 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c cur = list_entry(head, struct qlcnic_mac_vlan_list, list); head 622 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c struct list_head *head = &adapter->mac_list; head 625 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c while (!list_empty(head)) { head 626 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list); head 638 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c struct hlist_head *head; head 644 
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c head = &(adapter->fhash.fhead[i]); head 645 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { head 663 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c head = &(adapter->rx_fhash.fhead[i]); head 665 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c hlist_for_each_entry_safe(tmp_fil, n, head, fnode) head 683 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c struct hlist_head *head; head 688 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c head = &(adapter->fhash.fhead[i]); head 689 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { head 176 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head, head 182 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { head 196 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c struct hlist_head *head; head 213 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c head = &(adapter->rx_fhash.fhead[hindex]); head 215 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); head 231 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c hlist_add_head(&(fil->fnode), head); head 235 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c head = &adapter->fhash.fhead[hindex]; head 239 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); head 257 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c head = &adapter->rx_fhash.fhead[hindex]; head 261 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id); head 310 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c struct hlist_head *head; head 331 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c head = &(adapter->fhash.fhead[hindex]); head 333 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c hlist_for_each_entry_safe(tmp_fil, n, head, fnode) { head 358 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c hlist_add_head(&(fil->fnode), head); head 852 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c struct list_head *head; head 858 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c head = &rds_ring->free_list; head 859 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c while (!list_empty(head)) { head 860 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); head 1444 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c struct list_head *head; head 1447 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c head = &rds_ring->free_list; head 1449 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c while (!list_empty(head)) { head 1451 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c buffer = list_entry(head->next, struct qlcnic_rx_buffer, list); head 323 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c struct list_head *head; head 325 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c list_for_each(head, &adapter->mac_list) { head 326 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c cur = list_entry(head, struct qlcnic_mac_vlan_list, list); head 2885 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c void *head; head 2909 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c head = kcalloc(adapter->fhash.fbucket_size, head 2912 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c if (!head) head 2916 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c adapter->fhash.fhead = head; head 2926 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c head = 
kcalloc(adapter->rx_fhash.fbucket_size, head 2929 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c if (!head) head 2933 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c adapter->rx_fhash.fhead = head; head 1524 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c struct list_head *head = &bc->async_cmd_list; head 1531 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c while (!list_empty(head)) { head 1532 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c entry = list_entry(head->next, struct qlcnic_async_cmd, head 1601 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c struct list_head *head; head 1605 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c head = &bc->async_cmd_list; head 1608 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c list_splice_init(head, &del_list); head 1618 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c if (!list_empty(head)) head 2099 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c struct list_head *head = &adapter->mac_list; head 2102 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c while (!list_empty(head)) { head 2103 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list); head 79 drivers/net/ethernet/qualcomm/qca_debug.c if (qca->txr.skb[qca->txr.head] == NULL) head 287 drivers/net/ethernet/qualcomm/qca_spi.c if (qca->txr.skb[qca->txr.head] == NULL) head 300 drivers/net/ethernet/qualcomm/qca_spi.c while (qca->txr.skb[qca->txr.head]) { head 301 drivers/net/ethernet/qualcomm/qca_spi.c pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN; head 309 drivers/net/ethernet/qualcomm/qca_spi.c if (qcaspi_tx_frame(qca, qca->txr.skb[qca->txr.head]) == -1) { head 316 drivers/net/ethernet/qualcomm/qca_spi.c n_stats->tx_bytes += qca->txr.skb[qca->txr.head]->len; head 324 drivers/net/ethernet/qualcomm/qca_spi.c dev_kfree_skb(qca->txr.skb[qca->txr.head]); head 325 drivers/net/ethernet/qualcomm/qca_spi.c qca->txr.skb[qca->txr.head] = NULL; head 327 drivers/net/ethernet/qualcomm/qca_spi.c new_head = qca->txr.head + 1; head 330 drivers/net/ethernet/qualcomm/qca_spi.c qca->txr.head = new_head; head 488 drivers/net/ethernet/qualcomm/qca_spi.c qca->txr.head = 0; head 577 drivers/net/ethernet/qualcomm/qca_spi.c (qca->txr.skb[qca->txr.head] == NULL) && head 585 drivers/net/ethernet/qualcomm/qca_spi.c qca->txr.skb[qca->txr.head]); head 58 drivers/net/ethernet/qualcomm/qca_spi.h u16 head; head 177 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c static void rmnet_dellink(struct net_device *dev, struct list_head *head) head 206 drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c unregister_netdevice_queue(dev, head); head 30 drivers/net/ethernet/rocker/rocker.h u32 head; head 371 drivers/net/ethernet/rocker/rocker_main.c u32 head = __pos_inc(info->head, info->size); head 373 drivers/net/ethernet/rocker/rocker_main.c desc_info = &info->desc_info[info->head]; head 374 drivers/net/ethernet/rocker/rocker_main.c if (head == info->tail) head 390 drivers/net/ethernet/rocker/rocker_main.c u32 head = __pos_inc(info->head, info->size); head 392 drivers/net/ethernet/rocker/rocker_main.c BUG_ON(head == info->tail); head 394 drivers/net/ethernet/rocker/rocker_main.c info->head = head; head 395 drivers/net/ethernet/rocker/rocker_main.c rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); head 403 drivers/net/ethernet/rocker/rocker_main.c if (info->tail == info->head) head 437 drivers/net/ethernet/rocker/rocker_main.c info->head = 0; head 479 
drivers/net/ethernet/rocker/rocker_main.c BUG_ON(info->head || info->tail); head 19 drivers/net/ethernet/rocker/rocker_tlv.c const struct rocker_tlv *head = (const struct rocker_tlv *) buf; head 24 drivers/net/ethernet/rocker/rocker_tlv.c rocker_tlv_for_each(tlv, head, buf_len, rem) { head 45 drivers/net/ethernet/rocker/rocker_tlv.h #define rocker_tlv_for_each(pos, head, len, rem) \ head 46 drivers/net/ethernet/rocker/rocker_tlv.h for (pos = head, rem = len; \ head 3178 drivers/net/ethernet/sfc/efx.c struct hlist_head *head; head 3181 drivers/net/ethernet/sfc/efx.c head = efx_rps_hash_bucket(efx, spec); head 3182 drivers/net/ethernet/sfc/efx.c if (!head) head 3184 drivers/net/ethernet/sfc/efx.c hlist_for_each(node, head) { head 3197 drivers/net/ethernet/sfc/efx.c struct hlist_head *head; head 3200 drivers/net/ethernet/sfc/efx.c head = efx_rps_hash_bucket(efx, spec); head 3201 drivers/net/ethernet/sfc/efx.c if (!head) head 3203 drivers/net/ethernet/sfc/efx.c hlist_for_each(node, head) { head 3214 drivers/net/ethernet/sfc/efx.c hlist_add_head(&rule->node, head); head 3222 drivers/net/ethernet/sfc/efx.c struct hlist_head *head; head 3225 drivers/net/ethernet/sfc/efx.c head = efx_rps_hash_bucket(efx, spec); head 3226 drivers/net/ethernet/sfc/efx.c if (WARN_ON(!head)) head 3228 drivers/net/ethernet/sfc/efx.c hlist_for_each(node, head) { head 3254 drivers/net/ethernet/sfc/efx.c struct list_head *head = &efx->rss_context.list; head 3261 drivers/net/ethernet/sfc/efx.c list_for_each_entry(ctx, head, list) { head 3287 drivers/net/ethernet/sfc/efx.c struct list_head *head = &efx->rss_context.list; head 3292 drivers/net/ethernet/sfc/efx.c list_for_each_entry(ctx, head, list) head 234 drivers/net/ethernet/sgi/meth.c priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head); head 452 drivers/net/ethernet/sgi/meth.c priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head; head 283 drivers/net/ethernet/socionext/netsec.c u16 head, tail; head 779 drivers/net/ethernet/socionext/netsec.c int idx = dring->head; head 809 drivers/net/ethernet/socionext/netsec.c dring->head = (dring->head + 1) % DESC_NUM; head 824 drivers/net/ethernet/socionext/netsec.c if (tx_ring->head >= tx_ring->tail) head 825 drivers/net/ethernet/socionext/netsec.c filled = tx_ring->head - tx_ring->tail; head 827 drivers/net/ethernet/socionext/netsec.c filled = tx_ring->head + DESC_NUM - tx_ring->tail; head 1080 drivers/net/ethernet/socionext/netsec.c if (dring->head >= dring->tail) head 1081 drivers/net/ethernet/socionext/netsec.c used = dring->head - dring->tail; head 1083 drivers/net/ethernet/socionext/netsec.c used = dring->head + DESC_NUM - dring->tail; head 1212 drivers/net/ethernet/socionext/netsec.c dring->head = 0; head 403 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c static void dwmac4_display_ring(void *head, unsigned int size, bool rx) head 405 drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c struct dma_desc *p = (struct dma_desc *)head; head 420 drivers/net/ethernet/stmicro/stmmac/enh_desc.c static void enh_desc_display_ring(void *head, unsigned int size, bool rx) head 422 drivers/net/ethernet/stmicro/stmmac/enh_desc.c struct dma_extended_desc *ep = (struct dma_extended_desc *)head; head 80 drivers/net/ethernet/stmicro/stmmac/hwif.h void (*display_ring)(void *head, unsigned int size, bool rx); head 272 drivers/net/ethernet/stmicro/stmmac/norm_desc.c static void ndesc_display_ring(void *head, unsigned int size, bool rx) head 274 drivers/net/ethernet/stmicro/stmmac/norm_desc.c struct dma_desc *p = (struct dma_desc *)head; head 
4036 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c static void sysfs_display_ring(void *head, int size, int extend_desc, head 4040 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c struct dma_extended_desc *ep = (struct dma_extended_desc *)head; head 4041 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c struct dma_desc *p = (struct dma_desc *)head; head 201 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c skb->csum_start = skb_transport_header(skb) - skb->head; head 1303 drivers/net/ethernet/sun/sunvnet_common.c curr->csum_start = skb_transport_header(curr) - curr->head; head 109 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_desc __iomem *head, *tail; head 392 drivers/net/ethernet/ti/davinci_cpdma.c if (chan->head) { head 393 drivers/net/ethernet/ti/davinci_cpdma.c chan_write(chan, hdp, desc_phys(pool, chan->head)); head 992 drivers/net/ethernet/ti/davinci_cpdma.c if (!chan->head) { head 994 drivers/net/ethernet/ti/davinci_cpdma.c chan->head = desc; head 1231 drivers/net/ethernet/ti/davinci_cpdma.c desc = chan->head; head 1253 drivers/net/ethernet/ti/davinci_cpdma.c chan->head = desc_from_phys(pool, desc_read(desc, hw_next)); head 1258 drivers/net/ethernet/ti/davinci_cpdma.c if ((status & CPDMA_DESC_EOQ) && chan->head) { head 1260 drivers/net/ethernet/ti/davinci_cpdma.c chan_write(chan, hdp, desc_phys(pool, chan->head)); head 1354 drivers/net/ethernet/ti/davinci_cpdma.c while (chan->head) { head 1355 drivers/net/ethernet/ti/davinci_cpdma.c struct cpdma_desc __iomem *desc = chan->head; head 1359 drivers/net/ethernet/ti/davinci_cpdma.c chan->head = desc_from_phys(pool, next_dma); head 152 drivers/net/ethernet/toshiba/ps3_gelic_net.c if (gelic_descr_get_status(card->rx_chain.head) != head 155 drivers/net/ethernet/toshiba/ps3_gelic_net.c be32_to_cpu(card->rx_chain.head->dmac_cmd_status)); head 157 drivers/net/ethernet/toshiba/ps3_gelic_net.c be32_to_cpu(card->rx_chain.head->next_descr_addr)); head 159 drivers/net/ethernet/toshiba/ps3_gelic_net.c card->rx_chain.head); head 163 drivers/net/ethernet/toshiba/ps3_gelic_net.c card->rx_chain.head->bus_addr, 0); head 230 drivers/net/ethernet/toshiba/ps3_gelic_net.c chain->head = start_descr; head 342 drivers/net/ethernet/toshiba/ps3_gelic_net.c chain->head = start_descr; head 424 drivers/net/ethernet/toshiba/ps3_gelic_net.c struct gelic_descr *descr = card->rx_chain.head; head 439 drivers/net/ethernet/toshiba/ps3_gelic_net.c } while (descr != card->rx_chain.head); head 452 drivers/net/ethernet/toshiba/ps3_gelic_net.c struct gelic_descr *descr = card->rx_chain.head; head 462 drivers/net/ethernet/toshiba/ps3_gelic_net.c } while (descr != card->rx_chain.head); head 546 drivers/net/ethernet/toshiba/ps3_gelic_net.c tx_chain->head != tx_chain->tail && tx_chain->tail; head 678 drivers/net/ethernet/toshiba/ps3_gelic_net.c if (!card->tx_chain.head) head 681 drivers/net/ethernet/toshiba/ps3_gelic_net.c if (card->tx_chain.tail != card->tx_chain.head->next && head 682 drivers/net/ethernet/toshiba/ps3_gelic_net.c gelic_descr_get_status(card->tx_chain.head) == head 684 drivers/net/ethernet/toshiba/ps3_gelic_net.c return card->tx_chain.head; head 799 drivers/net/ethernet/toshiba/ps3_gelic_net.c card->tx_chain.head = descr->next; head 889 drivers/net/ethernet/toshiba/ps3_gelic_net.c card->tx_chain.head = descr; head 971 drivers/net/ethernet/toshiba/ps3_gelic_net.c struct gelic_descr *descr = chain->head; head 1060 drivers/net/ethernet/toshiba/ps3_gelic_net.c chain->head = descr->next; head 1732 drivers/net/ethernet/toshiba/ps3_gelic_net.c 
card->tx_top = card->tx_chain.head; head 1733 drivers/net/ethernet/toshiba/ps3_gelic_net.c card->rx_top = card->rx_chain.head; head 1768 drivers/net/ethernet/toshiba/ps3_gelic_net.c gelic_card_free_chain(card, card->rx_chain.head); head 1770 drivers/net/ethernet/toshiba/ps3_gelic_net.c gelic_card_free_chain(card, card->tx_chain.head); head 252 drivers/net/ethernet/toshiba/ps3_gelic_net.h struct gelic_descr *head; head 339 drivers/net/ethernet/toshiba/spider_net.c chain->head = chain->ring; head 355 drivers/net/ethernet/toshiba/spider_net.c descr = card->rx_chain.head; head 365 drivers/net/ethernet/toshiba/spider_net.c } while (descr != card->rx_chain.head); head 497 drivers/net/ethernet/toshiba/spider_net.c while (spider_net_get_descr_status(chain->head->hwdescr) == head 499 drivers/net/ethernet/toshiba/spider_net.c if (spider_net_prepare_rx_descr(card, chain->head)) head 501 drivers/net/ethernet/toshiba/spider_net.c chain->head = chain->head->next; head 529 drivers/net/ethernet/toshiba/spider_net.c if (spider_net_prepare_rx_descr(card, chain->head)) head 532 drivers/net/ethernet/toshiba/spider_net.c chain->head = chain->head->next; head 662 drivers/net/ethernet/toshiba/spider_net.c descr = card->tx_chain.head; head 669 drivers/net/ethernet/toshiba/spider_net.c chain->head = descr->next; head 711 drivers/net/ethernet/toshiba/spider_net.c while (descr != card->tx_chain.head) { head 768 drivers/net/ethernet/toshiba/spider_net.c if (chain->tail == chain->head) { head 857 drivers/net/ethernet/toshiba/spider_net.c if (descr == card->tx_chain.head) head 1008 drivers/net/ethernet/toshiba/spider_net.c if (descr == chain->head) head 1083 drivers/net/ethernet/toshiba/spider_net.c descr = chain->head; head 1091 drivers/net/ethernet/toshiba/spider_net.c descr = chain->head; head 1098 drivers/net/ethernet/toshiba/spider_net.c chain->head = descr; head 394 drivers/net/ethernet/toshiba/spider_net.h struct spider_net_descr *head; head 1780 drivers/net/ethernet/toshiba/tc35815.c int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM; head 1781 drivers/net/ethernet/toshiba/tc35815.c struct TxFD *txhead = &lp->tfd_base[head]; head 156 drivers/net/fjes/fjes_hw.c info->v1i.head = 0; head 902 drivers/net/fjes/fjes_hw.c return EP_RING_EMPTY(info->v1i.head, info->v1i.tail, head 914 drivers/net/fjes/fjes_hw.c (info->v1i.head, head 932 drivers/net/fjes/fjes_hw.c EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max); head 941 drivers/net/fjes/fjes_hw.c if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max)) head 218 drivers/net/fjes/fjes_hw.h u32 head; head 526 drivers/net/fjes/fjes_main.c if (EP_RING_FULL(info->v1i.head, info->v1i.tail, head 474 drivers/net/geneve.c struct list_head *head, head 505 drivers/net/geneve.c list_for_each_entry(p, head, list) { head 526 drivers/net/geneve.c pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); head 1656 drivers/net/geneve.c static void geneve_dellink(struct net_device *dev, struct list_head *head) head 1661 drivers/net/geneve.c unregister_netdevice_queue(dev, head); head 1829 drivers/net/geneve.c static void geneve_destroy_tunnels(struct net *net, struct list_head *head) head 1838 drivers/net/geneve.c unregister_netdevice_queue(dev, head); head 1846 drivers/net/geneve.c unregister_netdevice_queue(geneve->dev, head); head 107 drivers/net/gtp.c struct hlist_head *head; head 110 drivers/net/gtp.c head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size]; head 112 drivers/net/gtp.c hlist_for_each_entry_rcu(pdp, head, hlist_tid) { head 123
drivers/net/gtp.c struct hlist_head *head; head 126 drivers/net/gtp.c head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size]; head 128 drivers/net/gtp.c hlist_for_each_entry_rcu(pdp, head, hlist_tid) { head 139 drivers/net/gtp.c struct hlist_head *head; head 142 drivers/net/gtp.c head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size]; head 144 drivers/net/gtp.c hlist_for_each_entry_rcu(pdp, head, hlist_addr) { head 704 drivers/net/gtp.c static void gtp_dellink(struct net_device *dev, struct list_head *head) head 715 drivers/net/gtp.c unregister_netdevice_queue(dev, head); head 1027 drivers/net/gtp.c static void pdp_context_free(struct rcu_head *head) head 1029 drivers/net/gtp.c struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head); head 114 drivers/net/hyperv/netvsc.c static void free_netvsc_device(struct rcu_head *head) head 117 drivers/net/hyperv/netvsc.c = container_of(head, struct netvsc_device, rcu); head 583 drivers/net/hyperv/netvsc_drv.c rndis_msg = (struct rndis_message *)skb->head; head 171 drivers/net/ipvlan/ipvlan.h void ipvlan_link_delete(struct net_device *dev, struct list_head *head); head 627 drivers/net/ipvlan/ipvlan_main.c void ipvlan_link_delete(struct net_device *dev, struct list_head *head) head 642 drivers/net/ipvlan/ipvlan_main.c unregister_netdevice_queue(dev, head); head 113 drivers/net/ipvlan/ipvtap.c struct list_head *head) head 119 drivers/net/ipvlan/ipvtap.c ipvlan_link_delete(dev, head); head 320 drivers/net/macsec.c static void free_rx_sc_rcu(struct rcu_head *head) head 322 drivers/net/macsec.c struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head); head 339 drivers/net/macsec.c static void free_rxsa(struct rcu_head *head) head 341 drivers/net/macsec.c struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu); head 367 drivers/net/macsec.c static void free_txsa(struct rcu_head *head) head 369 drivers/net/macsec.c struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu); head 3121 drivers/net/macsec.c static void macsec_common_dellink(struct net_device *dev, struct list_head *head) head 3126 drivers/net/macsec.c unregister_netdevice_queue(dev, head); head 3134 drivers/net/macsec.c static void macsec_dellink(struct net_device *dev, struct list_head *head) head 3140 drivers/net/macsec.c macsec_common_dellink(dev, head); head 3478 drivers/net/macsec.c LIST_HEAD(head); head 3504 drivers/net/macsec.c macsec_common_dellink(m->secy.netdev, &head); head 3510 drivers/net/macsec.c unregister_netdevice_many(&head); head 1322 drivers/net/macvlan.c struct nlattr *nla, *head; head 1357 drivers/net/macvlan.c head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); head 1360 drivers/net/macvlan.c nla_for_each_attr(nla, head, len, rem) { head 1498 drivers/net/macvlan.c void macvlan_dellink(struct net_device *dev, struct list_head *head) head 1505 drivers/net/macvlan.c unregister_netdevice_queue(dev, head); head 121 drivers/net/macvtap.c struct list_head *head) head 127 drivers/net/macvtap.c macvlan_dellink(dev, head); head 1126 drivers/net/ppp/ppp_generic.c static void ppp_nl_dellink(struct net_device *dev, struct list_head *head) head 1128 drivers/net/ppp/ppp_generic.c unregister_netdevice_queue(dev, head); head 2441 drivers/net/ppp/ppp_generic.c struct sk_buff *head, *tail; head 2447 drivers/net/ppp/ppp_generic.c head = __skb_peek(list); head 2490 drivers/net/ppp/ppp_generic.c head = p; head 2499 drivers/net/ppp/ppp_generic.c (PPP_MP_CB(head)->BEbits & B)) { head 2528 drivers/net/ppp/ppp_generic.c head =
skb_peek(list); head 2529 drivers/net/ppp/ppp_generic.c if (!head) head 2539 drivers/net/ppp/ppp_generic.c if (PPP_MP_CB(head)->sequence != ppp->nextseq) { head 2541 drivers/net/ppp/ppp_generic.c if (p == head) head 2555 drivers/net/ppp/ppp_generic.c PPP_MP_CB(head)->sequence-1); head 2560 drivers/net/ppp/ppp_generic.c skb = head; head 2561 drivers/net/ppp/ppp_generic.c if (head != tail) { head 2563 drivers/net/ppp/ppp_generic.c p = skb_queue_next(list, head); head 324 drivers/net/ppp/pptp.c skb_set_network_header(skb, skb->head-skb->data); head 408 drivers/net/tun.c static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash) head 412 drivers/net/tun.c hlist_for_each_entry_rcu(e, head, hash_link) { head 420 drivers/net/tun.c struct hlist_head *head, head 433 drivers/net/tun.c hlist_add_head_rcu(&e->hash_link, head); head 517 drivers/net/tun.c struct hlist_head *head; head 522 drivers/net/tun.c head = &tun->flows[tun_hashfn(rxhash)]; head 526 drivers/net/tun.c e = tun_flow_find(head, rxhash); head 536 drivers/net/tun.c if (!tun_flow_find(head, rxhash) && head 538 drivers/net/tun.c tun_flow_create(tun, head, rxhash, queue_index); head 2127 drivers/net/tun.c 16, 1, skb->head, head 246 drivers/net/usb/asix_common.c skb->data = memmove(skb->head + 4, skb->data, skb->len); head 116 drivers/net/usb/cdc_eem.c skb->data = memmove(skb->head + head 151 drivers/net/usb/gl620a.c skb->data = memmove(skb->head + (4 + 4*1), head 78 drivers/net/usb/int51x1.c skb->data = memmove(skb->head + INT51X1_HEADER_SIZE, head 176 drivers/net/usb/kalmia.c skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH, head 259 drivers/net/usb/lg-vl600.c skb->data = memmove(skb->head + sizeof(*frame), head 437 drivers/net/usb/net1080.c skb->data = memmove(skb->head head 312 drivers/net/usb/qmi_wwan.c struct list_head *head) head 319 drivers/net/usb/qmi_wwan.c unregister_netdevice_queue(dev, head); head 727 drivers/net/usb/r8152.c void *head; head 1598 drivers/net/usb/r8152.c tp->tx_info[i].head = NULL; head 1658 drivers/net/usb/r8152.c tp->tx_info[i].head = tx_agg_align(buf); head 1884 drivers/net/usb/r8152.c tx_data = agg->head; head 1936 drivers/net/usb/r8152.c remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); head 1961 drivers/net/usb/r8152.c agg->head, (int)(tx_data - (u8 *)agg->head), head 551 drivers/net/usb/rndis_host.c skb->data = memmove(skb->head + sizeof *hdr, head 124 drivers/net/usb/sr9800.c skb->data = memmove(skb->head + 4, skb->data, head 368 drivers/net/veth.c static struct sk_buff *veth_build_skb(void *head, int headroom, int len, head 377 drivers/net/veth.c skb = build_skb(head, buflen); head 513 drivers/net/veth.c void *head = hard_start - sizeof(struct xdp_frame); head 541 drivers/net/veth.c xdp.data_hard_start = head; head 553 drivers/net/veth.c xdp.data_hard_start = head; head 575 drivers/net/veth.c skb = veth_build_skb(head, headroom, len, 0); head 620 drivers/net/veth.c void *head, *start; head 632 drivers/net/veth.c head = page_address(page); head 633 drivers/net/veth.c start = head + VETH_XDP_HEADROOM; head 635 drivers/net/veth.c page_frag_free(head); head 639 drivers/net/veth.c nskb = veth_build_skb(head, head 643 drivers/net/veth.c page_frag_free(head); head 654 drivers/net/veth.c xdp.data_hard_start = skb->head; head 1358 drivers/net/veth.c static void veth_dellink(struct net_device *dev, struct list_head *head) head 1371 drivers/net/veth.c unregister_netdevice_queue(dev, head); head 1376 drivers/net/veth.c unregister_netdevice_queue(peer, head); head 1313 
drivers/net/vrf.c static void vrf_dellink(struct net_device *dev, struct list_head *head) head 1321 drivers/net/vrf.c unregister_netdevice_queue(dev, head); head 490 drivers/net/vxlan.c struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni); head 493 drivers/net/vxlan.c hlist_for_each_entry_rcu(f, head, hlist) { head 728 drivers/net/vxlan.c struct list_head *head, head 767 drivers/net/vxlan.c list_for_each_entry(p, head, list) { head 779 drivers/net/vxlan.c pp = call_gro_receive(eth_gro_receive, head, skb); head 864 drivers/net/vxlan.c static void vxlan_fdb_free(struct rcu_head *head) head 866 drivers/net/vxlan.c struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu); head 888 drivers/net/vxlan.c static void vxlan_dst_free(struct rcu_head *head) head 890 drivers/net/vxlan.c struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu); head 4031 drivers/net/vxlan.c static void vxlan_dellink(struct net_device *dev, struct list_head *head) head 4038 drivers/net/vxlan.c unregister_netdevice_queue(dev, head); head 4411 drivers/net/vxlan.c static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) head 4420 drivers/net/vxlan.c unregister_netdevice_queue(dev, head); head 4427 drivers/net/vxlan.c unregister_netdevice_queue(vxlan->dev, head); head 202 drivers/net/wireless/ath/ath10k/debug.c static void ath10k_fw_stats_pdevs_free(struct list_head *head) head 206 drivers/net/wireless/ath/ath10k/debug.c list_for_each_entry_safe(i, tmp, head, list) { head 212 drivers/net/wireless/ath/ath10k/debug.c static void ath10k_fw_stats_vdevs_free(struct list_head *head) head 216 drivers/net/wireless/ath/ath10k/debug.c list_for_each_entry_safe(i, tmp, head, list) { head 222 drivers/net/wireless/ath/ath10k/debug.c static void ath10k_fw_stats_peers_free(struct list_head *head) head 226 drivers/net/wireless/ath/ath10k/debug.c list_for_each_entry_safe(i, tmp, head, list) { head 232 drivers/net/wireless/ath/ath10k/debug.c static void ath10k_fw_extd_stats_peers_free(struct list_head *head) head 236 drivers/net/wireless/ath/ath10k/debug.c list_for_each_entry_safe(i, tmp, head, list) { head 8069 drivers/net/wireless/ath/ath10k/wmi.c size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head) head 8074 drivers/net/wireless/ath/ath10k/wmi.c list_for_each_entry(i, head, list) head 8080 drivers/net/wireless/ath/ath10k/wmi.c size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head) head 8085 drivers/net/wireless/ath/ath10k/wmi.c list_for_each_entry(i, head, list) head 7373 drivers/net/wireless/ath/ath10k/wmi.h size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head); head 7374 drivers/net/wireless/ath/ath10k/wmi.h size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head); head 2800 drivers/net/wireless/ath/ath6kl/cfg80211.c if (info->beacon.head == NULL) head 2802 drivers/net/wireless/ath/ath6kl/cfg80211.c mgmt = (struct ieee80211_mgmt *) info->beacon.head; head 2804 drivers/net/wireless/ath/ath6kl/cfg80211.c if (ies > info->beacon.head + info->beacon.head_len) head 397 drivers/net/wireless/ath/ath6kl/txrx.c (skb_network_header(skb) - skb->head) + head 900 drivers/net/wireless/ath/ath6kl/txrx.c packet = (struct htc_packet *) skb->head; head 926 drivers/net/wireless/ath/ath6kl/txrx.c packet = (struct htc_packet *) skb->head; head 76 drivers/net/wireless/ath/ath9k/ath9k.h struct list_head *head, const char *name, head 284 drivers/net/wireless/ath/ath9k/init.c struct list_head *head, const char *name, head 294 drivers/net/wireless/ath/ath9k/init.c INIT_LIST_HEAD(head); head 369 
drivers/net/wireless/ath/ath9k/init.c list_add_tail(&bf->list, head); head 400 drivers/net/wireless/ath/ath9k/init.c list_add_tail(&bf->list, head); head 60 drivers/net/wireless/ath/ath9k/xmit.c struct list_head *head, bool internal); head 1977 drivers/net/wireless/ath/ath9k/xmit.c struct list_head *head, bool internal) head 1990 drivers/net/wireless/ath/ath9k/xmit.c if (list_empty(head)) head 1994 drivers/net/wireless/ath/ath9k/xmit.c bf = list_first_entry(head, struct ath_buf, list); head 1995 drivers/net/wireless/ath/ath9k/xmit.c bf_last = list_entry(head->prev, struct ath_buf, list); head 2001 drivers/net/wireless/ath/ath9k/xmit.c list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]); head 2005 drivers/net/wireless/ath/ath9k/xmit.c list_splice_tail_init(head, &txq->axq_q); head 473 drivers/net/wireless/ath/carl9170/carl9170.h struct rcu_head head; head 40 drivers/net/wireless/ath/carl9170/fw.c const struct carl9170fw_desc_head *head, unsigned int max_len) head 51 drivers/net/wireless/ath/carl9170/fw.c pos = head; head 56 drivers/net/wireless/ath/carl9170/fw.c if (pos_addr + sizeof(*head) > end_addr) head 61 drivers/net/wireless/ath/carl9170/fw.c if (pos_length < sizeof(*head)) head 113 drivers/net/wireless/ath/carl9170/fwdesc.h struct carl9170fw_desc_head head; head 134 drivers/net/wireless/ath/carl9170/fwdesc.h struct carl9170fw_desc_head head; head 151 drivers/net/wireless/ath/carl9170/fwdesc.h struct carl9170fw_desc_head head; head 160 drivers/net/wireless/ath/carl9170/fwdesc.h struct carl9170fw_desc_head head; head 176 drivers/net/wireless/ath/carl9170/fwdesc.h struct carl9170fw_desc_head head; head 186 drivers/net/wireless/ath/carl9170/fwdesc.h struct carl9170fw_desc_head head; head 196 drivers/net/wireless/ath/carl9170/fwdesc.h struct carl9170fw_desc_head head; head 206 drivers/net/wireless/ath/carl9170/fwdesc.h struct carl9170fw_desc_head head; head 214 drivers/net/wireless/ath/carl9170/fwdesc.h .head = { \ head 221 drivers/net/wireless/ath/carl9170/fwdesc.h static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head, head 225 drivers/net/wireless/ath/carl9170/fwdesc.h head->magic[0] = magic[0]; head 226 drivers/net/wireless/ath/carl9170/fwdesc.h head->magic[1] = magic[1]; head 227 drivers/net/wireless/ath/carl9170/fwdesc.h head->magic[2] = magic[2]; head 228 drivers/net/wireless/ath/carl9170/fwdesc.h head->magic[3] = magic[3]; head 230 drivers/net/wireless/ath/carl9170/fwdesc.h head->length = length; head 231 drivers/net/wireless/ath/carl9170/fwdesc.h head->min_ver = min_ver; head 232 drivers/net/wireless/ath/carl9170/fwdesc.h head->cur_ver = cur_ver; head 242 drivers/net/wireless/ath/carl9170/fwdesc.h #define CHECK_HDR_VERSION(head, _min_ver) \ head 243 drivers/net/wireless/ath/carl9170/fwdesc.h (((head)->cur_ver < _min_ver) || ((head)->min_ver > _min_ver)) \ head 250 drivers/net/wireless/ath/carl9170/fwdesc.h static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head, head 254 drivers/net/wireless/ath/carl9170/fwdesc.h if (descid[0] == head->magic[0] && descid[1] == head->magic[1] && head 255 drivers/net/wireless/ath/carl9170/fwdesc.h descid[2] == head->magic[2] && descid[3] == head->magic[3] && head 256 drivers/net/wireless/ath/carl9170/fwdesc.h !CHECK_HDR_VERSION(head, compatible_revision) && head 257 drivers/net/wireless/ath/carl9170/fwdesc.h (le16_to_cpu(head->length) >= min_len)) head 290 drivers/net/wireless/ath/carl9170/rx.c struct ar9170_rx_head *head, struct ar9170_rx_macstatus *mac, head 362 
drivers/net/wireless/ath/carl9170/rx.c switch (head->plcp[0]) { head 378 drivers/net/wireless/ath/carl9170/rx.c "rate (%x).\n", head->plcp[0]); head 387 drivers/net/wireless/ath/carl9170/rx.c switch (head->plcp[0] & 0xf) { head 415 drivers/net/wireless/ath/carl9170/rx.c "rate (%x).\n", head->plcp[0]); head 425 drivers/net/wireless/ath/carl9170/rx.c if (head->plcp[3] & 0x80) head 427 drivers/net/wireless/ath/carl9170/rx.c if (head->plcp[6] & 0x80) head 430 drivers/net/wireless/ath/carl9170/rx.c status->rate_idx = clamp(head->plcp[3] & 0x7f, 0, 75); head 611 drivers/net/wireless/ath/carl9170/rx.c kfree_rcu(entry, head); head 699 drivers/net/wireless/ath/carl9170/rx.c struct ar9170_rx_head *head; head 723 drivers/net/wireless/ath/carl9170/rx.c head = (void *) buf; head 783 drivers/net/wireless/ath/carl9170/rx.c head = &ar->rx_plcp; head 788 drivers/net/wireless/ath/carl9170/rx.c head = (void *) buf; head 806 drivers/net/wireless/ath/carl9170/rx.c if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status))) head 463 drivers/net/wireless/ath/carl9170/tx.c kfree_rcu(entry, head); head 395 drivers/net/wireless/ath/carl9170/wlan.h struct ar9170_rx_frame_head head; head 161 drivers/net/wireless/ath/dfs_pattern_detector.c struct list_head head; head 184 drivers/net/wireless/ath/dfs_pattern_detector.c list_del(&cd->head); head 204 drivers/net/wireless/ath/dfs_pattern_detector.c INIT_LIST_HEAD(&cd->head); head 218 drivers/net/wireless/ath/dfs_pattern_detector.c list_add(&cd->head, &dpd->channel_detectors); head 241 drivers/net/wireless/ath/dfs_pattern_detector.c list_for_each_entry(cd, &dpd->channel_detectors, head) { head 257 drivers/net/wireless/ath/dfs_pattern_detector.c list_for_each_entry(cd, &dpd->channel_detectors, head) head 265 drivers/net/wireless/ath/dfs_pattern_detector.c list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head) head 335 drivers/net/wireless/ath/dfs_pattern_detector.c list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head) head 37 drivers/net/wireless/ath/dfs_pri_detector.c struct list_head head; head 105 drivers/net/wireless/ath/dfs_pri_detector.c list_for_each_entry_safe(p, p0, &pulse_pool, head) { head 106 drivers/net/wireless/ath/dfs_pri_detector.c list_del(&p->head); head 110 drivers/net/wireless/ath/dfs_pri_detector.c list_for_each_entry_safe(ps, ps0, &pseq_pool, head) { head 111 drivers/net/wireless/ath/dfs_pri_detector.c list_del(&ps->head); head 122 drivers/net/wireless/ath/dfs_pri_detector.c list_add(&pe->head, &pulse_pool); head 130 drivers/net/wireless/ath/dfs_pri_detector.c list_add(&pse->head, &pseq_pool); head 140 drivers/net/wireless/ath/dfs_pri_detector.c pse = list_first_entry(&pseq_pool, struct pri_sequence, head); head 141 drivers/net/wireless/ath/dfs_pri_detector.c list_del(&pse->head); head 153 drivers/net/wireless/ath/dfs_pri_detector.c pe = list_first_entry(&pulse_pool, struct pulse_elem, head); head 154 drivers/net/wireless/ath/dfs_pri_detector.c list_del(&pe->head); head 166 drivers/net/wireless/ath/dfs_pri_detector.c return list_entry(l->prev, struct pulse_elem, head); head 173 drivers/net/wireless/ath/dfs_pri_detector.c list_del_init(&p->head); head 214 drivers/net/wireless/ath/dfs_pri_detector.c INIT_LIST_HEAD(&p->head); head 216 drivers/net/wireless/ath/dfs_pri_detector.c list_add(&p->head, &pde->pulses); head 229 drivers/net/wireless/ath/dfs_pri_detector.c list_for_each_entry(p, &pde->pulses, head) { head 258 drivers/net/wireless/ath/dfs_pri_detector.c list_for_each_entry_continue(p2, &pde->pulses, head) { head 297 
drivers/net/wireless/ath/dfs_pri_detector.c INIT_LIST_HEAD(&new_ps->head); head 298 drivers/net/wireless/ath/dfs_pri_detector.c list_add(&new_ps->head, &pde->sequences); head 309 drivers/net/wireless/ath/dfs_pri_detector.c list_for_each_entry_safe(ps, ps2, &pde->sequences, head) { head 315 drivers/net/wireless/ath/dfs_pri_detector.c list_del_init(&ps->head); head 344 drivers/net/wireless/ath/dfs_pri_detector.c list_for_each_entry(ps, &pde->sequences, head) { head 363 drivers/net/wireless/ath/dfs_pri_detector.c list_for_each_entry_safe(ps, ps0, &pde->sequences, head) { head 364 drivers/net/wireless/ath/dfs_pri_detector.c list_del_init(&ps->head); head 367 drivers/net/wireless/ath/dfs_pri_detector.c list_for_each_entry_safe(p, p0, &pde->pulses, head) { head 368 drivers/net/wireless/ath/dfs_pri_detector.c list_del_init(&p->head); head 36 drivers/net/wireless/ath/dfs_pri_detector.h struct list_head head; head 1837 drivers/net/wireless/ath/wil6210/cfg80211.c b->head, b->head_len, true); head 308 drivers/net/wireless/ath/wil6210/debugfs.c seq_printf(s, " head = 0x%08x\n", r.head); head 319 drivers/net/wireless/ath/wil6210/debugfs.c !wmi_addr(wil, r.head)) { head 335 drivers/net/wireless/ath/wil6210/debugfs.c (r.head - r.base == delta) ? "h" : " ", head 1297 drivers/net/wireless/ath/wil6210/main.c le32_to_cpus(&r->head); head 470 drivers/net/wireless/ath/wil6210/wil6210.h u32 head; head 664 drivers/net/wireless/ath/wil6210/wmi.c void __iomem *head = wmi_addr(wil, r->head); head 691 drivers/net/wireless/ath/wil6210/wmi.c if (!head) { head 692 drivers/net/wireless/ath/wil6210/wmi.c wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head); head 700 drivers/net/wireless/ath/wil6210/wmi.c wil_memcpy_fromio_32(&d_head, head, sizeof(d_head)); head 711 drivers/net/wireless/ath/wil6210/wmi.c next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size); head 712 drivers/net/wireless/ath/wil6210/wmi.c wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head); head 749 drivers/net/wireless/ath/wil6210/wmi.c wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1); head 751 drivers/net/wireless/ath/wil6210/wmi.c wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head), head 752 drivers/net/wireless/ath/wil6210/wmi.c r->head = next_head); head 1918 drivers/net/wireless/ath/wil6210/wmi.c r->head = wil_r(wil, RGF_MBOX + head 1919 drivers/net/wireless/ath/wil6210/wmi.c offsetof(struct wil6210_mbox_ctl, rx.head)); head 1920 drivers/net/wireless/ath/wil6210/wmi.c if (r->tail == r->head) head 1924 drivers/net/wireless/ath/wil6210/wmi.c r->head, r->tail); head 3428 drivers/net/wireless/ath/wil6210/wmi.c r->head = wil_r(wil, RGF_MBOX + head 3429 drivers/net/wireless/ath/wil6210/wmi.c offsetof(struct wil6210_mbox_ctl, rx.head)); head 3430 drivers/net/wireless/ath/wil6210/wmi.c if (r->tail != r->head) head 4508 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c (u8 *)&settings->beacon.head[ie_offset], head 113 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h static inline void dma_spin_for_len(uint len, struct sk_buff *head) head 117 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h while (!(len = *(u16 *) KSEG1ADDR(head->data))) head 120 drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h *(u16 *) (head->data) = cpu_to_le16((u16) len); head 241 drivers/net/wireless/intel/iwlegacy/4965-rs.c tl->total -= tl->packet_count[tl->head]; head 242 drivers/net/wireless/intel/iwlegacy/4965-rs.c tl->packet_count[tl->head] = 0; head 245 
drivers/net/wireless/intel/iwlegacy/4965-rs.c tl->head++; head 246 drivers/net/wireless/intel/iwlegacy/4965-rs.c if (tl->head >= TID_QUEUE_MAX_SIZE) head 247 drivers/net/wireless/intel/iwlegacy/4965-rs.c tl->head = 0; head 282 drivers/net/wireless/intel/iwlegacy/4965-rs.c tl->head = 0; head 295 drivers/net/wireless/intel/iwlegacy/4965-rs.c idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE; head 2768 drivers/net/wireless/intel/iwlegacy/common.h u8 head; /* start of the circular buffer */ head 255 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tl->total -= tl->packet_count[tl->head]; head 256 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tl->packet_count[tl->head] = 0; head 259 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tl->head++; head 260 drivers/net/wireless/intel/iwlwifi/dvm/rs.c if (tl->head >= TID_QUEUE_MAX_SIZE) head 261 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tl->head = 0; head 296 drivers/net/wireless/intel/iwlwifi/dvm/rs.c tl->head = 0; head 309 drivers/net/wireless/intel/iwlwifi/dvm/rs.c index = (tl->head + index) % TID_QUEUE_MAX_SIZE; head 316 drivers/net/wireless/intel/iwlwifi/dvm/rs.h u8 head; /* start of the circular buffer */ head 4993 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c struct sk_buff *head, head 5005 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); head 2211 drivers/net/wireless/intel/iwlwifi/pcie/tx.c csum_skb->head; head 236 drivers/net/wireless/intersil/hostap/hostap_proc.c int head = local->io_debug_head; head 246 drivers/net/wireless/intersil/hostap/hostap_proc.c start_bytes = (PRISM2_IO_DEBUG_SIZE - head) * 4; head 253 drivers/net/wireless/intersil/hostap/hostap_proc.c memcpy(page, ((u8 *) &local->io_debug[head]) + off, copy); head 395 drivers/net/wireless/intersil/p54/fwio.c struct p54_scan_head *head; head 405 drivers/net/wireless/intersil/p54/fwio.c skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) + head 412 drivers/net/wireless/intersil/p54/fwio.c head = skb_put(skb, sizeof(*head)); head 413 drivers/net/wireless/intersil/p54/fwio.c memset(head->scan_params, 0, sizeof(head->scan_params)); head 414 drivers/net/wireless/intersil/p54/fwio.c head->mode = cpu_to_le16(mode); head 415 drivers/net/wireless/intersil/p54/fwio.c head->dwell = cpu_to_le16(dwell); head 416 drivers/net/wireless/intersil/p54/fwio.c head->freq = freq; head 569 drivers/net/wireless/marvell/mwifiex/init.c struct list_head *head; head 573 drivers/net/wireless/marvell/mwifiex/init.c head = &adapter->bss_prio_tbl[i].bss_prio_head; head 579 drivers/net/wireless/marvell/mwifiex/init.c priv->bss_type, priv->bss_num, i, head); head 583 drivers/net/wireless/marvell/mwifiex/init.c list_for_each_entry_safe(bssprio_node, tmp_node, head, head 272 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c struct mwifiex_ie_types_header *head; head 280 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c while (tlv_buf_left >= sizeof(*head)) { head 281 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c head = (struct mwifiex_ie_types_header *)tlv_buf; head 282 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c tlv = le16_to_cpu(head->type); head 283 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c tlv_buf_len = le16_to_cpu(head->len); head 285 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c if (tlv_buf_left < (sizeof(*head) + tlv_buf_len)) head 314 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c tlv_buf += (sizeof(*head) + tlv_buf_len); head 315 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c tlv_buf_left -= (sizeof(*head) 
+ tlv_buf_len); head 1111 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c struct mwifiex_ie_types_header *head; head 1121 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c while (tlv_buf_left >= sizeof(*head)) { head 1122 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c head = (struct mwifiex_ie_types_header *)tlv_buf; head 1123 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c tlv = le16_to_cpu(head->type); head 1124 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c tlv_buf_len = le16_to_cpu(head->len); head 1126 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c if (tlv_buf_left < (sizeof(*head) + tlv_buf_len)) head 1132 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c (u8 *)head + sizeof(*head), head 1135 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c (u8 *)head + sizeof(*head), tlv_buf_len); head 1141 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c tlv_buf += (sizeof(*head) + tlv_buf_len); head 1142 drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c tlv_buf_left -= (sizeof(*head) + tlv_buf_len); head 262 drivers/net/wireless/marvell/mwifiex/uap_cmd.c const u8 *var_pos = params->beacon.head + var_offset; head 71 drivers/net/wireless/marvell/mwifiex/wmm.h mwifiex_wmm_list_len(struct list_head *head) head 76 drivers/net/wireless/marvell/mwifiex/wmm.h list_for_each(pos, head) head 141 drivers/net/wireless/marvell/mwl8k.c int head; head 156 drivers/net/wireless/marvell/mwl8k.c int head; head 1167 drivers/net/wireless/marvell/mwl8k.c rxq->head = 0; head 1333 drivers/net/wireless/marvell/mwl8k.c skb = rxq->buf[rxq->head].skb; head 1337 drivers/net/wireless/marvell/mwl8k.c rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size); head 1344 drivers/net/wireless/marvell/mwl8k.c rxq->buf[rxq->head].skb = NULL; head 1347 drivers/net/wireless/marvell/mwl8k.c dma_unmap_addr(&rxq->buf[rxq->head], dma), head 1349 drivers/net/wireless/marvell/mwl8k.c dma_unmap_addr_set(&rxq->buf[rxq->head], dma, 0); head 1351 drivers/net/wireless/marvell/mwl8k.c rxq->head++; head 1352 drivers/net/wireless/marvell/mwl8k.c if (rxq->head == MWL8K_RX_DESCS) head 1353 drivers/net/wireless/marvell/mwl8k.c rxq->head = 0; head 1458 drivers/net/wireless/marvell/mwl8k.c txq->head = 0; head 1529 drivers/net/wireless/marvell/mwl8k.c txq->len, txq->head, txq->tail, head 1687 drivers/net/wireless/marvell/mwl8k.c tx = txq->head; head 1699 drivers/net/wireless/marvell/mwl8k.c txq->head = (tx + 1) % MWL8K_TX_DESCS; head 14 drivers/net/wireless/mediatek/mt76/agg-rx.c tid->head = ieee80211_sn_inc(tid->head); head 28 drivers/net/wireless/mediatek/mt76/agg-rx.c u16 head) head 32 drivers/net/wireless/mediatek/mt76/agg-rx.c while (ieee80211_sn_less(tid->head, head)) { head 33 drivers/net/wireless/mediatek/mt76/agg-rx.c idx = tid->head % tid->size; head 41 drivers/net/wireless/mediatek/mt76/agg-rx.c int idx = tid->head % tid->size; head 45 drivers/net/wireless/mediatek/mt76/agg-rx.c idx = tid->head % tid->size; head 61 drivers/net/wireless/mediatek/mt76/agg-rx.c start = tid->head % tid->size; head 64 drivers/net/wireless/mediatek/mt76/agg-rx.c for (idx = (tid->head + 1) % tid->size; head 146 drivers/net/wireless/mediatek/mt76/agg-rx.c u16 seqno, head, size; head 176 drivers/net/wireless/mediatek/mt76/agg-rx.c head = tid->head; head 179 drivers/net/wireless/mediatek/mt76/agg-rx.c sn_less = ieee80211_sn_less(seqno, head); head 194 drivers/net/wireless/mediatek/mt76/agg-rx.c if (seqno == head) { head 195 drivers/net/wireless/mediatek/mt76/agg-rx.c tid->head = ieee80211_sn_inc(head); head 207 
drivers/net/wireless/mediatek/mt76/agg-rx.c if (!ieee80211_sn_less(seqno, head + size)) { head 208 drivers/net/wireless/mediatek/mt76/agg-rx.c head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size)); head 209 drivers/net/wireless/mediatek/mt76/agg-rx.c mt76_rx_aggr_release_frames(tid, frames, head); head 244 drivers/net/wireless/mediatek/mt76/agg-rx.c tid->head = ssn; head 42 drivers/net/wireless/mediatek/mt76/debugfs.c i, q->q->queued, q->q->head, q->q->tail, head 57 drivers/net/wireless/mediatek/mt76/dma.c q->entry[q->head].txwi = DMA_DUMMY_DATA; head 58 drivers/net/wireless/mediatek/mt76/dma.c q->entry[q->head].skip_buf0 = true; head 75 drivers/net/wireless/mediatek/mt76/dma.c idx = q->head; head 76 drivers/net/wireless/mediatek/mt76/dma.c q->head = (q->head + 1) % q->ndesc; head 133 drivers/net/wireless/mediatek/mt76/dma.c q->head = readl(&q->regs->dma_idx); head 134 drivers/net/wireless/mediatek/mt76/dma.c q->tail = q->head; head 135 drivers/net/wireless/mediatek/mt76/dma.c writel(q->head, &q->regs->cpu_idx); head 253 drivers/net/wireless/mediatek/mt76/dma.c writel(q->head, &q->regs->cpu_idx); head 116 drivers/net/wireless/mediatek/mt76/mt76.h u16 head; head 242 drivers/net/wireless/mediatek/mt76/mt76.h u16 head; head 156 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c list_add(&seq->head, &dfs_pd->seq_pool); head 173 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c head); head 174 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c list_del(&seq->head); head 220 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) { head 221 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c list_del_init(&seq->head); head 505 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c INIT_LIST_HEAD(&seq_p->head); head 506 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c list_add(&seq_p->head, &dfs_pd->sequences); head 523 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) { head 525 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c list_del_init(&seq->head); head 553 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c list_for_each_entry(seq, &dfs_pd->sequences, head) { head 76 drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h struct list_head head; head 399 drivers/net/wireless/mediatek/mt76/usb.c urb = q->entry[q->head].urb; head 400 drivers/net/wireless/mediatek/mt76/usb.c q->head = (q->head + 1) % q->ndesc; head 575 drivers/net/wireless/mediatek/mt76/usb.c q->head = q->tail = 0; head 668 drivers/net/wireless/mediatek/mt76/usb.c if (!q->entry[q->head].done) head 671 drivers/net/wireless/mediatek/mt76/usb.c if (q->entry[q->head].schedule) { head 672 drivers/net/wireless/mediatek/mt76/usb.c q->entry[q->head].schedule = false; head 676 drivers/net/wireless/mediatek/mt76/usb.c entry = q->entry[q->head]; head 677 drivers/net/wireless/mediatek/mt76/usb.c q->entry[q->head].done = false; head 678 drivers/net/wireless/mediatek/mt76/usb.c q->head = (q->head + 1) % q->ndesc; head 902 drivers/net/wireless/mediatek/mt76/usb.c entry = q->entry[q->head]; head 903 drivers/net/wireless/mediatek/mt76/usb.c q->head = (q->head + 1) % q->ndesc; head 280 drivers/net/wireless/quantenna/qtnfmac/commands.c s->beacon.head, s->beacon.head_len); head 50 drivers/net/wireless/quantenna/qtnfmac/core.h struct list_head head; head 12 drivers/net/wireless/quantenna/qtnfmac/util.c INIT_LIST_HEAD(&list->head); head 24 drivers/net/wireless/quantenna/qtnfmac/util.c list_for_each_entry(node, &list->head, list) { head 40 
drivers/net/wireless/quantenna/qtnfmac/util.c list_for_each_entry(node, &list->head, list) { head 67 drivers/net/wireless/quantenna/qtnfmac/util.c list_add_tail(&node->list, &list->head); head 100 drivers/net/wireless/quantenna/qtnfmac/util.c list_for_each_entry_safe(node, tmp, &list->head, list) { head 105 drivers/net/wireless/quantenna/qtnfmac/util.c INIT_LIST_HEAD(&list->head); head 31 drivers/net/wireless/quantenna/qtnfmac/util.h return list_empty(&list->head); head 1018 drivers/net/wireless/ralink/rt2x00/rt2x00.h struct rcu_head head; head 296 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c kfree_rcu(bar_entry, head); head 90 drivers/net/wireless/realtek/rtw88/pci.c return tx_ring->r.head + offset; head 116 drivers/net/wireless/realtek/rtw88/pci.c u8 *head = tx_ring->r.head; head 123 drivers/net/wireless/realtek/rtw88/pci.c pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma); head 124 drivers/net/wireless/realtek/rtw88/pci.c tx_ring->r.head = NULL; head 152 drivers/net/wireless/realtek/rtw88/pci.c u8 *head = rx_ring->r.head; head 157 drivers/net/wireless/realtek/rtw88/pci.c pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma); head 185 drivers/net/wireless/realtek/rtw88/pci.c u8 *head; head 187 drivers/net/wireless/realtek/rtw88/pci.c head = pci_zalloc_consistent(pdev, ring_sz, &dma); head 188 drivers/net/wireless/realtek/rtw88/pci.c if (!head) { head 194 drivers/net/wireless/realtek/rtw88/pci.c tx_ring->r.head = head; head 221 drivers/net/wireless/realtek/rtw88/pci.c buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + head 240 drivers/net/wireless/realtek/rtw88/pci.c buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + head 254 drivers/net/wireless/realtek/rtw88/pci.c u8 *head; head 260 drivers/net/wireless/realtek/rtw88/pci.c head = pci_zalloc_consistent(pdev, ring_sz, &dma); head 261 drivers/net/wireless/realtek/rtw88/pci.c if (!head) { head 265 drivers/net/wireless/realtek/rtw88/pci.c rx_ring->r.head = head; head 303 drivers/net/wireless/realtek/rtw88/pci.c pci_free_consistent(pdev, ring_sz, head, dma); head 592 drivers/net/wireless/realtek/rtw88/pci.c buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + head 153 drivers/net/wireless/realtek/rtw88/pci.h u8 *head; head 233 drivers/net/wireless/realtek/rtw88/pci.h buf_desc = ring->r.head + ring->r.wp * size; head 1062 drivers/net/wireless/rsi/rsi_91x_sdio.c skb_queue_head_init(&sdev->rx_q.head); head 78 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c skb = skb_dequeue(&sdev->rx_q.head); head 95 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c skb_queue_purge(&sdev->rx_q.head); head 159 drivers/net/wireless/rsi/rsi_91x_sdio_ops.c skb_queue_tail(&dev->rx_q.head, skb); head 116 drivers/net/wireless/rsi/rsi_sdio.h struct sk_buff_head head; head 17 drivers/net/wireless/st/cw1200/queue.c struct list_head head; head 71 drivers/net/wireless/st/cw1200/queue.c list_for_each_entry_safe(item, tmp, gc_list, head) { head 72 drivers/net/wireless/st/cw1200/queue.c list_del(&item->head); head 86 drivers/net/wireless/st/cw1200/queue.c list_add_tail(&gc_item->head, gc_list); head 90 drivers/net/wireless/st/cw1200/queue.c struct list_head *head, head 97 drivers/net/wireless/st/cw1200/queue.c list_for_each_entry_safe(item, tmp, &queue->queue, head) { head 108 drivers/net/wireless/st/cw1200/queue.c cw1200_queue_register_post_gc(head, item); head 110 drivers/net/wireless/st/cw1200/queue.c list_move_tail(&item->head, &queue->free_pool); head 195 drivers/net/wireless/st/cw1200/queue.c list_add_tail(&queue->pool[i].head, 
&queue->free_pool); head 210 drivers/net/wireless/st/cw1200/queue.c list_for_each_entry_safe(item, tmp, &queue->pending, head) { head 214 drivers/net/wireless/st/cw1200/queue.c list_move_tail(&item->head, &queue->free_pool); head 291 drivers/net/wireless/st/cw1200/queue.c &queue->free_pool, struct cw1200_queue_item, head); head 294 drivers/net/wireless/st/cw1200/queue.c list_move_tail(&item->head, &queue->queue); head 341 drivers/net/wireless/st/cw1200/queue.c list_for_each_entry(item, &queue->queue, head) { head 353 drivers/net/wireless/st/cw1200/queue.c list_move_tail(&item->head, &queue->pending); head 406 drivers/net/wireless/st/cw1200/queue.c list_move(&item->head, &queue->queue); head 418 drivers/net/wireless/st/cw1200/queue.c list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) { head 432 drivers/net/wireless/st/cw1200/queue.c list_move(&item->head, &queue->queue); head 474 drivers/net/wireless/st/cw1200/queue.c list_move(&item->head, &queue->free_pool); head 544 drivers/net/wireless/st/cw1200/queue.c list_for_each_entry(item, &queue->pending, head) { head 567 drivers/net/wireless/virt_wifi.c struct list_head *head) head 581 drivers/net/wireless/virt_wifi.c unregister_netdevice_queue(dev, head); head 185 drivers/net/xen-netfront.c static void add_id_to_freelist(unsigned *head, union skb_entry *list, head 188 drivers/net/xen-netfront.c skb_entry_set_link(&list[id], *head); head 189 drivers/net/xen-netfront.c *head = id; head 192 drivers/net/xen-netfront.c static unsigned short get_id_from_freelist(unsigned *head, head 195 drivers/net/xen-netfront.c unsigned int id = *head; head 196 drivers/net/xen-netfront.c *head = list[id].link; head 815 drivers/nfc/pn544/pn544.c u8 head; head 2420 drivers/ntb/ntb_transport.c unsigned int head = qp->tx_index; head 2423 drivers/ntb/ntb_transport.c return tail > head ? 
tail - head : qp->tx_max_entry + tail - head; head 436 drivers/nvme/host/core.c struct nvme_ns_head *head = head 439 drivers/nvme/host/core.c nvme_mpath_remove_disk(head); head 440 drivers/nvme/host/core.c ida_simple_remove(&head->subsys->ns_ida, head->instance); head 441 drivers/nvme/host/core.c list_del_init(&head->entry); head 442 drivers/nvme/host/core.c cleanup_srcu_struct(&head->srcu); head 443 drivers/nvme/host/core.c nvme_put_subsystem(head->subsys); head 444 drivers/nvme/host/core.c kfree(head); head 447 drivers/nvme/host/core.c static void nvme_put_ns_head(struct nvme_ns_head *head) head 449 drivers/nvme/host/core.c kref_put(&head->ref, nvme_free_ns_head); head 460 drivers/nvme/host/core.c nvme_put_ns_head(ns->head); head 605 drivers/nvme/host/core.c cmnd->common.nsid = cpu_to_le32(ns->head->ns_id); head 656 drivers/nvme/host/core.c cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); head 675 drivers/nvme/host/core.c cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); head 700 drivers/nvme/host/core.c cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); head 1305 drivers/nvme/host/core.c c.rw.nsid = cpu_to_le32(ns->head->ns_id); head 1500 drivers/nvme/host/core.c struct nvme_ns_head **head, int *srcu_idx) head 1506 drivers/nvme/host/core.c *head = disk->private_data; head 1507 drivers/nvme/host/core.c *srcu_idx = srcu_read_lock(&(*head)->srcu); head 1508 drivers/nvme/host/core.c ns = nvme_find_path(*head); head 1510 drivers/nvme/host/core.c srcu_read_unlock(&(*head)->srcu, *srcu_idx); head 1514 drivers/nvme/host/core.c *head = NULL; head 1519 drivers/nvme/host/core.c static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) head 1521 drivers/nvme/host/core.c if (head) head 1522 drivers/nvme/host/core.c srcu_read_unlock(&head->srcu, idx); head 1536 drivers/nvme/host/core.c struct nvme_ns_head *head, head 1543 drivers/nvme/host/core.c nvme_put_ns_from_disk(head, srcu_idx); head 1563 drivers/nvme/host/core.c struct nvme_ns_head *head = NULL; head 1568 drivers/nvme/host/core.c ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); head 1578 drivers/nvme/host/core.c return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx); head 1583 drivers/nvme/host/core.c ret = ns->head->ns_id; head 1601 drivers/nvme/host/core.c nvme_put_ns_from_disk(head, srcu_idx); head 1611 drivers/nvme/host/core.c if (WARN_ON_ONCE(ns->head->disk)) head 1857 drivers/nvme/host/core.c if (ns->head->disk) { head 1858 drivers/nvme/host/core.c nvme_update_disk_info(ns->head->disk, ns, id); head 1859 drivers/nvme/host/core.c blk_queue_stack_limits(ns->head->disk->queue, ns->queue); head 1860 drivers/nvme/host/core.c revalidate_disk(ns->head->disk); head 1878 drivers/nvme/host/core.c ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id); head 1888 drivers/nvme/host/core.c ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids); head 1892 drivers/nvme/host/core.c if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) { head 1894 drivers/nvme/host/core.c "identifiers changed for nsid %d\n", ns->head->ns_id); head 1935 drivers/nvme/host/core.c struct nvme_ns_head *head = NULL; head 1941 drivers/nvme/host/core.c ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); head 1950 drivers/nvme/host/core.c c.common.nsid = cpu_to_le32(ns->head->ns_id); head 1954 drivers/nvme/host/core.c nvme_put_ns_from_disk(head, srcu_idx); head 2048 drivers/nvme/host/core.c struct nvme_ns_head *head = bdev->bd_disk->private_data; head 2050 drivers/nvme/host/core.c if (!kref_get_unless_zero(&head->ref)) head 3029 
drivers/nvme/host/core.c return nvme_get_ns_from_dev(dev)->head; head 3037 drivers/nvme/host/core.c struct nvme_ns_head *head = dev_to_ns_head(dev); head 3038 drivers/nvme/host/core.c struct nvme_ns_ids *ids = &head->ids; head 3039 drivers/nvme/host/core.c struct nvme_subsystem *subsys = head->subsys; head 3061 drivers/nvme/host/core.c head->ns_id); head 3329 drivers/nvme/host/core.c struct nvme_ns_head *head; head 3330 drivers/nvme/host/core.c size_t size = sizeof(*head); head 3337 drivers/nvme/host/core.c head = kzalloc(size, GFP_KERNEL); head 3338 drivers/nvme/host/core.c if (!head) head 3343 drivers/nvme/host/core.c head->instance = ret; head 3344 drivers/nvme/host/core.c INIT_LIST_HEAD(&head->list); head 3345 drivers/nvme/host/core.c ret = init_srcu_struct(&head->srcu); head 3348 drivers/nvme/host/core.c head->subsys = ctrl->subsys; head 3349 drivers/nvme/host/core.c head->ns_id = nsid; head 3350 drivers/nvme/host/core.c kref_init(&head->ref); head 3352 drivers/nvme/host/core.c ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids); head 3356 drivers/nvme/host/core.c ret = __nvme_check_ids(ctrl->subsys, head); head 3363 drivers/nvme/host/core.c ret = nvme_mpath_alloc_disk(ctrl, head); head 3367 drivers/nvme/host/core.c list_add_tail(&head->entry, &ctrl->subsys->nsheads); head 3371 drivers/nvme/host/core.c return head; head 3373 drivers/nvme/host/core.c cleanup_srcu_struct(&head->srcu); head 3375 drivers/nvme/host/core.c ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); head 3377 drivers/nvme/host/core.c kfree(head); head 3389 drivers/nvme/host/core.c struct nvme_ns_head *head = NULL; head 3394 drivers/nvme/host/core.c head = __nvme_find_ns_head(ctrl->subsys, nsid); head 3395 drivers/nvme/host/core.c if (!head) { head 3396 drivers/nvme/host/core.c head = nvme_alloc_ns_head(ctrl, nsid, id); head 3397 drivers/nvme/host/core.c if (IS_ERR(head)) { head 3398 drivers/nvme/host/core.c ret = PTR_ERR(head); head 3408 drivers/nvme/host/core.c if (!nvme_ns_ids_equal(&head->ids, &ids)) { head 3417 drivers/nvme/host/core.c list_add_tail(&ns->siblings, &head->list); head 3418 drivers/nvme/host/core.c ns->head = head; head 3432 drivers/nvme/host/core.c return nsa->head->ns_id - nsb->head->ns_id; head 3441 drivers/nvme/host/core.c if (ns->head->ns_id == nsid) { head 3447 drivers/nvme/host/core.c if (ns->head->ns_id > nsid) head 3462 drivers/nvme/host/core.c ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id); head 3574 drivers/nvme/host/core.c nvme_put_ns_head(ns->head); head 3598 drivers/nvme/host/core.c synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */ head 3636 drivers/nvme/host/core.c if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) head 439 drivers/nvme/host/lightnvm.c c.identity.nsid = cpu_to_le32(ns->head->ns_id); head 489 drivers/nvme/host/lightnvm.c c.get_bb.nsid = cpu_to_le32(ns->head->ns_id); head 539 drivers/nvme/host/lightnvm.c c.set_bb.nsid = cpu_to_le32(ns->head->ns_id); head 595 drivers/nvme/host/lightnvm.c ret = nvme_get_log(ctrl, ns->head->ns_id, head 628 drivers/nvme/host/lightnvm.c c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id); head 868 drivers/nvme/host/lightnvm.c c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id); head 905 drivers/nvme/host/lightnvm.c c.common.nsid = cpu_to_le32(ns->head->ns_id); head 56 drivers/nvme/host/multipath.c sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); head 57 drivers/nvme/host/multipath.c } else if (ns->head->disk) { head 59 drivers/nvme/host/multipath.c ctrl->instance, 
ns->head->instance); head 63 drivers/nvme/host/multipath.c ns->head->instance); head 73 drivers/nvme/host/multipath.c spin_lock_irqsave(&ns->head->requeue_lock, flags); head 74 drivers/nvme/host/multipath.c blk_steal_bios(&ns->head->requeue_list, req); head 75 drivers/nvme/host/multipath.c spin_unlock_irqrestore(&ns->head->requeue_lock, flags); head 114 drivers/nvme/host/multipath.c kblockd_schedule_work(&ns->head->requeue_work); head 123 drivers/nvme/host/multipath.c if (ns->head->disk) head 124 drivers/nvme/host/multipath.c kblockd_schedule_work(&ns->head->requeue_work); head 140 drivers/nvme/host/multipath.c struct nvme_ns_head *head = ns->head; head 144 drivers/nvme/host/multipath.c if (!head) head 148 drivers/nvme/host/multipath.c if (ns == rcu_access_pointer(head->current_path[node])) { head 149 drivers/nvme/host/multipath.c rcu_assign_pointer(head->current_path[node], NULL); head 165 drivers/nvme/host/multipath.c kblockd_schedule_work(&ns->head->requeue_work); head 177 drivers/nvme/host/multipath.c static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node) head 182 drivers/nvme/host/multipath.c list_for_each_entry_rcu(ns, &head->list, siblings) { head 186 drivers/nvme/host/multipath.c if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA) head 212 drivers/nvme/host/multipath.c rcu_assign_pointer(head->current_path[node], found); head 216 drivers/nvme/host/multipath.c static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, head 219 drivers/nvme/host/multipath.c ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, head 223 drivers/nvme/host/multipath.c return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); head 226 drivers/nvme/host/multipath.c static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, head 231 drivers/nvme/host/multipath.c if (list_is_singular(&head->list)) { head 237 drivers/nvme/host/multipath.c for (ns = nvme_next_ns(head, old); head 239 drivers/nvme/host/multipath.c ns = nvme_next_ns(head, ns)) { head 255 drivers/nvme/host/multipath.c rcu_assign_pointer(head->current_path[node], found); head 265 drivers/nvme/host/multipath.c inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) head 270 drivers/nvme/host/multipath.c ns = srcu_dereference(head->current_path[node], &head->srcu); head 271 drivers/nvme/host/multipath.c if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns) head 272 drivers/nvme/host/multipath.c ns = nvme_round_robin_path(head, node, ns); head 274 drivers/nvme/host/multipath.c ns = __nvme_find_path(head, node); head 278 drivers/nvme/host/multipath.c static bool nvme_available_path(struct nvme_ns_head *head) head 282 drivers/nvme/host/multipath.c list_for_each_entry_rcu(ns, &head->list, siblings) { head 299 drivers/nvme/host/multipath.c struct nvme_ns_head *head = q->queuedata; head 300 drivers/nvme/host/multipath.c struct device *dev = disk_to_dev(head->disk); head 313 drivers/nvme/host/multipath.c srcu_idx = srcu_read_lock(&head->srcu); head 314 drivers/nvme/host/multipath.c ns = nvme_find_path(head); head 319 drivers/nvme/host/multipath.c disk_devt(ns->head->disk), head 322 drivers/nvme/host/multipath.c } else if (nvme_available_path(head)) { head 325 drivers/nvme/host/multipath.c spin_lock_irq(&head->requeue_lock); head 326 drivers/nvme/host/multipath.c bio_list_add(&head->requeue_list, bio); head 327 drivers/nvme/host/multipath.c spin_unlock_irq(&head->requeue_lock); head 335 drivers/nvme/host/multipath.c srcu_read_unlock(&head->srcu, srcu_idx); head 
341 drivers/nvme/host/multipath.c struct nvme_ns_head *head = head 345 drivers/nvme/host/multipath.c spin_lock_irq(&head->requeue_lock); head 346 drivers/nvme/host/multipath.c next = bio_list_get(&head->requeue_list); head 347 drivers/nvme/host/multipath.c spin_unlock_irq(&head->requeue_lock); head 357 drivers/nvme/host/multipath.c bio->bi_disk = head->disk; head 362 drivers/nvme/host/multipath.c int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) head 367 drivers/nvme/host/multipath.c mutex_init(&head->lock); head 368 drivers/nvme/host/multipath.c bio_list_init(&head->requeue_list); head 369 drivers/nvme/host/multipath.c spin_lock_init(&head->requeue_lock); head 370 drivers/nvme/host/multipath.c INIT_WORK(&head->requeue_work, nvme_requeue_work); head 383 drivers/nvme/host/multipath.c q->queuedata = head; head 395 drivers/nvme/host/multipath.c head->disk = alloc_disk(0); head 396 drivers/nvme/host/multipath.c if (!head->disk) head 398 drivers/nvme/host/multipath.c head->disk->fops = &nvme_ns_head_ops; head 399 drivers/nvme/host/multipath.c head->disk->private_data = head; head 400 drivers/nvme/host/multipath.c head->disk->queue = q; head 401 drivers/nvme/host/multipath.c head->disk->flags = GENHD_FL_EXT_DEVT; head 402 drivers/nvme/host/multipath.c sprintf(head->disk->disk_name, "nvme%dn%d", head 403 drivers/nvme/host/multipath.c ctrl->subsys->instance, head->instance); head 414 drivers/nvme/host/multipath.c struct nvme_ns_head *head = ns->head; head 416 drivers/nvme/host/multipath.c lockdep_assert_held(&ns->head->lock); head 418 drivers/nvme/host/multipath.c if (!head->disk) head 421 drivers/nvme/host/multipath.c if (!(head->disk->flags & GENHD_FL_UP)) head 422 drivers/nvme/host/multipath.c device_add_disk(&head->subsys->dev, head->disk, head 428 drivers/nvme/host/multipath.c srcu_idx = srcu_read_lock(&head->srcu); head 430 drivers/nvme/host/multipath.c __nvme_find_path(head, node); head 431 drivers/nvme/host/multipath.c srcu_read_unlock(&head->srcu, srcu_idx); head 434 drivers/nvme/host/multipath.c synchronize_srcu(&ns->head->srcu); head 435 drivers/nvme/host/multipath.c kblockd_schedule_work(&ns->head->requeue_work); head 486 drivers/nvme/host/multipath.c mutex_lock(&ns->head->lock); head 493 drivers/nvme/host/multipath.c mutex_unlock(&ns->head->lock); head 517 drivers/nvme/host/multipath.c if (ns->head->ns_id < nsid) head 519 drivers/nvme/host/multipath.c if (ns->head->ns_id == nsid) head 664 drivers/nvme/host/multipath.c mutex_lock(&ns->head->lock); head 667 drivers/nvme/host/multipath.c mutex_unlock(&ns->head->lock); head 671 drivers/nvme/host/multipath.c void nvme_mpath_remove_disk(struct nvme_ns_head *head) head 673 drivers/nvme/host/multipath.c if (!head->disk) head 675 drivers/nvme/host/multipath.c if (head->disk->flags & GENHD_FL_UP) head 676 drivers/nvme/host/multipath.c del_gendisk(head->disk); head 677 drivers/nvme/host/multipath.c blk_set_queue_dying(head->disk->queue); head 679 drivers/nvme/host/multipath.c kblockd_schedule_work(&head->requeue_work); head 680 drivers/nvme/host/multipath.c flush_work(&head->requeue_work); head 681 drivers/nvme/host/multipath.c blk_cleanup_queue(head->disk->queue); head 682 drivers/nvme/host/multipath.c put_disk(head->disk); head 365 drivers/nvme/host/nvme.h struct nvme_ns_head *head; head 526 drivers/nvme/host/nvme.h int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); head 528 drivers/nvme/host/nvme.h void nvme_mpath_remove_disk(struct nvme_ns_head *head); head 534 drivers/nvme/host/nvme.h 
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); head 538 drivers/nvme/host/nvme.h struct nvme_ns_head *head = ns->head; head 540 drivers/nvme/host/nvme.h if (head->disk && list_empty(&head->list)) head 541 drivers/nvme/host/nvme.h kblockd_schedule_work(&head->requeue_work); head 550 drivers/nvme/host/nvme.h trace_block_bio_complete(ns->head->disk->queue, head 570 drivers/nvme/host/nvme.h sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); head 580 drivers/nvme/host/nvme.h struct nvme_ns_head *head) head 588 drivers/nvme/host/nvme.h static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) head 933 drivers/nvme/host/pci.c u16 head = nvmeq->cq_head; head 935 drivers/nvme/host/pci.c if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, head 937 drivers/nvme/host/pci.c writel(head, nvmeq->q_db + nvmeq->dev->db_stride); head 227 drivers/nvmem/rave-sp-eeprom.c unsigned int head; head 232 drivers/nvmem/rave-sp-eeprom.c head = offset % RAVE_SP_EEPROM_PAGE_SIZE; head 242 drivers/nvmem/rave-sp-eeprom.c if (unlikely(head)) { head 243 drivers/nvmem/rave-sp-eeprom.c chunk = RAVE_SP_EEPROM_PAGE_SIZE - head; head 250 drivers/nvmem/rave-sp-eeprom.c head = 0; head 114 drivers/of/pdt.c struct property *head, *tail; head 116 drivers/of/pdt.c head = tail = of_pdt_build_one_prop(node, NULL, head 127 drivers/of/pdt.c return head; head 988 drivers/opp/core.c BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); head 1121 drivers/opp/core.c blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp); head 1287 drivers/opp/core.c struct list_head **head) head 1301 drivers/opp/core.c *head = &opp->node; head 1335 drivers/opp/core.c struct list_head *head; head 1339 drivers/opp/core.c head = &opp_table->opp_list; head 1342 drivers/opp/core.c ret = _opp_is_duplicate(dev, new_opp, opp_table, &head); head 1349 drivers/opp/core.c list_add(&new_opp->node, head); head 1420 drivers/opp/core.c blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); head 2089 drivers/opp/core.c blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, head 2092 drivers/opp/core.c blocking_notifier_call_chain(&opp_table->head, head 2160 drivers/opp/core.c ret = blocking_notifier_chain_register(&opp_table->head, nb); head 2185 drivers/opp/core.c ret = blocking_notifier_chain_unregister(&opp_table->head, nb); head 642 drivers/opp/of.c blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); head 166 drivers/opp/opp.h struct blocking_notifier_head head; head 294 drivers/pci/controller/pcie-iproc-msi.c static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head) head 299 drivers/pci/controller/pcie-iproc-msi.c offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32); head 317 drivers/pci/controller/pcie-iproc-msi.c u32 eq, head, tail, nr_events; head 337 drivers/pci/controller/pcie-iproc-msi.c head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD, head 347 drivers/pci/controller/pcie-iproc-msi.c nr_events = (tail < head) ? 
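[Aside: the pcie-iproc-msi.c entries quoted just above and below (lines 337-366) show an MSI event queue whose pending-event count is derived from head and tail indices over a fixed-length EQ_LEN ring. A minimal standalone sketch of that arithmetic follows; the EQ_LEN value, the eq_pending() name, and the demo main() are assumptions for illustration, not the driver's code.

#include <stdio.h>

#define EQ_LEN 64  /* ring length; the value here is an assumption */

/* Pending events between a consumer head and a producer tail. */
static unsigned int eq_pending(unsigned int head, unsigned int tail)
{
        /* tail < head means the producer has wrapped past the end. */
        return (tail < head) ? (EQ_LEN - (head - tail)) : (tail - head);
}

int main(void)
{
        printf("%u\n", eq_pending(60, 4));   /* wrapped case: 8 pending */
        printf("%u\n", eq_pending(4, 10));   /* linear case: 6 pending */
        return 0;
}

Per the quoted lines 358-359, the driver's consumer then advances with head++; head %= EQ_LEN; for each event and writes the new head back to the IPROC_MSI_EQ_HEAD register. End of aside.]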
head 348 drivers/pci/controller/pcie-iproc-msi.c (EQ_LEN - (head - tail)) : (tail - head); head 354 drivers/pci/controller/pcie-iproc-msi.c hwirq = decode_msi_hwirq(msi, eq, head); head 358 drivers/pci/controller/pcie-iproc-msi.c head++; head 359 drivers/pci/controller/pcie-iproc-msi.c head %= EQ_LEN; head 366 drivers/pci/controller/pcie-iproc-msi.c iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head); head 1191 drivers/pci/controller/pcie-iproc.c struct list_head *head = resources; head 1199 drivers/pci/controller/pcie-iproc.c head = &tmp->node; head 1210 drivers/pci/controller/pcie-iproc.c resource_list_add(entry, head); head 413 drivers/pci/hotplug/cpqphp.h int cpqhp_resource_sort_and_combine(struct pci_resource **head); head 461 drivers/pci/hotplug/cpqphp.h static inline void return_resource(struct pci_resource **head, head 464 drivers/pci/hotplug/cpqphp.h if (!node || !head) head 466 drivers/pci/hotplug/cpqphp.h node->next = *head; head 467 drivers/pci/hotplug/cpqphp.h *head = node; head 298 drivers/pci/hotplug/cpqphp_ctrl.c static int sort_by_size(struct pci_resource **head) head 304 drivers/pci/hotplug/cpqphp_ctrl.c if (!(*head)) head 307 drivers/pci/hotplug/cpqphp_ctrl.c if (!((*head)->next)) head 314 drivers/pci/hotplug/cpqphp_ctrl.c if (((*head)->next) && head 315 drivers/pci/hotplug/cpqphp_ctrl.c ((*head)->length > (*head)->next->length)) { head 317 drivers/pci/hotplug/cpqphp_ctrl.c current_res = *head; head 318 drivers/pci/hotplug/cpqphp_ctrl.c *head = (*head)->next; head 319 drivers/pci/hotplug/cpqphp_ctrl.c current_res->next = (*head)->next; head 320 drivers/pci/hotplug/cpqphp_ctrl.c (*head)->next = current_res; head 323 drivers/pci/hotplug/cpqphp_ctrl.c current_res = *head; head 346 drivers/pci/hotplug/cpqphp_ctrl.c static int sort_by_max_size(struct pci_resource **head) head 352 drivers/pci/hotplug/cpqphp_ctrl.c if (!(*head)) head 355 drivers/pci/hotplug/cpqphp_ctrl.c if (!((*head)->next)) head 362 drivers/pci/hotplug/cpqphp_ctrl.c if (((*head)->next) && head 363 drivers/pci/hotplug/cpqphp_ctrl.c ((*head)->length < (*head)->next->length)) { head 365 drivers/pci/hotplug/cpqphp_ctrl.c current_res = *head; head 366 drivers/pci/hotplug/cpqphp_ctrl.c *head = (*head)->next; head 367 drivers/pci/hotplug/cpqphp_ctrl.c current_res->next = (*head)->next; head 368 drivers/pci/hotplug/cpqphp_ctrl.c (*head)->next = current_res; head 371 drivers/pci/hotplug/cpqphp_ctrl.c current_res = *head; head 396 drivers/pci/hotplug/cpqphp_ctrl.c static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **head, head 406 drivers/pci/hotplug/cpqphp_ctrl.c if (!(*head) || !(*orig_head)) head 409 drivers/pci/hotplug/cpqphp_ctrl.c rc = cpqhp_resource_sort_and_combine(head); head 414 drivers/pci/hotplug/cpqphp_ctrl.c if ((*head)->base != (*orig_head)->base) head 417 drivers/pci/hotplug/cpqphp_ctrl.c if ((*head)->length == (*orig_head)->length) head 425 drivers/pci/hotplug/cpqphp_ctrl.c node = *head; head 445 drivers/pci/hotplug/cpqphp_ctrl.c *head = split_node; head 453 drivers/pci/hotplug/cpqphp_ctrl.c if (*head == node) { head 454 drivers/pci/hotplug/cpqphp_ctrl.c *head = node->next; head 456 drivers/pci/hotplug/cpqphp_ctrl.c prevnode = *head; head 473 drivers/pci/hotplug/cpqphp_ctrl.c static struct pci_resource *do_bridge_resource_split(struct pci_resource **head, u32 alignment) head 480 drivers/pci/hotplug/cpqphp_ctrl.c rc = cpqhp_resource_sort_and_combine(head); head 485 drivers/pci/hotplug/cpqphp_ctrl.c node = *head; head 526 drivers/pci/hotplug/cpqphp_ctrl.c static struct 
pci_resource *get_io_resource(struct pci_resource **head, u32 size) head 533 drivers/pci/hotplug/cpqphp_ctrl.c if (!(*head)) head 536 drivers/pci/hotplug/cpqphp_ctrl.c if (cpqhp_resource_sort_and_combine(head)) head 539 drivers/pci/hotplug/cpqphp_ctrl.c if (sort_by_size(head)) head 542 drivers/pci/hotplug/cpqphp_ctrl.c for (node = *head; node; node = node->next) { head 597 drivers/pci/hotplug/cpqphp_ctrl.c if (*head == node) { head 598 drivers/pci/hotplug/cpqphp_ctrl.c *head = node->next; head 600 drivers/pci/hotplug/cpqphp_ctrl.c prevnode = *head; head 623 drivers/pci/hotplug/cpqphp_ctrl.c static struct pci_resource *get_max_resource(struct pci_resource **head, u32 size) head 630 drivers/pci/hotplug/cpqphp_ctrl.c if (cpqhp_resource_sort_and_combine(head)) head 633 drivers/pci/hotplug/cpqphp_ctrl.c if (sort_by_max_size(head)) head 636 drivers/pci/hotplug/cpqphp_ctrl.c for (max = *head; max; max = max->next) { head 690 drivers/pci/hotplug/cpqphp_ctrl.c temp = *head; head 692 drivers/pci/hotplug/cpqphp_ctrl.c *head = max->next; head 720 drivers/pci/hotplug/cpqphp_ctrl.c static struct pci_resource *get_resource(struct pci_resource **head, u32 size) head 727 drivers/pci/hotplug/cpqphp_ctrl.c if (cpqhp_resource_sort_and_combine(head)) head 730 drivers/pci/hotplug/cpqphp_ctrl.c if (sort_by_size(head)) head 733 drivers/pci/hotplug/cpqphp_ctrl.c for (node = *head; node; node = node->next) { head 787 drivers/pci/hotplug/cpqphp_ctrl.c if (*head == node) { head 788 drivers/pci/hotplug/cpqphp_ctrl.c *head = node->next; head 790 drivers/pci/hotplug/cpqphp_ctrl.c prevnode = *head; head 813 drivers/pci/hotplug/cpqphp_ctrl.c int cpqhp_resource_sort_and_combine(struct pci_resource **head) head 819 drivers/pci/hotplug/cpqphp_ctrl.c dbg("%s: head = %p, *head = %p\n", __func__, head, *head); head 821 drivers/pci/hotplug/cpqphp_ctrl.c if (!(*head)) head 824 drivers/pci/hotplug/cpqphp_ctrl.c dbg("*head->next = %p\n", (*head)->next); head 826 drivers/pci/hotplug/cpqphp_ctrl.c if (!(*head)->next) head 829 drivers/pci/hotplug/cpqphp_ctrl.c dbg("*head->base = 0x%x\n", (*head)->base); head 830 drivers/pci/hotplug/cpqphp_ctrl.c dbg("*head->next->base = 0x%x\n", (*head)->next->base); head 835 drivers/pci/hotplug/cpqphp_ctrl.c if (((*head)->next) && head 836 drivers/pci/hotplug/cpqphp_ctrl.c ((*head)->base > (*head)->next->base)) { head 837 drivers/pci/hotplug/cpqphp_ctrl.c node1 = *head; head 838 drivers/pci/hotplug/cpqphp_ctrl.c (*head) = (*head)->next; head 839 drivers/pci/hotplug/cpqphp_ctrl.c node1->next = (*head)->next; head 840 drivers/pci/hotplug/cpqphp_ctrl.c (*head)->next = node1; head 844 drivers/pci/hotplug/cpqphp_ctrl.c node1 = (*head); head 859 drivers/pci/hotplug/cpqphp_ctrl.c node1 = *head; head 41 drivers/pci/setup-bus.c static void free_list(struct list_head *head) head 45 drivers/pci/setup-bus.c list_for_each_entry_safe(dev_res, tmp, head, list) { head 58 drivers/pci/setup-bus.c static int add_to_list(struct list_head *head, struct pci_dev *dev, head 76 drivers/pci/setup-bus.c list_add(&tmp->list, head); head 81 drivers/pci/setup-bus.c static void remove_from_list(struct list_head *head, struct resource *res) head 85 drivers/pci/setup-bus.c list_for_each_entry_safe(dev_res, tmp, head, list) { head 94 drivers/pci/setup-bus.c static struct pci_dev_resource *res_to_dev_res(struct list_head *head, head 99 drivers/pci/setup-bus.c list_for_each_entry(dev_res, head, list) { head 107 drivers/pci/setup-bus.c static resource_size_t get_res_add_size(struct list_head *head, head 112 drivers/pci/setup-bus.c 
dev_res = res_to_dev_res(head, res); head 116 drivers/pci/setup-bus.c static resource_size_t get_res_add_align(struct list_head *head, head 121 drivers/pci/setup-bus.c dev_res = res_to_dev_res(head, res); head 127 drivers/pci/setup-bus.c static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) head 159 drivers/pci/setup-bus.c n = head; head 160 drivers/pci/setup-bus.c list_for_each_entry(dev_res, head, list) { head 176 drivers/pci/setup-bus.c static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head) head 192 drivers/pci/setup-bus.c pdev_sort_resources(dev, head); head 214 drivers/pci/setup-bus.c struct list_head *head) head 231 drivers/pci/setup-bus.c list_for_each_entry(dev_res, head, list) { head 273 drivers/pci/setup-bus.c static void assign_requested_resources_sorted(struct list_head *head, head 280 drivers/pci/setup-bus.c list_for_each_entry(dev_res, head, list) { head 343 drivers/pci/setup-bus.c static void __assign_resources_sorted(struct list_head *head, head 380 drivers/pci/setup-bus.c list_for_each_entry(dev_res, head, list) { head 388 drivers/pci/setup-bus.c list_for_each_entry_safe(dev_res, tmp_res, head, list) { head 416 drivers/pci/setup-bus.c list_for_each_entry(dev_res2, head, list) { head 430 drivers/pci/setup-bus.c assign_requested_resources_sorted(head, &local_fail_head); head 435 drivers/pci/setup-bus.c list_for_each_entry(dev_res, head, list) head 438 drivers/pci/setup-bus.c free_list(head); head 445 drivers/pci/setup-bus.c list_for_each_entry_safe(dev_res, tmp_res, head, list) head 457 drivers/pci/setup-bus.c list_for_each_entry(dev_res, head, list) head 472 drivers/pci/setup-bus.c assign_requested_resources_sorted(head, fail_head); head 476 drivers/pci/setup-bus.c reassign_resources_sorted(realloc_head, head); head 477 drivers/pci/setup-bus.c free_list(head); head 484 drivers/pci/setup-bus.c LIST_HEAD(head); head 486 drivers/pci/setup-bus.c __dev_sort_resources(dev, &head); head 487 drivers/pci/setup-bus.c __assign_resources_sorted(&head, add_head, fail_head); head 496 drivers/pci/setup-bus.c LIST_HEAD(head); head 499 drivers/pci/setup-bus.c __dev_sort_resources(dev, &head); head 501 drivers/pci/setup-bus.c __assign_resources_sorted(&head, realloc_head, fail_head); head 350 drivers/perf/arm_spe_pmu.c u64 head = PERF_IDX2OFF(handle->head, buf); head 352 drivers/perf/arm_spe_pmu.c memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len); head 361 drivers/perf/arm_spe_pmu.c u64 head = PERF_IDX2OFF(handle->head, buf); head 370 drivers/perf/arm_spe_pmu.c if (head < limit >> 1) head 377 drivers/perf/arm_spe_pmu.c if (limit - head < spe_pmu->max_record_sz) { head 378 drivers/perf/arm_spe_pmu.c arm_spe_pmu_pad_buf(handle, limit - head); head 379 drivers/perf/arm_spe_pmu.c handle->head = PERF_IDX2OFF(limit, buf); head 380 drivers/perf/arm_spe_pmu.c limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head; head 392 drivers/perf/arm_spe_pmu.c u64 head, tail, wakeup; head 408 drivers/perf/arm_spe_pmu.c head = PERF_IDX2OFF(handle->head, buf); head 409 drivers/perf/arm_spe_pmu.c if (!IS_ALIGNED(head, spe_pmu->align)) { head 410 drivers/perf/arm_spe_pmu.c unsigned long delta = roundup(head, spe_pmu->align) - head; head 414 drivers/perf/arm_spe_pmu.c head = PERF_IDX2OFF(handle->head, buf); head 422 drivers/perf/arm_spe_pmu.c tail = PERF_IDX2OFF(handle->head + handle->size, buf); head 431 drivers/perf/arm_spe_pmu.c if (head < tail) head 443 drivers/perf/arm_spe_pmu.c if (handle->wakeup < (handle->head + handle->size) && head <= wakeup) head 446 
drivers/perf/arm_spe_pmu.c if (limit > head) head 461 drivers/perf/arm_spe_pmu.c u64 head = PERF_IDX2OFF(handle->head, buf); head 467 drivers/perf/arm_spe_pmu.c if (limit && (limit - head < spe_pmu->max_record_sz)) { head 468 drivers/perf/arm_spe_pmu.c arm_spe_pmu_pad_buf(handle, limit - head); head 499 drivers/perf/arm_spe_pmu.c base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf); head 512 drivers/perf/arm_spe_pmu.c size = offset - PERF_IDX2OFF(handle->head, buf); head 515 drivers/perf/arm_spe_pmu.c handle->head = offset; head 83 drivers/platform/chrome/cros_ec_debugfs.c buf_space = CIRC_SPACE(cb->head, cb->tail, LOG_SIZE); head 105 drivers/platform/chrome/cros_ec_debugfs.c cb->buf[cb->head] = ec_buffer[idx]; head 106 drivers/platform/chrome/cros_ec_debugfs.c cb->head = CIRC_ADD(cb->head, LOG_SIZE, 1); head 137 drivers/platform/chrome/cros_ec_debugfs.c while (!CIRC_CNT(cb->head, cb->tail, LOG_SIZE)) { head 146 drivers/platform/chrome/cros_ec_debugfs.c CIRC_CNT(cb->head, cb->tail, LOG_SIZE)); head 156 drivers/platform/chrome/cros_ec_debugfs.c ret = min_t(size_t, CIRC_CNT_TO_END(cb->head, cb->tail, LOG_SIZE), head 180 drivers/platform/chrome/cros_ec_debugfs.c if (CIRC_CNT(debug_info->log_buffer.head, head 358 drivers/platform/chrome/cros_ec_debugfs.c debug_info->log_buffer.head = 0; head 97 drivers/platform/chrome/wilco_ec/event.c int head; head 122 drivers/platform/chrome/wilco_ec/event.c return q->head == q->tail && !q->entries[q->head]; head 128 drivers/platform/chrome/wilco_ec/event.c return q->head == q->tail && q->entries[q->head]; head 156 drivers/platform/chrome/wilco_ec/event.c q->entries[q->head] = ev; head 157 drivers/platform/chrome/wilco_ec/event.c q->head = (q->head + 1) % q->capacity; head 292 drivers/platform/mellanox/mlxbf-tmfifo.c unsigned int idx, head; head 298 drivers/platform/mellanox/mlxbf-tmfifo.c head = virtio16_to_cpu(vdev, vr->avail->ring[idx]); head 299 drivers/platform/mellanox/mlxbf-tmfifo.c if (WARN_ON(head >= vr->num)) head 304 drivers/platform/mellanox/mlxbf-tmfifo.c return &vr->desc[head]; head 424 drivers/platform/mellanox/mlxbf-tmfifo.c seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail, head 427 drivers/platform/mellanox/mlxbf-tmfifo.c memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len); head 429 drivers/platform/mellanox/mlxbf-tmfifo.c memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg); head 433 drivers/platform/mellanox/mlxbf-tmfifo.c cons->tx_buf.head = (cons->tx_buf.head + len) % head 454 drivers/platform/mellanox/mlxbf-tmfifo.c avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail, head 513 drivers/platform/mellanox/mlxbf-tmfifo.c size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail, head 533 drivers/platform/mellanox/mlxbf-tmfifo.c seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail, head 351 drivers/platform/x86/dell-rbtn.c first = !rbtn_chain_head.head; head 373 drivers/platform/x86/dell-rbtn.c if (auto_remove_rfkill && !rbtn_chain_head.head) head 416 drivers/platform/x86/dell-rbtn.c if (auto_remove_rfkill && rbtn_chain_head.head) head 32 drivers/power/reset/reboot-mode.c list_for_each_entry(info, &reboot->head, list) { head 70 drivers/power/reset/reboot-mode.c INIT_LIST_HEAD(&reboot->head); head 101 drivers/power/reset/reboot-mode.c list_add_tail(&info->list, &reboot->head); head 110 drivers/power/reset/reboot-mode.c list_for_each_entry(info, &reboot->head, list) head 127 drivers/power/reset/reboot-mode.c list_for_each_entry(info, &reboot->head, list) head 71 drivers/ps3/ps3-vuart.c struct list_head head; head 77 
drivers/ps3/ps3-vuart.c struct list_head head; head 458 drivers/ps3/ps3-vuart.c const unsigned char *head; head 488 drivers/ps3/ps3-vuart.c if (list_empty(&priv->tx_list.head)) { head 519 drivers/ps3/ps3-vuart.c lb->head = lb->data; head 524 drivers/ps3/ps3-vuart.c list_add_tail(&lb->link, &priv->tx_list.head); head 574 drivers/ps3/ps3-vuart.c lb->head = lb->data; head 578 drivers/ps3/ps3-vuart.c list_add_tail(&lb->link, &priv->rx_list.head); head 627 drivers/ps3/ps3-vuart.c list_for_each_entry_safe(lb, n, &priv->rx_list.head, link) { head 628 drivers/ps3/ps3-vuart.c bytes_read = min((unsigned int)(lb->tail - lb->head), bytes); head 630 drivers/ps3/ps3-vuart.c memcpy(buf, lb->head, bytes_read); head 635 drivers/ps3/ps3-vuart.c if (bytes_read < lb->tail - lb->head) { head 636 drivers/ps3/ps3-vuart.c lb->head += bytes_read; head 730 drivers/ps3/ps3-vuart.c list_for_each_entry_safe(lb, n, &priv->tx_list.head, link) { head 734 drivers/ps3/ps3-vuart.c result = ps3_vuart_raw_write(dev, lb->head, lb->tail - lb->head, head 746 drivers/ps3/ps3-vuart.c if (bytes_written < lb->tail - lb->head) { head 747 drivers/ps3/ps3-vuart.c lb->head += bytes_written; head 1029 drivers/ps3/ps3-vuart.c INIT_LIST_HEAD(&priv->tx_list.head); head 1032 drivers/ps3/ps3-vuart.c INIT_LIST_HEAD(&priv->rx_list.head); head 42 drivers/ps3/ps3av.c u32 head[PS3AV_HEAD_MAX]; head 499 drivers/ps3/ps3av.c ps3av->head[i], video_mode->vid, head 809 drivers/ps3/ps3av.c ps3av->head[i] = PS3AV_CMD_VIDEO_HEAD_A + i; head 366 drivers/ps3/ps3av_cmd.c u32 ps3av_cmd_set_video_mode(void *p, u32 head, int video_vid, int video_fmt, head 385 drivers/ps3/ps3av_cmd.c video_mode->video_head = head; head 387 drivers/ps3/ps3av_cmd.c && head == PS3AV_CMD_VIDEO_HEAD_B) head 409 drivers/ps3/ps3av_cmd.c int ps3av_cmd_video_format_black(u32 head, u32 video_fmt, u32 mute) head 415 drivers/ps3/ps3av_cmd.c video_format.video_head = head; head 445 drivers/ptp/ptp_chardev.c event[i] = queue->buf[queue->head]; head 446 drivers/ptp/ptp_chardev.c queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; head 59 drivers/ptp/ptp_clock.c queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; head 24 drivers/ptp/ptp_private.h int head; head 60 drivers/ptp/ptp_private.h int cnt = q->tail - q->head; head 80 drivers/ptp/ptp_sysfs.c event = queue->buf[queue->head]; head 81 drivers/ptp/ptp_sysfs.c queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; head 167 drivers/rapidio/rio_cm.c int head; head 564 drivers/rapidio/rio_cm.c ch->rx_ring.buf[ch->rx_ring.head] = buf; head 565 drivers/rapidio/rio_cm.c ch->rx_ring.head++; head 567 drivers/rapidio/rio_cm.c ch->rx_ring.head %= RIOCM_RX_RING_SIZE; head 1320 drivers/rapidio/rio_cm.c ch->rx_ring.head = 0; head 51 drivers/rpmsg/qcom_glink_rpm.c void __iomem *head; head 59 drivers/rpmsg/qcom_glink_rpm.c unsigned int head; head 62 drivers/rpmsg/qcom_glink_rpm.c head = readl(pipe->head); head 65 drivers/rpmsg/qcom_glink_rpm.c if (head < tail) head 66 drivers/rpmsg/qcom_glink_rpm.c return pipe->native.length - tail + head; head 68 drivers/rpmsg/qcom_glink_rpm.c return head - tail; head 113 drivers/rpmsg/qcom_glink_rpm.c unsigned int head; head 116 drivers/rpmsg/qcom_glink_rpm.c head = readl(pipe->head); head 119 drivers/rpmsg/qcom_glink_rpm.c if (tail <= head) head 120 drivers/rpmsg/qcom_glink_rpm.c return pipe->native.length - head + tail; head 122 drivers/rpmsg/qcom_glink_rpm.c return tail - head; head 126 drivers/rpmsg/qcom_glink_rpm.c unsigned int head, head 131 drivers/rpmsg/qcom_glink_rpm.c len = min_t(size_t, count, 
pipe->native.length - head); head 133 drivers/rpmsg/qcom_glink_rpm.c __iowrite32_copy(pipe->fifo + head, data, head 142 drivers/rpmsg/qcom_glink_rpm.c head += count; head 143 drivers/rpmsg/qcom_glink_rpm.c if (head >= pipe->native.length) head 144 drivers/rpmsg/qcom_glink_rpm.c head -= pipe->native.length; head 146 drivers/rpmsg/qcom_glink_rpm.c return head; head 156 drivers/rpmsg/qcom_glink_rpm.c unsigned int head; head 172 drivers/rpmsg/qcom_glink_rpm.c head = readl(pipe->head); head 173 drivers/rpmsg/qcom_glink_rpm.c head = glink_rpm_tx_write_one(pipe, head, hdr, hlen); head 174 drivers/rpmsg/qcom_glink_rpm.c head = glink_rpm_tx_write_one(pipe, head, data, aligned_dlen); head 178 drivers/rpmsg/qcom_glink_rpm.c head = glink_rpm_tx_write_one(pipe, head, padding, pad); head 179 drivers/rpmsg/qcom_glink_rpm.c writel(head, pipe->head); head 231 drivers/rpmsg/qcom_glink_rpm.c rx->head = msg_ram + offset + sizeof(u32); head 238 drivers/rpmsg/qcom_glink_rpm.c tx->head = msg_ram + offset + sizeof(u32); head 297 drivers/rpmsg/qcom_glink_rpm.c writel(0, tx_pipe->head); head 40 drivers/rpmsg/qcom_glink_smem.c __le32 *head; head 54 drivers/rpmsg/qcom_glink_smem.c u32 head; head 70 drivers/rpmsg/qcom_glink_smem.c head = le32_to_cpu(*pipe->head); head 73 drivers/rpmsg/qcom_glink_smem.c if (head < tail) head 74 drivers/rpmsg/qcom_glink_smem.c return pipe->native.length - tail + head; head 76 drivers/rpmsg/qcom_glink_smem.c return head - tail; head 117 drivers/rpmsg/qcom_glink_smem.c u32 head; head 121 drivers/rpmsg/qcom_glink_smem.c head = le32_to_cpu(*pipe->head); head 124 drivers/rpmsg/qcom_glink_smem.c if (tail <= head) head 125 drivers/rpmsg/qcom_glink_smem.c avail = pipe->native.length - head + tail; head 127 drivers/rpmsg/qcom_glink_smem.c avail = tail - head; head 138 drivers/rpmsg/qcom_glink_smem.c unsigned int head, head 143 drivers/rpmsg/qcom_glink_smem.c len = min_t(size_t, count, pipe->native.length - head); head 145 drivers/rpmsg/qcom_glink_smem.c memcpy(pipe->fifo + head, data, len); head 150 drivers/rpmsg/qcom_glink_smem.c head += count; head 151 drivers/rpmsg/qcom_glink_smem.c if (head >= pipe->native.length) head 152 drivers/rpmsg/qcom_glink_smem.c head -= pipe->native.length; head 154 drivers/rpmsg/qcom_glink_smem.c return head; head 162 drivers/rpmsg/qcom_glink_smem.c unsigned int head; head 164 drivers/rpmsg/qcom_glink_smem.c head = le32_to_cpu(*pipe->head); head 166 drivers/rpmsg/qcom_glink_smem.c head = glink_smem_tx_write_one(pipe, head, hdr, hlen); head 167 drivers/rpmsg/qcom_glink_smem.c head = glink_smem_tx_write_one(pipe, head, data, dlen); head 170 drivers/rpmsg/qcom_glink_smem.c head = ALIGN(head, 8); head 171 drivers/rpmsg/qcom_glink_smem.c if (head >= pipe->native.length) head 172 drivers/rpmsg/qcom_glink_smem.c head -= pipe->native.length; head 177 drivers/rpmsg/qcom_glink_smem.c *pipe->head = cpu_to_le32(head); head 248 drivers/rpmsg/qcom_glink_smem.c tx_pipe->head = &descs[1]; head 250 drivers/rpmsg/qcom_glink_smem.c rx_pipe->head = &descs[3]; head 277 drivers/rpmsg/qcom_glink_smem.c *tx_pipe->head = 0; head 245 drivers/rpmsg/qcom_smd.c __le32 head; head 267 drivers/rpmsg/qcom_smd.c __le32 head; head 401 drivers/rpmsg/qcom_smd.c SET_TX_CHANNEL_INFO(channel, head, 0); head 429 drivers/rpmsg/qcom_smd.c unsigned head; head 432 drivers/rpmsg/qcom_smd.c head = GET_RX_CHANNEL_INFO(channel, head); head 435 drivers/rpmsg/qcom_smd.c return (head - tail) & (channel->fifo_size - 1); head 685 drivers/rpmsg/qcom_smd.c unsigned head; head 689 drivers/rpmsg/qcom_smd.c head = 
GET_TX_CHANNEL_INFO(channel, head); head 692 drivers/rpmsg/qcom_smd.c return mask - ((head - tail) & mask); head 703 drivers/rpmsg/qcom_smd.c unsigned head; head 707 drivers/rpmsg/qcom_smd.c head = GET_TX_CHANNEL_INFO(channel, head); head 709 drivers/rpmsg/qcom_smd.c len = min_t(size_t, count, channel->fifo_size - head); head 711 drivers/rpmsg/qcom_smd.c smd_copy_to_fifo(channel->tx_fifo + head, head 724 drivers/rpmsg/qcom_smd.c head += count; head 725 drivers/rpmsg/qcom_smd.c head &= (channel->fifo_size - 1); head 726 drivers/rpmsg/qcom_smd.c SET_TX_CHANNEL_INFO(channel, head, head); head 691 drivers/rtc/rtc-imxdi.c if (list_empty_careful(&imxdi->write_wait.head)) head 202 drivers/s390/block/dasd_eckd.c static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head) head 205 drivers/s390/block/dasd_eckd.c geo->head = cyl >> 16; head 206 drivers/s390/block/dasd_eckd.c geo->head <<= 4; head 207 drivers/s390/block/dasd_eckd.c geo->head |= head; head 220 drivers/s390/block/dasd_eckd.c u8 head; head 244 drivers/s390/block/dasd_eckd.c head = sense[31] & 0x0F; head 245 drivers/s390/block/dasd_eckd.c *track = cyl * private->rdc_data.trk_per_cyl + head; head 522 drivers/s390/block/dasd_eckd.c data->search_arg.head = data->seek_addr.head; head 699 drivers/s390/block/dasd_eckd.c data->search_arg.head = data->seek_addr.head; head 2295 drivers/s390/block/dasd_eckd.c private->count_area[i].head != count_area_head[i] || head 2310 drivers/s390/block/dasd_eckd.c private->count_area[i].head != count_area_head[i] || head 2757 drivers/s390/block/dasd_eckd.c ect->head = address.head; head 2772 drivers/s390/block/dasd_eckd.c ect->head = address.head; head 2786 drivers/s390/block/dasd_eckd.c ect->head = address.head; head 2795 drivers/s390/block/dasd_eckd.c address.cyl == 0 && address.head == 0) { head 2802 drivers/s390/block/dasd_eckd.c address.cyl == 0 && address.head == 1) { head 3234 drivers/s390/block/dasd_eckd.c int head; head 3237 drivers/s390/block/dasd_eckd.c head = fmt_buffer[start].head; head 3250 drivers/s390/block/dasd_eckd.c if ((fmt_buffer[i].head == head && head 3252 drivers/s390/block/dasd_eckd.c fmt_buffer[i].head != head || head 3322 drivers/s390/block/dasd_eckd.c geo.cyl == 0 && geo.head == 0) { head 3329 drivers/s390/block/dasd_eckd.c geo.cyl == 0 && geo.head == 1) { head 3346 drivers/s390/block/dasd_eckd.c fmt_buffer[pos].head != geo.head || head 4342 drivers/s390/block/dasd_eckd.c lredata->search_arg.head = lredata->seek_addr.head; head 136 drivers/s390/block/dasd_eckd.h __u16 head; head 144 drivers/s390/block/dasd_eckd.h __u16 head; head 149 drivers/s390/block/dasd_eckd.h __u16 head; head 91 drivers/s390/block/dasd_eer.c int head; head 106 drivers/s390/block/dasd_eer.c if (eerb->head < eerb->tail) head 107 drivers/s390/block/dasd_eer.c return eerb->tail - eerb->head - 1; head 108 drivers/s390/block/dasd_eer.c return eerb->buffersize - eerb->head + eerb->tail -1; head 118 drivers/s390/block/dasd_eer.c if (eerb->head >= eerb->tail) head 119 drivers/s390/block/dasd_eer.c return eerb->head - eerb->tail; head 120 drivers/s390/block/dasd_eer.c return eerb->buffersize - eerb->tail + eerb->head; head 140 drivers/s390/block/dasd_eer.c headindex = eerb->head / PAGE_SIZE; head 141 drivers/s390/block/dasd_eer.c localhead = eerb->head % PAGE_SIZE; head 146 drivers/s390/block/dasd_eer.c eerb->head += len; head 147 drivers/s390/block/dasd_eer.c if (eerb->head == eerb->buffersize) head 148 drivers/s390/block/dasd_eer.c eerb->head = 0; /* wrap around */ head 149 drivers/s390/block/dasd_eer.c 
BUG_ON(eerb->head > eerb->buffersize); head 638 drivers/s390/block/dasd_eer.c eerb->head != eerb->tail); head 674 drivers/s390/block/dasd_eer.c if (eerb->head != eerb->tail) head 83 drivers/s390/char/con3215.c int head; /* first free byte in output buffer */ head 192 drivers/s390/char/con3215.c req->start = (raw->head - raw->count + raw->written) & head 201 drivers/s390/char/con3215.c while (lines < RAW3215_MAX_NEWLINE && ix != raw->head) { head 213 drivers/s390/char/con3215.c req->delayable = (ix == raw->head) && (len < RAW3215_MIN_WRITE); head 475 drivers/s390/char/con3215.c ix = (raw->head - raw->count) & (RAW3215_BUFFER_SIZE - 1); head 483 drivers/s390/char/con3215.c raw->head = ix; head 540 drivers/s390/char/con3215.c RAW3215_BUFFER_SIZE - raw->head)); head 543 drivers/s390/char/con3215.c memcpy(raw->buffer + raw->head, str, c); head 544 drivers/s390/char/con3215.c ASCEBC(raw->buffer + raw->head, c); head 545 drivers/s390/char/con3215.c raw->head = (raw->head + c) & (RAW3215_BUFFER_SIZE - 1); head 583 drivers/s390/char/con3215.c raw->buffer[raw->head] = (char) _ascebc[(int) ch]; head 584 drivers/s390/char/con3215.c raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1); head 93 drivers/s390/cio/airq.c struct hlist_head *head; head 98 drivers/s390/cio/airq.c head = &airq_lists[tpi_info->isc]; head 100 drivers/s390/cio/airq.c hlist_for_each_entry_rcu(airq, head, list) head 169 drivers/s390/cio/eadm_sch.c list_for_each_entry(private, &eadm_list, head) { head 174 drivers/s390/cio/eadm_sch.c list_move_tail(&private->head, &eadm_list); head 226 drivers/s390/cio/eadm_sch.c INIT_LIST_HEAD(&private->head); head 244 drivers/s390/cio/eadm_sch.c list_add(&private->head, &eadm_list); head 290 drivers/s390/cio/eadm_sch.c list_del(&private->head); head 17 drivers/s390/cio/eadm_sch.h struct list_head head; head 298 drivers/s390/cio/vfio_ccw_cp.c static inline int is_cpa_within_range(u32 cpa, u32 head, int len) head 300 drivers/s390/cio/vfio_ccw_cp.c u32 tail = head + (len - 1) * sizeof(struct ccw1); head 302 drivers/s390/cio/vfio_ccw_cp.c return (head <= cpa && cpa <= tail); head 305 drivers/s390/cio/vfio_ccw_cp.c static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len) head 310 drivers/s390/cio/vfio_ccw_cp.c return is_cpa_within_range(ccw->cda, head, len); head 130 drivers/s390/crypto/zcrypt_error.h } __packed * head = reply->message; head 131 drivers/s390/crypto/zcrypt_error.h unsigned int apfs = *((u32 *)head->fmt2.apfs); head 692 drivers/s390/net/netiucv.c conn->rx_buff->data = conn->rx_buff->head; head 742 drivers/s390/net/netiucv.c conn->tx_buff->data = conn->tx_buff->head; head 402 drivers/s390/net/qeth_core_main.c struct qeth_qdio_out_buffer *head = q->bufs[bidx]; head 418 drivers/s390/net/qeth_core_main.c WARN_ON_ONCE(head->next_pending != f); head 419 drivers/s390/net/qeth_core_main.c head->next_pending = c; head 422 drivers/s390/net/qeth_core_main.c head = c; head 250 drivers/s390/scsi/zfcp_fc.c struct fc_els_rscn *head; head 256 drivers/s390/scsi/zfcp_fc.c head = (struct fc_els_rscn *) status_buffer->payload.data; head 257 drivers/s390/scsi/zfcp_fc.c page = (struct fc_els_rscn_page *) head; head 260 drivers/s390/scsi/zfcp_fc.c no_entries = be16_to_cpu(head->rscn_plen) / head 112 drivers/scsi/aic7xxx/queue.h #define SLIST_HEAD_INITIALIZER(head) \ head 123 drivers/scsi/aic7xxx/queue.h #define SLIST_EMPTY(head) ((head)->slh_first == NULL) head 125 drivers/scsi/aic7xxx/queue.h #define SLIST_FIRST(head) ((head)->slh_first) head 127 drivers/scsi/aic7xxx/queue.h #define 
SLIST_FOREACH(var, head, field) \ head 128 drivers/scsi/aic7xxx/queue.h for ((var) = SLIST_FIRST((head)); \ head 132 drivers/scsi/aic7xxx/queue.h #define SLIST_INIT(head) do { \ head 133 drivers/scsi/aic7xxx/queue.h SLIST_FIRST((head)) = NULL; \ head 141 drivers/scsi/aic7xxx/queue.h #define SLIST_INSERT_HEAD(head, elm, field) do { \ head 142 drivers/scsi/aic7xxx/queue.h SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ head 143 drivers/scsi/aic7xxx/queue.h SLIST_FIRST((head)) = (elm); \ head 148 drivers/scsi/aic7xxx/queue.h #define SLIST_REMOVE(head, elm, type, field) do { \ head 149 drivers/scsi/aic7xxx/queue.h if (SLIST_FIRST((head)) == (elm)) { \ head 150 drivers/scsi/aic7xxx/queue.h SLIST_REMOVE_HEAD((head), field); \ head 153 drivers/scsi/aic7xxx/queue.h struct type *curelm = SLIST_FIRST((head)); \ head 161 drivers/scsi/aic7xxx/queue.h #define SLIST_REMOVE_HEAD(head, field) do { \ head 162 drivers/scsi/aic7xxx/queue.h SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ head 174 drivers/scsi/aic7xxx/queue.h #define STAILQ_HEAD_INITIALIZER(head) \ head 175 drivers/scsi/aic7xxx/queue.h { NULL, &(head).stqh_first } head 185 drivers/scsi/aic7xxx/queue.h #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) head 187 drivers/scsi/aic7xxx/queue.h #define STAILQ_FIRST(head) ((head)->stqh_first) head 189 drivers/scsi/aic7xxx/queue.h #define STAILQ_FOREACH(var, head, field) \ head 190 drivers/scsi/aic7xxx/queue.h for((var) = STAILQ_FIRST((head)); \ head 194 drivers/scsi/aic7xxx/queue.h #define STAILQ_INIT(head) do { \ head 195 drivers/scsi/aic7xxx/queue.h STAILQ_FIRST((head)) = NULL; \ head 196 drivers/scsi/aic7xxx/queue.h (head)->stqh_last = &STAILQ_FIRST((head)); \ head 199 drivers/scsi/aic7xxx/queue.h #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ head 201 drivers/scsi/aic7xxx/queue.h (head)->stqh_last = &STAILQ_NEXT((elm), field); \ head 205 drivers/scsi/aic7xxx/queue.h #define STAILQ_INSERT_HEAD(head, elm, field) do { \ head 206 drivers/scsi/aic7xxx/queue.h if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ head 207 drivers/scsi/aic7xxx/queue.h (head)->stqh_last = &STAILQ_NEXT((elm), field); \ head 208 drivers/scsi/aic7xxx/queue.h STAILQ_FIRST((head)) = (elm); \ head 211 drivers/scsi/aic7xxx/queue.h #define STAILQ_INSERT_TAIL(head, elm, field) do { \ head 213 drivers/scsi/aic7xxx/queue.h STAILQ_LAST((head)) = (elm); \ head 214 drivers/scsi/aic7xxx/queue.h (head)->stqh_last = &STAILQ_NEXT((elm), field); \ head 217 drivers/scsi/aic7xxx/queue.h #define STAILQ_LAST(head) (*(head)->stqh_last) head 221 drivers/scsi/aic7xxx/queue.h #define STAILQ_REMOVE(head, elm, type, field) do { \ head 222 drivers/scsi/aic7xxx/queue.h if (STAILQ_FIRST((head)) == (elm)) { \ head 223 drivers/scsi/aic7xxx/queue.h STAILQ_REMOVE_HEAD(head, field); \ head 226 drivers/scsi/aic7xxx/queue.h struct type *curelm = STAILQ_FIRST((head)); \ head 231 drivers/scsi/aic7xxx/queue.h (head)->stqh_last = &STAILQ_NEXT((curelm), field);\ head 235 drivers/scsi/aic7xxx/queue.h #define STAILQ_REMOVE_HEAD(head, field) do { \ head 236 drivers/scsi/aic7xxx/queue.h if ((STAILQ_FIRST((head)) = \ head 237 drivers/scsi/aic7xxx/queue.h STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ head 238 drivers/scsi/aic7xxx/queue.h (head)->stqh_last = &STAILQ_FIRST((head)); \ head 241 drivers/scsi/aic7xxx/queue.h #define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \ head 242 drivers/scsi/aic7xxx/queue.h if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \ head 243 drivers/scsi/aic7xxx/queue.h 
(head)->stqh_last = &STAILQ_FIRST((head)); \ head 254 drivers/scsi/aic7xxx/queue.h #define LIST_HEAD_INITIALIZER(head) \ head 267 drivers/scsi/aic7xxx/queue.h #define LIST_EMPTY(head) ((head)->lh_first == NULL) head 269 drivers/scsi/aic7xxx/queue.h #define LIST_FIRST(head) ((head)->lh_first) head 271 drivers/scsi/aic7xxx/queue.h #define LIST_FOREACH(var, head, field) \ head 272 drivers/scsi/aic7xxx/queue.h for ((var) = LIST_FIRST((head)); \ head 276 drivers/scsi/aic7xxx/queue.h #define LIST_INIT(head) do { \ head 277 drivers/scsi/aic7xxx/queue.h LIST_FIRST((head)) = NULL; \ head 295 drivers/scsi/aic7xxx/queue.h #define LIST_INSERT_HEAD(head, elm, field) do { \ head 296 drivers/scsi/aic7xxx/queue.h if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ head 297 drivers/scsi/aic7xxx/queue.h LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ head 298 drivers/scsi/aic7xxx/queue.h LIST_FIRST((head)) = (elm); \ head 299 drivers/scsi/aic7xxx/queue.h (elm)->field.le_prev = &LIST_FIRST((head)); \ head 320 drivers/scsi/aic7xxx/queue.h #define TAILQ_HEAD_INITIALIZER(head) \ head 321 drivers/scsi/aic7xxx/queue.h { NULL, &(head).tqh_first } head 332 drivers/scsi/aic7xxx/queue.h #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) head 334 drivers/scsi/aic7xxx/queue.h #define TAILQ_FIRST(head) ((head)->tqh_first) head 336 drivers/scsi/aic7xxx/queue.h #define TAILQ_FOREACH(var, head, field) \ head 337 drivers/scsi/aic7xxx/queue.h for ((var) = TAILQ_FIRST((head)); \ head 341 drivers/scsi/aic7xxx/queue.h #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ head 342 drivers/scsi/aic7xxx/queue.h for ((var) = TAILQ_LAST((head), headname); \ head 346 drivers/scsi/aic7xxx/queue.h #define TAILQ_INIT(head) do { \ head 347 drivers/scsi/aic7xxx/queue.h TAILQ_FIRST((head)) = NULL; \ head 348 drivers/scsi/aic7xxx/queue.h (head)->tqh_last = &TAILQ_FIRST((head)); \ head 351 drivers/scsi/aic7xxx/queue.h #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ head 356 drivers/scsi/aic7xxx/queue.h (head)->tqh_last = &TAILQ_NEXT((elm), field); \ head 368 drivers/scsi/aic7xxx/queue.h #define TAILQ_INSERT_HEAD(head, elm, field) do { \ head 369 drivers/scsi/aic7xxx/queue.h if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ head 370 drivers/scsi/aic7xxx/queue.h TAILQ_FIRST((head))->field.tqe_prev = \ head 373 drivers/scsi/aic7xxx/queue.h (head)->tqh_last = &TAILQ_NEXT((elm), field); \ head 374 drivers/scsi/aic7xxx/queue.h TAILQ_FIRST((head)) = (elm); \ head 375 drivers/scsi/aic7xxx/queue.h (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ head 378 drivers/scsi/aic7xxx/queue.h #define TAILQ_INSERT_TAIL(head, elm, field) do { \ head 380 drivers/scsi/aic7xxx/queue.h (elm)->field.tqe_prev = (head)->tqh_last; \ head 381 drivers/scsi/aic7xxx/queue.h *(head)->tqh_last = (elm); \ head 382 drivers/scsi/aic7xxx/queue.h (head)->tqh_last = &TAILQ_NEXT((elm), field); \ head 385 drivers/scsi/aic7xxx/queue.h #define TAILQ_LAST(head, headname) \ head 386 drivers/scsi/aic7xxx/queue.h (*(((struct headname *)((head)->tqh_last))->tqh_last)) head 393 drivers/scsi/aic7xxx/queue.h #define TAILQ_REMOVE(head, elm, field) do { \ head 398 drivers/scsi/aic7xxx/queue.h (head)->tqh_last = (elm)->field.tqe_prev; \ head 411 drivers/scsi/aic7xxx/queue.h #define CIRCLEQ_HEAD_INITIALIZER(head) \ head 412 drivers/scsi/aic7xxx/queue.h { (void *)&(head), (void *)&(head) } head 423 drivers/scsi/aic7xxx/queue.h #define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) head 425 drivers/scsi/aic7xxx/queue.h #define 
CIRCLEQ_FIRST(head) ((head)->cqh_first) head 427 drivers/scsi/aic7xxx/queue.h #define CIRCLEQ_FOREACH(var, head, field) \ head 428 drivers/scsi/aic7xxx/queue.h for ((var) = CIRCLEQ_FIRST((head)); \ head 429 drivers/scsi/aic7xxx/queue.h (var) != (void *)(head); \ head 432 drivers/scsi/aic7xxx/queue.h #define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ head 433 drivers/scsi/aic7xxx/queue.h for ((var) = CIRCLEQ_LAST((head)); \ head 434 drivers/scsi/aic7xxx/queue.h (var) != (void *)(head); \ head 437 drivers/scsi/aic7xxx/queue.h #define CIRCLEQ_INIT(head) do { \ head 438 drivers/scsi/aic7xxx/queue.h CIRCLEQ_FIRST((head)) = (void *)(head); \ head 439 drivers/scsi/aic7xxx/queue.h CIRCLEQ_LAST((head)) = (void *)(head); \ head 442 drivers/scsi/aic7xxx/queue.h #define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ head 445 drivers/scsi/aic7xxx/queue.h if (CIRCLEQ_NEXT((listelm), field) == (void *)(head)) \ head 446 drivers/scsi/aic7xxx/queue.h CIRCLEQ_LAST((head)) = (elm); \ head 452 drivers/scsi/aic7xxx/queue.h #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ head 455 drivers/scsi/aic7xxx/queue.h if (CIRCLEQ_PREV((listelm), field) == (void *)(head)) \ head 456 drivers/scsi/aic7xxx/queue.h CIRCLEQ_FIRST((head)) = (elm); \ head 462 drivers/scsi/aic7xxx/queue.h #define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ head 463 drivers/scsi/aic7xxx/queue.h CIRCLEQ_NEXT((elm), field) = CIRCLEQ_FIRST((head)); \ head 464 drivers/scsi/aic7xxx/queue.h CIRCLEQ_PREV((elm), field) = (void *)(head); \ head 465 drivers/scsi/aic7xxx/queue.h if (CIRCLEQ_LAST((head)) == (void *)(head)) \ head 466 drivers/scsi/aic7xxx/queue.h CIRCLEQ_LAST((head)) = (elm); \ head 468 drivers/scsi/aic7xxx/queue.h CIRCLEQ_PREV(CIRCLEQ_FIRST((head)), field) = (elm); \ head 469 drivers/scsi/aic7xxx/queue.h CIRCLEQ_FIRST((head)) = (elm); \ head 472 drivers/scsi/aic7xxx/queue.h #define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ head 473 drivers/scsi/aic7xxx/queue.h CIRCLEQ_NEXT((elm), field) = (void *)(head); \ head 474 drivers/scsi/aic7xxx/queue.h CIRCLEQ_PREV((elm), field) = CIRCLEQ_LAST((head)); \ head 475 drivers/scsi/aic7xxx/queue.h if (CIRCLEQ_FIRST((head)) == (void *)(head)) \ head 476 drivers/scsi/aic7xxx/queue.h CIRCLEQ_FIRST((head)) = (elm); \ head 478 drivers/scsi/aic7xxx/queue.h CIRCLEQ_NEXT(CIRCLEQ_LAST((head)), field) = (elm); \ head 479 drivers/scsi/aic7xxx/queue.h CIRCLEQ_LAST((head)) = (elm); \ head 482 drivers/scsi/aic7xxx/queue.h #define CIRCLEQ_LAST(head) ((head)->cqh_last) head 488 drivers/scsi/aic7xxx/queue.h #define CIRCLEQ_REMOVE(head, elm, field) do { \ head 489 drivers/scsi/aic7xxx/queue.h if (CIRCLEQ_NEXT((elm), field) == (void *)(head)) \ head 490 drivers/scsi/aic7xxx/queue.h CIRCLEQ_LAST((head)) = CIRCLEQ_PREV((elm), field); \ head 494 drivers/scsi/aic7xxx/queue.h if (CIRCLEQ_PREV((elm), field) == (void *)(head)) \ head 495 drivers/scsi/aic7xxx/queue.h CIRCLEQ_FIRST((head)) = CIRCLEQ_NEXT((elm), field); \ head 84 drivers/scsi/arcmsr/arcmsr_attr.c unsigned int head = acb->rqbuf_putIndex; head 85 drivers/scsi/arcmsr/arcmsr_attr.c unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER); head 87 drivers/scsi/arcmsr/arcmsr_attr.c allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER); head 2810 drivers/scsi/arcmsr/arcmsr_hba.c unsigned int head = acb->rqbuf_putIndex; head 2811 drivers/scsi/arcmsr/arcmsr_hba.c unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER); head 2813 drivers/scsi/arcmsr/arcmsr_hba.c allxfer_len = CIRC_CNT(head, tail, 
ARCMSR_MAX_QBUFFER); head 3023 drivers/scsi/arcmsr/arcmsr_hba.c struct list_head *head = &acb->ccb_free_list; head 3027 drivers/scsi/arcmsr/arcmsr_hba.c if (!list_empty(head)) { head 3028 drivers/scsi/arcmsr/arcmsr_hba.c ccb = list_entry(head->next, struct CommandControlBlock, list); head 61 drivers/scsi/arm/queue.c INIT_LIST_HEAD(&queue->head); head 89 drivers/scsi/arm/queue.c if (!list_empty(&queue->head)) head 103 drivers/scsi/arm/queue.c int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) head 123 drivers/scsi/arm/queue.c if (head) head 124 drivers/scsi/arm/queue.c list_add(l, &queue->head); head 126 drivers/scsi/arm/queue.c list_add_tail(l, &queue->head); head 165 drivers/scsi/arm/queue.c list_for_each(l, &queue->head) { head 190 drivers/scsi/arm/queue.c if (!list_empty(&queue->head)) head 191 drivers/scsi/arm/queue.c SCpnt = __queue_remove(queue, queue->head.next); head 214 drivers/scsi/arm/queue.c list_for_each(l, &queue->head) { head 240 drivers/scsi/arm/queue.c list_for_each(l, &queue->head) { head 264 drivers/scsi/arm/queue.c list_for_each(l, &queue->head) { head 290 drivers/scsi/arm/queue.c list_for_each(l, &queue->head) { head 11 drivers/scsi/arm/queue.h struct list_head head; head 61 drivers/scsi/arm/queue.h extern int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head); head 35 drivers/scsi/be2iscsi/be.h u16 tail, head; head 53 drivers/scsi/be2iscsi/be.h return q->dma_mem.va + q->head * q->entry_size; head 68 drivers/scsi/be2iscsi/be.h index_inc(&q->head, q->len); head 132 drivers/scsi/be2iscsi/be_cmds.c wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK; head 85 drivers/scsi/bfa/bfa_core.c if (trcm->tail == trcm->head) head 86 drivers/scsi/bfa/bfa_core.c trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); head 59 drivers/scsi/bfa/bfa_cs.h u32 head; head 93 drivers/scsi/bfa/bfa_cs.h trcm->head = trcm->tail = trcm->stopped = 0; head 131 drivers/scsi/bfa/bfa_plog.h u16 head; head 306 drivers/scsi/bfa/bfa_svc.c if (plog->head == plog->tail) head 307 drivers/scsi/bfa/bfa_svc.c BFA_PL_LOG_REC_INCR(plog->head); head 316 drivers/scsi/bfa/bfa_svc.c plog->head = plog->tail = 0; head 2060 drivers/scsi/bfa/bfad_bsg.c bfad->plog_buf.head = bfad->plog_buf.tail = 0; head 161 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head; head 202 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; head 251 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c req = (struct cpl_abort_req *)skb->head; head 277 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; head 312 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c req = (struct cpl_rx_data_ack *)skb->head; head 1071 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head; head 1106 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c req = (struct ulp_mem_io *)skb->head; head 1158 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c req = (struct cpl_set_tcb_field *)skb->head; head 1194 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c req = (struct cpl_set_tcb_field *)skb->head; head 228 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c (struct cpl_act_open_req *)skb->head; head 251 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c (struct cpl_t5_act_open_req *)skb->head; head 279 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c (struct cpl_t6_act_open_req *)skb->head; head 347 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c (struct cpl_act_open_req6 *)skb->head; head 372 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c (struct cpl_t5_act_open_req6 *)skb->head; head 395 
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c (struct cpl_t6_act_open_req6 *)skb->head; head 439 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head; head 487 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c req = (struct cpl_abort_req *)skb->head; head 507 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head; head 540 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c req = (struct cpl_rx_data_ack *)skb->head; head 620 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c flowc = (struct fw_flowc_wr *)skb->head; head 1929 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen, head 1952 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c req = (struct ulp_mem_io *)skb->head; head 2010 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c req = (struct cpl_set_tcb_field *)skb->head; head 2045 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c req = (struct cpl_set_tcb_field *)skb->head; head 357 drivers/scsi/cxgbi/libcxgbi.h memset(skb->head, 0, wrlen + dlen); head 697 drivers/scsi/dc395x.c static unsigned int list_size(struct list_head *head) head 701 drivers/scsi/dc395x.c list_for_each(pos, head) head 707 drivers/scsi/dc395x.c static struct DeviceCtlBlk *dcb_get_next(struct list_head *head, head 714 drivers/scsi/dc395x.c if (list_empty(head)) head 718 drivers/scsi/dc395x.c list_for_each_entry(i, head, list) head 727 drivers/scsi/dc395x.c list_for_each_entry(i, head, list) { head 747 drivers/scsi/dc395x.c struct list_head *head) head 750 drivers/scsi/dc395x.c list_for_each_entry(i, head, list) head 877 drivers/scsi/esp_scsi.c struct list_head *head = &esp->esp_cmd_pool; head 880 drivers/scsi/esp_scsi.c if (list_empty(head)) { head 883 drivers/scsi/esp_scsi.c ret = list_entry(head->next, struct esp_cmd_entry, list); head 1359 drivers/scsi/fcoe/fcoe.c skb->len, skb->data_len, skb->head, skb->data, head 1677 drivers/scsi/fcoe/fcoe.c skb->head, skb->data, skb_tail_pointer(skb), head 990 drivers/scsi/hpsa.c if ((rq->head[rq->current_entry] & 1) == rq->wraparound) { head 991 drivers/scsi/hpsa.c a = rq->head[rq->current_entry]; head 8146 drivers/scsi/hpsa.c if (!h->reply_queue[i].head) head 8150 drivers/scsi/hpsa.c h->reply_queue[i].head, head 8152 drivers/scsi/hpsa.c h->reply_queue[i].head = NULL; head 9172 drivers/scsi/hpsa.c memset(h->reply_queue[i].head, 0, h->reply_queue_size); head 9237 drivers/scsi/hpsa.c memset(h->reply_queue[i].head, head 9452 drivers/scsi/hpsa.c h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev, head 9456 drivers/scsi/hpsa.c if (!h->reply_queue[i].head) { head 116 drivers/scsi/hpsa.h u64 *head; head 506 drivers/scsi/hpsa.h if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) { head 507 drivers/scsi/hpsa.h register_value = rq->head[rq->current_entry]; head 596 drivers/scsi/hpsa.h register_value = rq->head[rq->current_entry]; head 598 drivers/scsi/hpsa.h rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED; head 164 drivers/scsi/hptiop.c u32 head = inbound_head + 1; head 166 drivers/scsi/hptiop.c if (head == MVIOP_QUEUE_LEN) head 167 drivers/scsi/hptiop.c head = 0; head 170 drivers/scsi/hptiop.c writel(head, &hba->u.mv.mu->inbound_head); head 799 drivers/scsi/ips.c item = ha->copp_waitlist.head; head 864 drivers/scsi/ips.c item = ha->copp_waitlist.head; head 2560 drivers/scsi/ips.c (ha->copp_waitlist.head) && (scb = ips_getscb(ha))) { head 2627 drivers/scsi/ips.c p = ha->scb_waitlist.head; head 2760 drivers/scsi/ips.c item->q_next = queue->head; head 2761 drivers/scsi/ips.c queue->head = item; head 2787 
drivers/scsi/ips.c item = queue->head; head 2793 drivers/scsi/ips.c queue->head = item->q_next; head 2825 drivers/scsi/ips.c if (item == queue->head) { head 2829 drivers/scsi/ips.c p = queue->head; head 2875 drivers/scsi/ips.c if (!queue->head) head 2876 drivers/scsi/ips.c queue->head = item; head 2898 drivers/scsi/ips.c item = queue->head; head 2904 drivers/scsi/ips.c queue->head = (struct scsi_cmnd *) item->host_scribble; head 2936 drivers/scsi/ips.c if (item == queue->head) { head 2940 drivers/scsi/ips.c p = queue->head; head 2987 drivers/scsi/ips.c if (!queue->head) head 2988 drivers/scsi/ips.c queue->head = item; head 3011 drivers/scsi/ips.c item = queue->head; head 3017 drivers/scsi/ips.c queue->head = item->next; head 3049 drivers/scsi/ips.c if (item == queue->head) { head 3053 drivers/scsi/ips.c p = queue->head; head 962 drivers/scsi/ips.h struct ips_scb *head; head 971 drivers/scsi/ips.h struct scsi_cmnd *head; head 982 drivers/scsi/ips.h struct ips_copp_wait_item *head; head 2561 drivers/scsi/isci/host.c u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1); head 2562 drivers/scsi/isci/host.c u16 tci = ihost->tci_pool[head]; head 2564 drivers/scsi/isci/host.c ihost->tci_head = head + 1; head 883 drivers/scsi/lpfc/lpfc_bsg.c struct list_head head, *curr, *next; head 891 drivers/scsi/lpfc/lpfc_bsg.c list_add_tail(&head, &mlist->dma.list); head 893 drivers/scsi/lpfc/lpfc_bsg.c list_for_each_safe(curr, next, &head) { head 925 drivers/scsi/lpfc/lpfc_bsg.c struct list_head head; head 939 drivers/scsi/lpfc/lpfc_bsg.c INIT_LIST_HEAD(&head); head 940 drivers/scsi/lpfc/lpfc_bsg.c list_add_tail(&head, &piocbq->list); head 985 drivers/scsi/lpfc/lpfc_bsg.c iocbq = list_entry(head.prev, typeof(*iocbq), list); head 988 drivers/scsi/lpfc/lpfc_bsg.c list_for_each_entry(iocbq, &head, list) { head 1008 drivers/scsi/lpfc/lpfc_bsg.c list_for_each_entry(iocbq, &head, list) { head 1152 drivers/scsi/lpfc/lpfc_bsg.c if (!list_empty(&head)) head 1153 drivers/scsi/lpfc/lpfc_bsg.c list_del(&head); head 2960 drivers/scsi/lpfc/lpfc_bsg.c struct list_head head, *curr, *next; head 2996 drivers/scsi/lpfc/lpfc_bsg.c INIT_LIST_HEAD(&head); head 2997 drivers/scsi/lpfc/lpfc_bsg.c list_add_tail(&head, &dmp->list); head 2998 drivers/scsi/lpfc/lpfc_bsg.c list_for_each_safe(curr, next, &head) { head 3064 drivers/scsi/lpfc/lpfc_bsg.c list_del(&head); head 3117 drivers/scsi/lpfc/lpfc_bsg.c struct list_head head; head 3252 drivers/scsi/lpfc/lpfc_bsg.c INIT_LIST_HEAD(&head); head 3253 drivers/scsi/lpfc/lpfc_bsg.c list_add_tail(&head, &txbuffer->dma.list); head 3254 drivers/scsi/lpfc/lpfc_bsg.c list_for_each_entry(curr, &head, list) { head 3277 drivers/scsi/lpfc/lpfc_bsg.c list_del(&head); head 113 drivers/scsi/lpfc/lpfc_ct.c struct list_head head; head 138 drivers/scsi/lpfc/lpfc_ct.c INIT_LIST_HEAD(&head); head 139 drivers/scsi/lpfc/lpfc_ct.c list_add_tail(&head, &piocbq->list); head 140 drivers/scsi/lpfc/lpfc_ct.c list_for_each_entry(iocbq, &head, list) { head 158 drivers/scsi/lpfc/lpfc_ct.c list_del(&head); head 160 drivers/scsi/lpfc/lpfc_ct.c INIT_LIST_HEAD(&head); head 161 drivers/scsi/lpfc/lpfc_ct.c list_add_tail(&head, &piocbq->list); head 162 drivers/scsi/lpfc/lpfc_ct.c list_for_each_entry(iocbq, &head, list) { head 177 drivers/scsi/lpfc/lpfc_ct.c list_del(&head); head 601 drivers/scsi/lpfc/lpfc_ct.c struct list_head head; head 610 drivers/scsi/lpfc/lpfc_ct.c list_add_tail(&head, &mp->list); head 611 drivers/scsi/lpfc/lpfc_ct.c list_for_each_entry_safe(mp, next_mp, &head, list) { head 655 drivers/scsi/lpfc/lpfc_ct.c 
list_del(&head);
head 425 drivers/scsi/megaraid.c struct list_head *head = &adapter->free_list;
head 429 drivers/scsi/megaraid.c if( !list_empty(head) ) {
head 431 drivers/scsi/megaraid.c scb = list_entry(head->next, scb_t, list);
head 433 drivers/scsi/megaraid.c list_del_init(head->next);
head 1274 drivers/scsi/megaraid/megaraid_mbox.c struct list_head *head = &adapter->kscb_pool;
head 1281 drivers/scsi/megaraid/megaraid_mbox.c if (list_empty(head)) {
head 1286 drivers/scsi/megaraid/megaraid_mbox.c scb = list_entry(head->next, scb_t, list);
head 3564 drivers/scsi/megaraid/megaraid_mbox.c struct list_head *head = &adapter->uscb_pool;
head 3574 drivers/scsi/megaraid/megaraid_mbox.c if (list_empty(head)) { // should never happen because of CMM
head 3584 drivers/scsi/megaraid/megaraid_mbox.c scb = list_entry(head->next, scb_t, list);
head 595 drivers/scsi/megaraid/megaraid_mm.c struct list_head* head;
head 602 drivers/scsi/megaraid/megaraid_mm.c head = &adp->kioc_pool;
head 604 drivers/scsi/megaraid/megaraid_mm.c if (list_empty(head)) {
head 612 drivers/scsi/megaraid/megaraid_mm.c kioc = list_entry(head->next, uioc_t, list);
head 8902 drivers/scsi/mpt3sas/mpt3sas_scsih.c LIST_HEAD(head);
head 8916 drivers/scsi/mpt3sas/mpt3sas_scsih.c list_move_tail(&sas_device->list, &head);
head 8925 drivers/scsi/mpt3sas/mpt3sas_scsih.c list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
head 8932 drivers/scsi/mpt3sas/mpt3sas_scsih.c INIT_LIST_HEAD(&head);
head 8937 drivers/scsi/mpt3sas/mpt3sas_scsih.c list_move_tail(&pcie_device->list, &head);
head 8943 drivers/scsi/mpt3sas/mpt3sas_scsih.c list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
head 151 drivers/scsi/ncr53c8xx.c static inline struct list_head *ncr_list_pop(struct list_head *head)
head 153 drivers/scsi/ncr53c8xx.c if (!list_empty(head)) {
head 154 drivers/scsi/ncr53c8xx.c struct list_head *elem = head->next;
head 1480 drivers/scsi/ncr53c8xx.c struct head header;
head 1593 drivers/scsi/ncr53c8xx.c struct head header;
head 2110 drivers/scsi/ncr53c8xx.c SCR_COPY (sizeof (struct head)),
head 2412 drivers/scsi/ncr53c8xx.c SCR_COPY (sizeof (struct head)),
head 2631 drivers/scsi/ncr53c8xx.c SCR_COPY (sizeof (struct head)),
head 2856 drivers/scsi/ncr53c8xx.c SCR_COPY (sizeof (struct head)),
head 4283 drivers/scsi/qla2xxx/qla_def.h struct list_head head;
head 4287 drivers/scsi/qla2xxx/qla_def.h struct list_head head;
head 4057 drivers/scsi/qla2xxx/qla_os.c INIT_LIST_HEAD(&ha->pool.good.head);
head 4058 drivers/scsi/qla2xxx/qla_os.c INIT_LIST_HEAD(&ha->pool.unusable.head);
head 4092 drivers/scsi/qla2xxx/qla_os.c &ha->pool.unusable.head);
head 4096 drivers/scsi/qla2xxx/qla_os.c &ha->pool.good.head);
head 4103 drivers/scsi/qla2xxx/qla_os.c &ha->pool.good.head, list) {
head 4302 drivers/scsi/qla2xxx/qla_os.c list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
head 4735 drivers/scsi/qla2xxx/qla_os.c list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
head 4745 drivers/scsi/qla2xxx/qla_os.c list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
head 4181 drivers/scsi/scsi_debug.c pp->head = (start_sec - (pp->cyl * heads_by_sects))
head 232 drivers/scsi/scsi_error.c static void scsi_eh_inc_host_failed(struct rcu_head *head)
head 234 drivers/scsi/scsi_error.c struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
head 1593 drivers/scsi/scsi_transport_fc.c #define get_list_head_entry(pos, head, member) \
head 1594 drivers/scsi/scsi_transport_fc.c pos = list_entry((head)->next, typeof(*pos), member)
head 305 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.status),
head 455 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.lastp),
head 514 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.lastp),
head 546 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.lastp),
head 562 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.lastp),
head 654 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.status),
head 664 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.status),
head 740 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.lastp),
head 742 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.savep),
head 762 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.savep),
head 764 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.lastp),
head 792 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.status),
head 879 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof(struct sym_tcb, head.wval),
head 881 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof(struct sym_tcb, head.sval),
head 888 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof(struct sym_tcb, head.uval),
head 911 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof(struct sym_tcb, head.luntbl_sa),
head 927 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof(struct sym_tcb, head.lun0_sa),
head 932 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof(struct sym_lcb, head.resel_sa),
head 955 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof(struct sym_lcb, head.itlq_tbl_sa),
head 988 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof(struct sym_ccb, phys.head.go.restart),
head 1003 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof (struct sym_ccb, phys.head.status),
head 1014 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof(struct sym_lcb, head.itl_task_sa),
head 1019 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof(struct sym_ccb, phys.head.go.restart),
head 1692 drivers/scsi/sym53c8xx_2/sym_fw2.h offsetof(struct sym_ccb, phys.head.lastp),
head 269 drivers/scsi/sym53c8xx_2/sym_glue.c if ((len & 1) && (tp->head.wval & EWS)) {
head 390 drivers/scsi/sym53c8xx_2/sym_glue.c cp->phys.head.lastp = cpu_to_scr(lastp);
head 391 drivers/scsi/sym53c8xx_2/sym_glue.c cp->phys.head.savep = cpu_to_scr(lastp);
head 392 drivers/scsi/sym53c8xx_2/sym_glue.c cp->startp = cp->phys.head.savep;
head 847 drivers/scsi/sym53c8xx_2/sym_glue.c tp->head.sval = 0;
head 848 drivers/scsi/sym53c8xx_2/sym_glue.c tp->head.wval = np->rv_scntl3;
head 849 drivers/scsi/sym53c8xx_2/sym_glue.c tp->head.uval = 0;
head 1554 drivers/scsi/sym53c8xx_2/sym_hipd.c lp->head.resel_sa =
head 1562 drivers/scsi/sym53c8xx_2/sym_hipd.c lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
head 1563 drivers/scsi/sym53c8xx_2/sym_hipd.c lp->head.resel_sa =
head 1880 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.sval = 0;
head 1881 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.wval = np->rv_scntl3;
head 1882 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.uval = 0;
head 1940 drivers/scsi/sym53c8xx_2/sym_hipd.c sval = tp->head.sval;
head 1941 drivers/scsi/sym53c8xx_2/sym_hipd.c wval = tp->head.wval;
head 1942 drivers/scsi/sym53c8xx_2/sym_hipd.c uval = tp->head.uval;
head 1994 drivers/scsi/sym53c8xx_2/sym_hipd.c if (tp->head.sval == sval &&
head 1995 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.wval == wval &&
head 1996 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.uval == uval)
head 1998 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.sval = sval;
head 1999 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.wval = wval;
head 2000 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.uval = uval;
head 2012 drivers/scsi/sym53c8xx_2/sym_hipd.c OUTB(np, nc_sxfer, tp->head.sval);
head 2013 drivers/scsi/sym53c8xx_2/sym_hipd.c OUTB(np, nc_scntl3, tp->head.wval);
head 2016 drivers/scsi/sym53c8xx_2/sym_hipd.c OUTB(np, nc_scntl4, tp->head.uval);
head 2027 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.select.sel_scntl3 = tp->head.wval;
head 2028 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.select.sel_sxfer = tp->head.sval;
head 2030 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.select.sel_scntl4 = tp->head.uval;
head 2096 drivers/scsi/sym53c8xx_2/sym_hipd.c u_char wide = (tp->head.wval & EWS) ? BUS_16_BIT : BUS_8_BIT;
head 3145 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.head.savep = cpu_to_scr(startp);
head 3146 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.head.lastp = cpu_to_scr(startp);
head 3157 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select));
head 3330 drivers/scsi/sym53c8xx_2/sym_hipd.c np->abrt_sel.sel_scntl3 = tp->head.wval;
head 3331 drivers/scsi/sym53c8xx_2/sym_hipd.c np->abrt_sel.sel_sxfer = tp->head.sval;
head 3542 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.sval = 0;
head 3543 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.wval = np->rv_scntl3;
head 3544 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.uval = 0;
head 3869 drivers/scsi/sym53c8xx_2/sym_hipd.c if (cp->phys.head.lastp == cp->goalp)
head 3876 drivers/scsi/sym53c8xx_2/sym_hipd.c if (cp->startp == cp->phys.head.lastp ||
head 3877 drivers/scsi/sym53c8xx_2/sym_hipd.c sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
head 4698 drivers/scsi/sym53c8xx_2/sym_hipd.c lp->head.resel_sa =
head 4731 drivers/scsi/sym53c8xx_2/sym_hipd.c lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
head 4732 drivers/scsi/sym53c8xx_2/sym_hipd.c lp->head.resel_sa =
head 4810 drivers/scsi/sym53c8xx_2/sym_hipd.c lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
head 4817 drivers/scsi/sym53c8xx_2/sym_hipd.c lp->head.resel_sa =
head 4903 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, idle));
head 4904 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
head 4959 drivers/scsi/sym53c8xx_2/sym_hipd.c offsetof(struct sym_tcb, head.sval)) &3) == 0);
head 4961 drivers/scsi/sym53c8xx_2/sym_hipd.c offsetof(struct sym_tcb, head.wval)) &3) == 0);
head 4987 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
head 5013 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
head 5020 drivers/scsi/sym53c8xx_2/sym_hipd.c lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
head 5025 drivers/scsi/sym53c8xx_2/sym_hipd.c lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
head 5084 drivers/scsi/sym53c8xx_2/sym_hipd.c lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
head 5108 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
head 5115 drivers/scsi/sym53c8xx_2/sym_hipd.c tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
head 5228 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select));
head 5229 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa));
head 5235 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.select.sel_scntl3 = tp->head.wval;
head 5236 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.select.sel_sxfer = tp->head.sval;
head 5237 drivers/scsi/sym53c8xx_2/sym_hipd.c cp->phys.select.sel_scntl4 = tp->head.uval;
head 5512 drivers/scsi/sym53c8xx_2/sym_hipd.c if (cp->phys.head.lastp != cp->goalp)
head 5767 drivers/scsi/sym53c8xx_2/sym_hipd.c np->target[i].head.luntbl_sa =
head 5769 drivers/scsi/sym53c8xx_2/sym_hipd.c np->target[i].head.lun0_sa =
head 384 drivers/scsi/sym53c8xx_2/sym_hipd.h /*0*/ struct sym_tcbh head;
head 474 drivers/scsi/sym53c8xx_2/sym_hipd.h /*0*/ struct sym_lcbh head;
head 598 drivers/scsi/sym53c8xx_2/sym_hipd.h #define host_xflags phys.head.status[0]
head 599 drivers/scsi/sym53c8xx_2/sym_hipd.h #define host_status phys.head.status[1]
head 600 drivers/scsi/sym53c8xx_2/sym_hipd.h #define ssss_status phys.head.status[2]
head 601 drivers/scsi/sym53c8xx_2/sym_hipd.h #define host_flags phys.head.status[3]
head 667 drivers/scsi/sym53c8xx_2/sym_hipd.h cp->phys.head.lastp = cpu_to_scr(dp); \
head 673 drivers/scsi/sym53c8xx_2/sym_hipd.h cp->phys.head.lastp : np->ccb_head.lastp)
head 677 drivers/scsi/sym53c8xx_2/sym_hipd.h cp->phys.head.lastp = cpu_to_scr(dp); \
head 680 drivers/scsi/sym53c8xx_2/sym_hipd.h #define sym_get_script_dp(np, cp) (cp->phys.head.lastp)
head 695 drivers/scsi/sym53c8xx_2/sym_hipd.h /*0*/ struct sym_ccbh head;
head 42 drivers/scsi/sym53c8xx_2/sym_misc.h static inline struct sym_quehead *sym_que_first(struct sym_quehead *head)
head 44 drivers/scsi/sym53c8xx_2/sym_misc.h return (head->flink == head) ? 0 : head->flink;
head 47 drivers/scsi/sym53c8xx_2/sym_misc.h static inline struct sym_quehead *sym_que_last(struct sym_quehead *head)
head 49 drivers/scsi/sym53c8xx_2/sym_misc.h return (head->blink == head) ? 0 : head->blink;
head 69 drivers/scsi/sym53c8xx_2/sym_misc.h static inline int sym_que_empty(struct sym_quehead *head)
head 71 drivers/scsi/sym53c8xx_2/sym_misc.h return head->flink == head;
head 75 drivers/scsi/sym53c8xx_2/sym_misc.h struct sym_quehead *head)
head 81 drivers/scsi/sym53c8xx_2/sym_misc.h struct sym_quehead *at = head->flink;
head 83 drivers/scsi/sym53c8xx_2/sym_misc.h first->blink = head;
head 84 drivers/scsi/sym53c8xx_2/sym_misc.h head->flink = first;
head 117 drivers/scsi/sym53c8xx_2/sym_misc.h #define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink)
head 119 drivers/scsi/sym53c8xx_2/sym_misc.h static inline struct sym_quehead *sym_remque_head(struct sym_quehead *head)
head 121 drivers/scsi/sym53c8xx_2/sym_misc.h struct sym_quehead *elem = head->flink;
head 123 drivers/scsi/sym53c8xx_2/sym_misc.h if (elem != head)
head 124 drivers/scsi/sym53c8xx_2/sym_misc.h __sym_que_del(head, elem->flink);
head 130 drivers/scsi/sym53c8xx_2/sym_misc.h #define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head)
head 132 drivers/scsi/sym53c8xx_2/sym_misc.h static inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head)
head 134 drivers/scsi/sym53c8xx_2/sym_misc.h struct sym_quehead *elem = head->blink;
head 136 drivers/scsi/sym53c8xx_2/sym_misc.h if (elem != head)
head 137 drivers/scsi/sym53c8xx_2/sym_misc.h __sym_que_del(elem->blink, head);
head 146 drivers/scsi/sym53c8xx_2/sym_misc.h #define FOR_EACH_QUEUED_ELEMENT(head, qp) \
head 147 drivers/scsi/sym53c8xx_2/sym_misc.h for (qp = (head)->flink; qp != (head); qp = qp->flink)
head 32 drivers/scsi/ufs/cdns-pltfrm.c struct list_head *head = &hba->clk_list_head;
head 36 drivers/scsi/ufs/cdns-pltfrm.c if (list_empty(head))
head 39 drivers/scsi/ufs/cdns-pltfrm.c list_for_each_entry(clki, head, list) {
head 370 drivers/scsi/ufs/ufshcd.c struct list_head *head = &hba->clk_list_head;
head 372 drivers/scsi/ufs/ufshcd.c if (list_empty(head))
head 375 drivers/scsi/ufs/ufshcd.c list_for_each_entry(clki, head, list) {
head 907 drivers/scsi/ufs/ufshcd.c struct list_head *head = &hba->clk_list_head;
head 911 drivers/scsi/ufs/ufshcd.c if (list_empty(head))
head 918 drivers/scsi/ufs/ufshcd.c list_for_each_entry(clki, head, list) {
head 983 drivers/scsi/ufs/ufshcd.c struct list_head *head = &hba->clk_list_head;
head 985 drivers/scsi/ufs/ufshcd.c if (list_empty(head))
head 988 drivers/scsi/ufs/ufshcd.c list_for_each_entry(clki, head, list) {
head 7281 drivers/scsi/ufs/ufshcd.c struct list_head *head = &hba->clk_list_head;
head 7286 drivers/scsi/ufs/ufshcd.c if (list_empty(head))
head 7300 drivers/scsi/ufs/ufshcd.c list_for_each_entry(clki, head, list) {
head 7335 drivers/scsi/ufs/ufshcd.c list_for_each_entry(clki, head, list) {
head 7364 drivers/scsi/ufs/ufshcd.c struct list_head *head = &hba->clk_list_head;
head 7366 drivers/scsi/ufs/ufshcd.c if (list_empty(head))
head 7369 drivers/scsi/ufs/ufshcd.c list_for_each_entry(clki, head, list) {
head 27 drivers/sh/intc/virq.c #define for_each_virq(entry, head) \
head 28 drivers/sh/intc/virq.c for (entry = head; entry; entry = entry->next)
head 96 drivers/slimbus/qcom-ctrl.c int head;
head 137 drivers/slimbus/qcom-ctrl.c if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
head 156 drivers/slimbus/qcom-ctrl.c idx = ctrl->tx.head;
head 157 drivers/slimbus/qcom-ctrl.c ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
head 310 drivers/slimbus/qcom-ctrl.c if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
head 335 drivers/slimbus/qcom-ctrl.c u32 *head;
head 353 drivers/slimbus/qcom-ctrl.c head = (u32 *)pbuf;
head 356 drivers/slimbus/qcom-ctrl.c *head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
head 360 drivers/slimbus/qcom-ctrl.c *head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
head 376 drivers/slimbus/qcom-ctrl.c qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
head 426 drivers/slimbus/qcom-ctrl.c if (ctrl->rx.tail == ctrl->rx.head) {
head 430 drivers/slimbus/qcom-ctrl.c memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
head 433 drivers/slimbus/qcom-ctrl.c ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
head 34 drivers/soc/fsl/qe/qe_common.c struct list_head head;
head 134 drivers/soc/fsl/qe/qe_common.c list_add(&entry->head, &muram_block_list);
head 179 drivers/soc/fsl/qe/qe_common.c list_for_each_entry(tmp, &muram_block_list, head) {
head 182 drivers/soc/fsl/qe/qe_common.c list_del(&tmp->head);
head 924 drivers/spi/spi-topcliff-pch.c int head;
head 1047 drivers/spi/spi-topcliff-pch.c head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
head 1050 drivers/spi/spi-topcliff-pch.c rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
head 1054 drivers/spi/spi-topcliff-pch.c PCH_DMA_TRANS_SIZE - head;
head 1061 drivers/spi/spi-topcliff-pch.c head = 0;
head 1074 drivers/spi/spi-topcliff-pch.c sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
head 1076 drivers/spi/spi-topcliff-pch.c sg_dma_len(sg) = size + head;
head 1078 drivers/spi/spi-topcliff-pch.c sg->offset = head + size * i;
head 1084 drivers/spi/spi-topcliff-pch.c sg->offset = head + size * i;
head 2503 drivers/staging/exfat/exfat_super.c struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
head 2507 drivers/staging/exfat/exfat_super.c hlist_add_head(&EXFAT_I(inode)->i_hash_fat, head);
head 3284 drivers/staging/exfat/exfat_super.c struct hlist_head *head = sbi->inode_hashtable + exfat_hash(i_pos);
head 3288 drivers/staging/exfat/exfat_super.c hlist_for_each_entry(info, head, i_hash_fat) {
head 48 drivers/staging/isdn/gigaset/asyncdata.c unsigned char *src = inbuf->data + inbuf->head;
head 120 drivers/staging/isdn/gigaset/asyncdata.c unsigned char *src = inbuf->data + inbuf->head;
head 142 drivers/staging/isdn/gigaset/asyncdata.c unsigned char *src = inbuf->data + inbuf->head;
head 288 drivers/staging/isdn/gigaset/asyncdata.c unsigned char *src = inbuf->data + inbuf->head;
head 344 drivers/staging/isdn/gigaset/asyncdata.c if (inbuf->data[inbuf->head] == DLE_FLAG &&
head 347 drivers/staging/isdn/gigaset/asyncdata.c inbuf->head++;
head 348 drivers/staging/isdn/gigaset/asyncdata.c if (inbuf->head == inbuf->tail ||
head 349 drivers/staging/isdn/gigaset/asyncdata.c inbuf->head == RBUFSIZE) {
head 363 drivers/staging/isdn/gigaset/asyncdata.c switch (inbuf->data[inbuf->head]) {
head 369 drivers/staging/isdn/gigaset/asyncdata.c inbuf->head++; /* byte consumed */
head 379 drivers/staging/isdn/gigaset/asyncdata.c inbuf->head++; /* byte consumed */
head 390 drivers/staging/isdn/gigaset/asyncdata.c inbuf->data[inbuf->head]);
head 409 drivers/staging/isdn/gigaset/asyncdata.c gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail);
head 411 drivers/staging/isdn/gigaset/asyncdata.c while (inbuf->head != inbuf->tail) {
head 416 drivers/staging/isdn/gigaset/asyncdata.c numbytes = (inbuf->head > inbuf->tail ?
head 417 drivers/staging/isdn/gigaset/asyncdata.c RBUFSIZE : inbuf->tail) - inbuf->head;
head 432 drivers/staging/isdn/gigaset/asyncdata.c inbuf->head += procbytes;
head 435 drivers/staging/isdn/gigaset/asyncdata.c if (inbuf->head >= RBUFSIZE)
head 436 drivers/staging/isdn/gigaset/asyncdata.c inbuf->head = 0;
head 438 drivers/staging/isdn/gigaset/asyncdata.c gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head);
head 296 drivers/staging/isdn/gigaset/common.c unsigned head, tail;
head 301 drivers/staging/isdn/gigaset/common.c head = cs->ev_head;
head 304 drivers/staging/isdn/gigaset/common.c while (tail != head) {
head 305 drivers/staging/isdn/gigaset/common.c ev = cs->events + head;
head 307 drivers/staging/isdn/gigaset/common.c head = (head + 1) % MAX_EVENTS;
head 549 drivers/staging/isdn/gigaset/common.c inbuf->head = 0;
head 566 drivers/staging/isdn/gigaset/common.c unsigned n, head, tail, bytesleft;
head 575 drivers/staging/isdn/gigaset/common.c head = inbuf->head;
head 576 drivers/staging/isdn/gigaset/common.c gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
head 579 drivers/staging/isdn/gigaset/common.c if (head > tail)
head 580 drivers/staging/isdn/gigaset/common.c n = head - 1 - tail;
head 581 drivers/staging/isdn/gigaset/common.c else if (head == 0)
head 831 drivers/staging/isdn/gigaset/common.c cs->inbuf->head = 0;
head 1847 drivers/staging/isdn/gigaset/ev-layer.c unsigned head, tail;
head 1854 drivers/staging/isdn/gigaset/ev-layer.c head = cs->ev_head;
head 1858 drivers/staging/isdn/gigaset/ev-layer.c if (tail == head) {
head 1866 drivers/staging/isdn/gigaset/ev-layer.c if (tail == head) {
head 1873 drivers/staging/isdn/gigaset/ev-layer.c ev = cs->events + head;
head 1883 drivers/staging/isdn/gigaset/ev-layer.c head = (head + 1) % MAX_EVENTS;
head 1884 drivers/staging/isdn/gigaset/ev-layer.c cs->ev_head = head;
head 1904 drivers/staging/isdn/gigaset/ev-layer.c if (cs->inbuf->head != cs->inbuf->tail) {
head 280 drivers/staging/isdn/gigaset/gigaset.h int head, tail;
head 941 drivers/staging/isdn/gigaset/isocdata.c unsigned tail, head, numbytes;
head 944 drivers/staging/isdn/gigaset/isocdata.c head = inbuf->head;
head 945 drivers/staging/isdn/gigaset/isocdata.c while (head != (tail = inbuf->tail)) {
head 946 drivers/staging/isdn/gigaset/isocdata.c gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
head 947 drivers/staging/isdn/gigaset/isocdata.c if (head > tail)
head 949 drivers/staging/isdn/gigaset/isocdata.c src = inbuf->data + head;
head 950 drivers/staging/isdn/gigaset/isocdata.c numbytes = tail - head;
head 961 drivers/staging/isdn/gigaset/isocdata.c head += numbytes;
head 962 drivers/staging/isdn/gigaset/isocdata.c if (head == RBUFSIZE)
head 963 drivers/staging/isdn/gigaset/isocdata.c head = 0;
head 964 drivers/staging/isdn/gigaset/isocdata.c gig_dbg(DEBUG_INTR, "setting head to %u", head);
head 965 drivers/staging/isdn/gigaset/isocdata.c inbuf->head = head;
head 661 drivers/staging/isdn/gigaset/ser-gigaset.c unsigned tail, head, n;
head 674 drivers/staging/isdn/gigaset/ser-gigaset.c head = inbuf->head;
head 676 drivers/staging/isdn/gigaset/ser-gigaset.c head, tail, count);
head 678 drivers/staging/isdn/gigaset/ser-gigaset.c if (head <= tail) {
head 689 drivers/staging/isdn/gigaset/ser-gigaset.c n = head - tail - 1;
head 2040 drivers/staging/ks7010/ks_hostif.c list_for_each_entry(pmk, &priv->pmklist.head, list) {
head 2292 drivers/staging/ks7010/ks_hostif.c INIT_LIST_HEAD(&priv->pmklist.head);
head 356 drivers/staging/ks7010/ks_wlan.h struct list_head head;
head 1667 drivers/staging/ks7010/ks_wlan_net.c if (list_empty(&priv->pmklist.head)) {
head 1675 drivers/staging/ks7010/ks_wlan_net.c list_add(&pmk->list, &priv->pmklist.head);
head 1680 drivers/staging/ks7010/ks_wlan_net.c list_for_each(ptr, &priv->pmklist.head) {
head 1684 drivers/staging/ks7010/ks_wlan_net.c list_move(&pmk->list, &priv->pmklist.head);
head 1689 drivers/staging/ks7010/ks_wlan_net.c if (ptr != &priv->pmklist.head)
head 1700 drivers/staging/ks7010/ks_wlan_net.c list_add(&pmk->list, &priv->pmklist.head);
head 1703 drivers/staging/ks7010/ks_wlan_net.c pmk = list_entry(priv->pmklist.head.prev, struct pmk,
head 1707 drivers/staging/ks7010/ks_wlan_net.c list_move(&pmk->list, &priv->pmklist.head);
head 1711 drivers/staging/ks7010/ks_wlan_net.c if (list_empty(&priv->pmklist.head))
head 1714 drivers/staging/ks7010/ks_wlan_net.c list_for_each(ptr, &priv->pmklist.head) {
head 1724 drivers/staging/ks7010/ks_wlan_net.c if (ptr == &priv->pmklist.head)
head 1729 drivers/staging/ks7010/ks_wlan_net.c INIT_LIST_HEAD(&priv->pmklist.head);
head 100 drivers/staging/media/allegro-dvt/allegro-core.c struct list_head head;
head 106 drivers/staging/media/allegro-dvt/allegro-core.c unsigned int head;
head 762 drivers/staging/media/allegro-dvt/allegro-core.c mbox->head = base;
head 768 drivers/staging/media/allegro-dvt/allegro-core.c regmap_write(dev->sram, mbox->head, 0);
head 829 drivers/staging/media/allegro-dvt/allegro-core.c unsigned int head;
head 833 drivers/staging/media/allegro-dvt/allegro-core.c regmap_read(dev->sram, mbox->head, &head);
head 834 drivers/staging/media/allegro-dvt/allegro-core.c if (head > mbox->size) {
head 837 drivers/staging/media/allegro-dvt/allegro-core.c head, mbox->size);
head 842 drivers/staging/media/allegro-dvt/allegro-core.c regmap_bulk_read(dev->sram, mbox->data + head,
head 869 drivers/staging/media/allegro-dvt/allegro-core.c (size_t)(mbox->size - (head + sizeof(*header))));
head 870 drivers/staging/media/allegro-dvt/allegro-core.c regmap_bulk_read(dev->sram, mbox->data + head + sizeof(*header),
head 876 drivers/staging/media/allegro-dvt/allegro-core.c regmap_write(dev->sram, mbox->head, (head + size) % mbox->size);
head 1190 drivers/staging/media/allegro-dvt/allegro-core.c list_for_each_entry(al_buffer, list, head)
head 1203 drivers/staging/media/allegro-dvt/allegro-core.c list_for_each_entry(al_buffer, list, head) {
head 1250 drivers/staging/media/allegro-dvt/allegro-core.c INIT_LIST_HEAD(&buffer->head);
head 1255 drivers/staging/media/allegro-dvt/allegro-core.c list_add(&buffer->head, list);
head 1261 drivers/staging/media/allegro-dvt/allegro-core.c list_for_each_entry_safe(buffer, tmp, list, head) {
head 1262 drivers/staging/media/allegro-dvt/allegro-core.c list_del(&buffer->head);
head 1275 drivers/staging/media/allegro-dvt/allegro-core.c list_for_each_entry_safe(buffer, tmp, list, head) {
head 1276 drivers/staging/media/allegro-dvt/allegro-core.c list_del(&buffer->head);
head 152 drivers/staging/most/dim2/dim2.c struct list_head *head = &hdm_ch->pending_list;
head 161 drivers/staging/most/dim2/dim2.c if (list_empty(head)) {
head 171 drivers/staging/most/dim2/dim2.c mbo = list_first_entry(head, struct mbo, list);
head 181 drivers/staging/most/dim2/dim2.c list_del(head->next);
head 189 drivers/staging/most/dim2/dim2.c list_move_tail(head->next, &hdm_ch->started_list);
head 254 drivers/staging/most/dim2/dim2.c struct list_head *head;
head 277 drivers/staging/most/dim2/dim2.c head = &hdm_ch->started_list;
head 281 drivers/staging/most/dim2/dim2.c if (list_empty(head)) {
head 287 drivers/staging/most/dim2/dim2.c mbo = list_first_entry(head, struct mbo, list);
head 288 drivers/staging/most/dim2/dim2.c list_del(head->next);
head 408 drivers/staging/most/dim2/dim2.c static void complete_all_mbos(struct list_head *head)
head 415 drivers/staging/most/dim2/dim2.c if (list_empty(head)) {
head 420 drivers/staging/most/dim2/dim2.c mbo = list_first_entry(head, struct mbo, list);
head 421 drivers/staging/most/dim2/dim2.c list_del(head->next);
head 289 drivers/staging/mt7621-dma/mtk-hsdma.c LIST_HEAD(head);
head 294 drivers/staging/mt7621-dma/mtk-hsdma.c vchan_get_all_descriptors(&chan->vchan, &head);
head 297 drivers/staging/mt7621-dma/mtk-hsdma.c vchan_dma_desc_free_list(&chan->vchan, &head);
head 342 drivers/staging/octeon-usb/octeon-hcd.c int head;
head 1208 drivers/staging/octeon-usb/octeon-hcd.c while (available && (fifo->head != fifo->tail)) {
head 1248 drivers/staging/octeon-usb/octeon-hcd.c return fifo->head != fifo->tail;
head 1258 drivers/staging/octeon-usb/octeon-hcd.c if (usb->periodic.head != usb->periodic.tail) {
head 1272 drivers/staging/octeon-usb/octeon-hcd.c if (usb->nonperiodic.head != usb->nonperiodic.tail) {
head 1327 drivers/staging/octeon-usb/octeon-hcd.c fifo->entry[fifo->head].channel = channel;
head 1328 drivers/staging/octeon-usb/octeon-hcd.c fifo->entry[fifo->head].address =
head 1331 drivers/staging/octeon-usb/octeon-hcd.c fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2;
head 1332 drivers/staging/octeon-usb/octeon-hcd.c fifo->head++;
head 1333 drivers/staging/octeon-usb/octeon-hcd.c if (fifo->head > MAX_CHANNELS)
head 1334 drivers/staging/octeon-usb/octeon-hcd.c fifo->head = 0;
head 259 drivers/staging/octeon/ethernet-rx.c prefetch(&skb->head);
head 282 drivers/staging/octeon/ethernet-rx.c skb->data = skb->head + work->packet_ptr.s.addr -
head 283 drivers/staging/octeon/ethernet-rx.c cvmx_ptr_to_phys(skb->head);
head 299 drivers/staging/octeon/ethernet-tx.c fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
head 225 drivers/staging/ralink-gdma/ralink-gdma.c LIST_HEAD(head);
head 231 drivers/staging/ralink-gdma/ralink-gdma.c vchan_get_all_descriptors(&chan->vchan, &head);
head 234 drivers/staging/ralink-gdma/ralink-gdma.c vchan_dma_desc_free_list(&chan->vchan, &head);
head 76 drivers/staging/rtl8188eu/include/rtw_event.h int head;
head 243 drivers/staging/rtl8188eu/include/rtw_xmit.h volatile int head;
head 471 drivers/staging/rtl8192u/r8192U.h } head;
head 48 drivers/staging/rtl8712/osdep_service.h static inline u32 end_of_queue_search(struct list_head *head,
head 51 drivers/staging/rtl8712/osdep_service.h return (head == plist);
head 86 drivers/staging/rtl8712/rtl871x_event.h /*volatile*/ int head;
head 95 drivers/staging/rtl8712/rtl871x_event.h /*volatile*/ int head;
head 946 drivers/staging/rtl8712/rtl871x_xmit.c pxmitpriv->bmc_txqueue.head = 0;
head 949 drivers/staging/rtl8712/rtl871x_xmit.c pxmitpriv->vo_txqueue.head = 0;
head 952 drivers/staging/rtl8712/rtl871x_xmit.c pxmitpriv->vi_txqueue.head = 0;
head 955 drivers/staging/rtl8712/rtl871x_xmit.c pxmitpriv->bk_txqueue.head = 0;
head 958 drivers/staging/rtl8712/rtl871x_xmit.c pxmitpriv->be_txqueue.head = 0;
head 962 drivers/staging/rtl8712/rtl871x_xmit.c pxmitpriv->vo_txqueue.head = 0;
head 965 drivers/staging/rtl8712/rtl871x_xmit.c pxmitpriv->vi_txqueue.head = 0;
head 968 drivers/staging/rtl8712/rtl871x_xmit.c pxmitpriv->be_txqueue.head = 0;
head 971 drivers/staging/rtl8712/rtl871x_xmit.c pxmitpriv->bk_txqueue.head = 0;
head 185 drivers/staging/rtl8712/rtl871x_xmit.h /*volatile*/ sint head;
head 273 drivers/staging/rtl8712/usb_ops_linux.c precvbuf->phead = precvbuf->pskb->head;
head 279 drivers/staging/rtl8712/usb_ops_linux.c precvbuf->phead = precvbuf->pskb->head;
head 323 drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c precvframe->u.hdr.rx_head = pkt_copy->head;
head 944 drivers/staging/rtl8723bs/hal/sdio_ops.c recvbuf->phead = recvbuf->pskb->head;
head 94 drivers/staging/rtl8723bs/include/rtw_event.h volatile int head;
head 103 drivers/staging/rtl8723bs/include/rtw_event.h volatile int head;
head 343 drivers/staging/rtl8723bs/include/rtw_xmit.h volatile sint head;
head 2734 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c static int rtw_add_beacon(struct adapter *adapter, const u8 *head, size_t head_len, const u8 *tail, size_t tail_len)
head 2753 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c memcpy(pbuf, (void *)head+24, head_len-24);/* 24 =beacon header len. */
head 2790 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c ret = rtw_add_beacon(adapter, settings->beacon.head, settings->beacon.head_len,
head 2816 drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c ret = rtw_add_beacon(adapter, info->head, info->head_len, info->tail, info->tail_len);
head 282 drivers/staging/rtl8723bs/os_dep/recv_linux.c RT_TRACE(_module_recv_osdep_c_, _drv_info_, ("\n skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n", skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->len));
head 79 drivers/staging/unisys/visorhba/visorhba_main.c struct visordisk_info head;
head 255 drivers/staging/vt6656/rxtx.c u8 *head = tx_context->data + offsetof(struct vnt_tx_buffer, fifo_head);
head 262 drivers/staging/vt6656/rxtx.c return (u16)(hdr_pos - head);
head 522 drivers/staging/vt6656/rxtx.c union vnt_tx_data_head *head)
head 525 drivers/staging/vt6656/rxtx.c struct vnt_cts_fb *buf = &head->cts_g_fb;
head 558 drivers/staging/vt6656/rxtx.c union vnt_tx_data_head *head)
head 561 drivers/staging/vt6656/rxtx.c struct vnt_cts *buf = &head->cts_g;
head 588 drivers/staging/vt6656/rxtx.c union vnt_tx_data_head *head = &tx_head->tx_rts.tx.head;
head 607 drivers/staging/vt6656/rxtx.c head = &tx_head->tx_rts.tx.mic.head;
head 610 drivers/staging/vt6656/rxtx.c return vnt_rxtx_rts_g_fb_head(tx_context, &head->rts_g_fb);
head 612 drivers/staging/vt6656/rxtx.c return vnt_rxtx_rts_g_head(tx_context, &head->rts_g);
head 620 drivers/staging/vt6656/rxtx.c union vnt_tx_data_head *head = &tx_head->tx_cts.tx.head;
head 634 drivers/staging/vt6656/rxtx.c head = &tx_head->tx_cts.tx.mic.head;
head 638 drivers/staging/vt6656/rxtx.c return vnt_fill_cts_fb_head(tx_context, head);
head 640 drivers/staging/vt6656/rxtx.c return vnt_fill_cts_head(tx_context, head);
head 648 drivers/staging/vt6656/rxtx.c union vnt_tx_data_head *head = &tx_head->tx_ab.tx.head;
head 657 drivers/staging/vt6656/rxtx.c head = &tx_head->tx_ab.tx.mic.head;
head 670 drivers/staging/vt6656/rxtx.c &head->rts_a_fb);
head 672 drivers/staging/vt6656/rxtx.c return vnt_rxtx_rts_ab_head(tx_context, &head->rts_ab);
head 677 drivers/staging/vt6656/rxtx.c &head->data_head_a_fb);
head 679 drivers/staging/vt6656/rxtx.c return vnt_rxtx_datahead_ab(tx_context, &head->data_head_ab);
head 187 drivers/staging/vt6656/rxtx.h union vnt_tx_data_head head;
head 192 drivers/staging/vt6656/rxtx.h union vnt_tx_data_head head;
head 1829 drivers/staging/wilc1000/wilc_hif.c memcpy(cur_byte, params->head, params->head_len);
head 655 drivers/staging/wlan-ng/hfa384x_usb.c struct usbctlx_completor head;
head 661 drivers/staging/wlan-ng/hfa384x_usb.c static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head)
head 665 drivers/staging/wlan-ng/hfa384x_usb.c complete = (struct usbctlx_cmd_completor *)head;
head 674 drivers/staging/wlan-ng/hfa384x_usb.c completor->head.complete = usbctlx_cmd_completor_fn;
head 677 drivers/staging/wlan-ng/hfa384x_usb.c return &completor->head;
head 687 drivers/staging/wlan-ng/hfa384x_usb.c struct usbctlx_completor head;
head 694 drivers/staging/wlan-ng/hfa384x_usb.c static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head)
head 699 drivers/staging/wlan-ng/hfa384x_usb.c complete = (struct usbctlx_rrid_completor *)head;
head 720 drivers/staging/wlan-ng/hfa384x_usb.c completor->head.complete = usbctlx_rrid_completor_fn;
head 724 drivers/staging/wlan-ng/hfa384x_usb.c return &completor->head;
head 747 drivers/staging/wlan-ng/hfa384x_usb.c struct usbctlx_completor head;
head 754 drivers/staging/wlan-ng/hfa384x_usb.c static int usbctlx_rmem_completor_fn(struct usbctlx_completor *head)
head 757 drivers/staging/wlan-ng/hfa384x_usb.c (struct usbctlx_rmem_completor *)head;
head 770 drivers/staging/wlan-ng/hfa384x_usb.c completor->head.complete = usbctlx_rmem_completor_fn;
head 774 drivers/staging/wlan-ng/hfa384x_usb.c return &completor->head;
head 2831 drivers/staging/wlan-ng/hfa384x_usb.c struct hfa384x_usbctlx *head;
head 2835 drivers/staging/wlan-ng/hfa384x_usb.c head = list_entry(hw->ctlxq.pending.next,
head 2839 drivers/staging/wlan-ng/hfa384x_usb.c list_move_tail(&head->list, &hw->ctlxq.active);
head 2844 drivers/staging/wlan-ng/hfa384x_usb.c &head->outbuf, ROUNDUP64(head->outbufsize),
head 2852 drivers/staging/wlan-ng/hfa384x_usb.c head->state = CTLX_REQ_SUBMITTED;
head 2875 drivers/staging/wlan-ng/hfa384x_usb.c list_move(&head->list, &hw->ctlxq.pending);
head 2888 drivers/staging/wlan-ng/hfa384x_usb.c le16_to_cpu(head->outbuf.type), result);
head 2889 drivers/staging/wlan-ng/hfa384x_usb.c unlocked_usbctlx_complete(hw, head);
head 1394 drivers/target/iscsi/cxgbit/cxgbit_target.c skb, skb->head, skb->data, skb->len, skb->data_len,
head 201 drivers/target/target_core_ua.c int head = 1;
head 242 drivers/target/target_core_ua.c if (head) {
head 245 drivers/target/target_core_ua.c head = 0;
head 261 drivers/target/target_core_ua.c return head == 0;
head 273 drivers/target/target_core_ua.c int head = 1;
head 304 drivers/target/target_core_ua.c if (head) {
head 307 drivers/target/target_core_ua.c head = 0;
head 320 drivers/target/target_core_ua.c return (head) ? -EPERM : 0;
head 614 drivers/target/target_core_user.c static inline size_t spc_used(size_t head, size_t tail, size_t size)
head 616 drivers/target/target_core_user.c int diff = head - tail;
head 624 drivers/target/target_core_user.c static inline size_t spc_free(size_t head, size_t tail, size_t size)
head 627 drivers/target/target_core_user.c return (size - spc_used(head, tail, size) - 1);
head 630 drivers/target/target_core_user.c static inline size_t head_to_end(size_t head, size_t size)
head 632 drivers/target/target_core_user.c return size - head;
head 647 drivers/target/target_core_user.c #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
head 161 drivers/target/tcm_fc/tfc_sess.c struct hlist_head *head;
head 172 drivers/target/tcm_fc/tfc_sess.c head = &tport->hash[ft_sess_hash(port_id)];
head 173 drivers/target/tcm_fc/tfc_sess.c hlist_for_each_entry_rcu(sess, head, hash) {
head 194 drivers/target/tcm_fc/tfc_sess.c struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
head 197 drivers/target/tcm_fc/tfc_sess.c hlist_add_head_rcu(&sess->hash, head);
head 212 drivers/target/tcm_fc/tfc_sess.c struct hlist_head *head;
head 217 drivers/target/tcm_fc/tfc_sess.c head = &tport->hash[ft_sess_hash(port_id)];
head 218 drivers/target/tcm_fc/tfc_sess.c hlist_for_each_entry_rcu(sess, head, hash)
head 263 drivers/target/tcm_fc/tfc_sess.c struct hlist_head *head;
head 266 drivers/target/tcm_fc/tfc_sess.c head = &tport->hash[ft_sess_hash(port_id)];
head 267 drivers/target/tcm_fc/tfc_sess.c hlist_for_each_entry_rcu(sess, head, hash) {
head 289 drivers/target/tcm_fc/tfc_sess.c struct hlist_head *head;
head 292 drivers/target/tcm_fc/tfc_sess.c for (head = tport->hash;
head 293 drivers/target/tcm_fc/tfc_sess.c head < &tport->hash[FT_SESS_HASH_SIZE]; head++) {
head 294 drivers/target/tcm_fc/tfc_sess.c hlist_for_each_entry_rcu(sess, head, hash) {
head 45 drivers/thermal/rcar_thermal.c struct list_head head;
head 102 drivers/thermal/rcar_thermal.c list_for_each_entry(pos, &common->head, list)
head 509 drivers/thermal/rcar_thermal.c INIT_LIST_HEAD(&common->head);
head 604 drivers/thermal/rcar_thermal.c list_move_tail(&priv->list, &common->head);
head 628 drivers/thermal/rcar_thermal.c struct rcar_thermal_priv *priv = list_first_entry(&common->head,
head 643 drivers/thermal/rcar_thermal.c struct rcar_thermal_priv *priv = list_first_entry(&common->head,
head 181 drivers/thunderbolt/nhi.c return ((ring->head + 1) % ring->size) == ring->tail;
head 186 drivers/thunderbolt/nhi.c return ring->head == ring->tail;
head 202 drivers/thunderbolt/nhi.c descriptor = &ring->descriptors[ring->head];
head 211 drivers/thunderbolt/nhi.c ring->head = (ring->head + 1) % ring->size;
head 213 drivers/thunderbolt/nhi.c ring_iowrite_prod(ring, ring->head);
head 215 drivers/thunderbolt/nhi.c ring_iowrite_cons(ring, ring->head);
head 519 drivers/thunderbolt/nhi.c ring->head = 0;
head 682 drivers/thunderbolt/nhi.c ring->head = 0;
head 216 drivers/tty/amiserial.c if (info->xmit.head != info->xmit.tail
head 349 drivers/tty/amiserial.c if (info->xmit.head == info->xmit.tail
head 363 drivers/tty/amiserial.c if (CIRC_CNT(info->xmit.head,
head 371 drivers/tty/amiserial.c if (info->xmit.head == info->xmit.tail) {
head 571 drivers/tty/amiserial.c info->xmit.head = info->xmit.tail = 0;
head 793 drivers/tty/amiserial.c if (CIRC_SPACE(info->xmit.head,
head 800 drivers/tty/amiserial.c info->xmit.buf[info->xmit.head++] = ch;
head 801 drivers/tty/amiserial.c info->xmit.head &= SERIAL_XMIT_SIZE-1;
head 814 drivers/tty/amiserial.c if (info->xmit.head == info->xmit.tail
head 844 drivers/tty/amiserial.c c = CIRC_SPACE_TO_END(info->xmit.head,
head 852 drivers/tty/amiserial.c memcpy(info->xmit.buf + info->xmit.head, buf, c);
head 853 drivers/tty/amiserial.c info->xmit.head = ((info->xmit.head + c) &
head 861 drivers/tty/amiserial.c if (info->xmit.head != info->xmit.tail
head 883 drivers/tty/amiserial.c return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
head 892 drivers/tty/amiserial.c return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
head 903 drivers/tty/amiserial.c info->xmit.head = info->xmit.tail = 0;
head 53 drivers/tty/ehv_bytechan.c unsigned int head; /* circular buffer head */
head 416 drivers/tty/ehv_bytechan.c CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE),
head 425 drivers/tty/ehv_bytechan.c count = CIRC_CNT(bc->head, bc->tail, BUF_SIZE);
head 430 drivers/tty/ehv_bytechan.c if (CIRC_CNT(bc->head, bc->tail, BUF_SIZE))
head 479 drivers/tty/ehv_bytechan.c len = CIRC_SPACE_TO_END(bc->head, bc->tail, BUF_SIZE);
head 483 drivers/tty/ehv_bytechan.c memcpy(bc->buf + bc->head, s, len);
head 484 drivers/tty/ehv_bytechan.c bc->head = (bc->head + len) & (BUF_SIZE - 1);
head 546 drivers/tty/ehv_bytechan.c count = CIRC_SPACE(bc->head, bc->tail, BUF_SIZE);
head 693 drivers/tty/ehv_bytechan.c bc->head = 0;
head 895 drivers/tty/hvc/hvcs.c struct list_head head;
head 903 drivers/tty/hvc/hvcs.c retval = hvcs_get_partner_info(unit_address, &head, hvcs_pi_buff);
head 915 drivers/tty/hvc/hvcs.c list_for_each_entry(pi, &head, node)
head 918 drivers/tty/hvc/hvcs.c hvcs_free_partner_info(&head);
head 1872 drivers/tty/moxa.c u16 head, tail, tx_mask, spage, epage;
head 1881 drivers/tty/moxa.c head = readw(ofsAddr + TXrptr);
head 1882 drivers/tty/moxa.c c = (head > tail) ? (head - tail - 1) : (head - tail + tx_mask);
head 1891 drivers/tty/moxa.c if (head > tail)
head 1892 drivers/tty/moxa.c len = head - tail - 1;
head 1932 drivers/tty/moxa.c u16 pageno, pageofs, bufhead, head;
head 1936 drivers/tty/moxa.c head = readw(ofsAddr + RXrptr);
head 1941 drivers/tty/moxa.c count = (tail >= head) ? (tail - head) : (tail - head + rx_mask + 1);
head 1951 drivers/tty/moxa.c ofs = baseAddr + DynPage_addr + bufhead + head;
head 1952 drivers/tty/moxa.c len = (tail >= head) ? (tail - head) :
head 1953 drivers/tty/moxa.c (rx_mask + 1 - head);
head 1957 drivers/tty/moxa.c head = (head + len) & rx_mask;
head 1961 drivers/tty/moxa.c pageno = spage + (head >> 13);
head 1962 drivers/tty/moxa.c pageofs = head & Page_mask;
head 1975 drivers/tty/moxa.c head = (head + total) & rx_mask;
head 1977 drivers/tty/moxa.c writew(head, ofsAddr + RXrptr);
head 776 drivers/tty/n_tty.c size_t head;
head 779 drivers/tty/n_tty.c head = ldata->echo_head;
head 780 drivers/tty/n_tty.c ldata->echo_mark = head;
head 786 drivers/tty/n_tty.c nr = head - ldata->echo_tail;
head 793 drivers/tty/n_tty.c ldata->echo_commit = head;
head 983 drivers/tty/n_tty.c size_t head;
head 1014 drivers/tty/n_tty.c head = ldata->read_head;
head 1018 drivers/tty/n_tty.c head--;
head 1019 drivers/tty/n_tty.c c = read_buf(ldata, head);
head 1021 drivers/tty/n_tty.c MASK(head) != MASK(ldata->canon_head));
head 1034 drivers/tty/n_tty.c cnt = ldata->read_head - head;
head 1035 drivers/tty/n_tty.c ldata->read_head = head;
head 1045 drivers/tty/n_tty.c head++;
head 1046 drivers/tty/n_tty.c echo_char_raw(read_buf(ldata, head), ldata);
head 1515 drivers/tty/n_tty.c size_t n, head;
head 1517 drivers/tty/n_tty.c head = ldata->read_head & (N_TTY_BUF_SIZE - 1);
head 1518 drivers/tty/n_tty.c n = min_t(size_t, count, N_TTY_BUF_SIZE - head);
head 1519 drivers/tty/n_tty.c memcpy(read_buf_addr(ldata, head), cp, n);
head 1524 drivers/tty/n_tty.c head = ldata->read_head & (N_TTY_BUF_SIZE - 1);
head 1525 drivers/tty/n_tty.c n = min_t(size_t, count, N_TTY_BUF_SIZE - head);
head 1526 drivers/tty/n_tty.c memcpy(read_buf_addr(ldata, head), cp, n);
head 1971 drivers/tty/n_tty.c size_t head = smp_load_acquire(&ldata->commit_head);
head 1975 drivers/tty/n_tty.c n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail);
head 1987 drivers/tty/n_tty.c (head == ldata->read_tail))
head 2431 drivers/tty/n_tty.c size_t nr, head, tail;
head 2435 drivers/tty/n_tty.c head = ldata->canon_head;
head 2437 drivers/tty/n_tty.c nr = head - tail;
head 2439 drivers/tty/n_tty.c while (MASK(head) != MASK(tail)) {
head 87 drivers/tty/serial/8250/8250_core.c struct list_head *head;
head 118 drivers/tty/serial/8250/8250_core.c l = i->head;
head 134 drivers/tty/serial/8250/8250_core.c if (l == i->head && pass_counter++ > PASS_LIMIT)
head 156 drivers/tty/serial/8250/8250_core.c if (!list_empty(i->head)) {
head 157 drivers/tty/serial/8250/8250_core.c if (i->head == &up->list)
head 158 drivers/tty/serial/8250/8250_core.c i->head = i->head->next;
head 161 drivers/tty/serial/8250/8250_core.c BUG_ON(i->head != &up->list);
head 162 drivers/tty/serial/8250/8250_core.c i->head = NULL;
head 166 drivers/tty/serial/8250/8250_core.c if (i->head == NULL) {
head 203 drivers/tty/serial/8250/8250_core.c if (i->head) {
head 204 drivers/tty/serial/8250/8250_core.c list_add(&up->list, i->head);
head 210 drivers/tty/serial/8250/8250_core.c i->head = &up->list;
head 242 drivers/tty/serial/8250/8250_core.c BUG_ON(i->head == NULL);
head 244 drivers/tty/serial/8250/8250_core.c if (list_empty(i->head))
head 78 drivers/tty/serial/8250/8250_dma.c dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 968 drivers/tty/serial/8250/8250_omap.c dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 266 drivers/tty/serial/altera_uart.c if (xmit->head == xmit->tail)
head 277 drivers/tty/serial/altera_uart.c if (xmit->head == xmit->tail) {
head 629 drivers/tty/serial/amba-pl011.c if (xmit->tail < xmit->head)
head 740 drivers/tty/serial/atmel_serial.c if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
head 744 drivers/tty/serial/atmel_serial.c c = &((struct atmel_uart_char *)ring->buf)[ring->head];
head 751 drivers/tty/serial/atmel_serial.c ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
head 951 drivers/tty/serial/atmel_serial.c tx_len = CIRC_CNT_TO_END(xmit->head,
head 1139 drivers/tty/serial/atmel_serial.c ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
head 1140 drivers/tty/serial/atmel_serial.c BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
head 1153 drivers/tty/serial/atmel_serial.c if (ring->head < ring->tail) {
head 1162 drivers/tty/serial/atmel_serial.c if (ring->tail < ring->head) {
head 1163 drivers/tty/serial/atmel_serial.c count = ring->head - ring->tail;
head 1167 drivers/tty/serial/atmel_serial.c if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
head 1168 drivers/tty/serial/atmel_serial.c ring->head = 0;
head 1169 drivers/tty/serial/atmel_serial.c ring->tail = ring->head;
head 1479 drivers/tty/serial/atmel_serial.c count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 1525 drivers/tty/serial/atmel_serial.c while (ring->head != ring->tail) {
head 1608 drivers/tty/serial/atmel_serial.c unsigned int head;
head 1617 drivers/tty/serial/atmel_serial.c head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
head 1630 drivers/tty/serial/atmel_serial.c head = min(head, pdc->dma_size);
head 1632 drivers/tty/serial/atmel_serial.c if (likely(head != tail)) {
head 1642 drivers/tty/serial/atmel_serial.c count = head - tail;
head 1651 drivers/tty/serial/atmel_serial.c pdc->ofs = head;
head 1658 drivers/tty/serial/atmel_serial.c if (head >= pdc->dma_size) {
head 1666 drivers/tty/serial/atmel_serial.c } while (head >= pdc->dma_size);
head 2083 drivers/tty/serial/atmel_serial.c atmel_port->rx_ring.head = 0;
head 718 drivers/tty/serial/cpm_uart/cpm_uart_core.c xmit->tail != xmit->head) {
head 726 drivers/tty/serial/cpm_uart/cpm_uart_core.c if (xmit->head == xmit->tail)
head 423 drivers/tty/serial/fsl_lpuart.c if (xmit->tail < xmit->head || xmit->head == 0) {
head 431 drivers/tty/serial/fsl_lpuart.c sg_set_buf(sgl + 1, xmit->buf, xmit->head);
head 1084 drivers/tty/serial/fsl_lpuart.c ring->head = sport->rx_sgl.length - state.residue;
head 1085 drivers/tty/serial/fsl_lpuart.c BUG_ON(ring->head > sport->rx_sgl.length);
head 1098 drivers/tty/serial/fsl_lpuart.c if (ring->head < ring->tail) {
head 1107 drivers/tty/serial/fsl_lpuart.c if (ring->tail < ring->head) {
head 1108 drivers/tty/serial/fsl_lpuart.c count = ring->head - ring->tail;
head 1111 drivers/tty/serial/fsl_lpuart.c if (ring->head >= sport->rx_sgl.length)
head 1112 drivers/tty/serial/fsl_lpuart.c ring->head = 0;
head 1113 drivers/tty/serial/fsl_lpuart.c ring->tail = ring->head;
head 1228 drivers/tty/serial/fsl_lpuart.c sport->rx_ring.head = 0;
head 630 drivers/tty/serial/icom.c while ((port->state->xmit.head != temp_tail) &&
head 606 drivers/tty/serial/imx.c if (xmit->tail < xmit->head || xmit->head == 0) {
head 614 drivers/tty/serial/imx.c sg_set_buf(sgl + 1, xmit->buf, xmit->head);
head 1106 drivers/tty/serial/imx.c rx_ring->head = sg_dma_len(sgl) - state.residue;
head 1110 drivers/tty/serial/imx.c rx_ring->tail = ((rx_ring->head-1) / bd_size) * bd_size;
head 1112 drivers/tty/serial/imx.c if (rx_ring->head <= sg_dma_len(sgl) &&
head 1113 drivers/tty/serial/imx.c rx_ring->head > rx_ring->tail) {
head 1116 drivers/tty/serial/imx.c r_bytes = rx_ring->head - rx_ring->tail;
head 1134 drivers/tty/serial/imx.c WARN_ON(rx_ring->head > sg_dma_len(sgl));
head 1135 drivers/tty/serial/imx.c WARN_ON(rx_ring->head <= rx_ring->tail);
head 1156 drivers/tty/serial/imx.c sport->rx_ring.head = 0;
head 355 drivers/tty/serial/jsm/jsm_cls.c u16 head;
head 365 drivers/tty/serial/jsm/jsm_cls.c head = ch->ch_r_head & RQUEUEMASK;
head 373 drivers/tty/serial/jsm/jsm_cls.c qleft = tail - head - 1;
head 422 drivers/tty/serial/jsm/jsm_cls.c ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE
head 424 drivers/tty/serial/jsm/jsm_cls.c ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx);
head 428 drivers/tty/serial/jsm/jsm_cls.c if (ch->ch_equeue[head] & UART_LSR_PE)
head 430 drivers/tty/serial/jsm/jsm_cls.c if (ch->ch_equeue[head] & UART_LSR_BI)
head 432 drivers/tty/serial/jsm/jsm_cls.c if (ch->ch_equeue[head] & UART_LSR_FE)
head 436 drivers/tty/serial/jsm/jsm_cls.c head = (head + 1) & RQUEUEMASK;
head 443 drivers/tty/serial/jsm/jsm_cls.c ch->ch_r_head = head & RQUEUEMASK;
head 444 drivers/tty/serial/jsm/jsm_cls.c ch->ch_e_head = head & EQUEUEMASK;
head 282 drivers/tty/serial/jsm/jsm_neo.c u16 head;
head 286 drivers/tty/serial/jsm/jsm_neo.c head = ch->ch_r_head & RQUEUEMASK;
head 294 drivers/tty/serial/jsm/jsm_neo.c if ((qleft = tail - head - 1) < 0)
head 344 drivers/tty/serial/jsm/jsm_neo.c n = min(((u32) total), (RQUEUESIZE - (u32) head));
head 365 drivers/tty/serial/jsm/jsm_neo.c memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);
head 371 drivers/tty/serial/jsm/jsm_neo.c memset(ch->ch_equeue + head, 0, n);
head 374 drivers/tty/serial/jsm/jsm_neo.c head = (head + n) & RQUEUEMASK;
head 450 drivers/tty/serial/jsm/jsm_neo.c memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
head 451 drivers/tty/serial/jsm/jsm_neo.c ch->ch_equeue[head] = (u8) linestatus;
head 454 drivers/tty/serial/jsm/jsm_neo.c ch->ch_rqueue[head], ch->ch_equeue[head]);
head 460 drivers/tty/serial/jsm/jsm_neo.c head = (head + 1) & RQUEUEMASK;
head 469 drivers/tty/serial/jsm/jsm_neo.c ch->ch_r_head = head & RQUEUEMASK;
head 470 drivers/tty/serial/jsm/jsm_neo.c ch->ch_e_head = head & EQUEUEMASK;
head 476 drivers/tty/serial/jsm/jsm_neo.c u16 head;
head 524 drivers/tty/serial/jsm/jsm_neo.c head = circ->head & (UART_XMIT_SIZE - 1);
head 533 drivers/tty/serial/jsm/jsm_neo.c s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail;
head 515 drivers/tty/serial/jsm/jsm_tty.c u16 head;
head 541 drivers/tty/serial/jsm/jsm_tty.c head = ch->ch_r_head & rmask;
head 544 drivers/tty/serial/jsm/jsm_tty.c data_len = (head - tail) & rmask;
head 577 drivers/tty/serial/jsm/jsm_tty.c ch->ch_portnum, head, tail);
head 591 drivers/tty/serial/jsm/jsm_tty.c s = ((head >= tail) ? head : RQUEUESIZE) - tail;
head 765 drivers/tty/serial/max310x.c until_end = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 343 drivers/tty/serial/mcf.c if (xmit->head == xmit->tail)
head 353 drivers/tty/serial/mcf.c if (xmit->head == xmit->tail) {
head 302 drivers/tty/serial/men_z135_uart.c int head;
head 348 drivers/tty/serial/men_z135_uart.c head = xmit->head & (UART_XMIT_SIZE - 1);
head 351 drivers/tty/serial/men_z135_uart.c s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail;
head 893 drivers/tty/serial/msm_serial.c pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 894 drivers/tty/serial/msm_serial.c dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 597 drivers/tty/serial/mxs-auart.c CIRC_CNT_TO_END(xmit->head,
head 810 drivers/tty/serial/pch_uart.c CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 909 drivers/tty/serial/pch_uart.c size = min(xmit->head - xmit->tail, fifo_size);
head 966 drivers/tty/serial/pch_uart.c bytes = min((int)CIRC_CNT(xmit->head, xmit->tail,
head 967 drivers/tty/serial/pch_uart.c UART_XMIT_SIZE), CIRC_CNT_TO_END(xmit->head,
head 323 drivers/tty/serial/samsung.c count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 714 drivers/tty/serial/samsung.c count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 568 drivers/tty/serial/serial-tegra.c count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 561 drivers/tty/serial/serial_core.c circ->buf[circ->head] = c;
head 562 drivers/tty/serial/serial_core.c circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
head 600 drivers/tty/serial/serial_core.c c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
head 605 drivers/tty/serial/serial_core.c memcpy(circ->buf + circ->head, buf, c);
head 606 drivers/tty/serial/serial_core.c circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
head 1406 drivers/tty/serial/sh-sci.c int head, tail;
head 1416 drivers/tty/serial/sh-sci.c head = xmit->head;
head 1420 drivers/tty/serial/sh-sci.c CIRC_CNT(head, tail, UART_XMIT_SIZE),
head 1421 drivers/tty/serial/sh-sci.c CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
head 1451 drivers/tty/serial/sh-sci.c __func__, xmit->buf, tail, head, s->cookie_tx);
head 185 drivers/tty/serial/sirfsoc_uart.c tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 605 drivers/tty/serial/sirfsoc_uart.c sirfport->rx_dma_items.xmit.head = 0;
head 974 drivers/tty/serial/sirfsoc_uart.c sirfport->rx_dma_items.xmit.head = 0;
head 1027 drivers/tty/serial/sirfsoc_uart.c !CIRC_CNT(xmit->head, xmit->tail,
head 1192 drivers/tty/serial/sirfsoc_uart.c xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
head 1193 drivers/tty/serial/sirfsoc_uart.c sirfport->rx_last_pos = xmit->head;
head 1196 drivers/tty/serial/sirfsoc_uart.c count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
head 1206 drivers/tty/serial/sirfsoc_uart.c count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
head 1238 drivers/tty/serial/sirfsoc_uart.c xmit->buf[xmit->head] =
head 1240 drivers/tty/serial/sirfsoc_uart.c xmit->head = (xmit->head + 1) &
head 1403 drivers/tty/serial/sirfsoc_uart.c sirfport->rx_dma_items.xmit.head =
head 239 drivers/tty/serial/sprd_serial.c CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 371 drivers/tty/serial/stm32-usart.c if (xmit->tail < xmit->head) {
head 65 drivers/tty/serial/sunhv.c len = CIRC_CNT_TO_END(xmit->head, xmit->tail,
head 99 drivers/tty/serial/tegra-tcu.c count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
head 368 drivers/tty/serial/ucc_uart.c (xmit->tail != xmit->head)) {
head 376 drivers/tty/serial/ucc_uart.c if (xmit->head == xmit->tail)
head 190 drivers/tty/synclink_gt.c static void add_cond_wait(struct cond_wait **head, struct cond_wait *w);
head 191 drivers/tty/synclink_gt.c static void remove_cond_wait(struct cond_wait **head, struct cond_wait *w);
head 192 drivers/tty/synclink_gt.c static void flush_cond_wait(struct cond_wait **head);
head 2997 drivers/tty/synclink_gt.c static void add_cond_wait(struct cond_wait **head, struct cond_wait *w)
head 3001 drivers/tty/synclink_gt.c w->next = *head;
head 3002 drivers/tty/synclink_gt.c *head = w;
head 3005 drivers/tty/synclink_gt.c static void remove_cond_wait(struct cond_wait **head, struct cond_wait *cw)
head 3010 drivers/tty/synclink_gt.c for (w = *head, prev = NULL ; w != NULL ; prev = w, w = w->next) {
head 3015 drivers/tty/synclink_gt.c *head = w->next;
head 3021 drivers/tty/synclink_gt.c static void flush_cond_wait(struct cond_wait **head)
head 3023 drivers/tty/synclink_gt.c while (*head != NULL) {
head 3024 drivers/tty/synclink_gt.c wake_up_interruptible(&(*head)->q);
head 3025 drivers/tty/synclink_gt.c *head = (*head)->next;
head 70 drivers/tty/tty_buffer.c restart = buf->head->commit != buf->head->read;
head 124 drivers/tty/tty_buffer.c while ((p = buf->head) != NULL) {
head 125 drivers/tty/tty_buffer.c buf->head = p->next;
head 135 drivers/tty/tty_buffer.c buf->head = &buf->sentinel;
head 231 drivers/tty/tty_buffer.c while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
head 232 drivers/tty/tty_buffer.c tty_buffer_free(port, buf->head);
head 233 drivers/tty/tty_buffer.c buf->head = next;
head 235 drivers/tty/tty_buffer.c buf->head->read = buf->head->commit;
head 472 drivers/tty/tty_buffer.c receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
head 474 drivers/tty/tty_buffer.c unsigned char *p = char_buf_ptr(head, head->read);
head 478 drivers/tty/tty_buffer.c if (~head->flags & TTYB_NORMAL)
head 479 drivers/tty/tty_buffer.c f = flag_buf_ptr(head, head->read);
head 508 drivers/tty/tty_buffer.c struct tty_buffer *head = buf->head;
head 520 drivers/tty/tty_buffer.c next = smp_load_acquire(&head->next);
head 524 drivers/tty/tty_buffer.c count = smp_load_acquire(&head->commit) - head->read;
head 528 drivers/tty/tty_buffer.c buf->head = next;
head 529 drivers/tty/tty_buffer.c tty_buffer_free(port, head);
head 533 drivers/tty/tty_buffer.c count = receive_buf(port, head, count);
head 536 drivers/tty/tty_buffer.c head->read += count;
head 574 drivers/tty/tty_buffer.c buf->head = &buf->sentinel;
head 113 drivers/tty/vt/vt_ioctl.c struct list_head *pos, *head;
head 118 drivers/tty/vt/vt_ioctl.c head = &vt_events;
head 120 drivers/tty/vt/vt_ioctl.c list_for_each(pos, head) {
head 1671 drivers/usb/core/hcd.c list_replace_init(&bh->head, &local_list);
head 1686 drivers/usb/core/hcd.c if (!list_empty(&bh->head))
head 1732 drivers/usb/core/hcd.c list_add_tail(&urb->urb_list, &bh->head);
head 2408 drivers/usb/core/hcd.c INIT_LIST_HEAD(&bh->head);
head 81 drivers/usb/core/phy.c struct list_head *head;
head 87 drivers/usb/core/phy.c head = &phy_roothub->list;
head 89 drivers/usb/core/phy.c list_for_each_entry(roothub_entry, head, list) {
head 98 drivers/usb/core/phy.c list_for_each_entry_continue_reverse(roothub_entry, head, list)
head 108 drivers/usb/core/phy.c struct list_head *head;
head 114 drivers/usb/core/phy.c head = &phy_roothub->list;
head 116 drivers/usb/core/phy.c list_for_each_entry(roothub_entry, head, list) {
head 130 drivers/usb/core/phy.c struct list_head *head;
head 136 drivers/usb/core/phy.c head = &phy_roothub->list;
head 138 drivers/usb/core/phy.c list_for_each_entry(roothub_entry, head, list) {
head 147 drivers/usb/core/phy.c list_for_each_entry_continue_reverse(roothub_entry, head, list)
head 157 drivers/usb/core/phy.c struct list_head *head;
head 163 drivers/usb/core/phy.c head = &phy_roothub->list;
head 165 drivers/usb/core/phy.c list_for_each_entry(roothub_entry, head, list) {
head 178 drivers/usb/core/phy.c struct list_head *head;
head 184 drivers/usb/core/phy.c head = &phy_roothub->list;
head 186 drivers/usb/core/phy.c list_for_each_entry(roothub_entry, head, list) {
head 195 drivers/usb/core/phy.c list_for_each_entry_continue_reverse(roothub_entry, head, list)
head 487 drivers/usb/gadget/function/u_serial.c static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
head 492 drivers/usb/gadget/function/u_serial.c while (!list_empty(head)) {
head 493 drivers/usb/gadget/function/u_serial.c req = list_entry(head->next, struct usb_request, list);
head 501 drivers/usb/gadget/function/u_serial.c static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
head 516 drivers/usb/gadget/function/u_serial.c return list_empty(head) ? -ENOMEM : 0;
head 518 drivers/usb/gadget/function/u_serial.c list_add_tail(&req->list, head);
head 536 drivers/usb/gadget/function/u_serial.c struct list_head *head = &port->read_pool;
head 547 drivers/usb/gadget/function/u_serial.c status = gs_alloc_requests(ep, head, gs_read_complete,
head 555 drivers/usb/gadget/function/u_serial.c gs_free_requests(ep, head, &port->read_allocated);
head 569 drivers/usb/gadget/function/u_serial.c gs_free_requests(ep, head, &port->read_allocated);
head 177 drivers/usb/gadget/udc/fsl_udc_core.c next_td = req->head;
head 740 drivers/usb/gadget/udc/fsl_udc_core.c cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
head 765 drivers/usb/gadget/udc/fsl_udc_core.c fsl_prime_ep(ep, req->head);
head 849 drivers/usb/gadget/udc/fsl_udc_core.c req->head = dtd;
head 969 drivers/usb/gadget/udc/fsl_udc_core.c fsl_prime_ep(ep, next_req->head);
head 1604 drivers/usb/gadget/udc/fsl_udc_core.c curr_td = curr_req->head;
head 451 drivers/usb/gadget/udc/fsl_usb2_udc.h struct ep_td_struct *head, *tail; /* For dTD List
head 241 drivers/usb/gadget/udc/mv_udc.h struct mv_dtd *dtd, *head, *tail;
head 136 drivers/usb/gadget/udc/mv_udc_core.c curr_dtd = curr_req->head;
head 227 drivers/usb/gadget/udc/mv_udc_core.c next_td = req->head;
head 271 drivers/usb/gadget/udc/mv_udc_core.c req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
head 319 drivers/usb/gadget/udc/mv_udc_core.c dqh->next_dtd_ptr = req->head->td_dma
head 418 drivers/usb/gadget/udc/mv_udc_core.c req->head = dtd;
head 756 drivers/usb/gadget/udc/mv_udc_core.c dqh->next_dtd_ptr = req->head->td_dma
head 578 drivers/usb/host/ehci-q.c struct list_head *head,
head 595 drivers/usb/host/ehci-q.c list_add_tail (&qtd->qtd_list, head);
head 618 drivers/usb/host/ehci-q.c list_add_tail (&qtd->qtd_list, head);
head 689 drivers/usb/host/ehci-q.c list_add_tail (&qtd->qtd_list, head);
head 725 drivers/usb/host/ehci-q.c list_add_tail (&qtd->qtd_list, head);
head 735 drivers/usb/host/ehci-q.c return head;
head 738 drivers/usb/host/ehci-q.c qtd_list_free (ehci, urb, head);
head 984 drivers/usb/host/ehci-q.c struct ehci_qh *head;
head 996 drivers/usb/host/ehci-q.c head = ehci->async;
head 997 drivers/usb/host/ehci-q.c qh->qh_next = head->qh_next;
head 998 drivers/usb/host/ehci-q.c qh->hw->hw_next = head->hw->hw_next;
head 1001 drivers/usb/host/ehci-q.c head->qh_next.qh = qh;
head 1002 drivers/usb/host/ehci-q.c head->hw->hw_next = dma;
head 1175 drivers/usb/host/ehci-q.c struct list_head *head;
head 1183 drivers/usb/host/ehci-q.c head = &qtd_list;
head 1189 drivers/usb/host/ehci-q.c list_add_tail(&qtd->qtd_list, head);
head 1240 drivers/usb/host/ehci-q.c list_add_tail(&qtd->qtd_list, head);
head 1250 drivers/usb/host/ehci-q.c qtd_list_free(ehci, urb, head);
head 2523 drivers/usb/host/fotg210-hcd.c struct list_head *head)
head 2527 drivers/usb/host/fotg210-hcd.c list_for_each_entry_safe(qtd, temp, head, qtd_list) {
head 2536 drivers/usb/host/fotg210-hcd.c struct urb *urb, struct list_head *head, gfp_t flags)
head 2552 drivers/usb/host/fotg210-hcd.c list_add_tail(&qtd->qtd_list, head);
head 2575 drivers/usb/host/fotg210-hcd.c list_add_tail(&qtd->qtd_list, head);
head 2646 drivers/usb/host/fotg210-hcd.c list_add_tail(&qtd->qtd_list, head);
head 2682 drivers/usb/host/fotg210-hcd.c list_add_tail(&qtd->qtd_list, head);
head 2692 drivers/usb/host/fotg210-hcd.c return head;
head 2695 drivers/usb/host/fotg210-hcd.c qtd_list_free(fotg210, urb, head);
head 2915 drivers/usb/host/fotg210-hcd.c struct fotg210_qh *head;
head 2927 drivers/usb/host/fotg210-hcd.c head = fotg210->async;
head 2928 drivers/usb/host/fotg210-hcd.c qh->qh_next = head->qh_next;
head 2929 drivers/usb/host/fotg210-hcd.c qh->hw->hw_next = head->hw->hw_next;
head 2932 drivers/usb/host/fotg210-hcd.c head->qh_next.qh = qh;
head 2933 drivers/usb/host/fotg210-hcd.c head->hw->hw_next = dma;
head 750 drivers/usb/host/ohci-hcd.c u32 head;
head 808 drivers/usb/host/ohci-hcd.c head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK;
head 812 drivers/usb/host/ohci-hcd.c if (head == (u32) td_next->td_dma)
head 997 drivers/usb/host/ohci-q.c u32 head;
head 1002 drivers/usb/host/ohci-q.c head = hc32_to_cpu(ohci, ed->hwHeadP) & TD_MASK;
head 1003 drivers/usb/host/ohci-q.c if (td->td_dma != head &&
head 1601 drivers/usb/host/oxu210hp-hcd.c struct urb *urb, struct list_head *head)
head 1605 drivers/usb/host/oxu210hp-hcd.c list_for_each_entry_safe(qtd, temp, head, qtd_list) {
head 1615 drivers/usb/host/oxu210hp-hcd.c struct list_head *head,
head 1632 drivers/usb/host/oxu210hp-hcd.c list_add_tail(&qtd->qtd_list, head);
head 1663 drivers/usb/host/oxu210hp-hcd.c list_add_tail(&qtd->qtd_list, head);
head 1724 drivers/usb/host/oxu210hp-hcd.c list_add_tail(&qtd->qtd_list, head);
head 1757 drivers/usb/host/oxu210hp-hcd.c list_add_tail(&qtd->qtd_list, head);
head 1766 drivers/usb/host/oxu210hp-hcd.c return head;
head 1769 drivers/usb/host/oxu210hp-hcd.c qtd_list_free(oxu, urb, head);
head 1919 drivers/usb/host/oxu210hp-hcd.c struct ehci_qh *head;
head 1922 drivers/usb/host/oxu210hp-hcd.c head = oxu->async;
head 1924 drivers/usb/host/oxu210hp-hcd.c if (!head->qh_next.qh) {
head 1943 drivers/usb/host/oxu210hp-hcd.c qh->qh_next = head->qh_next;
head 1944 drivers/usb/host/oxu210hp-hcd.c qh->hw_next = head->hw_next;
head 1947 drivers/usb/host/oxu210hp-hcd.c head->qh_next.qh = qh;
head 1948 drivers/usb/host/oxu210hp-hcd.c head->hw_next = dma;
head 340 drivers/usb/host/u132-hcd.c struct list_head *head = &endp->endp_ring;
head 343 drivers/usb/host/u132-hcd.c if (list_empty(head)) {
head 345 drivers/usb/host/u132-hcd.c list_del(head);
head 347 drivers/usb/host/u132-hcd.c struct u132_endp *next_endp = list_entry(head->next,
head 350 drivers/usb/host/u132-hcd.c list_del(head);
head 353 drivers/usb/host/u132-hcd.c list_del(head);
head 380 drivers/usb/host/uhci-debug.c struct list_head *tmp, *head;
head 431 drivers/usb/host/uhci-debug.c head = &td->fl_list;
head 432 drivers/usb/host/uhci-debug.c tmp = head;
head 452 drivers/usb/host/uhci-debug.c } while (tmp != head);
head 506 drivers/usb/host/uhci-debug.c head = &qh->node;
head 507 drivers/usb/host/uhci-debug.c tmp = head->next;
head 509 drivers/usb/host/uhci-debug.c while (tmp != head) {
head 135 drivers/usb/host/xhci-dbgtty.c xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head,
head 154 drivers/usb/host/xhci-dbgtty.c list_add_tail(&req->list_pool, head);
head 157 drivers/usb/host/xhci-dbgtty.c return list_empty(head) ? -ENOMEM : 0;
head 161 drivers/usb/host/xhci-dbgtty.c xhci_dbc_free_requests(struct dbc_ep *dep, struct list_head *head)
head 165 drivers/usb/host/xhci-dbgtty.c while (!list_empty(head)) {
head 166 drivers/usb/host/xhci-dbgtty.c req = list_entry(head->next, struct dbc_request, list_pool);
head 1393 drivers/usb/isp1760/isp1760-hcd.c struct urb *urb, struct list_head *head, gfp_t flags)
head 1423 drivers/usb/isp1760/isp1760-hcd.c list_add_tail(&qtd->qtd_list, head);
head 1448 drivers/usb/isp1760/isp1760-hcd.c list_add_tail(&qtd->qtd_list, head);
head 1483 drivers/usb/isp1760/isp1760-hcd.c list_add_tail(&qtd->qtd_list, head);
head 1490 drivers/usb/isp1760/isp1760-hcd.c qtd_list_free(head);
head 375 drivers/usb/mtu3/mtu3.h const struct list_head *head)
head 377 drivers/usb/mtu3/mtu3.h return list_is_last(head, list);
head 115 drivers/usb/musb/cppi_dma.c c->head = NULL;
head 445 drivers/usb/musb/cppi_dma.c for (bd = rx->head; bd; bd = bd->next)
head 610 drivers/usb/musb/cppi_dma.c tx->head = bd;
head 832 drivers/usb/musb/cppi_dma.c rx->head = bd;
head 874 drivers/usb/musb/cppi_dma.c bd = rx->head;
head 885 drivers/usb/musb/cppi_dma.c for (d = rx->head; d; d = d->next)
head 1016 drivers/usb/musb/cppi_dma.c bd = last ? last->next : rx->head;
head 1098 drivers/usb/musb/cppi_dma.c WARN_ON(rx->head);
head 1105 drivers/usb/musb/cppi_dma.c rx->head, rx->tail,
head 1118 drivers/usb/musb/cppi_dma.c rx->head = bd;
head 1133 drivers/usb/musb/cppi_dma.c rx->head = NULL;
head 1187 drivers/usb/musb/cppi_dma.c bd = tx_ch->head;
head 1240 drivers/usb/musb/cppi_dma.c tx_ch->head = NULL;
head 1255 drivers/usb/musb/cppi_dma.c tx_ch->head = bd;
head 1401 drivers/usb/musb/cppi_dma.c if (!cppi_ch->transmit && cppi_ch->head)
head 1407 drivers/usb/musb/cppi_dma.c queue = cppi_ch->head;
head 1408 drivers/usb/musb/cppi_dma.c cppi_ch->head = NULL;
head 1452 drivers/usb/musb/cppi_dma.c cppi_ch->head = NULL;
head 98 drivers/usb/musb/cppi_dma.h struct cppi_descriptor *head;
head 346 drivers/usb/musb/musb_host.c struct list_head *head;
head 375 drivers/usb/musb/musb_host.c head = qh->ring.prev;
head 378 drivers/usb/musb/musb_host.c qh = first_qh(head);
head 2022 drivers/usb/musb/musb_host.c struct list_head *head = NULL;
head 2029 drivers/usb/musb/musb_host.c head = &musb->control;
head 2093 drivers/usb/musb/musb_host.c head = &musb->in_bulk;
head 2095 drivers/usb/musb/musb_host.c head = &musb->out_bulk;
head 2121 drivers/usb/musb/musb_host.c if (head) {
head 2122 drivers/usb/musb/musb_host.c idle = list_empty(head);
head 2123 drivers/usb/musb/musb_host.c list_add_tail(&qh->ring, head);
head 42 drivers/usb/phy/phy.c list_for_each_entry(phy, list, head) {
head 59 drivers/usb/phy/phy.c list_for_each_entry(phy, &phy_list, head) {
head 635 drivers/usb/phy/phy.c list_for_each_entry(phy, &phy_list, head) {
head 645 drivers/usb/phy/phy.c list_add_tail(&x->head, &phy_list);
head 679 drivers/usb/phy/phy.c list_add_tail(&x->head, &phy_list);
head 698 drivers/usb/phy/phy.c list_del(&x->head);
head 153 drivers/usb/serial/io_edgeport.c unsigned int head; /* index to head pointer (write) */
head 986 drivers/usb/serial/io_edgeport.c edge_port->txfifo.head = 0;
head 1228 drivers/usb/serial/io_edgeport.c bytesleft = fifo->size - fifo->head;
head 1234 drivers/usb/serial/io_edgeport.c memcpy(&fifo->fifo[fifo->head], data, firsthalf);
head 1235 drivers/usb/serial/io_edgeport.c usb_serial_debug_data(&port->dev, __func__, firsthalf, &fifo->fifo[fifo->head]);
head 1238 drivers/usb/serial/io_edgeport.c fifo->head += firsthalf;
head 1242 drivers/usb/serial/io_edgeport.c if (fifo->head == fifo->size)
head 1243 drivers/usb/serial/io_edgeport.c fifo->head = 0;
head 1249 drivers/usb/serial/io_edgeport.c memcpy(&fifo->fifo[fifo->head], &data[firsthalf], secondhalf);
head 1250 drivers/usb/serial/io_edgeport.c usb_serial_debug_data(&port->dev, __func__, secondhalf, &fifo->fifo[fifo->head]);
head 1253 drivers/usb/serial/io_edgeport.c fifo->head += secondhalf;
head 1225 drivers/usb/storage/isd200.c unsigned char sectnum, head;
head 1316 drivers/usb/storage/isd200.c head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F);
head 1321 drivers/usb/storage/isd200.c head = (u8)((lba / id[ATA_ID_SECTORS]) %
head 1335 drivers/usb/storage/isd200.c ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD);
head 1348 drivers/usb/storage/isd200.c head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F);
head 1353 drivers/usb/storage/isd200.c head = (u8)((lba / id[ATA_ID_SECTORS]) %
head 1367 drivers/usb/storage/isd200.c ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD);
head 407 drivers/vfio/vfio.c WARN_ON(group->notifier.head);
head 1597 drivers/vfio/vfio.c if (WARN_ON(group->notifier.head))
head 303 drivers/vfio/vfio_iommu_type1.c struct page *head = compound_head(tail);
head 304 drivers/vfio/vfio_iommu_type1.c reserved = !!(PageReserved(head));
head 305 drivers/vfio/vfio_iommu_type1.c if (head != tail) {
head 562 drivers/vfio/vfio_iommu_type1.c if (!iommu->notifier.head) {
head 1439 drivers/vfio/vfio_iommu_type1.c static int vfio_iommu_iova_insert(struct list_head *head,
head 1452 drivers/vfio/vfio_iommu_type1.c list_add_tail(&region->list, head);
head 1899 drivers/vfio/vfio_iommu_type1.c WARN_ON(iommu->notifier.head);
head 104 drivers/vhost/net.c int head;
head 153 drivers/vhost/net.c if (rxq->tail != rxq->head)
head 154 drivers/vhost/net.c return rxq->queue[rxq->head];
head 161 drivers/vhost/net.c return rxq->tail - rxq->head;
head 166 drivers/vhost/net.c return rxq->tail == rxq->head;
head 172 drivers/vhost/net.c ++rxq->head;
head 180 drivers/vhost/net.c rxq->head = 0;
head 191 drivers/vhost/net.c ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
head 194 drivers/vhost/net.c rxq->head = rxq->tail = 0;
head 225 drivers/vhost/net.c rxq->head = rxq->tail = 0;
head 762 drivers/vhost/net.c int head;
head 781 drivers/vhost/net.c head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
head 784 drivers/vhost/net.c if (unlikely(head < 0))
head 787 drivers/vhost/net.c if (head == vq->num) {
head 838 drivers/vhost/net.c vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
head 851 drivers/vhost/net.c int head;
head 873 drivers/vhost/net.c head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
head 876 drivers/vhost/net.c if (unlikely(head < 0))
head 879 drivers/vhost/net.c if (head == vq->num) {
head 898 drivers/vhost/net.c vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
head 939 drivers/vhost/net.c vhost_add_used_and_signal(&net->dev, vq, head, 0);
head 976 drivers/vhost/net.c struct sk_buff *head;
head 984 drivers/vhost/net.c head = skb_peek(&sk->sk_receive_queue);
head 985 drivers/vhost/net.c if (likely(head)) {
head 986 drivers/vhost/net.c len = head->len;
head 987 drivers/vhost/net.c if (skb_vlan_tag_present(head))
head 216 drivers/vhost/scsi.c int head;
head 453 drivers/vhost/scsi.c int head, ret;
head 462 drivers/vhost/scsi.c head = vhost_get_vq_desc(vq, vq->iov,
head 465 drivers/vhost/scsi.c if (head < 0) {
head 469 drivers/vhost/scsi.c if (head == vq->num) {
head 491 drivers/vhost/scsi.c vhost_add_used_and_signal(&vs->dev, vq, head, 0);
head 796 drivers/vhost/scsi.c int head, unsigned out)
head 807 drivers/vhost/scsi.c vhost_add_used_and_signal(&vs->dev, vq, head, 0);
head 818 drivers/vhost/scsi.c vc->head = vhost_get_vq_desc(vq, vq->iov,
head 823 drivers/vhost/scsi.c vc->head, vc->out, vc->in);
head 826 drivers/vhost/scsi.c if (unlikely(vc->head < 0))
head 830 drivers/vhost/scsi.c if (vc->head == vq->num) {
head 1100 drivers/vhost/scsi.c cmd->tvc_vq_desc = vc.head;
head 1120 drivers/vhost/scsi.c vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
head 1143 drivers/vhost/scsi.c vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
head 1165 drivers/vhost/scsi.c vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
head 1272 drivers/vhost/scsi.c vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
head 47 drivers/vhost/test.c int head;
head 61 drivers/vhost/test.c head = vhost_get_vq_desc(vq, vq->iov,
head 66 drivers/vhost/test.c if (unlikely(head < 0))
head 69 drivers/vhost/test.c if (head == vq->num) {
head 87 drivers/vhost/test.c vhost_add_used_and_signal(&n->dev, vq, head, 0);
head 921 drivers/vhost/vhost.c struct vring_used_elem *head, int idx,
head 924 drivers/vhost/vhost.c return vhost_copy_to_user(vq, vq->used->ring + idx, head,
head 925 drivers/vhost/vhost.c count * sizeof(*head));
head 987 drivers/vhost/vhost.c __virtio16 *head, int idx)
head 989 drivers/vhost/vhost.c return vhost_get_avail(vq, *head,
head 2214 drivers/vhost/vhost.c unsigned int i, head, found = 0;
head 2258 drivers/vhost/vhost.c head = vhost16_to_cpu(vq, ring_head);
head 2261 drivers/vhost/vhost.c if (unlikely(head >= vq->num)) {
head 2263 drivers/vhost/vhost.c head, vq->num);
head 2272 drivers/vhost/vhost.c i = head;
head 2277 drivers/vhost/vhost.c i, vq->num, head);
head 2283 drivers/vhost/vhost.c i, vq->num, head);
head 2345 drivers/vhost/vhost.c return head;
head 2358 drivers/vhost/vhost.c int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
head 2361 drivers/vhost/vhost.c cpu_to_vhost32(vq, head),
head 2487 drivers/vhost/vhost.c unsigned int head, int len)
head 2489 drivers/vhost/vhost.c vhost_add_used(vq, head, len);
head 2592 drivers/vhost/vhost.c void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
head 2596 drivers/vhost/vhost.c list_add_tail(&node->node, head);
head 2604 drivers/vhost/vhost.c struct list_head *head)
head 2609 drivers/vhost/vhost.c if (!list_empty(head)) {
head 2610 drivers/vhost/vhost.c node = list_first_entry(head, struct vhost_msg_node,
head 200 drivers/vhost/vhost.h int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
head 219 drivers/vhost/vhost.h struct list_head *head,
head 222 drivers/vhost/vhost.h struct list_head *head);
head 38 drivers/vhost/vringh.c u16 avail_idx, i, head;
head 56 drivers/vhost/vringh.c err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
head 63 drivers/vhost/vringh.c if (head >= vrh->vring.num) {
head 65 drivers/vhost/vringh.c head, vrh->vring.num);
head 70 drivers/vhost/vringh.c return head;
head 655 drivers/vhost/vringh.c u16 *head)
head 659 drivers/vhost/vringh.c *head = vrh->vring.num;
head 688 drivers/vhost/vringh.c *head = err;
head 689 drivers/vhost/vringh.c err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
head 755 drivers/vhost/vringh.c int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
head 759 drivers/vhost/vringh.c used.id = cpu_to_vringh32(vrh, head);
head 919 drivers/vhost/vringh.c u16 *head,
head 932 drivers/vhost/vringh.c *head = err;
head 933 drivers/vhost/vringh.c err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
head 996 drivers/vhost/vringh.c int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
head 1000 drivers/vhost/vringh.c used.id = cpu_to_vringh32(vrh, head);
head 106 drivers/vhost/vsock.c int head;
head 120 drivers/vhost/vsock.c head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
head 122 drivers/vhost/vsock.c if (head < 0) {
head 129 drivers/vhost/vsock.c if (head == vq->num) {
head 189 drivers/vhost/vsock.c vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
head 394 drivers/vhost/vsock.c int head, pkts = 0, total_len = 0;
head 415 drivers/vhost/vsock.c head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
head 417 drivers/vhost/vsock.c if (head < 0)
head 420 drivers/vhost/vsock.c if (head == vq->num) {
head 448 drivers/vhost/vsock.c vhost_add_used(vq, head, len);
head 955 drivers/video/fbdev/core/modedb.c struct list_head *head)
head 962 drivers/video/fbdev/core/modedb.c list_for_each(pos, head) {
head 993 drivers/video/fbdev/core/modedb.c struct list_head *head)
head 1000 drivers/video/fbdev/core/modedb.c list_for_each(pos, head) {
head 1033 drivers/video/fbdev/core/modedb.c struct list_head *head)
head 1040 drivers/video/fbdev/core/modedb.c list_for_each(pos, head) {
head 1057 drivers/video/fbdev/core/modedb.c int fb_add_videomode(const struct fb_videomode *mode, struct list_head *head)
head 1064 drivers/video/fbdev/core/modedb.c list_for_each(pos, head) {
head 1079 drivers/video/fbdev/core/modedb.c list_add(&modelist->list, head);
head 1093 drivers/video/fbdev/core/modedb.c struct list_head *head)
head 1099 drivers/video/fbdev/core/modedb.c list_for_each_safe(pos, n, head) {
head 1113 drivers/video/fbdev/core/modedb.c void fb_destroy_modelist(struct list_head *head)
head 1117 drivers/video/fbdev/core/modedb.c list_for_each_safe(pos, n, head) {
head 1131 drivers/video/fbdev/core/modedb.c struct list_head *head)
head 1135 drivers/video/fbdev/core/modedb.c INIT_LIST_HEAD(head);
head 1138 drivers/video/fbdev/core/modedb.c if (fb_add_videomode(&modedb[i], head))
head 1144 drivers/video/fbdev/core/modedb.c struct list_head *head)
head 1151 drivers/video/fbdev/core/modedb.c if (!head->prev || !head->next || list_empty(head))
head 1155 drivers/video/fbdev/core/modedb.c list_for_each(pos, head) {
head 1183 drivers/video/fbdev/core/modedb.c m = fb_find_best_mode(&var, head);
head 62 drivers/video/fbdev/i810/i810_accel.c u32 head, count = WAIT_COUNT, tail;
head 67 drivers/video/fbdev/i810/i810_accel.c head = i810_readl(IRING + 4, mmio) & RBUFFER_HEAD_MASK;
head 68 drivers/video/fbdev/i810/i810_accel.c if ((tail == head) ||
head 69 drivers/video/fbdev/i810/i810_accel.c (tail > head &&
head 70 drivers/video/fbdev/i810/i810_accel.c (par->iring.size - tail + head) >= space) ||
head 71 drivers/video/fbdev/i810/i810_accel.c (tail < head && (head - tail) >= space)) {
head 551 drivers/video/fbdev/intelfb/intelfbhw.h u32 head, tail; \
head 553 drivers/video/fbdev/intelfb/intelfbhw.h head = INREG(PRI_RING_HEAD) & RING_HEAD_MASK; \
head 556 drivers/video/fbdev/intelfb/intelfbhw.h } while (head != tail); \
head 1519 drivers/video/fbdev/nvidia/nv_hw.c NV_WR32(par->PCRTC0, 0x0860, state->head);
head 1661 drivers/video/fbdev/nvidia/nv_hw.c state->head = NV_RD32(par->PCRTC0, 0x0860);
head 185 drivers/video/fbdev/nvidia/nv_setup.c static void NVSelectHeadRegisters(struct nvidia_par *par, int head)
head 187 drivers/video/fbdev/nvidia/nv_setup.c if (head) {
head 78 drivers/video/fbdev/nvidia/nv_type.h u32 head;
head 444 drivers/video/fbdev/nvidia/nvidia.c state->head = NV_RD32(par->PCRTC0, 0x00000860) & ~0x00001000;
head 452 drivers/video/fbdev/nvidia/nvidia.c state->head = NV_RD32(par->PCRTC0, 0x00000860) | 0x00001000;
head 295 drivers/video/fbdev/omap/hwa742.c static void submit_req_list(struct list_head *head)
head 303 drivers/video/fbdev/omap/hwa742.c list_splice_init(head, hwa742.pending_req_list.prev);
head 913 drivers/video/fbdev/ps3fb.c struct display_head *head = &ps3fb.dinfo->display_head[1];
head 924 drivers/video/fbdev/ps3fb.c ps3fb.vblank_count = head->vblank_count;
head 795 drivers/video/fbdev/riva/fbdev.c newmode.ext.head = NV_RD32(par->riva.PCRTC0, 0x00000860) &
head 803 drivers/video/fbdev/riva/fbdev.c newmode.ext.head = NV_RD32(par->riva.PCRTC0, 0x00000860) |
head 1567 drivers/video/fbdev/riva/riva_hw.c NV_WR32(chip->PCRTC0, 0x00000860, state->head);
head 1813 drivers/video/fbdev/riva/riva_hw.c state->head = NV_RD32(chip->PCRTC0, 0x00000860);
head 514 drivers/video/fbdev/riva/riva_hw.h U032 head;
head 104 drivers/video/fbdev/sm501fb.c enum sm501_controller head;
head 442 drivers/video/fbdev/sm501fb.c switch (par->head) {
head 531 drivers/video/fbdev/sm501fb.c if (par->head == HEAD_CRT)
head 541 drivers/video/fbdev/sm501fb.c smc501_writel(reg, fbi->regs + (par->head == HEAD_CRT ?
head 920 drivers/video/fbdev/sm501fb.c if (par->head == HEAD_CRT)
head 1048 drivers/video/fbdev/sm501fb.c if (par->head == HEAD_CRT)
head 1182 drivers/video/fbdev/sm501fb.c enum sm501_controller head;
head 1189 drivers/video/fbdev/sm501fb.c head = HEAD_CRT;
head 1191 drivers/video/fbdev/sm501fb.c head = HEAD_PANEL;
head 1195 drivers/video/fbdev/sm501fb.c dev_info(dev, "setting crt source to head %d\n", head);
head 1199 drivers/video/fbdev/sm501fb.c if (head == HEAD_CRT) {
head 1674 drivers/video/fbdev/sm501fb.c static int sm501fb_init_fb(struct fb_info *fb, enum sm501_controller head,
head 1684 drivers/video/fbdev/sm501fb.c switch (head) {
head 1716 drivers/video/fbdev/sm501fb.c if (head == HEAD_CRT && info->pdata->fb_route == SM501_FB_CRT_PANEL) {
head 1725 drivers/video/fbdev/sm501fb.c (head == HEAD_CRT) ? &sm501fb_ops_crt : &sm501fb_ops_pnl,
head 1860 drivers/video/fbdev/sm501fb.c enum sm501_controller head)
head 1862 drivers/video/fbdev/sm501fb.c unsigned char *name = (head == HEAD_CRT) ? "crt" : "panel";
head 1867 drivers/video/fbdev/sm501fb.c pd = (head == HEAD_CRT) ? info->pdata->fb_crt : info->pdata->fb_pnl;
head 1881 drivers/video/fbdev/sm501fb.c par->head = head;
head 1884 drivers/video/fbdev/sm501fb.c info->fb[head] = fbi;
head 1892 drivers/video/fbdev/sm501fb.c enum sm501_controller head)
head 1894 drivers/video/fbdev/sm501fb.c struct fb_info *fbi = info->fb[head];
head 1903 drivers/video/fbdev/sm501fb.c enum sm501_controller head, const char *drvname)
head 1905 drivers/video/fbdev/sm501fb.c struct fb_info *fbi = info->fb[head];
head 1911 drivers/video/fbdev/sm501fb.c mutex_init(&info->fb[head]->mm_lock);
head 1913 drivers/video/fbdev/sm501fb.c ret = sm501fb_init_fb(info->fb[head], head, drvname);
head 1919 drivers/video/fbdev/sm501fb.c ret = register_framebuffer(info->fb[head]);
head 1922 drivers/video/fbdev/sm501fb.c sm501_free_init_fb(info, head);
head 2074 drivers/video/fbdev/sm501fb.c enum sm501_controller head)
head 2076 drivers/video/fbdev/sm501fb.c struct fb_info *fbi = info->fb[head];
head 2125 drivers/video/fbdev/sm501fb.c enum sm501_controller head)
head 2127 drivers/video/fbdev/sm501fb.c struct fb_info *fbi = info->fb[head];
head 501 drivers/video/fbdev/vermilion/vermilion.c INIT_LIST_HEAD(&vinfo->head);
head 877 drivers/video/fbdev/vermilion/vermilion.c list_move(&vinfo->head, (subsys) ? &global_has_mode : &global_no_mode);
head 1089 drivers/video/fbdev/vermilion/vermilion.c entry = list_entry(list, struct vml_info, head);
head 1146 drivers/video/fbdev/vermilion/vermilion.c list_for_each_entry_safe(entry, next, &global_has_mode, head) {
head 1149 drivers/video/fbdev/vermilion/vermilion.c list_move_tail(&entry->head, &global_no_mode);
head 201 drivers/video/fbdev/vermilion/vermilion.h struct list_head head;
head 471 drivers/virt/fsl_hypervisor.c unsigned int head;
head 499 drivers/virt/fsl_hypervisor.c if (dbq->head != nextp(dbq->tail)) {
head 580 drivers/virt/fsl_hypervisor.c mask = (dbq->head == dbq->tail) ? 0 : (EPOLLIN | EPOLLRDNORM);
head 613 drivers/virt/fsl_hypervisor.c if (dbq->head == dbq->tail) {
head 620 drivers/virt/fsl_hypervisor.c dbq->head != dbq->tail))
head 638 drivers/virt/fsl_hypervisor.c dbell = dbq->q[dbq->head];
head 639 drivers/virt/fsl_hypervisor.c dbq->head = nextp(dbq->head);
head 428 drivers/virtio/virtio_ring.c int head;
head 445 drivers/virtio/virtio_ring.c head = vq->free_head;
head 463 drivers/virtio/virtio_ring.c i = head;
head 518 drivers/virtio/virtio_ring.c vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
head 520 drivers/virtio/virtio_ring.c vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
head 523 drivers/virtio/virtio_ring.c vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
head 533 drivers/virtio/virtio_ring.c vq->split.vring.desc[head].next);
head 538 drivers/virtio/virtio_ring.c vq->split.desc_state[head].data = data;
head 540 drivers/virtio/virtio_ring.c vq->split.desc_state[head].indir_desc = desc;
head 542 drivers/virtio/virtio_ring.c vq->split.desc_state[head].indir_desc = ctx;
head 547 drivers/virtio/virtio_ring.c vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
head 557 drivers/virtio/virtio_ring.c pr_debug("Added buffer head %i to %p\n", head, vq);
head 573 drivers/virtio/virtio_ring.c i = head;
head 620 drivers/virtio/virtio_ring.c static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
head 627 drivers/virtio/virtio_ring.c vq->split.desc_state[head].data = NULL;
head 630 drivers/virtio/virtio_ring.c i = head;
head 641 drivers/virtio/virtio_ring.c vq->free_head = head;
head 648 drivers/virtio/virtio_ring.c vq->split.desc_state[head].indir_desc;
head 656 drivers/virtio/virtio_ring.c vq->split.vring.desc[head].len);
head 658 drivers/virtio/virtio_ring.c BUG_ON(!(vq->split.vring.desc[head].flags &
head 666 drivers/virtio/virtio_ring.c vq->split.desc_state[head].indir_desc = NULL;
head 668 drivers/virtio/virtio_ring.c *ctx = vq->split.desc_state[head].indir_desc;
head 990 drivers/virtio/virtio_ring.c u16 head, id;
head 993 drivers/virtio/virtio_ring.c head = vq->packed.next_avail_idx;
head 1029 drivers/virtio/virtio_ring.c vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
head 1030 drivers/virtio/virtio_ring.c vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
head 1032 drivers/virtio/virtio_ring.c vq->packed.vring.desc[head].id = cpu_to_le16(id);
head 1048 drivers/virtio/virtio_ring.c vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
head 1055 drivers/virtio/virtio_ring.c n = head + 1;
head 1074 drivers/virtio/virtio_ring.c pr_debug("Added buffer head %i to %p\n", head, vq);
head 1105 drivers/virtio/virtio_ring.c u16 head, id, uninitialized_var(prev), curr, avail_used_flags;
head 1125 drivers/virtio/virtio_ring.c head = vq->packed.next_avail_idx;
head 1131 drivers/virtio/virtio_ring.c i = head;
head 1156 drivers/virtio/virtio_ring.c if (i == head)
head 1183 drivers/virtio/virtio_ring.c if (i < head)
head 1205 drivers/virtio/virtio_ring.c vq->packed.vring.desc[head].flags = head_flags;
head 1208 drivers/virtio/virtio_ring.c pr_debug("Added buffer head %i to %p\n", head, vq);
head 1215 drivers/virtio/virtio_ring.c i = head;
head 220 drivers/visorbus/visorchannel.c if (sig_hdr.head == sig_hdr.tail)
head 274 drivers/visorbus/visorchannel.c return (sig_hdr.head == sig_hdr.tail);
head 309 drivers/visorbus/visorchannel.c sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
head 310 drivers/visorbus/visorchannel.c if (sig_hdr.head == sig_hdr.tail) {
head 317 drivers/visorbus/visorchannel.c err = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
head 326 drivers/visorbus/visorchannel.c err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
head 61 drivers/xen/events/events_fifo.c uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
head 112 drivers/xen/events/events_fifo.c q->head[i] = 0;
head 288 drivers/xen/events/events_fifo.c uint32_t head;
head 292 drivers/xen/events/events_fifo.c head = q->head[priority];
head 298 drivers/xen/events/events_fifo.c if (head == 0) {
head 300 drivers/xen/events/events_fifo.c head = control_block->head[priority];
head 303 drivers/xen/events/events_fifo.c port = head;
head 305 drivers/xen/events/events_fifo.c head = clear_linked(word);
head 314 drivers/xen/events/events_fifo.c if (head == 0)
head 324 drivers/xen/events/events_fifo.c q->head[priority] = head;
head 174 drivers/xen/grant-table.c grant_ref_t head;
head 184 drivers/xen/grant-table.c ref = head = gnttab_free_head;
head 187 drivers/xen/grant-table.c head = gnttab_entry(head);
head 188 drivers/xen/grant-table.c gnttab_free_head = gnttab_entry(head);
head 189 drivers/xen/grant-table.c gnttab_entry(head) = GNTTAB_LIST_END;
head 554 drivers/xen/grant-table.c void gnttab_free_grant_references(grant_ref_t head)
head 559 drivers/xen/grant-table.c if (head == GNTTAB_LIST_END)
head 562 drivers/xen/grant-table.c ref = head;
head 568 drivers/xen/grant-table.c gnttab_free_head = head;
head 575 drivers/xen/grant-table.c int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
head 582 drivers/xen/grant-table.c *head = h;
head 25 drivers/xen/xen-pciback/vpci.c static inline struct list_head *list_first(struct list_head *head)
head 27 drivers/xen/xen-pciback/vpci.c return head->next;
head 631 drivers/xen/xen-scsiback.c struct list_head *head = &(info->v2p_entry_lists);
head 635 drivers/xen/xen-scsiback.c list_for_each_entry(entry, head, l) {
head 877 drivers/xen/xen-scsiback.c struct list_head *head = &(info->v2p_entry_lists);
head 880 drivers/xen/xen-scsiback.c list_for_each_entry(entry, head, l)
head 1211 drivers/xen/xen-scsiback.c struct list_head *head = &(info->v2p_entry_lists);
head 1216 drivers/xen/xen-scsiback.c list_for_each_entry_safe(entry, tmp, head, l)
head 40 fs/9p/vfs_dir.c int head;
head 108 fs/9p/vfs_dir.c if (rdir->tail == rdir->head) {
head 119 fs/9p/vfs_dir.c rdir->head = 0;
head 122 fs/9p/vfs_dir.c while (rdir->head < rdir->tail) {
head 123 fs/9p/vfs_dir.c err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
head 124 fs/9p/vfs_dir.c rdir->tail - rdir->head, &st);
head 136 fs/9p/vfs_dir.c rdir->head += err;
head 166 fs/9p/vfs_dir.c if (rdir->tail == rdir->head) {
head 172 fs/9p/vfs_dir.c rdir->head = 0;
head 176 fs/9p/vfs_dir.c while (rdir->head < rdir->tail) {
head 178 fs/9p/vfs_dir.c err = p9dirent_read(fid->clnt, rdir->buf + rdir->head,
head 179 fs/9p/vfs_dir.c rdir->tail - rdir->head,
head 193 fs/9p/vfs_dir.c rdir->head += err;
head 61 fs/aio.c unsigned head; /* Written to by userland or under ring_lock
head 184 fs/aio.c struct wait_queue_head *head;
head 547 fs/aio.c ring->head = ring->tail = 0;
head 957 fs/aio.c static void refill_reqs_available(struct kioctx *ctx, unsigned head,
head 963 fs/aio.c head %= ctx->nr_events;
head 964 fs/aio.c if (head <= tail)
head 965 fs/aio.c events_in_ring = tail - head;
head 967 fs/aio.c events_in_ring = ctx->nr_events - (head - tail);
head 991 fs/aio.c unsigned head;
head 1003 fs/aio.c head = ring->head;
head 1006 fs/aio.c refill_reqs_available(ctx, head, ctx->tail);
head 1094 fs/aio.c unsigned tail, pos, head;
head 1130 fs/aio.c head = ring->head;
head 1137 fs/aio.c refill_reqs_available(ctx, head, tail);
head 1178 fs/aio.c unsigned head, tail, pos;
head 1193 fs/aio.c head = ring->head;
head 1203 fs/aio.c pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
head 1205 fs/aio.c if (head == tail)
head 1208 fs/aio.c head %= ctx->nr_events;
head 1216 fs/aio.c avail = (head <= tail ? tail : ctx->nr_events) - head;
head 1217 fs/aio.c if (head == tail)
head 1220 fs/aio.c pos = head + AIO_EVENTS_OFFSET;
head 1238 fs/aio.c head += avail;
head 1239 fs/aio.c head %= ctx->nr_events;
head 1243 fs/aio.c ring->head = head;
head 1247 fs/aio.c pr_debug("%li h%u t%u\n", ret, head, tail);
head 1649 fs/aio.c add_wait_queue(req->head, &req->wait);
head 1667 fs/aio.c spin_lock(&req->head->lock);
head 1673 fs/aio.c spin_unlock(&req->head->lock);
head 1725 fs/aio.c aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
head 1731 fs/aio.c if (unlikely(pt->iocb->poll.head)) {
head 1737 fs/aio.c pt->iocb->poll.head = head;
head 1738 fs/aio.c add_wait_queue(head, &pt->iocb->poll.wait);
head 1759 fs/aio.c req->head = NULL;
head 1774 fs/aio.c if (likely(req->head)) {
head 1775 fs/aio.c spin_lock(&req->head->lock);
head 1790 fs/aio.c spin_unlock(&req->head->lock);
head 134 fs/autofs/root.c struct list_head *p, *head;
head 136 fs/autofs/root.c head = &sbi->active_list;
head 137 fs/autofs/root.c if (list_empty(head))
head 140 fs/autofs/root.c list_for_each(p, head) {
head 189 fs/autofs/root.c struct list_head *p, *head;
head 191 fs/autofs/root.c head = &sbi->expiring_list;
head 192 fs/autofs/root.c if (list_empty(head))
head 195 fs/autofs/root.c list_for_each(p, head) {
head 82 fs/befs/btree.c befs_host_btree_nodehead head; /* head of node converted to cpu byteorder */
head 214 fs/befs/btree.c node->head.left = fs64_to_cpu(sb, node->od_node->left);
head 215 fs/befs/btree.c node->head.right = fs64_to_cpu(sb, node->od_node->right);
head 216 fs/befs/btree.c node->head.overflow = fs64_to_cpu(sb, node->od_node->overflow);
head 217 fs/befs/btree.c node->head.all_key_count =
head 219 fs/befs/btree.c node->head.all_key_length =
head 283 fs/befs/btree.c node_off = this_node->head.overflow;
head 345 fs/befs/btree.c last = node->head.all_key_count - 1;
head 459 fs/befs/btree.c while (key_sum + this_node->head.all_key_count <= key_no) {
head 462 fs/befs/btree.c if (this_node->head.right == BEFS_BT_INVAL) {
head 468 fs/befs/btree.c key_sum + this_node->head.all_key_count);
head 474 fs/befs/btree.c key_sum += this_node->head.all_key_count;
head 475 fs/befs/btree.c node_off = this_node->head.right;
head 557 fs/befs/btree.c if (this_node->head.all_key_count == 0 && befs_leafnode(this_node)) {
head 564 fs/befs/btree.c if (this_node->head.all_key_count == 0) {
head 568 fs/befs/btree.c this_node->head.overflow);
head 569 fs/befs/btree.c *node_off = this_node->head.overflow;
head 602 fs/befs/btree.c if (node->head.overflow == BEFS_BT_INVAL)
head 626 fs/befs/btree.c (sizeof (befs_btree_nodehead) + node->head.all_key_length);
head 646 fs/befs/btree.c size_t keylen_index_size = node->head.all_key_count * sizeof (fs16);
head 682 fs/befs/btree.c if (index < 0 || index > node->head.all_key_count) {
head 760 fs/btrfs/backref.c struct btrfs_delayed_ref_head *head, u64 seq,
head 765 fs/btrfs/backref.c struct btrfs_delayed_extent_op *extent_op = head->extent_op;
head 775 fs/btrfs/backref.c spin_lock(&head->lock);
head 776 fs/btrfs/backref.c for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
head 867 fs/btrfs/backref.c spin_unlock(&head->lock);
head 1122 fs/btrfs/backref.c struct btrfs_delayed_ref_head *head;
head 1160 fs/btrfs/backref.c head = NULL;
head 1179 fs/btrfs/backref.c head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
head 1180 fs/btrfs/backref.c if (head) {
head 1181 fs/btrfs/backref.c if (!mutex_trylock(&head->mutex)) {
head 1182 fs/btrfs/backref.c refcount_inc(&head->refs);
head 1191 fs/btrfs/backref.c mutex_lock(&head->mutex);
head 1192 fs/btrfs/backref.c mutex_unlock(&head->mutex);
head 1193 fs/btrfs/backref.c btrfs_put_delayed_ref_head(head);
head 1197 fs/btrfs/backref.c ret = add_delayed_refs(fs_info, head, time_seq,
head 1199 fs/btrfs/backref.c mutex_unlock(&head->mutex);
head 839 fs/btrfs/block-group.c struct list_head *head = &fs_info->space_info;
head 842 fs/btrfs/block-group.c list_for_each_entry_rcu(sinfo, head, list) {
head 2811 fs/btrfs/block-group.c struct list_head *head = &info->space_info;
head 2815 fs/btrfs/block-group.c list_for_each_entry_rcu(found, head, list) {
head 2403 fs/btrfs/ctree.h struct btrfs_delayed_ref_head *head);
head 706 fs/btrfs/delayed-inode.c struct list_head head;
head 716 fs/btrfs/delayed-inode.c INIT_LIST_HEAD(&head);
head 728 fs/btrfs/delayed-inode.c list_add_tail(&next->tree_list, &head);
head 766 fs/btrfs/delayed-inode.c list_for_each_entry(next, &head, tree_list) {
head 778 fs/btrfs/delayed-inode.c list_for_each_entry_safe(curr, next, &head, tree_list) {
head 882 fs/btrfs/delayed-inode.c struct list_head head;
head 896 fs/btrfs/delayed-inode.c INIT_LIST_HEAD(&head);
head 903 fs/btrfs/delayed-inode.c list_add_tail(&next->tree_list, &head);
head 927 fs/btrfs/delayed-inode.c list_for_each_entry_safe(curr, next, &head, tree_list) {
head 395 fs/btrfs/delayed-ref.c struct btrfs_delayed_ref_head *head)
head 398 fs/btrfs/delayed-ref.c if (mutex_trylock(&head->mutex))
head 401 fs/btrfs/delayed-ref.c refcount_inc(&head->refs);
head 404 fs/btrfs/delayed-ref.c mutex_lock(&head->mutex);
head 406 fs/btrfs/delayed-ref.c if (RB_EMPTY_NODE(&head->href_node)) {
head 407 fs/btrfs/delayed-ref.c mutex_unlock(&head->mutex);
head 408 fs/btrfs/delayed-ref.c btrfs_put_delayed_ref_head(head);
head 411 fs/btrfs/delayed-ref.c btrfs_put_delayed_ref_head(head);
head 417 fs/btrfs/delayed-ref.c struct btrfs_delayed_ref_head *head,
head 420 fs/btrfs/delayed-ref.c lockdep_assert_held(&head->lock);
head 421 fs/btrfs/delayed-ref.c rb_erase_cached(&ref->ref_node, &head->ref_tree);
head 432 fs/btrfs/delayed-ref.c struct btrfs_delayed_ref_head *head,
head 460 fs/btrfs/delayed-ref.c drop_delayed_ref(trans, delayed_refs, head, next);
head 463 fs/btrfs/delayed-ref.c drop_delayed_ref(trans, delayed_refs, head, ref);
head 479 fs/btrfs/delayed-ref.c struct btrfs_delayed_ref_head *head)
head 486 fs/btrfs/delayed-ref.c lockdep_assert_held(&head->lock);
head 488 fs/btrfs/delayed-ref.c if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
head 492 fs/btrfs/delayed-ref.c if (head->is_data)
head 506 fs/btrfs/delayed-ref.c for (node = rb_first_cached(&head->ref_tree); node;
head 511 fs/btrfs/delayed-ref.c if (merge_ref(trans, delayed_refs, head, ref, seq))
head 541 fs/btrfs/delayed-ref.c struct btrfs_delayed_ref_head *head;
head 544 fs/btrfs/delayed-ref.c head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
head 546 fs/btrfs/delayed-ref.c if (!head && delayed_refs->run_delayed_start != 0) {
head 548 fs/btrfs/delayed-ref.c head = find_first_ref_head(delayed_refs);
head 550 fs/btrfs/delayed-ref.c if (!head)
head 553 fs/btrfs/delayed-ref.c while (head->processing) {
head 556 fs/btrfs/delayed-ref.c node = rb_next(&head->href_node);
head 563 fs/btrfs/delayed-ref.c head = rb_entry(node, struct btrfs_delayed_ref_head,
head 567 fs/btrfs/delayed-ref.c head->processing = 1;
head 570 fs/btrfs/delayed-ref.c delayed_refs->run_delayed_start = head->bytenr +
head 571 fs/btrfs/delayed-ref.c head->num_bytes;
head 572 fs/btrfs/delayed-ref.c return head;
head 576 fs/btrfs/delayed-ref.c struct btrfs_delayed_ref_head *head)
head 579 fs/btrfs/delayed-ref.c lockdep_assert_held(&head->lock);
head 581 fs/btrfs/delayed-ref.c rb_erase_cached(&head->href_node, &delayed_refs->href_root);
head 582 fs/btrfs/delayed-ref.c RB_CLEAR_NODE(&head->href_node);
head 585 fs/btrfs/delayed-ref.c if (head->processing == 0)
head 329 fs/btrfs/delayed-ref.h static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
head 331 fs/btrfs/delayed-ref.h if (refcount_dec_and_test(&head->refs))
head 332 fs/btrfs/delayed-ref.h kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
head 348 fs/btrfs/delayed-ref.h struct btrfs_delayed_ref_head *head);
head 354 fs/btrfs/delayed-ref.h struct btrfs_delayed_ref_head *head);
head 355 fs/btrfs/delayed-ref.h static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
head 357 fs/btrfs/delayed-ref.h mutex_unlock(&head->mutex);
head 360 fs/btrfs/delayed-ref.h struct btrfs_delayed_ref_head *head);
head 3667 fs/btrfs/disk-io.c struct list_head *head;
head 3674 fs/btrfs/disk-io.c head = &info->fs_devices->devices;
head 3675 fs/btrfs/disk-io.c list_for_each_entry(dev, head, dev_list) {
head 3689 fs/btrfs/disk-io.c list_for_each_entry(dev, head, dev_list) {
head 3751 fs/btrfs/disk-io.c struct list_head *head;
head 3775 fs/btrfs/disk-io.c head = &fs_info->fs_devices->devices;
head 3789 fs/btrfs/disk-io.c list_for_each_entry(dev, head, dev_list) {
head 3840 fs/btrfs/disk-io.c list_for_each_entry(dev, head, dev_list) {
head 4283 fs/btrfs/disk-io.c struct btrfs_delayed_ref_head *head;
head 4287 fs/btrfs/disk-io.c head = rb_entry(node, struct btrfs_delayed_ref_head,
head 4289 fs/btrfs/disk-io.c if (btrfs_delayed_ref_lock(delayed_refs, head))
head 4292 fs/btrfs/disk-io.c spin_lock(&head->lock);
head 4293 fs/btrfs/disk-io.c while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
head 4297 fs/btrfs/disk-io.c rb_erase_cached(&ref->ref_node, &head->ref_tree);
head 4304 fs/btrfs/disk-io.c if (head->must_insert_reserved)
head 4306 fs/btrfs/disk-io.c btrfs_free_delayed_extent_op(head->extent_op);
head 4307 fs/btrfs/disk-io.c btrfs_delete_ref_head(delayed_refs, head);
head 4308 fs/btrfs/disk-io.c spin_unlock(&head->lock);
head 4310 fs/btrfs/disk-io.c mutex_unlock(&head->mutex);
head 4313 fs/btrfs/disk-io.c btrfs_pin_extent(fs_info, head->bytenr,
head 4314 fs/btrfs/disk-io.c head->num_bytes, 1);
head 4315 fs/btrfs/disk-io.c btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
head 4316 fs/btrfs/disk-io.c btrfs_put_delayed_ref_head(head);
head 154 fs/btrfs/extent-tree.c struct btrfs_delayed_ref_head *head;
head 238 fs/btrfs/extent-tree.c head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
head 239 fs/btrfs/extent-tree.c if (head) {
head 240 fs/btrfs/extent-tree.c if (!mutex_trylock(&head->mutex)) {
head 241 fs/btrfs/extent-tree.c refcount_inc(&head->refs);
head 250 fs/btrfs/extent-tree.c mutex_lock(&head->mutex);
head 251 fs/btrfs/extent-tree.c mutex_unlock(&head->mutex);
head 252 fs/btrfs/extent-tree.c btrfs_put_delayed_ref_head(head);
head 255 fs/btrfs/extent-tree.c spin_lock(&head->lock);
head 256 fs/btrfs/extent-tree.c if (head->extent_op && head->extent_op->update_flags)
head 257 fs/btrfs/extent-tree.c extent_flags |= head->extent_op->flags_to_set;
head 261 fs/btrfs/extent-tree.c num_refs += head->ref_mod;
head 262 fs/btrfs/extent-tree.c spin_unlock(&head->lock);
head 263 fs/btrfs/extent-tree.c mutex_unlock(&head->mutex);
head 1551 fs/btrfs/extent-tree.c struct btrfs_delayed_ref_head *head,
head 1574 fs/btrfs/extent-tree.c key.objectid = head->bytenr;
head 1581 fs/btrfs/extent-tree.c key.offset = head->num_bytes;
head 1598 fs/btrfs/extent-tree.c if (key.objectid == head->bytenr &&
head 1600 fs/btrfs/extent-tree.c key.offset == head->num_bytes)
head 1607 fs/btrfs/extent-tree.c key.objectid = head->bytenr;
head 1608 fs/btrfs/extent-tree.c key.offset = head->num_bytes;
head 1708 fs/btrfs/extent-tree.c select_delayed_ref(struct btrfs_delayed_ref_head *head)
head 1712 fs/btrfs/extent-tree.c if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
head 1721 fs/btrfs/extent-tree.c if (!list_empty(&head->ref_add_list))
head 1722 fs/btrfs/extent-tree.c return list_first_entry(&head->ref_add_list,
head 1725 fs/btrfs/extent-tree.c ref = rb_entry(rb_first_cached(&head->ref_tree),
head 1732 fs/btrfs/extent-tree.c struct btrfs_delayed_ref_head *head)
head 1735 fs/btrfs/extent-tree.c head->processing = 0;
head 1738 fs/btrfs/extent-tree.c btrfs_delayed_ref_unlock(head);
head 1742 fs/btrfs/extent-tree.c struct btrfs_delayed_ref_head *head)
head 1744 fs/btrfs/extent-tree.c struct btrfs_delayed_extent_op *extent_op = head->extent_op;
head 1749 fs/btrfs/extent-tree.c if (head->must_insert_reserved) {
head 1750 fs/btrfs/extent-tree.c head->extent_op = NULL;
head 1758 fs/btrfs/extent-tree.c struct btrfs_delayed_ref_head *head)
head 1763 fs/btrfs/extent-tree.c extent_op = cleanup_extent_op(head);
head 1766 fs/btrfs/extent-tree.c head->extent_op = NULL;
head 1767 fs/btrfs/extent-tree.c spin_unlock(&head->lock);
head 1768 fs/btrfs/extent-tree.c ret = run_delayed_extent_op(trans, head, extent_op);
head 1775 fs/btrfs/extent-tree.c struct btrfs_delayed_ref_head *head)
head 1779 fs/btrfs/extent-tree.c if (head->total_ref_mod < 0) {
head 1783 fs/btrfs/extent-tree.c if (head->is_data)
head 1785 fs/btrfs/extent-tree.c else if (head->is_system)
head 1792 fs/btrfs/extent-tree.c -head->num_bytes,
head 1800 fs/btrfs/extent-tree.c if (head->is_data) {
head 1802 fs/btrfs/extent-tree.c delayed_refs->pending_csums -= head->num_bytes;
head 1805 fs/btrfs/extent-tree.c head->num_bytes);
head 1813 fs/btrfs/extent-tree.c struct btrfs_delayed_ref_head *head)
head 1822 fs/btrfs/extent-tree.c ret = run_and_cleanup_extent_op(trans, head);
head 1824 fs/btrfs/extent-tree.c unselect_delayed_ref_head(delayed_refs, head);
head 1835 fs/btrfs/extent-tree.c spin_unlock(&head->lock);
head 1837 fs/btrfs/extent-tree.c spin_lock(&head->lock);
head 1838 fs/btrfs/extent-tree.c if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
head 1839 fs/btrfs/extent-tree.c spin_unlock(&head->lock);
head 1843 fs/btrfs/extent-tree.c btrfs_delete_ref_head(delayed_refs, head);
head 1844 fs/btrfs/extent-tree.c spin_unlock(&head->lock);
head 1847 fs/btrfs/extent-tree.c if (head->must_insert_reserved) {
head 1848 fs/btrfs/extent-tree.c btrfs_pin_extent(fs_info, head->bytenr,
head 1849 fs/btrfs/extent-tree.c head->num_bytes, 1);
head 1850 fs/btrfs/extent-tree.c if (head->is_data) {
head 1852 fs/btrfs/extent-tree.c head->bytenr, head->num_bytes);
head 1856 fs/btrfs/extent-tree.c btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
head 1858 fs/btrfs/extent-tree.c trace_run_delayed_ref_head(fs_info, head, 0);
head 1859 fs/btrfs/extent-tree.c btrfs_delayed_ref_unlock(head);
head 1860 fs/btrfs/extent-tree.c btrfs_put_delayed_ref_head(head);
head 1869 fs/btrfs/extent-tree.c struct btrfs_delayed_ref_head *head = NULL;
head 1873 fs/btrfs/extent-tree.c head = btrfs_select_ref_head(delayed_refs);
head 1874 fs/btrfs/extent-tree.c if (!head) {
head 1876 fs/btrfs/extent-tree.c return head;
head 1883 fs/btrfs/extent-tree.c ret = btrfs_delayed_ref_lock(delayed_refs, head);
head 1892 fs/btrfs/extent-tree.c head = ERR_PTR(-EAGAIN);
head 1894 fs/btrfs/extent-tree.c return head;
head 2167 fs/btrfs/extent-tree.c struct btrfs_delayed_ref_head *head;
head 2201 fs/btrfs/extent-tree.c head = rb_entry(node, struct btrfs_delayed_ref_head,
head 2203 fs/btrfs/extent-tree.c refcount_inc(&head->refs);
head 2207 fs/btrfs/extent-tree.c mutex_lock(&head->mutex);
head 2208 fs/btrfs/extent-tree.c mutex_unlock(&head->mutex);
head 2210 fs/btrfs/extent-tree.c btrfs_put_delayed_ref_head(head);
head 2245 fs/btrfs/extent-tree.c struct btrfs_delayed_ref_head *head;
head 2263 fs/btrfs/extent-tree.c head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
head 2264 fs/btrfs/extent-tree.c if (!head) {
head 2270 fs/btrfs/extent-tree.c if (!mutex_trylock(&head->mutex)) {
head 2271 fs/btrfs/extent-tree.c refcount_inc(&head->refs);
head 2280 fs/btrfs/extent-tree.c mutex_lock(&head->mutex);
head 2281 fs/btrfs/extent-tree.c mutex_unlock(&head->mutex);
head 2282 fs/btrfs/extent-tree.c btrfs_put_delayed_ref_head(head);
head 2288 fs/btrfs/extent-tree.c spin_lock(&head->lock);
head 2293 fs/btrfs/extent-tree.c for (node = rb_first_cached(&head->ref_tree); node;
head 2315 fs/btrfs/extent-tree.c spin_unlock(&head->lock);
head 2316 fs/btrfs/extent-tree.c mutex_unlock(&head->mutex);
head 3194 fs/btrfs/extent-tree.c struct btrfs_delayed_ref_head *head;
head 3200 fs/btrfs/extent-tree.c head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
head 3201 fs/btrfs/extent-tree.c if (!head)
head 3204 fs/btrfs/extent-tree.c spin_lock(&head->lock);
head 3205 fs/btrfs/extent-tree.c if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
head 3208 fs/btrfs/extent-tree.c if (cleanup_extent_op(head) != NULL)
head 3215 fs/btrfs/extent-tree.c if (!mutex_trylock(&head->mutex))
head 3218 fs/btrfs/extent-tree.c btrfs_delete_ref_head(delayed_refs, head);
head 3219 fs/btrfs/extent-tree.c head->processing = 0;
head 3221 fs/btrfs/extent-tree.c spin_unlock(&head->lock);
head 3224 fs/btrfs/extent-tree.c BUG_ON(head->extent_op);
head 3225 fs/btrfs/extent-tree.c if (head->must_insert_reserved)
head 3228 fs/btrfs/extent-tree.c btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
head 3229 fs/btrfs/extent-tree.c mutex_unlock(&head->mutex);
head 3230 fs/btrfs/extent-tree.c btrfs_put_delayed_ref_head(head);
head 3233 fs/btrfs/extent-tree.c spin_unlock(&head->lock);
head 43 fs/btrfs/extent_io.c void btrfs_leak_debug_add(struct list_head *new, struct list_head *head)
head 48 fs/btrfs/extent_io.c list_add(new, head);
head 106 fs/btrfs/extent_io.c #define btrfs_leak_debug_add(new, head) do {} while (0)
head 5275 fs/btrfs/extent_io.c static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
head 5278 fs/btrfs/extent_io.c container_of(head, struct extent_buffer, rcu_head);
head 2908 fs/btrfs/file.c static int add_falloc_range(struct list_head *head, u64 start, u64 len)
head 2913 fs/btrfs/file.c if (list_empty(head))
head 2920 fs/btrfs/file.c prev = list_entry(head->prev, struct falloc_range, list);
head 2931 fs/btrfs/file.c list_add_tail(&range->list, head);
head 2611 fs/btrfs/free-space-cache.c struct list_head *head;
head 2614 fs/btrfs/free-space-cache.c while ((head = block_group->cluster_list.next) !=
head 2616 fs/btrfs/free-space-cache.c cluster = list_entry(head, struct btrfs_free_cluster,
head 2470 fs/btrfs/inode.c struct list_head head;
head 2668 fs/btrfs/inode.c list_for_each_entry_safe(old, tmp, &new->head, list) {
head 2683 fs/btrfs/inode.c if (list_empty(&new->head))
head 2920 fs/btrfs/inode.c list_for_each_entry_safe(old, tmp, &new->head, list) {
head 2997 fs/btrfs/inode.c INIT_LIST_HEAD(&new->head);
head 3070 fs/btrfs/inode.c list_add_tail(&old->list, &new->head);
head 798 fs/btrfs/raid56.c struct list_head *head = rbio->plug_list.next;
head 800 fs/btrfs/raid56.c next = list_entry(head, struct btrfs_raid_bio,
head 1678 fs/btrfs/raid56.c u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
head 1679 fs/btrfs/raid56.c u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
head 2782 fs/btrfs/send.c static int __record_ref(struct list_head *head, u64 dir,
head 2794 fs/btrfs/send.c list_add_tail(&ref->list, head);
head 2814 fs/btrfs/send.c static void __free_recorded_refs(struct list_head *head)
head 2818 fs/btrfs/send.c while (!list_empty(head)) {
head 2819 fs/btrfs/send.c cur = list_entry(head->next, struct recorded_ref, list);
head 28 fs/btrfs/space-info.c struct list_head *head = &info->space_info;
head 32 fs/btrfs/space-info.c list_for_each_entry_rcu(found, head, list)
head 142 fs/btrfs/space-info.c struct list_head *head = &info->space_info;
head 148 fs/btrfs/space-info.c list_for_each_entry_rcu(found, head, list) {
head 216 fs/btrfs/space-info.c struct list_head *head;
head 221 fs/btrfs/space-info.c head = &space_info->priority_tickets;
head 223 fs/btrfs/space-info.c while (!list_empty(head)) {
head 227 fs/btrfs/space-info.c ticket = list_first_entry(head, struct reserve_ticket, list);
head 245 fs/btrfs/space-info.c if (head == &space_info->priority_tickets) {
head 246 fs/btrfs/space-info.c head = &space_info->tickets;
head 2027 fs/btrfs/super.c struct list_head *head = &fs_info->space_info;
head 2041 fs/btrfs/super.c list_for_each_entry_rcu(found, head, list) {
head 2259 fs/btrfs/super.c struct list_head *head;
head 2271 fs/btrfs/super.c head = &cur_devices->devices;
head 2272 fs/btrfs/super.c list_for_each_entry_rcu(dev, head, dev_list) {
head 1676 fs/btrfs/transaction.c struct list_head *head = &trans->transaction->pending_snapshots;
head 1679 fs/btrfs/transaction.c list_for_each_entry_safe(pending, next, head, list) {
head 1916 fs/btrfs/transaction.c struct list_head *head = &trans->transaction->pending_snapshots;
head 1925 fs/btrfs/transaction.c list_for_each_entry(pending, head, list) {
head 1944 fs/btrfs/transaction.c struct list_head *head = &trans->transaction->pending_snapshots;
head 1952 fs/btrfs/transaction.c list_for_each_entry(pending, head, list)
head 505 fs/btrfs/volumes.c struct bio *head, struct bio *tail)
head 510 fs/btrfs/volumes.c old_head = pending_bios->head;
head 511 fs/btrfs/volumes.c pending_bios->head = head;
head 566 fs/btrfs/volumes.c if (!force_reg && device->pending_sync_bios.head) {
head 574 fs/btrfs/volumes.c pending = pending_bios->head;
head 586 fs/btrfs/volumes.c if (device->pending_sync_bios.head == NULL &&
head 587 fs/btrfs/volumes.c device->pending_bios.head == NULL) {
head 595 fs/btrfs/volumes.c pending_bios->head = NULL;
head 608 fs/btrfs/volumes.c device->pending_sync_bios.head) ||
head 610 fs/btrfs/volumes.c device->pending_bios.head)) {
head 694 fs/btrfs/volumes.c if (device->pending_bios.head || device->pending_sync_bios.head) head 6501 fs/btrfs/volumes.c if (!pending_bios->head) head 6502 fs/btrfs/volumes.c pending_bios->head = bio; head 22 fs/btrfs/volumes.h struct bio *head; head 86 fs/buffer.c struct buffer_head *head, *bh; head 98 fs/buffer.c head = page_buffers(page); head 99 fs/buffer.c bh = head; head 108 fs/buffer.c } while (bh != head); head 201 fs/buffer.c struct buffer_head *head; head 214 fs/buffer.c head = page_buffers(page); head 215 fs/buffer.c bh = head; head 225 fs/buffer.c } while (bh != head); head 626 fs/buffer.c struct buffer_head *head = page_buffers(page); head 627 fs/buffer.c struct buffer_head *bh = head; head 632 fs/buffer.c } while (bh != head); head 817 fs/buffer.c struct buffer_head *bh, *head; head 828 fs/buffer.c head = NULL; head 835 fs/buffer.c bh->b_this_page = head; head 837 fs/buffer.c head = bh; head 847 fs/buffer.c return head; head 852 fs/buffer.c if (head) { head 854 fs/buffer.c bh = head; head 855 fs/buffer.c head = head->b_this_page; head 857 fs/buffer.c } while (head); head 865 fs/buffer.c link_dev_buffers(struct page *page, struct buffer_head *head) head 869 fs/buffer.c bh = head; head 874 fs/buffer.c tail->b_this_page = head; head 875 fs/buffer.c attach_page_buffers(page, head); head 897 fs/buffer.c struct buffer_head *head = page_buffers(page); head 898 fs/buffer.c struct buffer_head *bh = head; head 915 fs/buffer.c } while (bh != head); head 1472 fs/buffer.c struct buffer_head *head, *bh, *next; head 1485 fs/buffer.c head = page_buffers(page); head 1486 fs/buffer.c bh = head; head 1504 fs/buffer.c } while (bh != head); head 1527 fs/buffer.c struct buffer_head *bh, *head, *tail; head 1529 fs/buffer.c head = alloc_page_buffers(page, blocksize, true); head 1530 fs/buffer.c bh = head; head 1536 fs/buffer.c tail->b_this_page = head; head 1540 fs/buffer.c bh = head; head 1547 fs/buffer.c } while (bh != head); head 1549 fs/buffer.c attach_page_buffers(page, head); head 1583 fs/buffer.c struct buffer_head *head; head 1603 fs/buffer.c head = page_buffers(page); head 1604 fs/buffer.c bh = head; head 1615 fs/buffer.c } while (bh != head); head 1687 fs/buffer.c struct buffer_head *bh, *head; head 1692 fs/buffer.c head = create_page_buffers(page, inode, head 1705 fs/buffer.c bh = head; head 1743 fs/buffer.c } while (bh != head); head 1766 fs/buffer.c } while ((bh = bh->b_this_page) != head); head 1783 fs/buffer.c } while (bh != head); head 1810 fs/buffer.c bh = head; head 1824 fs/buffer.c } while ((bh = bh->b_this_page) != head); head 1838 fs/buffer.c } while (bh != head); head 1852 fs/buffer.c struct buffer_head *head, *bh; head 1858 fs/buffer.c bh = head = page_buffers(page); head 1882 fs/buffer.c } while (bh != head); head 1951 fs/buffer.c struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; head 1958 fs/buffer.c head = create_page_buffers(page, inode, 0); head 1959 fs/buffer.c blocksize = head->b_size; head 1964 fs/buffer.c for(bh = head, block_start = 0; bh != head || !block_start; head 2039 fs/buffer.c struct buffer_head *bh, *head; head 2041 fs/buffer.c bh = head = page_buffers(page); head 2058 fs/buffer.c } while (bh != head); head 2187 fs/buffer.c struct buffer_head *bh, *head; head 2193 fs/buffer.c head = page_buffers(page); head 2194 fs/buffer.c blocksize = head->b_size; head 2200 fs/buffer.c bh = head; head 2214 fs/buffer.c } while (bh != head); head 2231 fs/buffer.c struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; head 2236 fs/buffer.c head = 
create_page_buffers(page, inode, 0); head 2237 fs/buffer.c blocksize = head->b_size; head 2242 fs/buffer.c bh = head; head 2274 fs/buffer.c } while (i++, iblock++, (bh = bh->b_this_page) != head); head 2521 fs/buffer.c static void attach_nobh_buffers(struct page *page, struct buffer_head *head) head 2528 fs/buffer.c bh = head; head 2533 fs/buffer.c bh->b_this_page = head; head 2535 fs/buffer.c } while (bh != head); head 2536 fs/buffer.c attach_page_buffers(page, head); head 2553 fs/buffer.c struct buffer_head *head, *bh; head 2593 fs/buffer.c head = alloc_page_buffers(page, blocksize, false); head 2594 fs/buffer.c if (!head) { head 2606 fs/buffer.c for (block_start = 0, block_in_page = 0, bh = head; head 2649 fs/buffer.c for (bh = head; bh; bh = bh->b_this_page) { head 2661 fs/buffer.c *fsdata = head; /* to be released by nobh_write_end */ head 2674 fs/buffer.c attach_nobh_buffers(page, head); head 2691 fs/buffer.c struct buffer_head *head = fsdata; head 2695 fs/buffer.c if (unlikely(copied < len) && head) head 2696 fs/buffer.c attach_nobh_buffers(page, head); head 2711 fs/buffer.c while (head) { head 2712 fs/buffer.c bh = head; head 2713 fs/buffer.c head = head->b_this_page; head 3221 fs/buffer.c struct buffer_head *head = page_buffers(page); head 3224 fs/buffer.c bh = head; head 3229 fs/buffer.c } while (bh != head); head 3237 fs/buffer.c } while (bh != head); head 3238 fs/buffer.c *buffers_to_free = head; head 703 fs/ceph/dir.c if (!req->r_reply_info.head->is_dentry) { head 866 fs/ceph/dir.c if (!err && !req->r_reply_info.head->is_dentry) head 924 fs/ceph/dir.c if (!err && !req->r_reply_info.head->is_dentry) head 988 fs/ceph/dir.c !req->r_reply_info.head->is_target && head 989 fs/ceph/dir.c !req->r_reply_info.head->is_dentry) head 1031 fs/ceph/dir.c } else if (!req->r_reply_info.head->is_dentry) { head 1075 fs/ceph/dir.c if (!err && !req->r_reply_info.head->is_dentry) head 1131 fs/ceph/dir.c if (!err && !req->r_reply_info.head->is_dentry) { head 501 fs/ceph/file.c if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry) head 1225 fs/ceph/inode.c rinfo->head->is_dentry, rinfo->head->is_target); head 1227 fs/ceph/inode.c if (!rinfo->head->is_target && !rinfo->head->is_dentry) { head 1229 fs/ceph/inode.c if (rinfo->head->result == 0 && req->r_parent) head 1234 fs/ceph/inode.c if (rinfo->head->is_dentry) { head 1254 fs/ceph/inode.c BUG_ON(!rinfo->head->is_target); head 1296 fs/ceph/inode.c if (rinfo->head->is_target) { head 1310 fs/ceph/inode.c rinfo->head->result == 0) ? 
req->r_fmode : -1, head 1323 fs/ceph/inode.c if (rinfo->head->is_dentry && head 1326 fs/ceph/inode.c (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, head 1397 fs/ceph/inode.c if (!rinfo->head->is_target) { head 1451 fs/ceph/inode.c } else if (rinfo->head->is_dentry && req->r_dentry) { head 1456 fs/ceph/inode.c if (rinfo->head->is_target) { head 1598 fs/ceph/inode.c if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) { head 59 fs/ceph/mds_client.c struct list_head *head); head 292 fs/ceph/mds_client.c if (info->head->is_dentry) { head 311 fs/ceph/mds_client.c if (info->head->is_target) { head 452 fs/ceph/mds_client.c u32 op = le32_to_cpu(info->head->op); head 475 fs/ceph/mds_client.c info->head = msg->front.iov_base; head 1861 fs/ceph/mds_client.c struct ceph_mds_cap_release *head; head 1886 fs/ceph/mds_client.c head = msg->front.iov_base; head 1887 fs/ceph/mds_client.c head->num = cpu_to_le32(0); head 1888 fs/ceph/mds_client.c msg->front.iov_len = sizeof(*head); head 1899 fs/ceph/mds_client.c head = msg->front.iov_base; head 1900 fs/ceph/mds_client.c put_unaligned_le32(get_unaligned_le32(&head->num) + 1, head 1901 fs/ceph/mds_client.c &head->num); head 1911 fs/ceph/mds_client.c if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { head 2289 fs/ceph/mds_client.c struct ceph_mds_request_head *head; head 2320 fs/ceph/mds_client.c len = sizeof(*head) + head 2342 fs/ceph/mds_client.c head = msg->front.iov_base; head 2343 fs/ceph/mds_client.c p = msg->front.iov_base + sizeof(*head); head 2346 fs/ceph/mds_client.c head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); head 2347 fs/ceph/mds_client.c head->op = cpu_to_le32(req->r_op); head 2348 fs/ceph/mds_client.c head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid)); head 2349 fs/ceph/mds_client.c head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid)); head 2350 fs/ceph/mds_client.c head->args = req->r_args; head 2383 fs/ceph/mds_client.c head->num_releases = cpu_to_le16(releases); head 2624 fs/ceph/mds_client.c struct list_head *head) head 2629 fs/ceph/mds_client.c list_splice_init(head, &tmp_list); head 2716 fs/ceph/mds_client.c err = le32_to_cpu(req->r_reply_info.head->result); head 2792 fs/ceph/mds_client.c struct ceph_mds_reply_head *head = msg->front.iov_base; head 2799 fs/ceph/mds_client.c if (msg->front.iov_len < sizeof(*head)) { head 2826 fs/ceph/mds_client.c if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) || head 2827 fs/ceph/mds_client.c (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) { head 2829 fs/ceph/mds_client.c head->safe ? 
"safe" : "unsafe", tid, mds); head 2840 fs/ceph/mds_client.c result = le32_to_cpu(head->result); head 2871 fs/ceph/mds_client.c if (head->safe) { head 2925 fs/ceph/mds_client.c le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, head 3296 fs/ceph/mds_client.c page = list_first_entry(&recon_state->pagelist->head, struct page, lru); head 3698 fs/ceph/mds_client.c list_first_entry(&recon_state.pagelist->head, head 92 fs/ceph/mds_client.h struct ceph_mds_reply_head *head; head 1244 fs/ceph/xattr.c if (list_is_singular(&pagelist->head)) { head 1247 fs/ceph/xattr.c struct page *page = list_first_entry(&pagelist->head, head 764 fs/cifs/dfs_cache.c struct list_head *head = &tl->tl_list; head 769 fs/cifs/dfs_cache.c INIT_LIST_HEAD(head); head 787 fs/cifs/dfs_cache.c list_add(&it->it_list, head); head 789 fs/cifs/dfs_cache.c list_add_tail(&it->it_list, head); head 796 fs/cifs/dfs_cache.c list_for_each_entry_safe(it, nit, head, it_list) { head 1259 fs/cifs/dfs_cache.c static void get_tcons(struct TCP_Server_Info *server, struct list_head *head) head 1264 fs/cifs/dfs_cache.c INIT_LIST_HEAD(head); head 1272 fs/cifs/dfs_cache.c list_add_tail(&tcon->ulist, head); head 1277 fs/cifs/dfs_cache.c list_add_tail(&ses->tcon_ipc->ulist, head); head 257 fs/dcache.c struct rcu_head head; head 267 fs/dcache.c static void __d_free(struct rcu_head *head) head 269 fs/dcache.c struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); head 274 fs/dcache.c static void __d_free_external(struct rcu_head *head) head 276 fs/dcache.c struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); head 307 fs/dcache.c kfree_rcu(p, u.head); head 2788 fs/dcache.c kfree_rcu(old_name, u.head); head 110 fs/direct-io.c unsigned head; /* next page to process */ head 161 fs/direct-io.c return sdio->tail - sdio->head; head 185 fs/direct-io.c sdio->head = 0; head 195 fs/direct-io.c sdio->head = 0; head 220 fs/direct-io.c return dio->pages[sdio->head]; head 493 fs/direct-io.c while (sdio->head < sdio->tail) head 494 fs/direct-io.c put_page(dio->pages[sdio->head++]); head 978 fs/direct-io.c from = sdio->head ? 0 : sdio->from; head 979 fs/direct-io.c to = (sdio->head == sdio->tail - 1) ? 
sdio->to : PAGE_SIZE; head 980 fs/direct-io.c sdio->head++; head 1298 fs/dlm/lock.c static void lkb_add_ordered(struct list_head *new, struct list_head *head, head 1303 fs/dlm/lock.c list_for_each_entry(lkb, head, lkb_statequeue) head 2190 fs/dlm/lock.c static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head) head 2192 fs/dlm/lock.c struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb, head 2202 fs/dlm/lock.c static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb) head 2206 fs/dlm/lock.c list_for_each_entry(this, head, lkb_statequeue) { head 2661 fs/dlm/lock.c static void send_bast_queue(struct dlm_rsb *r, struct list_head *head, head 2666 fs/dlm/lock.c list_for_each_entry(gr, head, lkb_statequeue) { head 5545 fs/dlm/lock.c static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid, head 5550 fs/dlm/lock.c list_for_each_entry(lkb, head, lkb_statequeue) { head 295 fs/dlm/member.c struct list_head *head = &ls->ls_nodes; head 297 fs/dlm/member.c list_for_each(tmp, head) { head 304 fs/dlm/member.c list_add_tail(newlist, head); head 337 fs/dlm/member.c static struct dlm_member *find_memb(struct list_head *head, int nodeid) head 341 fs/dlm/member.c list_for_each_entry(memb, head, list) { head 362 fs/dlm/member.c static void clear_memb_list(struct list_head *head) head 366 fs/dlm/member.c while (!list_empty(head)) { head 367 fs/dlm/member.c memb = list_entry(head->next, struct dlm_member, list); head 617 fs/dlm/recover.c static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head) head 622 fs/dlm/recover.c list_for_each_entry(lkb, head, lkb_statequeue) { head 54 fs/ecryptfs/dentry.c static void ecryptfs_dentry_free_rcu(struct rcu_head *head) head 57 fs/ecryptfs/dentry.c container_of(head, struct ecryptfs_dentry_info, rcu)); head 51 fs/erofs/namei.c int head, back; head 56 fs/erofs/namei.c head = 1; head 60 fs/erofs/namei.c while (head <= back) { head 61 fs/erofs/namei.c const int mid = head + (back - head) / 2; head 79 fs/erofs/namei.c head = mid + 1; head 95 fs/erofs/namei.c int head, back; head 100 fs/erofs/namei.c head = 0; head 103 fs/erofs/namei.c while (head <= back) { head 104 fs/erofs/namei.c const int mid = head + (back - head) / 2; head 145 fs/erofs/namei.c head = mid + 1; head 498 fs/erofs/zdata.c static void z_erofs_rcu_callback(struct rcu_head *head) head 501 fs/erofs/zdata.c container_of(head, struct z_erofs_collection, rcu); head 960 fs/erofs/zdata.c z_erofs_next_pcluster_t owned = io->head; head 984 fs/erofs/zdata.c DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED); head 1138 fs/erofs/zdata.c io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED; head 1160 fs/erofs/zdata.c qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; head 1163 fs/erofs/zdata.c qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; head 1227 fs/erofs/zdata.c q[JQ_SUBMIT]->head = owned_head; head 1378 fs/erofs/zdata.c struct page *head = NULL; head 1397 fs/erofs/zdata.c sync &= !(PageReadahead(page) && !head); head 1404 fs/erofs/zdata.c set_page_private(page, (unsigned long)head); head 1405 fs/erofs/zdata.c head = page; head 1408 fs/erofs/zdata.c while (head) { head 1409 fs/erofs/zdata.c struct page *page = head; head 1413 fs/erofs/zdata.c head = (void *)page_private(page); head 89 fs/erofs/zdata.h z_erofs_next_pcluster_t head; head 764 fs/eventpoll.c static void epi_rcu_free(struct rcu_head *head) head 766 fs/eventpoll.c struct epitem *epi = container_of(head, struct epitem, rcu); head 874 fs/eventpoll.c static __poll_t ep_read_events_proc(struct eventpoll *ep, struct 
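
The fs/erofs/namei.c entries (lines 51-145) are two inclusive binary searches: head and back bracket the live candidates, and the midpoint is computed as head + (back - head) / 2 so the sum cannot overflow. The same loop over a sorted int array, as a sketch:

    #include <stdio.h>

    /* Returns the index of key in sorted a[0..n-1], or -1 if absent. */
    static int bsearch_idx(const int *a, int n, int key)
    {
        int head = 0, back = n - 1;

        while (head <= back) {
            /* Overflow-safe midpoint, as in fs/erofs/namei.c. */
            const int mid = head + (back - head) / 2;

            if (a[mid] < key)
                head = mid + 1;
            else if (a[mid] > key)
                back = mid - 1;
            else
                return mid;
        }
        return -1;
    }

    int main(void)
    {
        int a[] = { 2, 3, 5, 8, 13, 21 };

        printf("%d %d\n", bsearch_idx(a, 6, 13), bsearch_idx(a, 6, 4));
        return 0;   /* prints: 4 -1 */
    }
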
list_head *head, head 903 fs/eventpoll.c static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head, head 913 fs/eventpoll.c list_for_each_entry_safe(epi, tmp, head, rdllink) { head 1135 fs/eventpoll.c struct list_head *head) head 1145 fs/eventpoll.c if (cmpxchg(&new->next, new, head) != new) head 1156 fs/eventpoll.c prev = xchg(&head->prev, new); head 1701 fs/eventpoll.c static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head, head 1721 fs/eventpoll.c list_for_each_entry_safe(epi, tmp, head, rdllink) { head 1755 fs/eventpoll.c list_add(&epi->rdllink, head); head 2591 fs/ext4/ext4.h struct buffer_head *head, head 624 fs/ext4/fsmap.c int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head, head 633 fs/ext4/fsmap.c if (head->fmh_iflags & ~FMH_IF_VALID) head 635 fs/ext4/fsmap.c if (!ext4_getfsmap_is_valid_device(sb, &head->fmh_keys[0]) || head 636 fs/ext4/fsmap.c !ext4_getfsmap_is_valid_device(sb, &head->fmh_keys[1])) head 639 fs/ext4/fsmap.c head->fmh_entries = 0; head 665 fs/ext4/fsmap.c dkeys[0] = head->fmh_keys[0]; head 671 fs/ext4/fsmap.c if (!ext4_getfsmap_check_keys(dkeys, &head->fmh_keys[1])) head 674 fs/ext4/fsmap.c info.gfi_next_fsblk = head->fmh_keys[0].fmr_physical + head 675 fs/ext4/fsmap.c head->fmh_keys[0].fmr_length; head 678 fs/ext4/fsmap.c info.gfi_head = head; head 685 fs/ext4/fsmap.c if (head->fmh_keys[0].fmr_device > handlers[i].gfd_dev) head 687 fs/ext4/fsmap.c if (head->fmh_keys[1].fmr_device < handlers[i].gfd_dev) head 697 fs/ext4/fsmap.c if (handlers[i].gfd_dev == head->fmh_keys[1].fmr_device) head 698 fs/ext4/fsmap.c dkeys[1] = head->fmh_keys[1]; head 699 fs/ext4/fsmap.c if (handlers[i].gfd_dev > head->fmh_keys[0].fmr_device) head 711 fs/ext4/fsmap.c head->fmh_oflags = FMH_OF_DEV_T; head 39 fs/ext4/fsmap.h int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head, head 1091 fs/ext4/inode.c struct buffer_head *head, head 1100 fs/ext4/inode.c unsigned blocksize = head->b_size; head 1104 fs/ext4/inode.c for (bh = head, block_start = 0; head 1105 fs/ext4/inode.c ret == 0 && (bh != head || !block_start); head 1182 fs/ext4/inode.c struct buffer_head *bh, *head, *wait[2]; head 1193 fs/ext4/inode.c head = page_buffers(page); head 1197 fs/ext4/inode.c for (bh = head, block_start = 0; bh != head || !block_start; head 1494 fs/ext4/inode.c struct buffer_head *head, *bh; head 1496 fs/ext4/inode.c bh = head = page_buffers(page); head 1515 fs/ext4/inode.c } while (bh != head); head 2317 fs/ext4/inode.c struct buffer_head *head, head 2342 fs/ext4/inode.c } while (lblk++, (bh = bh->b_this_page) != head); head 2345 fs/ext4/inode.c err = mpage_submit_page(mpd, head->b_page); head 2371 fs/ext4/inode.c struct buffer_head *head, *bh; head 2392 fs/ext4/inode.c bh = head = page_buffers(page); head 2410 fs/ext4/inode.c err = mpage_process_page_bufs(mpd, head, head 2422 fs/ext4/inode.c } while (lblk++, (bh = bh->b_this_page) != head); head 2643 fs/ext4/inode.c struct buffer_head *head; head 2702 fs/ext4/inode.c head = page_buffers(page); head 2703 fs/ext4/inode.c err = mpage_process_page_bufs(mpd, head, head, lblk); head 643 fs/ext4/ioctl.c struct fsmap_head head; head 647 fs/ext4/ioctl.c if (copy_from_user(&head, arg, sizeof(struct fsmap_head))) head 649 fs/ext4/ioctl.c if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) || head 650 fs/ext4/ioctl.c memchr_inv(head.fmh_keys[0].fmr_reserved, 0, head 651 fs/ext4/ioctl.c sizeof(head.fmh_keys[0].fmr_reserved)) || head 652 fs/ext4/ioctl.c 
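
The list_for_each_entry_safe() uses in the fs/eventpoll.c entries (ep_read_events_proc(), ep_send_events_proc()) exist so the loop body may unlink the current entry: the iterator caches the successor before the body runs. A simplified circular-list sketch of why that cached pointer matters; struct node and the helpers are hand-rolled stand-ins for <linux/list.h>.

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int v; struct node *prev, *next; };

    static void unlink_and_free(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        free(n);
    }

    int main(void)
    {
        struct node head = { 0, &head, &head };    /* sentinel head */

        for (int i = 1; i <= 5; i++) {
            struct node *n = malloc(sizeof(*n));
            n->v = i;
            n->prev = head.prev; n->next = &head;
            head.prev->next = n; head.prev = n;
        }

        /* The "safe" walk: tmp caches the successor up front, so
         * freeing pos cannot break the traversal. */
        for (struct node *pos = head.next, *tmp = pos->next;
             pos != &head; pos = tmp, tmp = pos->next) {
            if (pos->v % 2 == 0)
                unlink_and_free(pos);   /* a plain walk would then
                                         * dereference freed memory */
        }

        for (struct node *pos = head.next; pos != &head; pos = pos->next)
            printf("%d ", pos->v);      /* prints: 1 3 5 */
        printf("\n");

        while (head.next != &head)      /* tidy up the survivors */
            unlink_and_free(head.next);
        return 0;
    }
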
memchr_inv(head.fmh_keys[1].fmr_reserved, 0, head 653 fs/ext4/ioctl.c sizeof(head.fmh_keys[1].fmr_reserved))) head 659 fs/ext4/ioctl.c if (head.fmh_keys[0].fmr_offset || head 660 fs/ext4/ioctl.c (head.fmh_keys[1].fmr_offset != 0 && head 661 fs/ext4/ioctl.c head.fmh_keys[1].fmr_offset != -1ULL)) head 664 fs/ext4/ioctl.c xhead.fmh_iflags = head.fmh_iflags; head 665 fs/ext4/ioctl.c xhead.fmh_count = head.fmh_count; head 666 fs/ext4/ioctl.c ext4_fsmap_to_internal(sb, &xhead.fmh_keys[0], &head.fmh_keys[0]); head 667 fs/ext4/ioctl.c ext4_fsmap_to_internal(sb, &xhead.fmh_keys[1], &head.fmh_keys[1]); head 691 fs/ext4/ioctl.c head.fmh_entries = xhead.fmh_entries; head 692 fs/ext4/ioctl.c head.fmh_oflags = xhead.fmh_oflags; head 693 fs/ext4/ioctl.c if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) head 3572 fs/ext4/mballoc.c static void ext4_mb_pa_callback(struct rcu_head *head) head 3575 fs/ext4/mballoc.c pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); head 171 fs/ext4/move_extent.c struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; head 184 fs/ext4/move_extent.c head = page_buffers(page); head 186 fs/ext4/move_extent.c for (bh = head, block_start = 0; bh != head || !block_start; head 70 fs/ext4/page-io.c struct buffer_head *bh, *head; head 88 fs/ext4/page-io.c bh = head = page_buffers(page); head 94 fs/ext4/page-io.c bit_spin_lock(BH_Uptodate_Lock, &head->b_state); head 105 fs/ext4/page-io.c } while ((bh = bh->b_this_page) != head); head 106 fs/ext4/page-io.c bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); head 165 fs/ext4/page-io.c static void dump_completed_IO(struct inode *inode, struct list_head *head) head 171 fs/ext4/page-io.c if (list_empty(head)) head 175 fs/ext4/page-io.c list_for_each_entry(io, head, list) { head 208 fs/ext4/page-io.c struct list_head *head) head 217 fs/ext4/page-io.c dump_completed_IO(inode, head); head 218 fs/ext4/page-io.c list_replace_init(head, &unwritten); head 413 fs/ext4/page-io.c struct buffer_head *bh, *head; head 445 fs/ext4/page-io.c bh = head = page_buffers(page); head 466 fs/ext4/page-io.c } while ((bh = bh->b_this_page) != head); head 468 fs/ext4/page-io.c bh = head = page_buffers(page); head 523 fs/ext4/page-io.c } while ((bh = bh->b_this_page) != head); head 534 fs/ext4/page-io.c } while (bh != head); head 25 fs/ext4/resize.c static void ext4_rcu_ptr_callback(struct rcu_head *head) head 29 fs/ext4/resize.c ptr = container_of(head, struct ext4_rcu_ptr, rcu); head 741 fs/f2fs/checkpoint.c struct list_head *head; head 757 fs/f2fs/checkpoint.c head = &im->ino_list; head 760 fs/f2fs/checkpoint.c list_for_each_entry(orphan, head, list) { head 1037 fs/f2fs/checkpoint.c struct list_head *head; head 1052 fs/f2fs/checkpoint.c head = &sbi->inode_list[type]; head 1053 fs/f2fs/checkpoint.c if (list_empty(head)) { head 1060 fs/f2fs/checkpoint.c fi = list_first_entry(head, struct f2fs_inode_info, dirty_list); head 1091 fs/f2fs/checkpoint.c struct list_head *head = &sbi->inode_list[DIRTY_META]; head 1101 fs/f2fs/checkpoint.c if (list_empty(head)) { head 1105 fs/f2fs/checkpoint.c fi = list_first_entry(head, struct f2fs_inode_info, head 218 fs/f2fs/node.c struct nat_entry_set *head; head 220 fs/f2fs/node.c head = radix_tree_lookup(&nm_i->nat_set_root, set); head 221 fs/f2fs/node.c if (!head) { head 222 fs/f2fs/node.c head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS); head 224 fs/f2fs/node.c INIT_LIST_HEAD(&head->entry_list); head 225 fs/f2fs/node.c INIT_LIST_HEAD(&head->set_list); head 226 fs/f2fs/node.c head->set = set; head 227 
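
The fs/f2fs/node.c entries at lines 218-230 (the body of __grab_nat_entry_set(), called just below) are a lookup-or-create helper: search by key, and on a miss allocate, initialize, insert, and return the new set. A sketch of the same shape over a small chained table; grab_set and struct set_entry are invented names, and plain calloc() stands in for f2fs_kmem_cache_alloc().

    #include <stdio.h>
    #include <stdlib.h>

    #define NBUCKET 16

    struct set_entry {
        unsigned key;
        unsigned entry_cnt;
        struct set_entry *next;        /* hash-chain link */
    };

    static struct set_entry *buckets[NBUCKET];

    /* Return the entry for key, creating and inserting it on a miss. */
    static struct set_entry *grab_set(unsigned key)
    {
        struct set_entry **headp = &buckets[key % NBUCKET];

        for (struct set_entry *e = *headp; e; e = e->next)
            if (e->key == key)
                return e;

        struct set_entry *e = calloc(1, sizeof(*e));
        if (!e)
            abort();                   /* the kernel helper retries instead */
        e->key = key;
        e->next = *headp;              /* push onto the bucket head */
        *headp = e;
        return e;
    }

    int main(void)
    {
        grab_set(7)->entry_cnt++;
        grab_set(7)->entry_cnt++;      /* second call finds the entry */
        printf("%u\n", grab_set(7)->entry_cnt);   /* prints: 2 */
        return 0;
    }
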
fs/f2fs/node.c head->entry_cnt = 0; head 228 fs/f2fs/node.c f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head); head 230 fs/f2fs/node.c return head; head 236 fs/f2fs/node.c struct nat_entry_set *head; head 240 fs/f2fs/node.c head = __grab_nat_entry_set(nm_i, ne); head 249 fs/f2fs/node.c head->entry_cnt++; head 263 fs/f2fs/node.c list_move_tail(&ne->list, &head->entry_list); head 1934 fs/f2fs/node.c struct list_head *head = &sbi->fsync_node_list; head 1941 fs/f2fs/node.c if (list_empty(head)) { head 1945 fs/f2fs/node.c fn = list_first_entry(head, struct fsync_node_entry, list); head 2740 fs/f2fs/node.c struct list_head *head, int max) head 2747 fs/f2fs/node.c list_for_each_entry(cur, head, set_list) { head 2754 fs/f2fs/node.c list_add_tail(&nes->set_list, head); head 56 fs/f2fs/recovery.c static struct fsync_inode_entry *get_fsync_inode(struct list_head *head, head 61 fs/f2fs/recovery.c list_for_each_entry(entry, head, list) head 69 fs/f2fs/recovery.c struct list_head *head, nid_t ino, bool quota_inode) head 91 fs/f2fs/recovery.c list_add_tail(&entry->list, head); head 299 fs/f2fs/recovery.c static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, head 334 fs/f2fs/recovery.c entry = get_fsync_inode(head, ino_of_node(page)); head 352 fs/f2fs/recovery.c entry = add_fsync_inode(sbi, head, ino_of_node(page), head 389 fs/f2fs/recovery.c static void destroy_fsync_dnodes(struct list_head *head, int drop) head 393 fs/f2fs/recovery.c list_for_each_entry_safe(entry, tmp, head, list) head 211 fs/f2fs/segment.c struct list_head *head, bool drop, bool recover, head 218 fs/f2fs/segment.c list_for_each_entry_safe(cur, tmp, head, list) { head 288 fs/f2fs/segment.c struct list_head *head = &sbi->inode_list[ATOMIC_FILE]; head 295 fs/f2fs/segment.c if (list_empty(head)) { head 299 fs/f2fs/segment.c fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist); head 302 fs/f2fs/segment.c list_move_tail(&fi->inmem_ilist, head); head 353 fs/f2fs/segment.c struct list_head *head = &fi->inmem_pages; head 359 fs/f2fs/segment.c list_for_each_entry(cur, head, list) { head 364 fs/f2fs/segment.c f2fs_bug_on(sbi, list_empty(head) || cur->page != page); head 1854 fs/f2fs/segment.c struct list_head *head = &SM_I(sbi)->dcc_info->entry_list; head 1890 fs/f2fs/segment.c list_add_tail(&de->list, head); head 1909 fs/f2fs/segment.c struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list); head 1913 fs/f2fs/segment.c list_for_each_entry_safe(entry, this, head, list) head 1935 fs/f2fs/segment.c struct list_head *head = &dcc->entry_list; head 1996 fs/f2fs/segment.c list_for_each_entry_safe(entry, this, head, list) { head 3755 fs/f2fs/segment.c struct list_head *head) head 3759 fs/f2fs/segment.c if (list_is_last(&ses->set_list, head)) head 3762 fs/f2fs/segment.c list_for_each_entry_continue(next, head, set_list) head 3769 fs/f2fs/segment.c static void add_sit_entry(unsigned int segno, struct list_head *head) head 3774 fs/f2fs/segment.c list_for_each_entry(ses, head, set_list) { head 3777 fs/f2fs/segment.c adjust_sit_entry_set(ses, head); head 3786 fs/f2fs/segment.c list_add(&ses->set_list, head); head 3832 fs/f2fs/segment.c struct list_head *head = &SM_I(sbi)->sit_entry_set; head 3861 fs/f2fs/segment.c list_for_each_entry_safe(ses, tmp, head, set_list) { head 3929 fs/f2fs/segment.c f2fs_bug_on(sbi, !list_empty(head)); head 409 fs/fat/inode.c struct hlist_head *head = sbi->inode_hashtable head 414 fs/fat/inode.c hlist_add_head(&MSDOS_I(inode)->i_fat_hash, head); head 452 fs/fat/inode.c struct 
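
The fsync_node_list handling in the fs/f2fs/node.c entries (lines 1934-1945) pops list_first_entry() off a sentinel-headed list, and the same drain loop, while (!list_empty(head)), recurs in the fuse, gfs2, and nilfs2 entries below. A sentinel-list sketch with hand-rolled helpers in place of <linux/list.h>:

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { int v; struct entry *prev, *next; };

    static int list_empty(const struct entry *head)
    {
        return head->next == head;
    }

    static void list_del(struct entry *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    static void list_add_tail(struct entry *e, struct entry *head)
    {
        e->prev = head->prev;
        e->next = head;
        head->prev->next = e;
        head->prev = e;
    }

    int main(void)
    {
        struct entry head = { 0, &head, &head };   /* like LIST_HEAD() */

        for (int i = 0; i < 3; i++) {
            struct entry *e = malloc(sizeof(*e));
            e->v = i;
            list_add_tail(e, &head);
        }

        /* The drain idiom: pop the first entry until the head is alone. */
        while (!list_empty(&head)) {
            struct entry *e = head.next;           /* list_first_entry() */
            list_del(e);
            printf("dropping %d\n", e->v);
            free(e);
        }
        return 0;
    }
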
hlist_head *head = sbi->inode_hashtable + fat_hash(i_pos); head 457 fs/fat/inode.c hlist_for_each_entry(i, head, i_fat_hash) { head 26 fs/fat/nfs.c struct hlist_head *head; head 30 fs/fat/nfs.c head = sbi->dir_hashtable + fat_dir_hash(i_logstart); head 32 fs/fat/nfs.c hlist_for_each_entry(i, head, i_dir_hash) { head 852 fs/fcntl.c static void fasync_free_rcu(struct rcu_head *head) head 855 fs/fcntl.c container_of(head, struct fasync_struct, fa_rcu)); head 45 fs/file_table.c static void file_free_rcu(struct rcu_head *head) head 47 fs/file_table.c struct file *f = container_of(head, struct file, f_u.fu_rcuhead); head 71 fs/fs-writeback.c static inline struct inode *wb_inode(struct list_head *head) head 73 fs/fs-writeback.c return list_entry(head, struct inode, i_io_list); head 121 fs/fs-writeback.c struct list_head *head) head 125 fs/fs-writeback.c list_move(&inode->i_io_list, head); head 128 fs/fs-writeback.c if (head != &wb->b_dirty_time) head 447 fs/fs_context.c index = log->head & (logsize - 1); head 448 fs/fs_context.c BUILD_BUG_ON(sizeof(log->head) != sizeof(u8) || head 450 fs/fs_context.c if ((u8)(log->head - log->tail) == logsize) { head 460 fs/fs_context.c log->head++; head 39 fs/fsopen.c if (log->head == log->tail) { head 1055 fs/fuse/dev.c struct fuse_forget_link *head = fiq->forget_list_head.next; head 1056 fs/fuse/dev.c struct fuse_forget_link **newhead = &head; head 1070 fs/fuse/dev.c return head; head 1114 fs/fuse/dev.c struct fuse_forget_link *head; head 1128 fs/fuse/dev.c head = fuse_dequeue_forget(fiq, max_forgets, &count); head 1137 fs/fuse/dev.c while (head) { head 1138 fs/fuse/dev.c struct fuse_forget_link *forget = head; head 1144 fs/fuse/dev.c head = forget->next; head 2043 fs/fuse/dev.c static void end_requests(struct fuse_conn *fc, struct list_head *head) head 2045 fs/fuse/dev.c while (!list_empty(head)) { head 2047 fs/fuse/dev.c req = list_entry(head->next, struct fuse_req, list); head 43 fs/gfs2/aops.c struct buffer_head *head = page_buffers(page); head 44 fs/gfs2/aops.c unsigned int bsize = head->b_size; head 49 fs/gfs2/aops.c for (bh = head, start = 0; bh != head || !start; head 735 fs/gfs2/aops.c struct buffer_head *bh, *head; head 744 fs/gfs2/aops.c bh = head = page_buffers(page); head 753 fs/gfs2/aops.c } while (bh != head); head 774 fs/gfs2/aops.c struct buffer_head *bh, *head; head 792 fs/gfs2/aops.c head = bh = page_buffers(page); head 802 fs/gfs2/aops.c } while(bh != head); head 805 fs/gfs2/aops.c head = bh = page_buffers(page); head 818 fs/gfs2/aops.c } while (bh != head); head 57 fs/gfs2/glops.c struct list_head *head = &gl->gl_ail_list; head 64 fs/gfs2/glops.c list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) { head 555 fs/gfs2/glops.c struct gfs2_log_header_host head; head 561 fs/gfs2/glops.c error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); head 564 fs/gfs2/glops.c if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) head 569 fs/gfs2/glops.c sdp->sd_log_sequence = head.lh_sequence + 1; head 570 fs/gfs2/glops.c gfs2_log_pointers_init(sdp, head.lh_blkno); head 55 fs/gfs2/incore.h struct gfs2_log_header_host *head, int pass); head 150 fs/gfs2/log.c struct list_head *head = &sdp->sd_ail1_list; head 159 fs/gfs2/log.c list_for_each_entry_reverse(tr, head, tr_list) { head 279 fs/gfs2/log.c struct list_head *head = &tr->tr_ail2_list; head 282 fs/gfs2/log.c while (!list_empty(head)) { head 283 fs/gfs2/log.c bd = list_entry(head->prev, struct gfs2_bufdata, head 413 fs/gfs2/lops.c struct gfs2_log_header_host *head, head 424 fs/gfs2/lops.c if 
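
The fs/fs_context.c log entries (lines 447-460, plus the empty check at fs/fsopen.c line 39) implement a tiny ring: head and tail are free-running u8 counters, the slot index is head masked by a power-of-two size, full is detected as (u8)(head - tail) == logsize, and empty as head == tail. A runnable sketch under the same assumptions (power-of-two LOGSIZE smaller than 256):

    #include <stdint.h>
    #include <stdio.h>

    #define LOGSIZE 8              /* power of two, less than 256 */

    static const char *slot[LOGSIZE];
    static uint8_t head, tail;     /* free-running u8 counters */

    static void log_push(const char *msg)
    {
        if ((uint8_t)(head - tail) == LOGSIZE)
            tail++;                          /* full: drop the oldest */
        slot[head & (LOGSIZE - 1)] = msg;
        head++;                              /* wraps naturally at 256 */
    }

    static const char *log_pop(void)
    {
        if (head == tail)                    /* empty, cf. fs/fsopen.c:39 */
            return NULL;
        return slot[tail++ & (LOGSIZE - 1)];
    }

    int main(void)
    {
        static char bufs[10][16];

        for (int i = 0; i < 10; i++) {
            snprintf(bufs[i], sizeof(bufs[i]), "msg %d", i);
            log_push(bufs[i]);
        }
        for (const char *m; (m = log_pop()) != NULL; )
            printf("%s\n", m);               /* starts at "msg 2" */
        return 0;
    }

Letting the counters wrap and masking only at indexing time keeps occupancy arithmetic exact without any modulo on the counters themselves.
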
(lh.lh_sequence >= head->lh_sequence) head 425 fs/gfs2/lops.c *head = lh; head 456 fs/gfs2/lops.c struct gfs2_log_header_host *head, head 468 fs/gfs2/lops.c *done = gfs2_jhead_pg_srch(jd, head, page); head 498 fs/gfs2/lops.c int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, head 515 fs/gfs2/lops.c memset(head, 0, sizeof(*head)); head 571 fs/gfs2/lops.c gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done); head 582 fs/gfs2/lops.c gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done); head 724 fs/gfs2/lops.c struct list_head *head; head 730 fs/gfs2/lops.c head = &tr->tr_buf; head 731 fs/gfs2/lops.c while (!list_empty(head)) { head 732 fs/gfs2/lops.c bd = list_entry(head->next, struct gfs2_bufdata, bd_list); head 739 fs/gfs2/lops.c struct gfs2_log_header_host *head, int pass) head 859 fs/gfs2/lops.c struct list_head *head = &sdp->sd_log_revokes; head 872 fs/gfs2/lops.c list_for_each_entry(bd, head, bd_list) { head 897 fs/gfs2/lops.c struct list_head *head = &sdp->sd_log_revokes; head 901 fs/gfs2/lops.c while (!list_empty(head)) { head 902 fs/gfs2/lops.c bd = list_entry(head->next, struct gfs2_bufdata, bd_list); head 911 fs/gfs2/lops.c struct gfs2_log_header_host *head, int pass) head 917 fs/gfs2/lops.c jd->jd_replay_tail = head->lh_tail; head 1073 fs/gfs2/lops.c struct list_head *head; head 1079 fs/gfs2/lops.c head = &tr->tr_databuf; head 1080 fs/gfs2/lops.c while (!list_empty(head)) { head 1081 fs/gfs2/lops.c bd = list_entry(head->next, struct gfs2_bufdata, bd_list); head 28 fs/gfs2/lops.h struct gfs2_log_header_host *head, bool keep_cache); head 65 fs/gfs2/lops.h struct gfs2_log_header_host *head, head 71 fs/gfs2/lops.h gfs2_log_ops[x]->lo_before_scan(jd, head, pass); head 35 fs/gfs2/meta_io.c struct buffer_head *bh, *head; head 42 fs/gfs2/meta_io.c head = page_buffers(page); head 43 fs/gfs2/meta_io.c bh = head; head 66 fs/gfs2/meta_io.c } while ((bh = bh->b_this_page) != head); head 82 fs/gfs2/meta_io.c } while (bh != head); head 19 fs/gfs2/meta_io.h static inline void gfs2_buffer_clear_tail(struct buffer_head *bh, int head) head 21 fs/gfs2/meta_io.h BUG_ON(head > bh->b_size); head 22 fs/gfs2/meta_io.h memset(bh->b_data + head, 0, bh->b_size - head); head 612 fs/gfs2/ops_fstype.c struct gfs2_log_header_host head; head 627 fs/gfs2/ops_fstype.c error = gfs2_find_jhead(jd, &head, false); head 632 fs/gfs2/ops_fstype.c if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { head 1438 fs/gfs2/quota.c struct list_head *head = &sdp->sd_quota_list; head 1442 fs/gfs2/quota.c while (!list_empty(head)) { head 1443 fs/gfs2/quota.c qd = list_entry(head->prev, struct gfs2_quota_data, qd_list); head 57 fs/gfs2/recovery.c struct list_head *head = &jd->jd_revoke_list; head 61 fs/gfs2/recovery.c list_for_each_entry(rr, head, rr_list) { head 79 fs/gfs2/recovery.c list_add(&rr->rr_list, head); head 110 fs/gfs2/recovery.c struct list_head *head = &jd->jd_revoke_list; head 113 fs/gfs2/recovery.c while (!list_empty(head)) { head 114 fs/gfs2/recovery.c rr = list_entry(head->next, struct gfs2_revoke_replay, rr_list); head 121 fs/gfs2/recovery.c unsigned int blkno, struct gfs2_log_header_host *head) head 142 fs/gfs2/recovery.c head->lh_sequence = be64_to_cpu(lh->lh_sequence); head 143 fs/gfs2/recovery.c head->lh_flags = be32_to_cpu(lh->lh_flags); head 144 fs/gfs2/recovery.c head->lh_tail = be32_to_cpu(lh->lh_tail); head 145 fs/gfs2/recovery.c head->lh_blkno = be32_to_cpu(lh->lh_blkno); head 164 fs/gfs2/recovery.c struct gfs2_log_header_host *head) head 175 fs/gfs2/recovery.c 
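
gfs2_jhead_pg_srch() in the fs/gfs2/lops.c entries keeps the log header with the greatest sequence number seen so far; whatever survives the scan is the journal head. A sketch reduced to an in-memory array, where struct log_header abbreviates the real gfs2_log_header_host:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct log_header { uint64_t sequence; uint32_t blkno; };

    /* Mirrors the "if (lh.lh_sequence >= head->lh_sequence) *head = lh;"
     * step above: a running maximum over candidate headers. */
    static void find_jhead(const struct log_header *lh, int n,
                           struct log_header *head)
    {
        memset(head, 0, sizeof(*head));    /* as gfs2_find_jhead() does */
        for (int i = 0; i < n; i++)
            if (lh[i].sequence >= head->sequence)
                *head = lh[i];
    }

    int main(void)
    {
        struct log_header hdrs[] = { { 7, 100 }, { 9, 140 }, { 8, 180 } };
        struct log_header head;

        find_jhead(hdrs, 3, &head);
        printf("newest: seq %llu at block %u\n",
               (unsigned long long)head.sequence, head.blkno);
        return 0;   /* prints: newest: seq 9 at block 140 */
    }
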
blk, head); head 260 fs/gfs2/recovery.c struct gfs2_log_header_host *head) head 263 fs/gfs2/recovery.c u32 lblock = head->lh_blkno; head 268 fs/gfs2/recovery.c gfs2_write_log_header(sdp, jd, head->lh_sequence + 1, 0, lblock, head 298 fs/gfs2/recovery.c struct gfs2_log_header_host head; head 346 fs/gfs2/recovery.c error = gfs2_find_jhead(jd, &head, true); head 353 fs/gfs2/recovery.c if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { head 392 fs/gfs2/recovery.c jd->jd_jid, head.lh_tail, head.lh_blkno); head 395 fs/gfs2/recovery.c lops_before_scan(jd, &head, pass); head 396 fs/gfs2/recovery.c error = foreach_descriptor(jd, head.lh_tail, head 397 fs/gfs2/recovery.c head.lh_blkno, pass); head 403 fs/gfs2/recovery.c clean_journal(jd, &head); head 31 fs/gfs2/recovery.h struct gfs2_log_header_host *head); head 73 fs/gfs2/super.c static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid) head 78 fs/gfs2/super.c list_for_each_entry(jd, head, jd_list) { head 161 fs/gfs2/super.c struct gfs2_log_header_host head; head 175 fs/gfs2/super.c error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); head 179 fs/gfs2/super.c if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { head 186 fs/gfs2/super.c sdp->sd_log_sequence = head.lh_sequence + 1; head 187 fs/gfs2/super.c gfs2_log_pointers_init(sdp, head.lh_blkno); head 22 fs/hfs/btree.c struct hfs_btree_header_rec *head; head 83 fs/hfs/btree.c head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); head 84 fs/hfs/btree.c tree->root = be32_to_cpu(head->root); head 85 fs/hfs/btree.c tree->leaf_count = be32_to_cpu(head->leaf_count); head 86 fs/hfs/btree.c tree->leaf_head = be32_to_cpu(head->leaf_head); head 87 fs/hfs/btree.c tree->leaf_tail = be32_to_cpu(head->leaf_tail); head 88 fs/hfs/btree.c tree->node_count = be32_to_cpu(head->node_count); head 89 fs/hfs/btree.c tree->free_nodes = be32_to_cpu(head->free_nodes); head 90 fs/hfs/btree.c tree->attributes = be32_to_cpu(head->attributes); head 91 fs/hfs/btree.c tree->node_size = be16_to_cpu(head->node_size); head 92 fs/hfs/btree.c tree->max_key_len = be16_to_cpu(head->max_key_len); head 93 fs/hfs/btree.c tree->depth = be16_to_cpu(head->depth); head 162 fs/hfs/btree.c struct hfs_btree_header_rec *head; head 172 fs/hfs/btree.c head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc)); head 174 fs/hfs/btree.c head->root = cpu_to_be32(tree->root); head 175 fs/hfs/btree.c head->leaf_count = cpu_to_be32(tree->leaf_count); head 176 fs/hfs/btree.c head->leaf_head = cpu_to_be32(tree->leaf_head); head 177 fs/hfs/btree.c head->leaf_tail = cpu_to_be32(tree->leaf_tail); head 178 fs/hfs/btree.c head->node_count = cpu_to_be32(tree->node_count); head 179 fs/hfs/btree.c head->free_nodes = cpu_to_be32(tree->free_nodes); head 180 fs/hfs/btree.c head->attributes = cpu_to_be32(tree->attributes); head 181 fs/hfs/btree.c head->depth = cpu_to_be16(tree->depth); head 136 fs/hfsplus/btree.c struct hfs_btree_header_rec *head; head 166 fs/hfsplus/btree.c head = (struct hfs_btree_header_rec *)(kmap(page) + head 168 fs/hfsplus/btree.c tree->root = be32_to_cpu(head->root); head 169 fs/hfsplus/btree.c tree->leaf_count = be32_to_cpu(head->leaf_count); head 170 fs/hfsplus/btree.c tree->leaf_head = be32_to_cpu(head->leaf_head); head 171 fs/hfsplus/btree.c tree->leaf_tail = be32_to_cpu(head->leaf_tail); head 172 fs/hfsplus/btree.c tree->node_count = be32_to_cpu(head->node_count); head 173 fs/hfsplus/btree.c tree->free_nodes = be32_to_cpu(head->free_nodes); head 174 fs/hfsplus/btree.c 
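
The fs/hfs/btree.c and fs/hfsplus/btree.c entries convert every on-disk header field with be32_to_cpu()/cpu_to_be32() because HFS stores them big-endian regardless of host order. A userspace sketch using glibc's <endian.h> spellings (htobe32()/be32toh()); the struct is an invented slice of the real header record, and the packed attribute assumes GCC or Clang:

    #include <endian.h>            /* htobe32()/be32toh(), glibc/BSD */
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative slice of an HFS-style on-disk header: every field
     * is stored big-endian on disk. */
    struct disk_header {
        uint32_t root;
        uint32_t node_count;
        uint16_t node_size;
    } __attribute__((packed));

    int main(void)
    {
        struct disk_header d;

        /* Writing: cpu_to_be32()/cpu_to_be16() in kernel terms. */
        d.root = htobe32(17);
        d.node_count = htobe32(4096);
        d.node_size = htobe16(512);

        /* Reading back: be32_to_cpu()/be16_to_cpu(). */
        printf("root=%u nodes=%u size=%u\n",
               be32toh(d.root), be32toh(d.node_count),
               be16toh(d.node_size));
        return 0;
    }
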
tree->attributes = be32_to_cpu(head->attributes); head 175 fs/hfsplus/btree.c tree->node_size = be16_to_cpu(head->node_size); head 176 fs/hfsplus/btree.c tree->max_key_len = be16_to_cpu(head->max_key_len); head 177 fs/hfsplus/btree.c tree->depth = be16_to_cpu(head->depth); head 206 fs/hfsplus/btree.c (head->key_type == HFSPLUS_KEY_BINARY)) head 284 fs/hfsplus/btree.c struct hfs_btree_header_rec *head; head 294 fs/hfsplus/btree.c head = (struct hfs_btree_header_rec *)(kmap(page) + head 297 fs/hfsplus/btree.c head->root = cpu_to_be32(tree->root); head 298 fs/hfsplus/btree.c head->leaf_count = cpu_to_be32(tree->leaf_count); head 299 fs/hfsplus/btree.c head->leaf_head = cpu_to_be32(tree->leaf_head); head 300 fs/hfsplus/btree.c head->leaf_tail = cpu_to_be32(tree->leaf_tail); head 301 fs/hfsplus/btree.c head->node_count = cpu_to_be32(tree->node_count); head 302 fs/hfsplus/btree.c head->free_nodes = cpu_to_be32(tree->free_nodes); head 303 fs/hfsplus/btree.c head->attributes = cpu_to_be32(tree->attributes); head 304 fs/hfsplus/btree.c head->depth = cpu_to_be16(tree->depth); head 58 fs/hfsplus/xattr.c struct hfs_btree_header_rec *head; head 79 fs/hfsplus/xattr.c head = (struct hfs_btree_header_rec *)(buf + offset); head 80 fs/hfsplus/xattr.c head->node_size = cpu_to_be16(node_size); head 83 fs/hfsplus/xattr.c head->node_count = cpu_to_be32(tmp); head 84 fs/hfsplus/xattr.c head->free_nodes = cpu_to_be32(be32_to_cpu(head->node_count) - 1); head 85 fs/hfsplus/xattr.c head->clump_size = cpu_to_be32(clump_size); head 86 fs/hfsplus/xattr.c head->attributes |= cpu_to_be32(HFS_TREE_BIGKEYS | HFS_TREE_VARIDXKEYS); head 87 fs/hfsplus/xattr.c head->max_key_len = cpu_to_be16(HFSPLUS_ATTR_KEYLEN - sizeof(u16)); head 94 fs/hfsplus/xattr.c if (be32_to_cpu(head->node_count) > hdr_node_map_rec_bits) { head 98 fs/hfsplus/xattr.c desc->next = cpu_to_be32(be32_to_cpu(head->leaf_tail) + 1); head 101 fs/hfsplus/xattr.c map_nodes = (be32_to_cpu(head->node_count) - head 104 fs/hfsplus/xattr.c be32_add_cpu(&head->free_nodes, 0 - map_nodes); head 109 fs/hfsplus/xattr.c be32_to_cpu(head->node_count) - be32_to_cpu(head->free_nodes); head 873 fs/hugetlbfs/inode.c struct page *head = compound_head(page); head 875 fs/hugetlbfs/inode.c SetPageDirty(head); head 216 fs/inode.c static void i_callback(struct rcu_head *head) head 218 fs/inode.c struct inode *inode = container_of(head, struct inode, i_rcu); head 602 fs/inode.c static void dispose_list(struct list_head *head) head 604 fs/inode.c while (!list_empty(head)) { head 607 fs/inode.c inode = list_first_entry(head, struct inode, i_lru); head 814 fs/inode.c struct hlist_head *head, head 821 fs/inode.c hlist_for_each_entry(inode, head, i_hash) { head 847 fs/inode.c struct hlist_head *head, unsigned long ino) head 852 fs/inode.c hlist_for_each_entry(inode, head, i_hash) { head 1075 fs/inode.c struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval); head 1081 fs/inode.c old = find_inode(inode->i_sb, head, test, data); head 1109 fs/inode.c hlist_add_head(&inode->i_hash, head); head 1175 fs/inode.c struct hlist_head *head = inode_hashtable + hash(sb, ino); head 1179 fs/inode.c inode = find_inode_fast(sb, head, ino); head 1198 fs/inode.c old = find_inode_fast(sb, head, ino); head 1203 fs/inode.c hlist_add_head(&inode->i_hash, head); head 1333 fs/inode.c struct hlist_head *head = inode_hashtable + hash(sb, hashval); head 1337 fs/inode.c inode = find_inode(sb, head, test, data); head 1388 fs/inode.c struct hlist_head *head = inode_hashtable + hash(sb, ino); head 1392 
fs/inode.c inode = find_inode_fast(sb, head, ino); head 1437 fs/inode.c struct hlist_head *head = inode_hashtable + hash(sb, hashval); head 1442 fs/inode.c hlist_for_each_entry(inode, head, i_hash) { head 1462 fs/inode.c struct hlist_head *head = inode_hashtable + hash(sb, ino); head 1467 fs/inode.c hlist_for_each_entry(old, head, i_hash) { head 1482 fs/inode.c hlist_add_head(&inode->i_hash, head); head 84 fs/io_uring.c u32 head ____cacheline_aligned_in_smp; head 287 fs/io_uring.c struct wait_queue_head *head; head 563 fs/io_uring.c if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries) head 751 fs/io_uring.c return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head); head 1727 fs/io_uring.c spin_lock(&poll->head->lock); head 1733 fs/io_uring.c spin_unlock(&poll->head->lock); head 1812 fs/io_uring.c add_wait_queue(poll->head, &poll->wait); head 1862 fs/io_uring.c static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, head 1867 fs/io_uring.c if (unlikely(pt->req->poll.head)) { head 1873 fs/io_uring.c pt->req->poll.head = head; head 1874 fs/io_uring.c add_wait_queue(head, &pt->req->poll.wait); head 1898 fs/io_uring.c poll->head = NULL; head 1916 fs/io_uring.c if (likely(poll->head)) { head 1917 fs/io_uring.c spin_lock(&poll->head->lock); head 1930 fs/io_uring.c spin_unlock(&poll->head->lock); head 2627 fs/io_uring.c if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) { head 2633 fs/io_uring.c smp_store_release(&rings->sq.head, ctx->cached_sq_head); head 2649 fs/io_uring.c unsigned head; head 2659 fs/io_uring.c head = ctx->cached_sq_head; head 2661 fs/io_uring.c if (head == smp_load_acquire(&rings->sq.tail)) head 2664 fs/io_uring.c head = READ_ONCE(sq_array[head & ctx->sq_mask]); head 2665 fs/io_uring.c if (head < ctx->sq_entries) { head 2666 fs/io_uring.c s->index = head; head 2667 fs/io_uring.c s->sqe = &ctx->sq_sqes[head]; head 3020 fs/io_uring.c return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? 
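
The fs/io_uring.c entries here pair the ring indices with explicit ordering: the consumer reads sq.tail with smp_load_acquire() (line 2661) and publishes sq.head with smp_store_release() (line 2633), so slot contents are visible before the index that covers them. A single-producer/single-consumer sketch of the same discipline using C11 atomics in place of the kernel barriers:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_ENTRIES 8                 /* power of two */

    struct ring {
        _Atomic uint32_t head;             /* consumer-owned index */
        _Atomic uint32_t tail;             /* producer-owned index */
        int slots[RING_ENTRIES];
    };

    static int ring_push(struct ring *r, int v)  /* producer side */
    {
        uint32_t head = atomic_load_explicit(&r->head, memory_order_acquire);
        uint32_t tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

        if (tail - head == RING_ENTRIES)
            return -1;                              /* full */
        r->slots[tail & (RING_ENTRIES - 1)] = v;
        /* Release pairs with the consumer's acquire of tail. */
        atomic_store_explicit(&r->tail, tail + 1, memory_order_release);
        return 0;
    }

    static int ring_pop(struct ring *r, int *v)  /* consumer side */
    {
        uint32_t head = atomic_load_explicit(&r->head, memory_order_relaxed);
        /* Acquire: slot writes below tail are visible after this load. */
        uint32_t tail = atomic_load_explicit(&r->tail, memory_order_acquire);

        if (head == tail)
            return -1;                              /* empty */
        *v = r->slots[head & (RING_ENTRIES - 1)];
        /* Release head so the producer may safely reuse the slot. */
        atomic_store_explicit(&r->head, head + 1, memory_order_release);
        return 0;
    }

    int main(void)
    {
        struct ring r = { 0 };
        int v;

        ring_push(&r, 1);
        ring_push(&r, 2);
        while (ring_pop(&r, &v) == 0)
            printf("%d\n", v);             /* prints: 1 then 2 */
        return 0;
    }

Free-running counters plus masked indexing is the same trick as in the fs_context log above, with cross-CPU ordering added on top.
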
ret : 0; head 3665 fs/io_uring.c if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail) head 3944 fs/io_uring.c p->sq_off.head = offsetof(struct io_rings, sq.head); head 3953 fs/io_uring.c p->cq_off.head = offsetof(struct io_rings, cq.head); head 2075 fs/jbd2/transaction.c struct buffer_head *head; head 2081 fs/jbd2/transaction.c head = page_buffers(page); head 2082 fs/jbd2/transaction.c bh = head; head 2101 fs/jbd2/transaction.c } while ((bh = bh->b_this_page) != head); head 2374 fs/jbd2/transaction.c struct buffer_head *head, *bh, *next; head 2392 fs/jbd2/transaction.c head = bh = page_buffers(page); head 2412 fs/jbd2/transaction.c } while (bh != head); head 565 fs/jffs2/nodemgmt.c static inline int on_list(struct list_head *obj, struct list_head *head) head 569 fs/jffs2/nodemgmt.c list_for_each(this, head) { head 571 fs/jffs2/nodemgmt.c jffs2_dbg(1, "%p is on list at %p\n", obj, head); head 1130 fs/jffs2/scan.c static void rotate_list(struct list_head *head, uint32_t count) head 1132 fs/jffs2/scan.c struct list_head *n = head->next; head 1134 fs/jffs2/scan.c list_del(head); head 1138 fs/jffs2/scan.c list_add(head, n); head 2846 fs/jfs/jfs_imap.c int i, n, head; head 2908 fs/jfs/jfs_imap.c if ((head = imap->im_agctl[n].inofree) == -1) { head 2912 fs/jfs/jfs_imap.c if ((rc = diIAGRead(imap, head, &hbp))) { head 2918 fs/jfs/jfs_imap.c iagp->inofreefwd = cpu_to_le32(head); head 2934 fs/jfs/jfs_imap.c if ((head = imap->im_agctl[n].extfree) == -1) { head 2938 fs/jfs/jfs_imap.c if ((rc = diIAGRead(imap, head, &hbp))) { head 2944 fs/jfs/jfs_imap.c iagp->extfreefwd = cpu_to_le32(head); head 2920 fs/locks.c struct list_head *head, int *id, head 2925 fs/locks.c list_for_each_entry(fl, head, fl_list) { head 79 fs/mbcache.c struct hlist_bl_head *head; head 99 fs/mbcache.c head = mb_cache_entry_head(cache, key); head 100 fs/mbcache.c hlist_bl_lock(head); head 101 fs/mbcache.c hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) { head 103 fs/mbcache.c hlist_bl_unlock(head); head 108 fs/mbcache.c hlist_bl_add_head(&entry->e_hash_list, head); head 109 fs/mbcache.c hlist_bl_unlock(head); head 134 fs/mbcache.c struct hlist_bl_head *head; head 136 fs/mbcache.c head = mb_cache_entry_head(cache, key); head 137 fs/mbcache.c hlist_bl_lock(head); head 141 fs/mbcache.c node = hlist_bl_first(head); head 153 fs/mbcache.c hlist_bl_unlock(head); head 202 fs/mbcache.c struct hlist_bl_head *head; head 205 fs/mbcache.c head = mb_cache_entry_head(cache, key); head 206 fs/mbcache.c hlist_bl_lock(head); head 207 fs/mbcache.c hlist_bl_for_each_entry(entry, node, head, e_hash_list) { head 215 fs/mbcache.c hlist_bl_unlock(head); head 230 fs/mbcache.c struct hlist_bl_head *head; head 233 fs/mbcache.c head = mb_cache_entry_head(cache, key); head 234 fs/mbcache.c hlist_bl_lock(head); head 235 fs/mbcache.c hlist_bl_for_each_entry(entry, node, head, e_hash_list) { head 239 fs/mbcache.c hlist_bl_unlock(head); head 253 fs/mbcache.c hlist_bl_unlock(head); head 284 fs/mbcache.c struct hlist_bl_head *head; head 303 fs/mbcache.c head = mb_cache_entry_head(cache, entry->e_key); head 304 fs/mbcache.c hlist_bl_lock(head); head 309 fs/mbcache.c hlist_bl_unlock(head); head 107 fs/mpage.c struct buffer_head *page_bh, *head; head 122 fs/mpage.c head = page_buffers(page); head 123 fs/mpage.c page_bh = head; head 133 fs/mpage.c } while (page_bh != head); head 464 fs/mpage.c struct buffer_head *bh, *head; head 467 fs/mpage.c head = page_buffers(page); head 468 fs/mpage.c bh = head; head 475 fs/mpage.c } while (bh != head); head 522 
fs/mpage.c struct buffer_head *head = page_buffers(page); head 523 fs/mpage.c struct buffer_head *bh = head; head 557 fs/mpage.c } while ((bh = bh->b_this_page) != head); head 557 fs/namespace.c static void delayed_free_vfsmnt(struct rcu_head *head) head 559 fs/namespace.c free_vfsmnt(container_of(head, struct mount, mnt_rcu)); head 610 fs/namespace.c struct hlist_head *head = m_hash(mnt, dentry); head 613 fs/namespace.c hlist_for_each_entry_rcu(p, head, mnt_hash) head 880 fs/namespace.c LIST_HEAD(head); head 885 fs/namespace.c list_add_tail(&head, &mnt->mnt_list); head 886 fs/namespace.c list_for_each_entry(m, &head, mnt_list) head 889 fs/namespace.c list_splice(&head, n->list.prev); head 1107 fs/namespace.c static void __cleanup_mnt(struct rcu_head *head) head 1109 fs/namespace.c cleanup_mnt(container_of(head, struct mount, mnt_rcu)); head 1360 fs/namespace.c struct hlist_head head; head 1365 fs/namespace.c hlist_move_list(&unmounted, &head); head 1372 fs/namespace.c if (likely(hlist_empty(&head))) head 1377 fs/namespace.c hlist_for_each_entry_safe(m, p, &head, mnt_umount) { head 125 fs/nfs/blocklayout/extent_tree.c static void __ext_put_deviceids(struct list_head *head) head 129 fs/nfs/blocklayout/extent_tree.c list_for_each_entry_safe(be, tmp, head, be_list) { head 938 fs/nfs/callback_xdr.c rqstp->rq_arg.head[0].iov_base, NULL); head 940 fs/nfs/callback_xdr.c p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len); head 2180 fs/nfs/dir.c static void nfs_access_free_list(struct list_head *head) head 2184 fs/nfs/dir.c while (!list_empty(head)) { head 2185 fs/nfs/dir.c cache = list_entry(head->next, struct nfs_access_entry, lru); head 2194 fs/nfs/dir.c LIST_HEAD(head); head 2211 fs/nfs/dir.c list_move(&cache->lru, &head); head 2227 fs/nfs/dir.c nfs_access_free_list(&head); head 2265 fs/nfs/dir.c static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head) head 2275 fs/nfs/dir.c list_move(&entry->lru, head); head 2282 fs/nfs/dir.c LIST_HEAD(head); head 2292 fs/nfs/dir.c __nfs_access_zap_cache(NFS_I(inode), &head); head 2295 fs/nfs/dir.c nfs_access_free_list(&head); head 417 fs/nfs/direct.c static void nfs_read_sync_pgio_error(struct list_head *head, int error) head 421 fs/nfs/direct.c while (!list_empty(head)) { head 422 fs/nfs/direct.c req = nfs_list_entry(head->next); head 808 fs/nfs/direct.c static void nfs_write_sync_pgio_error(struct list_head *head, int error) head 812 fs/nfs/direct.c while (!list_empty(head)) { head 813 fs/nfs/direct.c req = nfs_list_entry(head->next); head 106 fs/nfs/dns_resolve.c static void nfs_dns_ent_free_rcu(struct rcu_head *head) head 110 fs/nfs/dns_resolve.c item = container_of(head, struct nfs_dns_ent, rcu_head); head 2112 fs/nfs/flexfilelayout/flexfilelayout.c .head = { head 2196 fs/nfs/flexfilelayout/flexfilelayout.c LIST_HEAD(head); head 2200 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1); head 2201 fs/nfs/flexfilelayout/flexfilelayout.c if (list_empty(&head)) head 2210 fs/nfs/flexfilelayout/flexfilelayout.c list_for_each_entry(pos, &head, list) { head 2218 fs/nfs/flexfilelayout/flexfilelayout.c if (!list_is_last(&pos->list, &head) && head 2227 fs/nfs/flexfilelayout/flexfilelayout.c ff_layout_free_ds_ioerr(&head); head 199 fs/nfs/flexfilelayout/flexfilelayout.h int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head); head 200 fs/nfs/flexfilelayout/flexfilelayout.h void ff_layout_free_ds_ioerr(struct list_head *head); head 203 
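
Several entries here share one shape: fs/namespace.c delayed_free_vfsmnt(), fs/nfs/dns_resolve.c nfs_dns_ent_free_rcu(), and the fs/dcache.c, fs/eventpoll.c, and fs/fcntl.c callbacks earlier all embed a small callback head in a larger object and recover the object with container_of() inside the callback. A self-contained sketch of just that recovery step; struct cb_head and widget_free are invented stand-ins, and the direct call at the end replaces the kernel's deferred call_rcu():

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cb_head {                   /* stand-in for struct rcu_head */
        void (*func)(struct cb_head *);
    };

    struct widget {
        int id;
        struct cb_head head;           /* embedded, like d_u.d_rcu */
    };

    static void widget_free(struct cb_head *h)
    {
        /* Recover the enclosing object from the embedded head. */
        struct widget *w = container_of(h, struct widget, head);

        printf("freeing widget %d\n", w->id);
        free(w);
    }

    int main(void)
    {
        struct widget *w = malloc(sizeof(*w));

        if (!w)
            return 1;
        w->id = 42;
        w->head.func = widget_free;
        /* The kernel would call_rcu(&w->head, ...); invoke directly here. */
        w->head.func(&w->head);
        return 0;
    }
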
fs/nfs/flexfilelayout/flexfilelayout.h struct list_head *head, head 227 fs/nfs/flexfilelayout/flexfilelayoutdev.c struct list_head *head = &flo->error_list; head 237 fs/nfs/flexfilelayout/flexfilelayoutdev.c head = &err->list; head 247 fs/nfs/flexfilelayout/flexfilelayoutdev.c list_add_tail(&dserr->list, head); head 459 fs/nfs/flexfilelayout/flexfilelayoutdev.c void ff_layout_free_ds_ioerr(struct list_head *head) head 463 fs/nfs/flexfilelayout/flexfilelayoutdev.c while (!list_empty(head)) { head 464 fs/nfs/flexfilelayout/flexfilelayoutdev.c err = list_first_entry(head, head 473 fs/nfs/flexfilelayout/flexfilelayoutdev.c int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head) head 478 fs/nfs/flexfilelayout/flexfilelayoutdev.c list_for_each_entry(err, head, list) { head 508 fs/nfs/flexfilelayout/flexfilelayoutdev.c struct list_head *head, head 525 fs/nfs/flexfilelayout/flexfilelayoutdev.c list_move(&err->list, head); head 535 fs/nfs/flexfilelayout/flexfilelayoutdev.c struct list_head *head, head 540 fs/nfs/flexfilelayout/flexfilelayoutdev.c ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum); head 479 fs/nfs/internal.h struct list_head *head, head 493 fs/nfs/internal.h int nfs_generic_commit_list(struct inode *inode, struct list_head *head, head 582 fs/nfs/nfs42proc.c LIST_HEAD(head); head 588 fs/nfs/nfs42proc.c pnfs_mark_layout_stateid_invalid(lo, &head); head 590 fs/nfs/nfs42proc.c pnfs_free_lseg_list(&head); head 749 fs/nfs/nfs42proc.c LIST_HEAD(head); head 755 fs/nfs/nfs42proc.c pnfs_mark_layout_stateid_invalid(lo, &head); head 757 fs/nfs/nfs42proc.c pnfs_free_lseg_list(&head); head 467 fs/nfs/nfs4_fs.h extern void nfs4_free_state_owners(struct list_head *head); head 9025 fs/nfs/nfs4proc.c LIST_HEAD(head); head 9087 fs/nfs/nfs4proc.c pnfs_mark_layout_stateid_invalid(lo, &head); head 9090 fs/nfs/nfs4proc.c pnfs_free_lseg_list(&head); head 633 fs/nfs/nfs4state.c void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head) head 640 fs/nfs/nfs4state.c list_move(&sp->so_lru, head); head 653 fs/nfs/nfs4state.c void nfs4_free_state_owners(struct list_head *head) head 657 fs/nfs/nfs4state.c list_for_each_entry_safe(sp, tmp, head, so_lru) { head 5294 fs/nfs/nfs4xdr.c pg_offset = xdr->buf->head[0].iov_len; head 147 fs/nfs/pagelist.c struct nfs_page *head = req->wb_head; head 149 fs/nfs/pagelist.c WARN_ON_ONCE(head != head->wb_head); head 151 fs/nfs/pagelist.c if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags)) head 154 fs/nfs/pagelist.c set_bit(PG_CONTENDED1, &head->wb_flags); head 156 fs/nfs/pagelist.c return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, head 167 fs/nfs/pagelist.c struct nfs_page *head = req->wb_head; head 169 fs/nfs/pagelist.c WARN_ON_ONCE(head != head->wb_head); head 172 fs/nfs/pagelist.c clear_bit(PG_HEADLOCK, &head->wb_flags); head 174 fs/nfs/pagelist.c if (!test_bit(PG_CONTENDED1, &head->wb_flags)) head 176 fs/nfs/pagelist.c wake_up_bit(&head->wb_flags, PG_HEADLOCK); head 187 fs/nfs/pagelist.c struct nfs_page *head = req->wb_head; head 190 fs/nfs/pagelist.c WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags)); head 278 fs/nfs/pagelist.c struct nfs_page *head = req->wb_head; head 295 fs/nfs/pagelist.c if (head != req) head 296 fs/nfs/pagelist.c nfs_release_request(head); head 766 fs/nfs/pagelist.c struct list_head *head = &mirror->pg_list; head 791 fs/nfs/pagelist.c while (!list_empty(head)) { head 792 fs/nfs/pagelist.c req = nfs_list_entry(head->next); head 1016 fs/nfs/pagelist.c LIST_HEAD(head); head 1018 
fs/nfs/pagelist.c nfs_list_move_request(req, &head); head 1019 fs/nfs/pagelist.c desc->pg_completion_ops->error_cleanup(&head, desc->pg_error); head 1098 fs/nfs/pagelist.c LIST_HEAD(head); head 1101 fs/nfs/pagelist.c list_splice_init(&mirror->pg_list, &head); head 1107 fs/nfs/pagelist.c while (!list_empty(&head)) { head 1110 fs/nfs/pagelist.c req = list_first_entry(&head, struct nfs_page, wb_list); head 1114 fs/nfs/pagelist.c list_splice_tail(&head, &mirror->pg_list); head 376 fs/nfs/pnfs.c LIST_HEAD(head); head 390 fs/nfs/pnfs.c err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0); head 399 fs/nfs/pnfs.c pnfs_free_lseg_list(&head); head 463 fs/nfs/pnfs.c LIST_HEAD(head); head 467 fs/nfs/pnfs.c pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0); head 469 fs/nfs/pnfs.c pnfs_free_lseg_list(&head); head 1081 fs/nfs/pnfs.c struct list_head *head) head 1091 fs/nfs/pnfs.c pnfs_lseg_dec_and_remove_zero(lseg, head); head 221 fs/nfs/read.c nfs_async_read_error(struct list_head *head, int error) head 225 fs/nfs/read.c while (!list_empty(head)) { head 226 fs/nfs/read.c req = nfs_list_entry(head->next); head 277 fs/nfs/write.c nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset) head 281 fs/nfs/write.c req = head; head 288 fs/nfs/write.c } while (req != head); head 394 fs/nfs/write.c nfs_unroll_locks(struct inode *inode, struct nfs_page *head, head 400 fs/nfs/write.c for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) { head 479 fs/nfs/write.c struct nfs_page *head, *subreq; head 490 fs/nfs/write.c head = nfs_page_find_head_request(page); head 491 fs/nfs/write.c if (!head) head 495 fs/nfs/write.c if (!nfs_lock_request(head)) { head 496 fs/nfs/write.c ret = nfs_wait_on_request(head); head 497 fs/nfs/write.c nfs_release_request(head); head 504 fs/nfs/write.c if (head != nfs_page_private_request(page) && !PageSwapCache(page)) { head 505 fs/nfs/write.c nfs_unlock_and_release_request(head); head 509 fs/nfs/write.c ret = nfs_page_group_lock(head); head 514 fs/nfs/write.c total_bytes = head->wb_bytes; head 515 fs/nfs/write.c for (subreq = head->wb_this_page; subreq != head; head 519 fs/nfs/write.c if (subreq->wb_offset == head->wb_offset + total_bytes) head 529 fs/nfs/write.c nfs_page_group_unlock(head); head 532 fs/nfs/write.c ret = nfs_page_group_lock(head); head 534 fs/nfs/write.c nfs_unroll_locks(inode, head, subreq); head 543 fs/nfs/write.c if (subreq->wb_offset == (head->wb_offset + total_bytes)) { head 546 fs/nfs/write.c } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset || head 548 fs/nfs/write.c (head->wb_offset + total_bytes)))) { head 549 fs/nfs/write.c nfs_page_group_unlock(head); head 550 fs/nfs/write.c nfs_unroll_locks(inode, head, subreq); head 559 fs/nfs/write.c subreq = head; head 563 fs/nfs/write.c } while (subreq != head); head 566 fs/nfs/write.c if (head->wb_this_page != head) { head 568 fs/nfs/write.c destroy_list = head->wb_this_page; head 569 fs/nfs/write.c head->wb_this_page = head; head 573 fs/nfs/write.c head->wb_bytes = total_bytes; head 577 fs/nfs/write.c if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) { head 578 fs/nfs/write.c set_bit(PG_INODE_REF, &head->wb_flags); head 579 fs/nfs/write.c kref_get(&head->wb_kref); head 583 fs/nfs/write.c nfs_page_group_unlock(head); head 585 fs/nfs/write.c nfs_destroy_unlinked_subrequests(destroy_list, head, inode); head 589 fs/nfs/write.c nfs_unlock_and_release_request(head); head 595 fs/nfs/write.c return head; head 598 fs/nfs/write.c nfs_unlock_and_release_request(head); head 
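
nfs_page_group_search_locked() in the fs/nfs/write.c entries (lines 277-288) walks a circular singly linked group: each subrequest's wb_this_page points at the next member, the last points back at the head, and the do/while stops on returning to head. The buffer_head b_this_page rings throughout this listing have the same shape. A sketch with an invented struct subreq:

    #include <stdio.h>

    /* A circular singly linked group, like wb_this_page (or
     * b_this_page for buffer_heads). */
    struct subreq {
        unsigned offset, bytes;
        struct subreq *this_page;      /* next member of the group */
    };

    /* Visit the whole group exactly once, starting and ending at head. */
    static unsigned group_total_bytes(struct subreq *head)
    {
        unsigned total = 0;
        struct subreq *req = head;

        do {
            total += req->bytes;
            req = req->this_page;
        } while (req != head);
        return total;
    }

    int main(void)
    {
        struct subreq a = { 0,    512,  NULL };
        struct subreq b = { 512,  512,  NULL };
        struct subreq c = { 1024, 1024, NULL };

        a.this_page = &b;
        b.this_page = &c;
        c.this_page = &a;              /* close the ring back to head */
        printf("%u\n", group_total_bytes(&a));   /* prints: 2048 */
        return 0;
    }

A do/while is the natural loop form here because a one-member group must still be visited before the termination test can fire.
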
796 fs/nfs/write.c struct nfs_page *head; head 799 fs/nfs/write.c head = req->wb_head; head 802 fs/nfs/write.c if (likely(head->wb_page && !PageSwapCache(head->wb_page))) { head 803 fs/nfs/write.c set_page_private(head->wb_page, 0); head 804 fs/nfs/write.c ClearPagePrivate(head->wb_page); head 805 fs/nfs/write.c clear_bit(PG_MAPPED, &head->wb_flags); head 1433 fs/nfs/write.c static void nfs_async_write_error(struct list_head *head, int error) head 1437 fs/nfs/write.c while (!list_empty(head)) { head 1438 fs/nfs/write.c req = nfs_list_entry(head->next); head 1727 fs/nfs/write.c static loff_t nfs_get_lwb(struct list_head *head) head 1732 fs/nfs/write.c list_for_each_entry(req, head, wb_list) head 1743 fs/nfs/write.c struct list_head *head, head 1747 fs/nfs/write.c struct nfs_page *first = nfs_list_entry(head->next); head 1754 fs/nfs/write.c list_splice_init(head, &data->pages); head 1806 fs/nfs/write.c nfs_commit_list(struct inode *inode, struct list_head *head, int how, head 1812 fs/nfs/write.c if (list_empty(head)) head 1818 fs/nfs/write.c nfs_init_commit(data, head, NULL, cinfo); head 1913 fs/nfs/write.c int nfs_generic_commit_list(struct inode *inode, struct list_head *head, head 1918 fs/nfs/write.c status = pnfs_commit_list(inode, head, how, cinfo); head 1920 fs/nfs/write.c status = nfs_commit_list(inode, head, how, cinfo); head 1927 fs/nfs/write.c LIST_HEAD(head); head 1935 fs/nfs/write.c ret = nscan = nfs_scan_commit(inode, &head, &cinfo); head 1938 fs/nfs/write.c ret = nfs_generic_commit_list(inode, &head, how, &cinfo); head 365 fs/nfsd/filecache.c struct list_head *head = arg; head 394 fs/nfsd/filecache.c list_lru_isolate_move(lru, &nf->nf_lru, head); head 403 fs/nfsd/filecache.c nfsd_file_lru_dispose(struct list_head *head) head 405 fs/nfsd/filecache.c while(!list_empty(head)) { head 406 fs/nfsd/filecache.c struct nfsd_file *nf = list_first_entry(head, head 425 fs/nfsd/filecache.c LIST_HEAD(head); head 428 fs/nfsd/filecache.c ret = list_lru_shrink_walk(&nfsd_file_lru, sc, nfsd_file_lru_cb, &head); head 429 fs/nfsd/filecache.c nfsd_file_lru_dispose(&head); head 507 fs/nfsd/filecache.c LIST_HEAD(head); head 509 fs/nfsd/filecache.c list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb, &head, LONG_MAX); head 514 fs/nfsd/filecache.c if (!list_empty(&head)) { head 515 fs/nfsd/filecache.c nfsd_file_lru_dispose(&head); head 202 fs/nfsd/nfs2acl.c struct kvec *head = rqstp->rq_arg.head; head 214 fs/nfsd/nfs2acl.c base = (char *)p - (char *)head->iov_base; head 266 fs/nfsd/nfs2acl.c struct kvec *head = rqstp->rq_res.head; head 284 fs/nfsd/nfs2acl.c base = (char *)p - (char *)head->iov_base; head 144 fs/nfsd/nfs3acl.c struct kvec *head = rqstp->rq_arg.head; head 156 fs/nfsd/nfs3acl.c base = (char *)p - (char *)head->iov_base; head 180 fs/nfsd/nfs3acl.c struct kvec *head = rqstp->rq_res.head; head 188 fs/nfsd/nfs3acl.c base = (char *)p - (char *)head->iov_base; head 402 fs/nfsd/nfs3xdr.c struct kvec *head = rqstp->rq_arg.head; head 413 fs/nfsd/nfs3xdr.c if ((void *)p > head->iov_base + head->iov_len) head 425 fs/nfsd/nfs3xdr.c hdr = (void*)p - head->iov_base; head 426 fs/nfsd/nfs3xdr.c dlen = head->iov_len + rqstp->rq_arg.page_len + tail->iov_len - hdr; head 444 fs/nfsd/nfs3xdr.c args->first.iov_len = head->iov_len - hdr; head 501 fs/nfsd/nfs3xdr.c args->first.iov_len = rqstp->rq_arg.head[0].iov_len; head 818 fs/nfsd/nfs3xdr.c if (rqstp->rq_res.head[0].iov_len + (2<<2) > PAGE_SIZE) head 1901 fs/nfsd/nfs4proc.c struct kvec *head = buf->head; head 1904 fs/nfsd/nfs4proc.c xdr->iov = head; head 1905 
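
The fs/nfsd/nfs3xdr.c entries (lines 402-444) treat the first kvec of the request buffer as the "head" segment and convert a decode cursor into an offset from head->iov_base before bounds-checking it. A sketch with userspace struct iovec; struct xdr_buf here is a pared-down stand-in for the sunrpc original:

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>           /* struct iovec; the kernel kvec is similar */

    /* An RPC-style buffer whose first segment is the head iovec. */
    struct xdr_buf {
        struct iovec head[1];
        size_t page_len;           /* payload carried in pages, not shown */
    };

    int main(void)
    {
        char raw[64] = "rpc-header:payload";
        struct xdr_buf buf = {
            .head = { { raw, sizeof(raw) } },
            .page_len = 0,
        };

        /* A decode cursor somewhere inside the head segment... */
        char *p = raw + strlen("rpc-header:");

        /* ...becomes an offset so later code can bounds-check it. */
        size_t hdr = (size_t)(p - (char *)buf.head->iov_base);
        size_t dlen = buf.head->iov_len + buf.page_len - hdr;

        printf("hdr=%zu remaining=%zu\n", hdr, dlen);
        return 0;
    }
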
fs/nfsd/nfs4proc.c xdr->p = head->iov_base + head->iov_len; head 1906 fs/nfsd/nfs4proc.c xdr->end = head->iov_base + PAGE_SIZE - rqstp->rq_auth_slack; head 1908 fs/nfsd/nfs4proc.c buf->len = buf->head[0].iov_len; head 2950 fs/nfsd/nfs4xdr.c buf->head[0].iov_base = p; head 2951 fs/nfsd/nfs4xdr.c buf->head[0].iov_len = 0; head 2954 fs/nfsd/nfs4xdr.c xdr->iov = buf->head; head 3679 fs/nfsd/nfs4xdr.c resp->xdr.buf->head[0].iov_len = ((char *)resp->xdr.p) head 3680 fs/nfsd/nfs4xdr.c - (char *)resp->xdr.buf->head[0].iov_base; head 4555 fs/nfsd/nfs4xdr.c if (rqstp->rq_arg.head[0].iov_len % 4) { head 4562 fs/nfsd/nfs4xdr.c args->end = rqstp->rq_arg.head[0].iov_base + rqstp->rq_arg.head[0].iov_len; head 4583 fs/nfsd/nfs4xdr.c WARN_ON_ONCE(buf->len != buf->head[0].iov_len + buf->page_len + head 200 fs/nfsd/nfscache.c struct list_head *head = &nn->drc_hashtbl[i].lru_head; head 201 fs/nfsd/nfscache.c while (!list_empty(head)) { head 202 fs/nfsd/nfscache.c rp = list_first_entry(head, struct svc_cacherep, c_lru); head 298 fs/nfsd/nfscache.c const unsigned char *p = buf->head[0].iov_base; head 299 fs/nfsd/nfscache.c size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len, head 301 fs/nfsd/nfscache.c size_t len = min(buf->head[0].iov_len, csum_len); head 458 fs/nfsd/nfscache.c svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat); head 495 fs/nfsd/nfscache.c struct kvec *resv = &rqstp->rq_res.head[0], *cachv; head 555 fs/nfsd/nfscache.c struct kvec *vec = &rqstp->rq_res.head[0]; head 1007 fs/nfsd/nfssvc.c !proc->pc_decode(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base)) { head 1026 fs/nfsd/nfssvc.c nfserrp = rqstp->rq_res.head[0].iov_base head 1027 fs/nfsd/nfssvc.c + rqstp->rq_res.head[0].iov_len; head 1028 fs/nfsd/nfssvc.c rqstp->rq_res.head[0].iov_len += sizeof(__be32); head 274 fs/nfsd/nfsxdr.c struct kvec *head = rqstp->rq_arg.head; head 294 fs/nfsd/nfsxdr.c hdr = (void*)p - head->iov_base; head 295 fs/nfsd/nfsxdr.c if (hdr > head->iov_len) head 297 fs/nfsd/nfsxdr.c dlen = head->iov_len + rqstp->rq_arg.page_len - hdr; head 311 fs/nfsd/nfsxdr.c args->first.iov_len = head->iov_len - hdr; head 384 fs/nfsd/nfsxdr.c args->first.iov_len = rqstp->rq_arg.head[0].iov_len; head 2104 fs/nilfs2/btree.c struct list_head *head; head 2125 fs/nilfs2/btree.c list_for_each(head, &lists[level]) { head 2126 fs/nilfs2/btree.c cbh = list_entry(head, struct buffer_head, b_assoc_buffers); head 2132 fs/nilfs2/btree.c list_add_tail(&bh->b_assoc_buffers, head); head 2141 fs/nilfs2/btree.c struct buffer_head *bh, *head; head 2155 fs/nilfs2/btree.c bh = head = page_buffers(pvec.pages[i]); head 2160 fs/nilfs2/btree.c } while ((bh = bh->b_this_page) != head); head 72 fs/nilfs2/file.c struct buffer_head *bh, *head; head 75 fs/nilfs2/file.c bh = head = page_buffers(page); head 81 fs/nilfs2/file.c } while (bh = bh->b_this_page, bh != head); head 181 fs/nilfs2/gcinode.c struct list_head *head = &nilfs->ns_gc_inodes; head 184 fs/nilfs2/gcinode.c while (!list_empty(head)) { head 185 fs/nilfs2/gcinode.c ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); head 217 fs/nilfs2/inode.c struct buffer_head *bh, *head; head 226 fs/nilfs2/inode.c bh = head = page_buffers(page); head 234 fs/nilfs2/inode.c } while (bh = bh->b_this_page, bh != head); head 596 fs/nilfs2/mdt.c struct list_head *head = &shadow->frozen_buffers; head 599 fs/nilfs2/mdt.c while (!list_empty(head)) { head 600 fs/nilfs2/mdt.c bh = list_first_entry(head, struct buffer_head, head 143 fs/nilfs2/page.c struct buffer_head *bh, *head; head 145 fs/nilfs2/page.c 
bh = head = page_buffers(page); head 150 fs/nilfs2/page.c } while (bh != head); head 173 fs/nilfs2/page.c struct buffer_head *bh, *head; head 176 fs/nilfs2/page.c bh = head = page_buffers(page); head 183 fs/nilfs2/page.c } while (bh != head); head 402 fs/nilfs2/page.c struct buffer_head *bh, *head; head 408 fs/nilfs2/page.c bh = head = page_buffers(page); head 418 fs/nilfs2/page.c } while (bh = bh->b_this_page, bh != head); head 428 fs/nilfs2/page.c struct buffer_head *bh, *head; head 431 fs/nilfs2/page.c for (bh = head = page_buffers(page), block_start = 0; head 432 fs/nilfs2/page.c bh != head || !block_start; head 527 fs/nilfs2/page.c struct buffer_head *bh, *head; head 529 fs/nilfs2/page.c bh = head = page_buffers(page); head 540 fs/nilfs2/page.c } while (++b, bh = bh->b_this_page, bh != head); head 306 fs/nilfs2/recovery.c struct list_head *head) head 359 fs/nilfs2/recovery.c list_add_tail(&rb->list, head); head 375 fs/nilfs2/recovery.c static void dispose_recovery_list(struct list_head *head) head 377 fs/nilfs2/recovery.c while (!list_empty(head)) { head 380 fs/nilfs2/recovery.c rb = list_first_entry(head, struct nilfs_recovery_block, list); head 391 fs/nilfs2/recovery.c static int nilfs_segment_list_add(struct list_head *head, __u64 segnum) head 400 fs/nilfs2/recovery.c list_add_tail(&ent->list, head); head 404 fs/nilfs2/recovery.c void nilfs_dispose_segment_list(struct list_head *head) head 406 fs/nilfs2/recovery.c while (!list_empty(head)) { head 409 fs/nilfs2/recovery.c ent = list_first_entry(head, struct nilfs_segment_entry, list); head 419 fs/nilfs2/recovery.c struct list_head *head = &ri->ri_used_segments; head 440 fs/nilfs2/recovery.c err = nilfs_segment_list_add(head, segnum[i]); head 449 fs/nilfs2/recovery.c list_for_each_entry_safe(ent, n, head, list) { head 494 fs/nilfs2/recovery.c struct list_head *head, head 504 fs/nilfs2/recovery.c list_for_each_entry_safe(rb, n, head, list) { head 85 fs/nilfs2/segbuf.h #define NILFS_LIST_SEGBUF(head) \ head 86 fs/nilfs2/segbuf.h list_entry((head), struct nilfs_segment_buffer, sb_list) head 89 fs/nilfs2/segbuf.h #define NILFS_LAST_SEGBUF(head) NILFS_LIST_SEGBUF((head)->prev) head 90 fs/nilfs2/segbuf.h #define NILFS_FIRST_SEGBUF(head) NILFS_LIST_SEGBUF((head)->next) head 91 fs/nilfs2/segbuf.h #define NILFS_SEGBUF_IS_LAST(segbuf, head) ((segbuf)->sb_list.next == (head)) head 97 fs/nilfs2/segbuf.h #define NILFS_SEGBUF_FIRST_BH(head) \ head 98 fs/nilfs2/segbuf.h (list_entry((head)->next, struct buffer_head, b_assoc_buffers)) head 102 fs/nilfs2/segbuf.h #define NILFS_SEGBUF_BH_IS_LAST(bh, head) ((bh)->b_assoc_buffers.next == head) head 710 fs/nilfs2/segment.c struct buffer_head *bh, *head; head 718 fs/nilfs2/segment.c bh = head = page_buffers(page); head 730 fs/nilfs2/segment.c } while (bh = bh->b_this_page, bh != head); head 743 fs/nilfs2/segment.c struct buffer_head *bh, *head; head 752 fs/nilfs2/segment.c bh = head = page_buffers(pvec.pages[i]); head 761 fs/nilfs2/segment.c } while (bh != head); head 769 fs/nilfs2/segment.c struct list_head *head, int force) head 775 fs/nilfs2/segment.c while (!list_empty(head)) { head 777 fs/nilfs2/segment.c list_for_each_entry_safe(ii, n, head, i_dirty) { head 978 fs/nilfs2/segment.c static void nilfs_redirty_inodes(struct list_head *head) head 982 fs/nilfs2/segment.c list_for_each_entry(ii, head, i_dirty) { head 988 fs/nilfs2/segment.c static void nilfs_drop_collected_inodes(struct list_head *head) head 992 fs/nilfs2/segment.c list_for_each_entry(ii, head, i_dirty) { head 1116 fs/nilfs2/segment.c 
struct list_head *head; head 1144 fs/nilfs2/segment.c head = &sci->sc_gc_inodes; head 1146 fs/nilfs2/segment.c head, i_dirty); head 1147 fs/nilfs2/segment.c list_for_each_entry_continue(ii, head, i_dirty) { head 1164 fs/nilfs2/segment.c head = &sci->sc_dirty_files; head 1165 fs/nilfs2/segment.c ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head, head 1167 fs/nilfs2/segment.c list_for_each_entry_continue(ii, head, i_dirty) { head 2198 fs/nilfs2/segment.c list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) { head 2404 fs/nilfs2/segment.c nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head) head 2408 fs/nilfs2/segment.c list_for_each_entry_safe(ii, n, head, i_dirty) { head 725 fs/notify/mark.c struct list_head *head = &to_free; head 729 fs/notify/mark.c head = &group->marks_list; head 751 fs/notify/mark.c if (list_empty(head)) { head 755 fs/notify/mark.c mark = list_first_entry(head, struct fsnotify_mark, g_list); head 177 fs/ntfs/aops.c struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; head 201 fs/ntfs/aops.c bh = head = page_buffers(page); head 327 fs/ntfs/aops.c } while (i++, iblock++, (bh = bh->b_this_page) != head); head 544 fs/ntfs/aops.c struct buffer_head *bh, *head; head 579 fs/ntfs/aops.c bh = head = page_buffers(page); head 795 fs/ntfs/aops.c } while (block++, (bh = bh->b_this_page) != head); head 802 fs/ntfs/aops.c bh = head; head 810 fs/ntfs/aops.c bh = head; head 813 fs/ntfs/aops.c } while ((bh = bh->b_this_page) != head); head 835 fs/ntfs/aops.c } while ((bh = bh->b_this_page) != head); head 867 fs/ntfs/aops.c } while (bh != head); head 912 fs/ntfs/aops.c struct buffer_head *bh, *head, *tbh, *rec_start_bh; head 946 fs/ntfs/aops.c bh = head = page_buffers(page); head 1098 fs/ntfs/aops.c } while (block++, (bh = bh->b_this_page) != head); head 1718 fs/ntfs/aops.c struct buffer_head *bh, *head, *buffers_to_free = NULL; head 1727 fs/ntfs/aops.c bh = head = alloc_page_buffers(page, bh_size, true); head 1737 fs/ntfs/aops.c tail->b_this_page = head; head 1738 fs/ntfs/aops.c attach_page_buffers(page, head); head 1742 fs/ntfs/aops.c bh = head = page_buffers(page); head 1751 fs/ntfs/aops.c } while ((bh = bh->b_this_page) != head); head 2558 fs/ntfs/attrib.c struct buffer_head *bh, *head; head 2560 fs/ntfs/attrib.c bh = head = page_buffers(page); head 2563 fs/ntfs/attrib.c } while ((bh = bh->b_this_page) != head); head 582 fs/ntfs/file.c struct buffer_head *bh, *head, *wait[2], **wait_bh = wait; head 643 fs/ntfs/file.c bh = head = page_buffers(page); head 1174 fs/ntfs/file.c } while (bh_pos += blocksize, (bh = bh->b_this_page) != head); head 1217 fs/ntfs/file.c bh = head = page_buffers(pages[u]); head 1221 fs/ntfs/file.c } while ((bh = bh->b_this_page) != head); head 1335 fs/ntfs/file.c bh = head = page_buffers(page); head 1354 fs/ntfs/file.c } while ((bh = bh->b_this_page) != head); head 1391 fs/ntfs/file.c struct buffer_head *bh, *head; head 1411 fs/ntfs/file.c bh = head = page_buffers(page); head 1424 fs/ntfs/file.c } while (bh_pos += blocksize, (bh = bh->b_this_page) != head); head 461 fs/ntfs/mft.c struct buffer_head *bh, *head; head 500 fs/ntfs/mft.c bh = head = alloc_page_buffers(page, blocksize, true); head 506 fs/ntfs/mft.c tail->b_this_page = head; head 507 fs/ntfs/mft.c attach_page_buffers(page, head); head 509 fs/ntfs/mft.c bh = head = page_buffers(page); head 572 fs/ntfs/mft.c } while (block_start = block_end, (bh = bh->b_this_page) != head); head 668 fs/ntfs/mft.c struct buffer_head *bh, *head; head 689 fs/ntfs/mft.c 
bh = head = page_buffers(page); head 759 fs/ntfs/mft.c } while (block_start = block_end, (bh = bh->b_this_page) != head); head 6389 fs/ocfs2/alloc.c struct ocfs2_cached_block_free *head) head 6413 fs/ocfs2/alloc.c while (head) { head 6414 fs/ocfs2/alloc.c if (head->free_bg) head 6415 fs/ocfs2/alloc.c bg_blkno = head->free_bg; head 6417 fs/ocfs2/alloc.c bg_blkno = ocfs2_which_suballoc_group(head->free_blk, head 6418 fs/ocfs2/alloc.c head->free_bit); head 6427 fs/ocfs2/alloc.c (unsigned long long)head->free_blk, head->free_bit); head 6430 fs/ocfs2/alloc.c head->free_bit, bg_blkno, 1); head 6436 fs/ocfs2/alloc.c tmp = head; head 6437 fs/ocfs2/alloc.c head = head->free_next; head 6448 fs/ocfs2/alloc.c while(head) { head 6450 fs/ocfs2/alloc.c tmp = head; head 6451 fs/ocfs2/alloc.c head = head->free_next; head 6482 fs/ocfs2/alloc.c struct ocfs2_cached_block_free *head) head 6491 fs/ocfs2/alloc.c while (head) { head 6507 fs/ocfs2/alloc.c ret = ocfs2_truncate_log_append(osb, handle, head->free_blk, head 6508 fs/ocfs2/alloc.c head->free_bit); head 6511 fs/ocfs2/alloc.c tmp = head; head 6512 fs/ocfs2/alloc.c head = head->free_next; head 6523 fs/ocfs2/alloc.c while (head) { head 6525 fs/ocfs2/alloc.c tmp = head; head 6526 fs/ocfs2/alloc.c head = head->free_next; head 426 fs/ocfs2/aops.c struct buffer_head *head, head 435 fs/ocfs2/aops.c unsigned blocksize = head->b_size; head 439 fs/ocfs2/aops.c for ( bh = head, block_start = 0; head 440 fs/ocfs2/aops.c ret == 0 && (bh != head || !block_start); head 609 fs/ocfs2/aops.c struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; head 616 fs/ocfs2/aops.c head = page_buffers(page); head 617 fs/ocfs2/aops.c for (bh = head, block_start = 0; bh != head || !block_start; head 675 fs/ocfs2/aops.c bh = head; head 691 fs/ocfs2/aops.c } while (bh != head); head 827 fs/ocfs2/aops.c struct list_head *head) head 832 fs/ocfs2/aops.c list_for_each_entry_safe(ue, tmp, head, ue_node) { head 891 fs/ocfs2/aops.c struct buffer_head *head, *bh; head 897 fs/ocfs2/aops.c bh = head = page_buffers(page); head 921 fs/ocfs2/aops.c } while (bh != head); head 25 fs/ocfs2/aops.h struct buffer_head *head, head 278 fs/ocfs2/dlm/dlmast.c struct list_head *head = NULL; head 357 fs/ocfs2/dlm/dlmast.c head = &res->converting; head 359 fs/ocfs2/dlm/dlmast.c list_for_each_entry(lock, head, list) { head 366 fs/ocfs2/dlm/dlmast.c head = &res->blocked; head 368 fs/ocfs2/dlm/dlmast.c head = &res->granted; head 370 fs/ocfs2/dlm/dlmast.c list_for_each_entry(lock, head, list) { head 1074 fs/ocfs2/dlm/dlmcommon.h static inline int dlm_lock_on_list(struct list_head *head, head 1079 fs/ocfs2/dlm/dlmcommon.h list_for_each_entry(tmplock, head, list) { head 2029 fs/ocfs2/journal.c struct inode *head; head 2073 fs/ocfs2/journal.c OCFS2_I(iter)->ip_next_orphan = p->head; head 2074 fs/ocfs2/journal.c p->head = iter; head 2081 fs/ocfs2/journal.c struct inode **head, head 2089 fs/ocfs2/journal.c .head = *head, head 2115 fs/ocfs2/journal.c *head = priv.head; head 237 fs/ocfs2/quota_local.c static void ocfs2_release_local_quota_bitmaps(struct list_head *head) head 241 fs/ocfs2/quota_local.c list_for_each_entry_safe(pos, next, head, qc_chunk) { head 251 fs/ocfs2/quota_local.c struct list_head *head) head 256 fs/ocfs2/quota_local.c INIT_LIST_HEAD(head); head 260 fs/ocfs2/quota_local.c ocfs2_release_local_quota_bitmaps(head); head 271 fs/ocfs2/quota_local.c ocfs2_release_local_quota_bitmaps(head); head 274 fs/ocfs2/quota_local.c list_add_tail(&newchunk->qc_chunk, head); head 297 fs/ocfs2/quota_local.c struct 
list_head *head) head 312 fs/ocfs2/quota_local.c list_add_tail(&rc->rc_list, head); head 316 fs/ocfs2/quota_local.c static void free_recovery_list(struct list_head *head) head 321 fs/ocfs2/quota_local.c list_for_each_entry_safe(rchunk, next, head, rc_list) { head 341 fs/ocfs2/quota_local.c struct list_head *head) head 360 fs/ocfs2/quota_local.c status = ocfs2_add_recovery_chunk(sb, dchunk, i, head); head 366 fs/ocfs2/quota_local.c free_recovery_list(head); head 370 fs/orangefs/devorangefs-req.c } head; head 373 fs/orangefs/devorangefs-req.c int head_size = sizeof(head); head 388 fs/orangefs/devorangefs-req.c if (!copy_from_iter_full(&head, head_size, iter)) { head 393 fs/orangefs/devorangefs-req.c if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) { head 397 fs/orangefs/devorangefs-req.c head.version, head 402 fs/orangefs/devorangefs-req.c if (head.magic != ORANGEFS_DEVREQ_MAGIC) { head 408 fs/orangefs/devorangefs-req.c orangefs_userspace_version = head.version; head 409 fs/orangefs/devorangefs-req.c } else if (orangefs_userspace_version != head.version) { head 415 fs/orangefs/devorangefs-req.c op = orangefs_devreq_remove_op(head.tag); head 419 fs/orangefs/devorangefs-req.c __func__, llu(head.tag)); head 1112 fs/pipe.c unsigned int head; head 1120 fs/pipe.c head = pipe->nrbufs - tail; head 1121 fs/pipe.c if (head) head 1122 fs/pipe.c memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer)); head 1124 fs/pipe.c memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer)); head 35 fs/proc/inode.c struct ctl_table_header *head; head 48 fs/proc/inode.c head = PROC_I(inode)->sysctl; head 49 fs/proc/inode.c if (head) { head 51 fs/proc/inode.c proc_sys_evict_inode(inode, head); head 249 fs/proc/internal.h struct ctl_table_header *head); head 253 fs/proc/internal.h struct ctl_table_header *head) { } head 126 fs/proc/kcore.c static int kcore_ram_list(struct list_head *head) head 136 fs/proc/kcore.c list_add(&ent->list, head); head 145 fs/proc/kcore.c get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) head 157 fs/proc/kcore.c list_for_each_entry(tmp, head, list) { head 171 fs/proc/kcore.c list_add_tail(&vmm->list, head); head 178 fs/proc/kcore.c get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) head 188 fs/proc/kcore.c struct list_head *head = (struct list_head *)arg; head 223 fs/proc/kcore.c list_add_tail(&ent->list, head); head 225 fs/proc/kcore.c if (!get_sparsemem_vmemmap_info(ent, head)) { head 135 fs/proc/page.c struct page *head = compound_head(page); head 137 fs/proc/page.c if (PageLRU(head) || PageAnon(head)) head 139 fs/proc/page.c else if (is_huge_zero_page(head)) { head 35 fs/proc/proc_sysctl.c static bool is_empty_dir(struct ctl_table_header *head) head 37 fs/proc/proc_sysctl.c return head->ctl_table[0].child == sysctl_mount_point; head 83 fs/proc/proc_sysctl.c static int insert_links(struct ctl_table_header *head); head 112 fs/proc/proc_sysctl.c struct ctl_table_header *head; head 123 fs/proc/proc_sysctl.c head = ctl_node->header; head 124 fs/proc/proc_sysctl.c entry = &head->ctl_table[ctl_node - head->node]; head 133 fs/proc/proc_sysctl.c *phead = head; head 140 fs/proc/proc_sysctl.c static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry) head 142 fs/proc/proc_sysctl.c struct rb_node *node = &head->node[entry - head->ctl_table].node; head 143 fs/proc/proc_sysctl.c struct rb_node **p = &head->parent->root.rb_node; head 168 fs/proc/proc_sysctl.c sysctl_print_dir(head->parent); head 175 
fs/proc/proc_sysctl.c rb_insert_color(node, &head->parent->root); head 179 fs/proc/proc_sysctl.c static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry) head 181 fs/proc/proc_sysctl.c struct rb_node *node = &head->node[entry - head->ctl_table].node; head 183 fs/proc/proc_sysctl.c rb_erase(node, &head->parent->root); head 186 fs/proc/proc_sysctl.c static void init_header(struct ctl_table_header *head, head 190 fs/proc/proc_sysctl.c head->ctl_table = table; head 191 fs/proc/proc_sysctl.c head->ctl_table_arg = table; head 192 fs/proc/proc_sysctl.c head->used = 0; head 193 fs/proc/proc_sysctl.c head->count = 1; head 194 fs/proc/proc_sysctl.c head->nreg = 1; head 195 fs/proc/proc_sysctl.c head->unregistering = NULL; head 196 fs/proc/proc_sysctl.c head->root = root; head 197 fs/proc/proc_sysctl.c head->set = set; head 198 fs/proc/proc_sysctl.c head->parent = NULL; head 199 fs/proc/proc_sysctl.c head->node = node; head 200 fs/proc/proc_sysctl.c INIT_HLIST_HEAD(&head->inodes); head 204 fs/proc/proc_sysctl.c node->header = head; head 208 fs/proc/proc_sysctl.c static void erase_header(struct ctl_table_header *head) head 211 fs/proc/proc_sysctl.c for (entry = head->ctl_table; entry->procname; entry++) head 212 fs/proc/proc_sysctl.c erase_entry(head, entry); head 270 fs/proc/proc_sysctl.c static void proc_sys_prune_dcache(struct ctl_table_header *head) head 279 fs/proc/proc_sysctl.c node = hlist_first_rcu(&head->inodes); head 339 fs/proc/proc_sysctl.c static struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head) head 341 fs/proc/proc_sysctl.c BUG_ON(!head); head 343 fs/proc/proc_sysctl.c if (!use_table(head)) head 344 fs/proc/proc_sysctl.c head = ERR_PTR(-ENOENT); head 346 fs/proc/proc_sysctl.c return head; head 349 fs/proc/proc_sysctl.c static void sysctl_head_finish(struct ctl_table_header *head) head 351 fs/proc/proc_sysctl.c if (!head) head 354 fs/proc/proc_sysctl.c unuse_table(head); head 371 fs/proc/proc_sysctl.c struct ctl_table_header *head; head 375 fs/proc/proc_sysctl.c entry = find_entry(&head, dir, name, namelen); head 376 fs/proc/proc_sysctl.c if (entry && use_table(head)) head 377 fs/proc/proc_sysctl.c *phead = head; head 399 fs/proc/proc_sysctl.c struct ctl_table_header *head = NULL; head 407 fs/proc/proc_sysctl.c head = ctl_node->header; head 408 fs/proc/proc_sysctl.c entry = &head->ctl_table[ctl_node - head->node]; head 410 fs/proc/proc_sysctl.c *phead = head; head 416 fs/proc/proc_sysctl.c struct ctl_table_header *head = *phead; head 418 fs/proc/proc_sysctl.c struct ctl_node *ctl_node = &head->node[entry - head->ctl_table]; head 421 fs/proc/proc_sysctl.c unuse_table(head); head 425 fs/proc/proc_sysctl.c head = NULL; head 427 fs/proc/proc_sysctl.c head = ctl_node->header; head 428 fs/proc/proc_sysctl.c entry = &head->ctl_table[ctl_node - head->node]; head 430 fs/proc/proc_sysctl.c *phead = head; head 450 fs/proc/proc_sysctl.c static int sysctl_perm(struct ctl_table_header *head, struct ctl_table *table, int op) head 452 fs/proc/proc_sysctl.c struct ctl_table_root *root = head->root; head 456 fs/proc/proc_sysctl.c mode = root->permissions(head, table); head 464 fs/proc/proc_sysctl.c struct ctl_table_header *head, struct ctl_table *table) head 466 fs/proc/proc_sysctl.c struct ctl_table_root *root = head->root; head 479 fs/proc/proc_sysctl.c if (unlikely(head->unregistering)) { head 484 fs/proc/proc_sysctl.c ei->sysctl = head; head 486 fs/proc/proc_sysctl.c hlist_add_head_rcu(&ei->sysctl_inodes, &head->inodes); head 487 fs/proc/proc_sysctl.c 
head->count++; head 500 fs/proc/proc_sysctl.c if (is_empty_dir(head)) head 505 fs/proc/proc_sysctl.c root->set_ownership(head, table, &inode->i_uid, &inode->i_gid); head 514 fs/proc/proc_sysctl.c void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head) head 518 fs/proc/proc_sysctl.c if (!--head->count) head 519 fs/proc/proc_sysctl.c kfree_rcu(head, rcu); head 525 fs/proc/proc_sysctl.c struct ctl_table_header *head = PROC_I(inode)->sysctl; head 526 fs/proc/proc_sysctl.c if (!head) head 527 fs/proc/proc_sysctl.c head = &sysctl_table_root.default_set.dir.header; head 528 fs/proc/proc_sysctl.c return sysctl_head_grab(head); head 534 fs/proc/proc_sysctl.c struct ctl_table_header *head = grab_header(dir); head 543 fs/proc/proc_sysctl.c if (IS_ERR(head)) head 544 fs/proc/proc_sysctl.c return ERR_CAST(head); head 546 fs/proc/proc_sysctl.c ctl_dir = container_of(head, struct ctl_dir, header); head 559 fs/proc/proc_sysctl.c inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p); head 571 fs/proc/proc_sysctl.c sysctl_head_finish(head); head 579 fs/proc/proc_sysctl.c struct ctl_table_header *head = grab_header(inode); head 584 fs/proc/proc_sysctl.c if (IS_ERR(head)) head 585 fs/proc/proc_sysctl.c return PTR_ERR(head); head 592 fs/proc/proc_sysctl.c if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ)) head 600 fs/proc/proc_sysctl.c error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, &count, head 622 fs/proc/proc_sysctl.c sysctl_head_finish(head); head 641 fs/proc/proc_sysctl.c struct ctl_table_header *head = grab_header(inode); head 645 fs/proc/proc_sysctl.c if (IS_ERR(head)) head 646 fs/proc/proc_sysctl.c return PTR_ERR(head); head 651 fs/proc/proc_sysctl.c sysctl_head_finish(head); head 659 fs/proc/proc_sysctl.c struct ctl_table_header *head = grab_header(inode); head 665 fs/proc/proc_sysctl.c if (IS_ERR(head)) head 683 fs/proc/proc_sysctl.c sysctl_head_finish(head); head 690 fs/proc/proc_sysctl.c struct ctl_table_header *head, head 711 fs/proc/proc_sysctl.c inode = proc_sys_make_inode(dir->d_sb, head, table); head 739 fs/proc/proc_sysctl.c struct ctl_table_header *head, head 744 fs/proc/proc_sysctl.c head = sysctl_head_grab(head); head 745 fs/proc/proc_sysctl.c if (IS_ERR(head)) head 749 fs/proc/proc_sysctl.c if (sysctl_follow_link(&head, &table)) head 752 fs/proc/proc_sysctl.c ret = proc_sys_fill_cache(file, ctx, head, table); head 754 fs/proc/proc_sysctl.c sysctl_head_finish(head); head 758 fs/proc/proc_sysctl.c static int scan(struct ctl_table_header *head, struct ctl_table *table, head 768 fs/proc/proc_sysctl.c res = proc_sys_link_fill_cache(file, ctx, head, table); head 770 fs/proc/proc_sysctl.c res = proc_sys_fill_cache(file, ctx, head, table); head 780 fs/proc/proc_sysctl.c struct ctl_table_header *head = grab_header(file_inode(file)); head 786 fs/proc/proc_sysctl.c if (IS_ERR(head)) head 787 fs/proc/proc_sysctl.c return PTR_ERR(head); head 789 fs/proc/proc_sysctl.c ctl_dir = container_of(head, struct ctl_dir, header); head 803 fs/proc/proc_sysctl.c sysctl_head_finish(head); head 813 fs/proc/proc_sysctl.c struct ctl_table_header *head; head 821 fs/proc/proc_sysctl.c head = grab_header(inode); head 822 fs/proc/proc_sysctl.c if (IS_ERR(head)) head 823 fs/proc/proc_sysctl.c return PTR_ERR(head); head 829 fs/proc/proc_sysctl.c error = sysctl_perm(head, table, mask & ~MAY_NOT_BLOCK); head 831 fs/proc/proc_sysctl.c sysctl_head_finish(head); head 856 fs/proc/proc_sysctl.c struct ctl_table_header *head = grab_header(inode); head 859 fs/proc/proc_sysctl.c if 
(IS_ERR(head)) head 860 fs/proc/proc_sysctl.c return PTR_ERR(head); head 866 fs/proc/proc_sysctl.c sysctl_head_finish(head); head 927 fs/proc/proc_sysctl.c struct ctl_table_header *head; head 940 fs/proc/proc_sysctl.c head = rcu_dereference(PROC_I(inode)->sysctl); head 941 fs/proc/proc_sysctl.c return !head || !sysctl_is_seen(head); head 953 fs/proc/proc_sysctl.c struct ctl_table_header *head; head 956 fs/proc/proc_sysctl.c entry = find_entry(&head, dir, name, namelen); head 961 fs/proc/proc_sysctl.c return container_of(head, struct ctl_dir, header); head 1068 fs/proc/proc_sysctl.c struct ctl_table_header *head; head 1084 fs/proc/proc_sysctl.c head = NULL; head 1085 fs/proc/proc_sysctl.c entry = find_entry(&head, dir, procname, strlen(procname)); head 1087 fs/proc/proc_sysctl.c if (entry && use_table(head)) { head 1089 fs/proc/proc_sysctl.c *phead = head; head 1208 fs/proc/proc_sysctl.c struct ctl_table_header *head; head 1214 fs/proc/proc_sysctl.c link = find_entry(&head, dir, procname, strlen(procname)); head 1227 fs/proc/proc_sysctl.c link = find_entry(&head, dir, procname, strlen(procname)); head 1228 fs/proc/proc_sysctl.c head->nreg++; head 1233 fs/proc/proc_sysctl.c static int insert_links(struct ctl_table_header *head) head 1240 fs/proc/proc_sysctl.c if (head->set == root_set) head 1243 fs/proc/proc_sysctl.c core_parent = xlate_dir(root_set, head->parent); head 1247 fs/proc/proc_sysctl.c if (get_links(core_parent, head->ctl_table, head->root)) head 1253 fs/proc/proc_sysctl.c links = new_links(core_parent, head->ctl_table, head->root); head 1261 fs/proc/proc_sysctl.c if (get_links(core_parent, head->ctl_table, head->root)) { head 277 fs/quota/dquot.c struct hlist_head *head; head 278 fs/quota/dquot.c head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id); head 279 fs/quota/dquot.c hlist_add_head(&dquot->dq_hash, head); head 182 fs/reiserfs/file.c struct buffer_head *bh, *head; head 201 fs/reiserfs/file.c for (bh = head = page_buffers(page), block_start = 0; head 202 fs/reiserfs/file.c bh != head || !block_start; head 195 fs/reiserfs/inode.c struct buffer_head *head, *next, *bh; head 198 fs/reiserfs/inode.c head = page_buffers(page); head 199 fs/reiserfs/inode.c bh = head; head 206 fs/reiserfs/inode.c } while (bh != head); head 2201 fs/reiserfs/inode.c struct buffer_head *head; head 2227 fs/reiserfs/inode.c head = page_buffers(page); head 2228 fs/reiserfs/inode.c bh = head; head 2235 fs/reiserfs/inode.c } while (bh != head); head 2535 fs/reiserfs/inode.c struct buffer_head *head, *bh; head 2561 fs/reiserfs/inode.c head = page_buffers(page); head 2578 fs/reiserfs/inode.c bh = head; head 2606 fs/reiserfs/inode.c } while (bh != head); head 2654 fs/reiserfs/inode.c } while ((bh = bh->b_this_page) != head); head 2679 fs/reiserfs/inode.c } while (bh != head); head 2690 fs/reiserfs/inode.c bh = head; head 2697 fs/reiserfs/inode.c } while (bh != head); head 2711 fs/reiserfs/inode.c bh = head; head 2725 fs/reiserfs/inode.c } while (bh != head); head 2739 fs/reiserfs/inode.c } while (bh != head); head 3161 fs/reiserfs/inode.c struct buffer_head *head, *bh, *next; head 3176 fs/reiserfs/inode.c head = page_buffers(page); head 3177 fs/reiserfs/inode.c bh = head; head 3196 fs/reiserfs/inode.c } while (bh != head); head 3234 fs/reiserfs/inode.c struct buffer_head *head; head 3240 fs/reiserfs/inode.c head = page_buffers(page); head 3241 fs/reiserfs/inode.c bh = head; head 3252 fs/reiserfs/inode.c } while (bh != head); head 348 fs/reiserfs/journal.c struct reiserfs_journal_cnode *head; head 353 
fs/reiserfs/journal.c head = vzalloc(array_size(num_cnodes, head 355 fs/reiserfs/journal.c if (!head) { head 358 fs/reiserfs/journal.c head[0].prev = NULL; head 359 fs/reiserfs/journal.c head[0].next = head + 1; head 361 fs/reiserfs/journal.c head[i].prev = head + (i - 1); head 362 fs/reiserfs/journal.c head[i].next = head + (i + 1); /* if last one, overwrite it after the if */ head 364 fs/reiserfs/journal.c head[num_cnodes - 1].next = NULL; head 365 fs/reiserfs/journal.c return head; head 1846 fs/reiserfs/journal.c struct reiserfs_journal_cnode **head; head 1848 fs/reiserfs/journal.c head = &(journal_hash(table, sb, block)); head 1849 fs/reiserfs/journal.c if (!head) { head 1852 fs/reiserfs/journal.c cur = *head; head 1863 fs/reiserfs/journal.c *head = cur->hnext; head 1504 fs/reiserfs/stree.c struct buffer_head *head; head 1513 fs/reiserfs/stree.c head = page_buffers(page); head 1514 fs/reiserfs/stree.c bh = head; head 1531 fs/reiserfs/stree.c } while (bh != head); head 969 fs/select.c struct poll_list *const head = (struct poll_list *)stack_pps; head 970 fs/select.c struct poll_list *walk = head; head 1001 fs/select.c fdcount = do_poll(head, &table, end_time); head 1004 fs/select.c for (walk = head; walk; walk = walk->next) { head 1015 fs/select.c walk = head->next; head 896 fs/seq_file.c struct list_head *seq_list_start(struct list_head *head, loff_t pos) head 900 fs/seq_file.c list_for_each(lh, head) head 908 fs/seq_file.c struct list_head *seq_list_start_head(struct list_head *head, loff_t pos) head 911 fs/seq_file.c return head; head 913 fs/seq_file.c return seq_list_start(head, pos - 1); head 917 fs/seq_file.c struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos) head 923 fs/seq_file.c return lh == head ? NULL : lh; head 934 fs/seq_file.c struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos) head 938 fs/seq_file.c hlist_for_each(node, head) head 953 fs/seq_file.c struct hlist_node *seq_hlist_start_head(struct hlist_head *head, loff_t pos) head 958 fs/seq_file.c return seq_hlist_start(head, pos - 1); head 970 fs/seq_file.c struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head, head 977 fs/seq_file.c return head->first; head 994 fs/seq_file.c struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head, head 999 fs/seq_file.c __hlist_for_each_rcu(node, head) head 1018 fs/seq_file.c struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head, head 1024 fs/seq_file.c return seq_hlist_start_rcu(head, pos - 1); head 1041 fs/seq_file.c struct hlist_head *head, head 1048 fs/seq_file.c return rcu_dereference(head->first); head 1063 fs/seq_file.c seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos) head 1068 fs/seq_file.c hlist_for_each(node, per_cpu_ptr(head, *cpu)) { head 1087 fs/seq_file.c seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, head 1099 fs/seq_file.c struct hlist_head *bucket = per_cpu_ptr(head, *cpu); head 168 fs/super.c static void destroy_super_rcu(struct rcu_head *head) head 170 fs/super.c struct super_block *s = container_of(head, struct super_block, rcu); head 711 fs/ubifs/debug.c int head = 0; head 721 fs/ubifs/debug.c head = 1; head 724 fs/ubifs/debug.c if (!head) head 2298 fs/ubifs/debug.c int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head) head 2306 fs/ubifs/debug.c for (cur = head->next; cur->next != head; cur = cur->next) { head 2365 fs/ubifs/debug.c int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) head 
2373 fs/ubifs/debug.c for (cur = head->next; cur->next != head; cur = cur->next) { head 290 fs/ubifs/debug.h int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head); head 291 fs/ubifs/debug.h int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head); head 357 fs/ubifs/scan.c struct list_head *head; head 359 fs/ubifs/scan.c head = &sleb->nodes; head 360 fs/ubifs/scan.c while (!list_empty(head)) { head 361 fs/ubifs/scan.c node = list_entry(head->next, struct ubifs_scan_node, list); head 251 fs/ufs/balloc.c struct buffer_head *head, *bh; head 279 fs/ufs/balloc.c head = page_buffers(page); head 280 fs/ufs/balloc.c bh = head; head 317 fs/ufs/balloc.c } while (bh != head); head 958 fs/userfaultfd.c wq = list_last_entry(&wqh->head, typeof(*wq), entry); head 1902 fs/userfaultfd.c list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { head 1906 fs/userfaultfd.c list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { head 839 fs/xattr.c list_for_each_entry(xattr, &xattrs->head, list) { head 891 fs/xattr.c list_for_each_entry(xattr, &xattrs->head, list) { head 908 fs/xattr.c list_add(&new_xattr->list, &xattrs->head); head 969 fs/xattr.c list_for_each_entry(xattr, &xattrs->head, list) { head 990 fs/xattr.c list_add(&new_xattr->list, &xattrs->head); head 156 fs/xfs/xfs_fsmap.c struct xfs_fsmap_head *head; head 257 fs/xfs/xfs_fsmap.c if (info->head->fmh_count == 0) { head 259 fs/xfs/xfs_fsmap.c info->head->fmh_entries++; head 264 fs/xfs/xfs_fsmap.c info->head->fmh_entries++; head 278 fs/xfs/xfs_fsmap.c if (info->head->fmh_entries >= info->head->fmh_count) head 290 fs/xfs/xfs_fsmap.c info->head->fmh_entries++; head 297 fs/xfs/xfs_fsmap.c if (info->head->fmh_entries >= info->head->fmh_count) head 325 fs/xfs/xfs_fsmap.c info->head->fmh_entries++; head 817 fs/xfs/xfs_fsmap.c struct xfs_fsmap_head *head, head 829 fs/xfs/xfs_fsmap.c if (head->fmh_iflags & ~FMH_IF_VALID) head 831 fs/xfs/xfs_fsmap.c if (!xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[0]) || head 832 fs/xfs/xfs_fsmap.c !xfs_getfsmap_is_valid_device(mp, &head->fmh_keys[1])) head 837 fs/xfs/xfs_fsmap.c head->fmh_entries = 0; head 878 fs/xfs/xfs_fsmap.c dkeys[0] = head->fmh_keys[0]; head 889 fs/xfs/xfs_fsmap.c if (!xfs_getfsmap_check_keys(dkeys, &head->fmh_keys[1])) head 892 fs/xfs/xfs_fsmap.c info.next_daddr = head->fmh_keys[0].fmr_physical + head 893 fs/xfs/xfs_fsmap.c head->fmh_keys[0].fmr_length; head 896 fs/xfs/xfs_fsmap.c info.head = head; head 903 fs/xfs/xfs_fsmap.c if (head->fmh_keys[0].fmr_device > handlers[i].dev) head 905 fs/xfs/xfs_fsmap.c if (head->fmh_keys[1].fmr_device < handlers[i].dev) head 915 fs/xfs/xfs_fsmap.c if (handlers[i].dev == head->fmh_keys[1].fmr_device) head 916 fs/xfs/xfs_fsmap.c dkeys[1] = head->fmh_keys[1]; head 917 fs/xfs/xfs_fsmap.c if (handlers[i].dev > head->fmh_keys[0].fmr_device) head 937 fs/xfs/xfs_fsmap.c head->fmh_oflags = FMH_OF_DEV_T; head 36 fs/xfs/xfs_fsmap.h int xfs_getfsmap(struct xfs_mount *mp, struct xfs_fsmap_head *head, head 82 fs/xfs/xfs_icache.c struct rcu_head *head) head 84 fs/xfs/xfs_icache.c struct inode *inode = container_of(head, struct inode, i_rcu); head 1865 fs/xfs/xfs_ioctl.c struct fsmap_head head; head 1869 fs/xfs/xfs_ioctl.c if (copy_from_user(&head, arg, sizeof(struct fsmap_head))) head 1871 fs/xfs/xfs_ioctl.c if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) || head 1872 fs/xfs/xfs_ioctl.c memchr_inv(head.fmh_keys[0].fmr_reserved, 0, head 1873 fs/xfs/xfs_ioctl.c sizeof(head.fmh_keys[0].fmr_reserved)) || head 1874 
fs/xfs/xfs_ioctl.c memchr_inv(head.fmh_keys[1].fmr_reserved, 0, head 1875 fs/xfs/xfs_ioctl.c sizeof(head.fmh_keys[1].fmr_reserved))) head 1878 fs/xfs/xfs_ioctl.c xhead.fmh_iflags = head.fmh_iflags; head 1879 fs/xfs/xfs_ioctl.c xhead.fmh_count = head.fmh_count; head 1880 fs/xfs/xfs_ioctl.c xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]); head 1881 fs/xfs/xfs_ioctl.c xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]); head 1904 fs/xfs/xfs_ioctl.c head.fmh_entries = xhead.fmh_entries; head 1905 fs/xfs/xfs_ioctl.c head.fmh_oflags = xhead.fmh_oflags; head 1906 fs/xfs/xfs_ioctl.c if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) head 43 fs/xfs/xfs_log.c atomic64_t *head); head 119 fs/xfs/xfs_log.c atomic64_t *head, head 122 fs/xfs/xfs_log.c int64_t head_val = atomic64_read(head); head 138 fs/xfs/xfs_log.c head_val = atomic64_cmpxchg(head, old, new); head 145 fs/xfs/xfs_log.c atomic64_t *head, head 148 fs/xfs/xfs_log.c int64_t head_val = atomic64_read(head); head 167 fs/xfs/xfs_log.c head_val = atomic64_cmpxchg(head, old, new); head 173 fs/xfs/xfs_log.c struct xlog_grant_head *head) head 175 fs/xfs/xfs_log.c xlog_assign_grant_head(&head->grant, 1, 0); head 176 fs/xfs/xfs_log.c INIT_LIST_HEAD(&head->waiters); head 177 fs/xfs/xfs_log.c spin_lock_init(&head->lock); head 182 fs/xfs/xfs_log.c struct xlog_grant_head *head) head 186 fs/xfs/xfs_log.c spin_lock(&head->lock); head 187 fs/xfs/xfs_log.c list_for_each_entry(tic, &head->waiters, t_queue) head 189 fs/xfs/xfs_log.c spin_unlock(&head->lock); head 195 fs/xfs/xfs_log.c struct xlog_grant_head *head, head 198 fs/xfs/xfs_log.c if (head == &log->l_write_head) { head 212 fs/xfs/xfs_log.c struct xlog_grant_head *head, head 219 fs/xfs/xfs_log.c list_for_each_entry(tic, &head->waiters, t_queue) { head 242 fs/xfs/xfs_log.c need_bytes = xlog_ticket_reservation(log, head, tic); head 261 fs/xfs/xfs_log.c struct xlog_grant_head *head, head 263 fs/xfs/xfs_log.c int need_bytes) __releases(&head->lock) head 264 fs/xfs/xfs_log.c __acquires(&head->lock) head 266 fs/xfs/xfs_log.c list_add_tail(&tic->t_queue, &head->waiters); head 274 fs/xfs/xfs_log.c spin_unlock(&head->lock); head 282 fs/xfs/xfs_log.c spin_lock(&head->lock); head 285 fs/xfs/xfs_log.c } while (xlog_space_left(log, &head->grant) < need_bytes); head 314 fs/xfs/xfs_log.c struct xlog_grant_head *head, head 329 fs/xfs/xfs_log.c *need_bytes = xlog_ticket_reservation(log, head, tic); head 330 fs/xfs/xfs_log.c free_bytes = xlog_space_left(log, &head->grant); head 331 fs/xfs/xfs_log.c if (!list_empty_careful(&head->waiters)) { head 332 fs/xfs/xfs_log.c spin_lock(&head->lock); head 333 fs/xfs/xfs_log.c if (!xlog_grant_head_wake(log, head, &free_bytes) || head 335 fs/xfs/xfs_log.c error = xlog_grant_head_wait(log, head, tic, head 338 fs/xfs/xfs_log.c spin_unlock(&head->lock); head 340 fs/xfs/xfs_log.c spin_lock(&head->lock); head 341 fs/xfs/xfs_log.c error = xlog_grant_head_wait(log, head, tic, *need_bytes); head 342 fs/xfs/xfs_log.c spin_unlock(&head->lock); head 1190 fs/xfs/xfs_log.c atomic64_t *head) head 1198 fs/xfs/xfs_log.c xlog_crack_grant_head(head, &head_cycle, &head_bytes); head 1360 fs/xfs/xfs_log.c xlog_rec_header_t *head; head 1452 fs/xfs/xfs_log.c head = &iclog->ic_header; head 1453 fs/xfs/xfs_log.c memset(head, 0, sizeof(xlog_rec_header_t)); head 1454 fs/xfs/xfs_log.c head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); head 1455 fs/xfs/xfs_log.c head->h_version = cpu_to_be32( head 1457 fs/xfs/xfs_log.c head->h_size = cpu_to_be32(log->l_iclog_size); head 1459 
fs/xfs/xfs_log.c head->h_fmt = cpu_to_be32(XLOG_FMT); head 1460 fs/xfs/xfs_log.c memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); head 2986 fs/xfs/xfs_log.c xlog_rec_header_t *head; head 3006 fs/xfs/xfs_log.c head = &iclog->ic_header; head 3021 fs/xfs/xfs_log.c head->h_cycle = cpu_to_be32(log->l_curr_cycle); head 3022 fs/xfs/xfs_log.c head->h_lsn = cpu_to_be64( head 495 fs/xfs/xfs_log_priv.h xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space) head 497 fs/xfs/xfs_log_priv.h xlog_crack_grant_head_val(atomic64_read(head), cycle, space); head 507 fs/xfs/xfs_log_priv.h xlog_assign_grant_head(atomic64_t *head, int cycle, int space) head 509 fs/xfs/xfs_log_priv.h atomic64_set(head, xlog_assign_grant_head_val(cycle, space)); head 221 fs/xfs/xfs_log_recover.c xlog_rec_header_t *head) head 226 fs/xfs/xfs_log_recover.c &head->h_fs_uuid, be32_to_cpu(head->h_fmt)); head 229 fs/xfs/xfs_log_recover.c #define xlog_header_check_dump(mp, head) head 238 fs/xfs/xfs_log_recover.c xlog_rec_header_t *head) head 240 fs/xfs/xfs_log_recover.c ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)); head 247 fs/xfs/xfs_log_recover.c if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) { head 250 fs/xfs/xfs_log_recover.c xlog_header_check_dump(mp, head); head 254 fs/xfs/xfs_log_recover.c } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { head 257 fs/xfs/xfs_log_recover.c xlog_header_check_dump(mp, head); head 271 fs/xfs/xfs_log_recover.c xlog_rec_header_t *head) head 273 fs/xfs/xfs_log_recover.c ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)); head 275 fs/xfs/xfs_log_recover.c if (uuid_is_null(&head->h_fs_uuid)) { head 282 fs/xfs/xfs_log_recover.c } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { head 284 fs/xfs/xfs_log_recover.c xlog_header_check_dump(mp, head); head 447 fs/xfs/xfs_log_recover.c xlog_rec_header_t *head = NULL; head 484 fs/xfs/xfs_log_recover.c head = (xlog_rec_header_t *)offset; head 486 fs/xfs/xfs_log_recover.c if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) head 507 fs/xfs/xfs_log_recover.c if ((error = xlog_header_check_mount(log->l_mp, head))) head 518 fs/xfs/xfs_log_recover.c uint h_size = be32_to_cpu(head->h_size); head 528 fs/xfs/xfs_log_recover.c BTOBB(be32_to_cpu(head->h_len)) + xhdrs) head 4162 fs/xfs/xfs_log_recover.c struct list_head *head) head 4168 fs/xfs/xfs_log_recover.c list_add_tail(&item->ri_list, head); head 125 fs/xfs/xfs_mount.c struct rcu_head *head) head 127 fs/xfs/xfs_mount.c struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head); head 575 include/asm-generic/vmlinux.lds.h #define HEAD_TEXT KEEP(*(.head.text)) head 578 include/asm-generic/vmlinux.lds.h .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \ head 186 include/crypto/algapi.h unsigned int head); head 22 include/crypto/internal/aead.h char head[offsetof(struct aead_alg, base)]; head 17 include/crypto/internal/akcipher.h char head[offsetof(struct akcipher_alg, base)]; head 23 include/crypto/internal/skcipher.h char head[offsetof(struct skcipher_alg, base)]; head 19 include/crypto/scatterwalk.h static inline void scatterwalk_crypto_chain(struct scatterlist *head, head 23 include/crypto/scatterwalk.h sg_chain(head, num, sg); head 25 include/crypto/scatterwalk.h sg_mark_end(head); head 248 include/drm/drm_atomic.h struct list_head head; head 279 include/drm/drm_atomic.h list_for_each_entry(privobj, &(dev)->mode_config.privobj_list, head) head 1098 include/drm/drm_connector.h struct list_head head; head 891 
include/drm/drm_crtc.h struct list_head head; head 1206 include/drm/drm_crtc.h list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) head 58 include/drm/drm_debugfs_crc.h int head, tail; head 101 include/drm/drm_encoder.h struct list_head head; head 256 include/drm/drm_encoder.h list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \ head 267 include/drm/drm_encoder.h list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head) head 126 include/drm/drm_framebuffer.h struct list_head head; head 291 include/drm/drm_framebuffer.h struct drm_framebuffer, head); \ head 292 include/drm/drm_framebuffer.h &fb->head != (&(dev)->mode_config.fb_list); \ head 293 include/drm/drm_framebuffer.h fb = list_next_entry(fb, head)) head 43 include/drm/drm_hashtab.h struct hlist_node head; head 152 include/drm/drm_legacy.h struct list_head head; /**< list head */ head 230 include/drm/drm_modes.h struct list_head head; head 86 include/drm/drm_modeset_lock.h struct list_head head; head 105 include/drm/drm_modeset_lock.h WARN_ON(!list_empty(&lock->head)); head 567 include/drm/drm_plane.h struct list_head head; head 785 include/drm/drm_plane.h list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ head 798 include/drm/drm_plane.h list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ head 809 include/drm/drm_plane.h list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) head 43 include/drm/drm_property.h struct list_head head; head 77 include/drm/drm_property.h struct list_head head; head 40 include/drm/spsc_queue.h struct spsc_node *head; head 50 include/drm/spsc_queue.h queue->head = NULL; head 51 include/drm/spsc_queue.h atomic_long_set(&queue->tail, (long)&queue->head); head 57 include/drm/spsc_queue.h return queue->head; head 85 include/drm/spsc_queue.h return tail == &queue->head; head 96 include/drm/spsc_queue.h node = READ_ONCE(queue->head); head 102 include/drm/spsc_queue.h WRITE_ONCE(queue->head, next); head 108 include/drm/spsc_queue.h (long)&node->next, (long) &queue->head) != (long)&node->next) { head 112 include/drm/spsc_queue.h } while (unlikely(!(queue->head = READ_ONCE(node->next)))); head 47 include/drm/ttm/ttm_execbuf_util.h struct list_head head; head 38 include/linux/acpi_iort.h int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); head 54 include/linux/acpi_iort.h int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) head 1057 include/linux/ata.h unsigned long lba_sects, chs_sects, head, tail; head 1085 include/linux/ata.h head = (lba_sects >> 16) & 0xffff; head 1087 include/linux/ata.h lba_sects = head | (tail << 16); head 576 include/linux/bio.h struct bio *head; head 582 include/linux/bio.h return bl->head == NULL; head 587 include/linux/bio.h bl->head = bl->tail = NULL; head 593 include/linux/bio.h for (bio = (bl)->head; bio; bio = bio->bi_next) head 613 include/linux/bio.h bl->head = bio; head 620 include/linux/bio.h bio->bi_next = bl->head; head 622 include/linux/bio.h bl->head = bio; head 630 include/linux/bio.h if (!bl2->head) head 634 include/linux/bio.h bl->tail->bi_next = bl2->head; head 636 include/linux/bio.h bl->head = bl2->head; head 644 include/linux/bio.h if (!bl2->head) head 647 include/linux/bio.h if (bl->head) head 648 include/linux/bio.h bl2->tail->bi_next = bl->head; head 652 include/linux/bio.h bl->head = bl2->head; head 657 include/linux/bio.h return bl->head; head 662 include/linux/bio.h struct bio *bio = bl->head; head 665 include/linux/bio.h 
bl->head = bl->head->bi_next; head 666 include/linux/bio.h if (!bl->head) head 677 include/linux/bio.h struct bio *bio = bl->head; head 679 include/linux/bio.h bl->head = bl->tail = NULL; head 121 include/linux/bpf-cgroup.h int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, head 287 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \ head 291 include/linux/bpf-cgroup.h __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ head 399 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) head 355 include/linux/bpf_verifier.h struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ head 6 include/linux/btree-128.h static inline void btree_init_mempool128(struct btree_head128 *head, head 9 include/linux/btree-128.h btree_init_mempool(&head->h, mempool); head 12 include/linux/btree-128.h static inline int btree_init128(struct btree_head128 *head) head 14 include/linux/btree-128.h return btree_init(&head->h); head 17 include/linux/btree-128.h static inline void btree_destroy128(struct btree_head128 *head) head 19 include/linux/btree-128.h btree_destroy(&head->h); head 22 include/linux/btree-128.h static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2) head 25 include/linux/btree-128.h return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key); head 28 include/linux/btree-128.h static inline void *btree_get_prev128(struct btree_head128 *head, head 34 include/linux/btree-128.h val = btree_get_prev(&head->h, &btree_geo128, head 41 include/linux/btree-128.h static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2, head 45 include/linux/btree-128.h return btree_insert(&head->h, &btree_geo128, head 49 include/linux/btree-128.h static inline int btree_update128(struct btree_head128 *head, u64 k1, u64 k2, head 53 include/linux/btree-128.h return btree_update(&head->h, &btree_geo128, head 57 include/linux/btree-128.h static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2) head 60 include/linux/btree-128.h return btree_remove(&head->h, &btree_geo128, (unsigned long *)&key); head 63 include/linux/btree-128.h static inline void *btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2) head 68 include/linux/btree-128.h val = btree_last(&head->h, &btree_geo128, (unsigned long *)&key[0]); head 90 include/linux/btree-128.h static inline size_t btree_visitor128(struct btree_head128 *head, head 94 include/linux/btree-128.h return btree_visitor(&head->h, &btree_geo128, opaque, head 98 include/linux/btree-128.h static inline size_t btree_grim_visitor128(struct btree_head128 *head, head 102 include/linux/btree-128.h return btree_grim_visitor(&head->h, &btree_geo128, opaque, head 106 include/linux/btree-128.h #define btree_for_each_safe128(head, k1, k2, val) \ head 107 include/linux/btree-128.h for (val = btree_last128(head, &k1, &k2); \ head 109 include/linux/btree-128.h val = btree_get_prev128(head, &k1, &k2)) head 14 include/linux/btree-type.h static inline void BTREE_FN(init_mempool)(BTREE_TYPE_HEAD *head, head 17 include/linux/btree-type.h btree_init_mempool(&head->h, mempool); head 20 include/linux/btree-type.h static inline int BTREE_FN(init)(BTREE_TYPE_HEAD *head) head 22 include/linux/btree-type.h return btree_init(&head->h); head 25 include/linux/btree-type.h static inline void BTREE_FN(destroy)(BTREE_TYPE_HEAD *head) head 27 include/linux/btree-type.h 
btree_destroy(&head->h); head 38 include/linux/btree-type.h static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) head 41 include/linux/btree-type.h return btree_lookup(&head->h, BTREE_TYPE_GEO, &_key); head 44 include/linux/btree-type.h static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, head 48 include/linux/btree-type.h return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp); head 51 include/linux/btree-type.h static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, head 55 include/linux/btree-type.h return btree_update(&head->h, BTREE_TYPE_GEO, &_key, val); head 58 include/linux/btree-type.h static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) head 61 include/linux/btree-type.h return btree_remove(&head->h, BTREE_TYPE_GEO, &_key); head 64 include/linux/btree-type.h static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) head 67 include/linux/btree-type.h void *val = btree_last(&head->h, BTREE_TYPE_GEO, &_key); head 73 include/linux/btree-type.h static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) head 76 include/linux/btree-type.h void *val = btree_get_prev(&head->h, BTREE_TYPE_GEO, &_key); head 82 include/linux/btree-type.h static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) head 84 include/linux/btree-type.h return btree_lookup(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); head 87 include/linux/btree-type.h static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, head 90 include/linux/btree-type.h return btree_insert(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, head 94 include/linux/btree-type.h static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key, head 97 include/linux/btree-type.h return btree_update(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, val); head 100 include/linux/btree-type.h static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key) head 102 include/linux/btree-type.h return btree_remove(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key); head 105 include/linux/btree-type.h static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) head 107 include/linux/btree-type.h return btree_last(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); head 110 include/linux/btree-type.h static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key) head 112 include/linux/btree-type.h return btree_get_prev(&head->h, BTREE_TYPE_GEO, (unsigned long *)key); head 122 include/linux/btree-type.h static inline size_t BTREE_FN(visitor)(BTREE_TYPE_HEAD *head, head 126 include/linux/btree-type.h return btree_visitor(&head->h, BTREE_TYPE_GEO, opaque, head 130 include/linux/btree-type.h static inline size_t BTREE_FN(grim_visitor)(BTREE_TYPE_HEAD *head, head 134 include/linux/btree-type.h return btree_grim_visitor(&head->h, BTREE_TYPE_GEO, opaque, head 67 include/linux/btree.h void btree_init_mempool(struct btree_head *head, mempool_t *mempool); head 79 include/linux/btree.h int __must_check btree_init(struct btree_head *head); head 89 include/linux/btree.h void btree_destroy(struct btree_head *head); head 100 include/linux/btree.h void *btree_lookup(struct btree_head *head, struct btree_geo *geo, head 115 include/linux/btree.h int __must_check btree_insert(struct btree_head *head, struct btree_geo *geo, head 128 include/linux/btree.h int btree_update(struct btree_head *head, struct btree_geo *geo, head 140 
include/linux/btree.h void *btree_remove(struct btree_head *head, struct btree_geo *geo, head 172 include/linux/btree.h void *btree_last(struct btree_head *head, struct btree_geo *geo, head 186 include/linux/btree.h void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, head 191 include/linux/btree.h size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, head 199 include/linux/btree.h size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, head 216 include/linux/btree.h #define btree_for_each_safel(head, key, val) \ head 217 include/linux/btree.h for (val = btree_lastl(head, &key); \ head 219 include/linux/btree.h val = btree_get_prevl(head, &key)) head 227 include/linux/btree.h #define btree_for_each_safe32(head, key, val) \ head 228 include/linux/btree.h for (val = btree_last32(head, &key); \ head 230 include/linux/btree.h val = btree_get_prev32(head, &key)) head 239 include/linux/btree.h #define btree_for_each_safe64(head, key, val) \ head 240 include/linux/btree.h for (val = btree_last64(head, &key); \ head 242 include/linux/btree.h val = btree_get_prev64(head, &key)) head 276 include/linux/buffer_head.h struct buffer_head *head) head 280 include/linux/buffer_head.h set_page_private(page, (unsigned long)head); head 42 include/linux/can/skb.h return (struct can_skb_priv *)(skb->head); head 11 include/linux/ceph/pagelist.h struct list_head head; head 11 include/linux/circ_buf.h int head; head 16 include/linux/circ_buf.h #define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1)) head 21 include/linux/circ_buf.h #define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size)) head 26 include/linux/circ_buf.h #define CIRC_CNT_TO_END(head,tail,size) \ head 28 include/linux/circ_buf.h int n = ((head) + end) & ((size)-1); \ head 32 include/linux/circ_buf.h #define CIRC_SPACE_TO_END(head,tail,size) \ head 33 include/linux/circ_buf.h ({int end = (size) - 1 - (head); \ head 661 include/linux/compat.h compat_sys_set_robust_list(struct compat_robust_list_head __user *head, head 62 include/linux/dma-fence-chain.h #define dma_fence_chain_for_each(iter, head) \ head 63 include/linux/dma-fence-chain.h for (iter = dma_fence_get(head); iter; \ head 1487 include/linux/efi.h void *data, bool duplicates, struct list_head *head); head 1489 include/linux/efi.h int efivar_entry_add(struct efivar_entry *entry, struct list_head *head); head 1501 include/linux/efi.h unsigned long size, void *data, struct list_head *head); head 1511 include/linux/efi.h struct list_head *head, void *data, head 1514 include/linux/efi.h struct list_head *head, void *data); head 1517 include/linux/efi.h struct list_head *head, bool remove); head 59 include/linux/etherdevice.h struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb); head 693 include/linux/fb.h extern void fb_cleanup_device(struct fb_info *head); head 741 include/linux/fb.h struct list_head *head); head 743 include/linux/fb.h struct list_head *head); head 745 include/linux/fb.h struct list_head *head); head 747 include/linux/fb.h struct list_head *head); head 749 include/linux/fb.h struct list_head *head); head 750 include/linux/fb.h extern void fb_destroy_modelist(struct list_head *head); head 752 include/linux/fb.h struct list_head *head); head 754 include/linux/fb.h struct list_head *head); head 13 include/linux/fd.h compat_uint_t head; head 1212 include/linux/filter.h struct ctl_table_header *head; head 178 include/linux/fs_context.h u8 head; /* Insertion index in buffer[] */ head 75 
include/linux/genhd.h unsigned char head; /* starting head */ head 29 include/linux/hidraw.h int head; head 573 include/linux/ide.h u8 head; /* "real" number of heads */ head 63 include/linux/if_macvlan.h extern void macvlan_dellink(struct net_device *dev, struct list_head *head); head 255 include/linux/if_team.h struct hlist_head *head = team_port_index_hash(team, port_index); head 257 include/linux/if_team.h hlist_for_each_entry(port, head, hlist) head 276 include/linux/if_team.h struct hlist_head *head = team_port_index_hash(team, port_index); head 278 include/linux/if_team.h hlist_for_each_entry_rcu(port, head, hlist) head 54 include/linux/iio/buffer-dma.h struct list_head head; head 446 include/linux/iommu.h struct list_head *head); head 727 include/linux/iommu.h struct list_head *head) head 63 include/linux/iova.h unsigned head, tail; head 106 include/linux/isdn/capilli.h void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize); head 107 include/linux/isdn/capilli.h void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci); head 108 include/linux/isdn/capilli.h void capilib_release_appl(struct list_head *head, u16 applid); head 109 include/linux/isdn/capilli.h void capilib_release(struct list_head *head); head 110 include/linux/isdn/capilli.h void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid); head 111 include/linux/isdn/capilli.h u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid); head 1266 include/linux/jbd2.h static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh) head 1268 include/linux/jbd2.h list_add_tail(&bh->b_assoc_buffers, head); head 245 include/linux/kexec.h kimage_entry_t head; head 333 include/linux/kprobes.h struct hlist_head **head, unsigned long *flags); head 366 include/linux/kprobes.h void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); head 77 include/linux/list.h static inline void list_add(struct list_head *new, struct list_head *head) head 79 include/linux/list.h __list_add(new, head, head->next); head 91 include/linux/list.h static inline void list_add_tail(struct list_head *new, struct list_head *head) head 93 include/linux/list.h __list_add(new, head->prev, head); head 199 include/linux/list.h static inline void list_move(struct list_head *list, struct list_head *head) head 202 include/linux/list.h list_add(list, head); head 211 include/linux/list.h struct list_head *head) head 214 include/linux/list.h list_add_tail(list, head); head 226 include/linux/list.h static inline void list_bulk_move_tail(struct list_head *head, head 233 include/linux/list.h head->prev->next = first; head 234 include/linux/list.h first->prev = head->prev; head 236 include/linux/list.h last->next = head; head 237 include/linux/list.h head->prev = last; head 246 include/linux/list.h const struct list_head *head) head 248 include/linux/list.h return list->prev == head; head 257 include/linux/list.h const struct list_head *head) head 259 include/linux/list.h return list->next == head; head 266 include/linux/list.h static inline int list_empty(const struct list_head *head) head 268 include/linux/list.h return READ_ONCE(head->next) == head; head 284 include/linux/list.h static inline int list_empty_careful(const struct list_head *head) head 286 include/linux/list.h struct list_head *next = head->next; head 287 include/linux/list.h return (next == head) && (next == head->prev); head 294 include/linux/list.h static inline void 
list_rotate_left(struct list_head *head) head 298 include/linux/list.h if (!list_empty(head)) { head 299 include/linux/list.h first = head->next; head 300 include/linux/list.h list_move_tail(first, head); head 312 include/linux/list.h struct list_head *head) head 319 include/linux/list.h list_move_tail(head, list); head 326 include/linux/list.h static inline int list_is_singular(const struct list_head *head) head 328 include/linux/list.h return !list_empty(head) && (head->next == head->prev); head 332 include/linux/list.h struct list_head *head, struct list_head *entry) head 335 include/linux/list.h list->next = head->next; head 339 include/linux/list.h head->next = new_first; head 340 include/linux/list.h new_first->prev = head; head 358 include/linux/list.h struct list_head *head, struct list_head *entry) head 360 include/linux/list.h if (list_empty(head)) head 362 include/linux/list.h if (list_is_singular(head) && head 363 include/linux/list.h (head->next != entry && head != entry)) head 365 include/linux/list.h if (entry == head) head 368 include/linux/list.h __list_cut_position(list, head, entry); head 386 include/linux/list.h struct list_head *head, head 389 include/linux/list.h if (head->next == entry) { head 393 include/linux/list.h list->next = head->next; head 397 include/linux/list.h head->next = entry; head 398 include/linux/list.h entry->prev = head; head 421 include/linux/list.h struct list_head *head) head 424 include/linux/list.h __list_splice(list, head, head->next); head 433 include/linux/list.h struct list_head *head) head 436 include/linux/list.h __list_splice(list, head->prev, head); head 447 include/linux/list.h struct list_head *head) head 450 include/linux/list.h __list_splice(list, head, head->next); head 464 include/linux/list.h struct list_head *head) head 467 include/linux/list.h __list_splice(list, head->prev, head); head 538 include/linux/list.h #define list_for_each(pos, head) \ head 539 include/linux/list.h for (pos = (head)->next; pos != (head); pos = pos->next) head 546 include/linux/list.h #define list_for_each_prev(pos, head) \ head 547 include/linux/list.h for (pos = (head)->prev; pos != (head); pos = pos->prev) head 555 include/linux/list.h #define list_for_each_safe(pos, n, head) \ head 556 include/linux/list.h for (pos = (head)->next, n = pos->next; pos != (head); \ head 565 include/linux/list.h #define list_for_each_prev_safe(pos, n, head) \ head 566 include/linux/list.h for (pos = (head)->prev, n = pos->prev; \ head 567 include/linux/list.h pos != (head); \ head 576 include/linux/list.h #define list_for_each_entry(pos, head, member) \ head 577 include/linux/list.h for (pos = list_first_entry(head, typeof(*pos), member); \ head 578 include/linux/list.h &pos->member != (head); \ head 587 include/linux/list.h #define list_for_each_entry_reverse(pos, head, member) \ head 588 include/linux/list.h for (pos = list_last_entry(head, typeof(*pos), member); \ head 589 include/linux/list.h &pos->member != (head); \ head 600 include/linux/list.h #define list_prepare_entry(pos, head, member) \ head 601 include/linux/list.h ((pos) ? 
: list_entry(head, typeof(*pos), member)) head 612 include/linux/list.h #define list_for_each_entry_continue(pos, head, member) \ head 614 include/linux/list.h &pos->member != (head); \ head 626 include/linux/list.h #define list_for_each_entry_continue_reverse(pos, head, member) \ head 628 include/linux/list.h &pos->member != (head); \ head 639 include/linux/list.h #define list_for_each_entry_from(pos, head, member) \ head 640 include/linux/list.h for (; &pos->member != (head); \ head 652 include/linux/list.h #define list_for_each_entry_from_reverse(pos, head, member) \ head 653 include/linux/list.h for (; &pos->member != (head); \ head 663 include/linux/list.h #define list_for_each_entry_safe(pos, n, head, member) \ head 664 include/linux/list.h for (pos = list_first_entry(head, typeof(*pos), member), \ head 666 include/linux/list.h &pos->member != (head); \ head 679 include/linux/list.h #define list_for_each_entry_safe_continue(pos, n, head, member) \ head 682 include/linux/list.h &pos->member != (head); \ head 695 include/linux/list.h #define list_for_each_entry_safe_from(pos, n, head, member) \ head 697 include/linux/list.h &pos->member != (head); \ head 710 include/linux/list.h #define list_for_each_entry_safe_reverse(pos, n, head, member) \ head 711 include/linux/list.h for (pos = list_last_entry(head, typeof(*pos), member), \ head 713 include/linux/list.h &pos->member != (head); \ head 849 include/linux/list.h #define hlist_for_each(pos, head) \ head 850 include/linux/list.h for (pos = (head)->first; pos ; pos = pos->next) head 852 include/linux/list.h #define hlist_for_each_safe(pos, n, head) \ head 853 include/linux/list.h for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ head 867 include/linux/list.h #define hlist_for_each_entry(pos, head, member) \ head 868 include/linux/list.h for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ head 898 include/linux/list.h #define hlist_for_each_entry_safe(pos, n, head, member) \ head 899 include/linux/list.h for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\ head 169 include/linux/list_bl.h #define hlist_bl_for_each_entry(tpos, pos, head, member) \ head 170 include/linux/list_bl.h for (pos = hlist_bl_first(head); \ head 183 include/linux/list_bl.h #define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member) \ head 184 include/linux/list_bl.h for (pos = hlist_bl_first(head); \ head 139 include/linux/list_lru.h struct list_head *head); head 105 include/linux/list_nulls.h #define hlist_nulls_for_each_entry(tpos, pos, head, member) \ head 106 include/linux/list_nulls.h for (pos = (head)->first; \ head 10 include/linux/list_sort.h void list_sort(void *priv, struct list_head *head, head 187 include/linux/llist.h static inline bool llist_empty(const struct llist_head *head) head 189 include/linux/llist.h return READ_ONCE(head->first) == NULL; head 199 include/linux/llist.h struct llist_head *head); head 207 include/linux/llist.h static inline bool llist_add(struct llist_node *new, struct llist_head *head) head 209 include/linux/llist.h return llist_add_batch(new, new, head); head 220 include/linux/llist.h static inline struct llist_node *llist_del_all(struct llist_head *head) head 222 include/linux/llist.h return xchg(&head->first, NULL); head 225 include/linux/llist.h extern struct llist_node *llist_del_first(struct llist_head *head); head 227 include/linux/llist.h struct llist_node *llist_reverse_order(struct llist_node *head); head 2071 include/linux/lsm_hooks.h struct hlist_head *head; head 2095 
include/linux/lsm_hooks.h { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } } head 481 include/linux/mISDNif.h struct hlist_head head; head 820 include/linux/memcontrol.h void mem_cgroup_split_huge_fixup(struct page *head); head 1146 include/linux/memcontrol.h static inline void mem_cgroup_split_huge_fixup(struct page *head) head 256 include/linux/mlx5/driver.h struct list_head head; head 945 include/linux/mlx5/driver.h struct mlx5_cmd_mailbox *head); head 221 include/linux/mm.h #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) head 155 include/linux/mroute_base.h void (*free)(struct rcu_head *head); head 42 include/linux/mtd/inftl.h int head,sect,cyl; head 33 include/linux/mtd/nftl.h int head,sect,cyl; head 2329 include/linux/netdevice.h struct list_head *head, head 2337 include/linux/netdevice.h return cb(head, skb); head 2344 include/linux/netdevice.h struct list_head *head, head 2352 include/linux/netdevice.h return cb(sk, head, skb); head 2375 include/linux/netdevice.h struct sk_buff *(*gro_receive)(struct list_head *head, head 2629 include/linux/netdevice.h void dev_close_many(struct list_head *head, bool unlink); head 2640 include/linux/netdevice.h void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); head 2641 include/linux/netdevice.h void unregister_netdevice_many(struct list_head *head); head 3634 include/linux/netdevice.h void netif_receive_skb_list(struct list_head *head); head 96 include/linux/netfilter.h struct rcu_head head; head 311 include/linux/netfilter.h struct list_head *head, struct net_device *in, struct net_device *out, head 318 include/linux/netfilter.h list_for_each_entry_safe(skb, next, head, list) { head 324 include/linux/netfilter.h list_splice(&sublist, head); head 405 include/linux/netfilter.h struct list_head *head, struct net_device *in, struct net_device *out, head 180 include/linux/netfilter/ipset/ip_set.h int (*head)(struct ip_set *set, struct sk_buff *skb); head 159 include/linux/netlink.h if (is_vmalloc_addr(skb->head)) head 162 include/linux/nfs_page.h nfs_list_add_request(struct nfs_page *req, struct list_head *head) head 164 include/linux/nfs_page.h list_add_tail(&req->wb_list, head); head 173 include/linux/nfs_page.h nfs_list_move_request(struct nfs_page *req, struct list_head *head) head 175 include/linux/nfs_page.h list_move_tail(&req->wb_list, head); head 191 include/linux/nfs_page.h nfs_list_entry(struct list_head *head) head 193 include/linux/nfs_page.h return list_entry(head, struct nfs_page, wb_list); head 1589 include/linux/nfs_xdr.h void (*error_cleanup)(struct list_head *head, int); head 62 include/linux/notifier.h struct notifier_block __rcu *head; head 67 include/linux/notifier.h struct notifier_block __rcu *head; head 71 include/linux/notifier.h struct notifier_block __rcu *head; head 77 include/linux/notifier.h struct notifier_block __rcu *head; head 82 include/linux/notifier.h (name)->head = NULL; \ head 86 include/linux/notifier.h (name)->head = NULL; \ head 89 include/linux/notifier.h (name)->head = NULL; \ head 99 include/linux/notifier.h .head = NULL } head 102 include/linux/notifier.h .head = NULL } head 104 include/linux/notifier.h .head = NULL } head 109 include/linux/notifier.h .head = NULL, \ head 174 include/linux/page-flags.h unsigned long head = READ_ONCE(page->compound_head); head 176 include/linux/page-flags.h if (unlikely(head & 1)) head 177 include/linux/page-flags.h return (struct page *) (head - 1); head 550 include/linux/page-flags.h __PAGEFLAG(Head, 
head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) head 552 include/linux/page-flags.h static __always_inline void set_compound_head(struct page *page, struct page *head) head 554 include/linux/page-flags.h WRITE_ONCE(page->compound_head, (unsigned long)head + 1); head 635 include/linux/page-flags.h struct page *head; head 643 include/linux/page-flags.h head = compound_head(page); head 646 include/linux/page-flags.h atomic_read(compound_mapcount_ptr(head)); head 835 include/linux/perf_event.h unsigned long head; head 1262 include/linux/perf_event.h struct hlist_head *head, int rctx, head 93 include/linux/plist.h #define PLIST_HEAD_INIT(head) \ head 95 include/linux/plist.h .node_list = LIST_HEAD_INIT((head).node_list) \ head 102 include/linux/plist.h #define PLIST_HEAD(head) \ head 103 include/linux/plist.h struct plist_head head = PLIST_HEAD_INIT(head) head 122 include/linux/plist.h plist_head_init(struct plist_head *head) head 124 include/linux/plist.h INIT_LIST_HEAD(&head->node_list); head 139 include/linux/plist.h extern void plist_add(struct plist_node *node, struct plist_head *head); head 140 include/linux/plist.h extern void plist_del(struct plist_node *node, struct plist_head *head); head 142 include/linux/plist.h extern void plist_requeue(struct plist_node *node, struct plist_head *head); head 149 include/linux/plist.h #define plist_for_each(pos, head) \ head 150 include/linux/plist.h list_for_each_entry(pos, &(head)->node_list, node_list) head 159 include/linux/plist.h #define plist_for_each_continue(pos, head) \ head 160 include/linux/plist.h list_for_each_entry_continue(pos, &(head)->node_list, node_list) head 170 include/linux/plist.h #define plist_for_each_safe(pos, n, head) \ head 171 include/linux/plist.h list_for_each_entry_safe(pos, n, &(head)->node_list, node_list) head 179 include/linux/plist.h #define plist_for_each_entry(pos, head, mem) \ head 180 include/linux/plist.h list_for_each_entry(pos, &(head)->node_list, mem.node_list) head 191 include/linux/plist.h #define plist_for_each_entry_continue(pos, head, m) \ head 192 include/linux/plist.h list_for_each_entry_continue(pos, &(head)->node_list, m.node_list) head 203 include/linux/plist.h #define plist_for_each_entry_safe(pos, n, head, m) \ head 204 include/linux/plist.h list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list) head 210 include/linux/plist.h static inline int plist_head_empty(const struct plist_head *head) head 212 include/linux/plist.h return list_empty(&head->node_list); head 233 include/linux/plist.h # define plist_first_entry(head, type, member) \ head 235 include/linux/plist.h WARN_ON(plist_head_empty(head)); \ head 236 include/linux/plist.h container_of(plist_first(head), type, member); \ head 239 include/linux/plist.h # define plist_first_entry(head, type, member) \ head 240 include/linux/plist.h container_of(plist_first(head), type, member) head 250 include/linux/plist.h # define plist_last_entry(head, type, member) \ head 252 include/linux/plist.h WARN_ON(plist_head_empty(head)); \ head 253 include/linux/plist.h container_of(plist_last(head), type, member); \ head 256 include/linux/plist.h # define plist_last_entry(head, type, member) \ head 257 include/linux/plist.h container_of(plist_last(head), type, member) head 280 include/linux/plist.h static inline struct plist_node *plist_first(const struct plist_head *head) head 282 include/linux/plist.h return list_entry(head->node_list.next, head 292 include/linux/plist.h static inline struct plist_node *plist_last(const struct plist_head 
*head) head 294 include/linux/plist.h return list_entry(head->node_list.prev, head 71 include/linux/posix-timers.h struct timerqueue_head *head; head 77 include/linux/posix-timers.h static inline bool cpu_timer_enqueue(struct timerqueue_head *head, head 80 include/linux/posix-timers.h ctmr->head = head; head 81 include/linux/posix-timers.h return timerqueue_add(head, &ctmr->node); head 86 include/linux/posix-timers.h if (ctmr->head) { head 87 include/linux/posix-timers.h timerqueue_del(ctmr->head, &ctmr->node); head 88 include/linux/posix-timers.h ctmr->head = NULL; head 264 include/linux/ptr_ring.h int head = consumer_head++; head 278 include/linux/ptr_ring.h while (likely(head >= r->consumer_tail)) head 279 include/linux/ptr_ring.h r->queue[head--] = NULL; head 514 include/linux/ptr_ring.h int head; head 526 include/linux/ptr_ring.h head = r->consumer_head - 1; head 527 include/linux/ptr_ring.h while (likely(head >= r->consumer_tail)) head 528 include/linux/ptr_ring.h r->queue[head--] = NULL; head 536 include/linux/ptr_ring.h head = r->consumer_head - 1; head 537 include/linux/ptr_ring.h if (head < 0) head 538 include/linux/ptr_ring.h head = r->size - 1; head 539 include/linux/ptr_ring.h if (r->queue[head]) { head 543 include/linux/ptr_ring.h r->queue[head] = batch[--n]; head 544 include/linux/ptr_ring.h r->consumer_tail = head; head 546 include/linux/ptr_ring.h WRITE_ONCE(r->consumer_head, head); head 22 include/linux/rcu_segcblist.h struct rcu_head *head; head 28 include/linux/rcu_segcblist.h #define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head } head 68 include/linux/rcu_segcblist.h struct rcu_head *head; head 83 include/linux/rcu_segcblist.h .head = NULL, \ head 84 include/linux/rcu_segcblist.h .tails[RCU_DONE_TAIL] = &n.head, \ head 85 include/linux/rcu_segcblist.h .tails[RCU_WAIT_TAIL] = &n.head, \ head 86 include/linux/rcu_segcblist.h .tails[RCU_NEXT_READY_TAIL] = &n.head, \ head 87 include/linux/rcu_segcblist.h .tails[RCU_NEXT_TAIL] = &n.head, \ head 95 include/linux/rculist.h static inline void list_add_rcu(struct list_head *new, struct list_head *head) head 97 include/linux/rculist.h __list_add_rcu(new, head, head->next); head 117 include/linux/rculist.h struct list_head *head) head 119 include/linux/rculist.h __list_add_rcu(new, head->prev, head); head 264 include/linux/rculist.h struct list_head *head, head 268 include/linux/rculist.h __list_splice_init_rcu(list, head, head->next, sync); head 279 include/linux/rculist.h struct list_head *head, head 283 include/linux/rculist.h __list_splice_init_rcu(list, head->prev, head, sync); head 350 include/linux/rculist.h #define list_next_or_null_rcu(head, ptr, type, member) \ head 352 include/linux/rculist.h struct list_head *__head = (head); \
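RCU_CBLIST_INITIALIZER() above fixes the shape of the RCU callback lists: head starts NULL and tail holds the address of head, so tail always points at the list's terminating next pointer. A minimal sketch of that head/tail-of-pointers queue, with local names rather than the kernel's:

```c
/* Singly linked queue where tail is a pointer to the last next pointer:
 * enqueue is O(1) and needs no empty-list special case. */
#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };
struct cblist { struct node *head; struct node **tail; };

static void cblist_init(struct cblist *l)
{
	l->head = NULL;
	l->tail = &l->head;	/* mirrors RCU_CBLIST_INITIALIZER */
}

static void cblist_enqueue(struct cblist *l, struct node *n)
{
	n->next = NULL;
	*l->tail = n;		/* *tail is always the NULL link */
	l->tail = &n->next;
}

static struct node *cblist_dequeue(struct cblist *l)
{
	struct node *n = l->head;

	if (!n)
		return NULL;
	l->head = n->next;
	if (!l->head)		/* emptied: point tail back at head */
		l->tail = &l->head;
	return n;
}

int main(void)
{
	struct cblist l;
	struct node *n;

	cblist_init(&l);
	for (int i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->val = i;
		cblist_enqueue(&l, n);
	}
	while ((n = cblist_dequeue(&l))) {
		printf("%d\n", n->val);
		free(n);
	}
	return 0;
}
```

The same reset of tail to &head on emptying is visible in the kernel/rcu/rcu_segcblist.c dequeue lines further down.

head 370 include/linux/rculist.h #define list_for_each_entry_rcu(pos, head, member, cond...)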
\ head 372 include/linux/rculist.h pos = list_entry_rcu((head)->next, typeof(*pos), member); \ head 373 include/linux/rculist.h &pos->member != (head); \ head 405 include/linux/rculist.h #define list_for_each_entry_lockless(pos, head, member) \ head 406 include/linux/rculist.h for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ head 407 include/linux/rculist.h &pos->member != (head); \ head 428 include/linux/rculist.h #define list_for_each_entry_continue_rcu(pos, head, member) \ head 430 include/linux/rculist.h &pos->member != (head); \ head 450 include/linux/rculist.h #define list_for_each_entry_from_rcu(pos, head, member) \ head 451 include/linux/rculist.h for (; &(pos)->member != (head); \ head 502 include/linux/rculist.h #define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first))) head 629 include/linux/rculist.h #define __hlist_for_each_rcu(pos, head) \ head 630 include/linux/rculist.h for (pos = rcu_dereference(hlist_first_rcu(head)); \ head 645 include/linux/rculist.h #define hlist_for_each_entry_rcu(pos, head, member, cond...) \ head 647 include/linux/rculist.h pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ head 666 include/linux/rculist.h #define hlist_for_each_entry_rcu_notrace(pos, head, member) \ head 667 include/linux/rculist.h for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\ head 683 include/linux/rculist.h #define hlist_for_each_entry_rcu_bh(pos, head, member) \ head 684 include/linux/rculist.h for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\ head 123 include/linux/rculist_bl.h #define hlist_bl_for_each_entry_rcu(tpos, pos, head, member) \ head 124 include/linux/rculist_bl.h for (pos = hlist_bl_first_rcu(head); \ head 41 include/linux/rculist_nulls.h #define hlist_nulls_first_rcu(head) \ head 42 include/linux/rculist_nulls.h (*((struct hlist_nulls_node __rcu __force **)&(head)->first)) head 152 include/linux/rculist_nulls.h #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ head 154 include/linux/rculist_nulls.h pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ head 167 include/linux/rculist_nulls.h #define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \ head 169 include/linux/rculist_nulls.h pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ head 38 include/linux/rcupdate.h void call_rcu(struct rcu_head *head, rcu_callback_t func); head 139 include/linux/rcupdate.h void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); head 187 include/linux/rcupdate.h void init_rcu_head(struct rcu_head *head); head 188 include/linux/rcupdate.h void destroy_rcu_head(struct rcu_head *head); head 189 include/linux/rcupdate.h void init_rcu_head_on_stack(struct rcu_head *head); head 190 include/linux/rcupdate.h void destroy_rcu_head_on_stack(struct rcu_head *head); head 192 include/linux/rcupdate.h static inline void init_rcu_head(struct rcu_head *head) { } head 193 include/linux/rcupdate.h static inline void destroy_rcu_head(struct rcu_head *head) { } head 194 include/linux/rcupdate.h static inline void init_rcu_head_on_stack(struct rcu_head *head) { } head 195 include/linux/rcupdate.h static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { } head 803 include/linux/rcupdate.h #define __kfree_rcu(head, offset) \ head 806 include/linux/rcupdate.h kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ head 16 include/linux/rcupdate_wait.h struct rcu_head head; head 19 include/linux/rcupdate_wait.h void 
wakeme_after_rcu(struct rcu_head *head); head 37 include/linux/rcutiny.h static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) head 39 include/linux/rcutiny.h call_rcu(head, func); head 36 include/linux/rcutree.h void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); head 7 include/linux/reboot-mode.h struct list_head head; head 32 include/linux/resource_ext.h extern void resource_list_free(struct list_head *head); head 35 include/linux/resource_ext.h struct list_head *head) head 37 include/linux/resource_ext.h list_add(&entry->node, head); head 41 include/linux/resource_ext.h struct list_head *head) head 43 include/linux/resource_ext.h list_add_tail(&entry->node, head); head 421 include/linux/rhashtable.h #define rht_for_each_from(pos, head, tbl, hash) \ head 422 include/linux/rhashtable.h for (pos = head; \ head 445 include/linux/rhashtable.h #define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \ head 446 include/linux/rhashtable.h for (pos = head; \ head 495 include/linux/rhashtable.h #define rht_for_each_rcu_from(pos, head, tbl, hash) \ head 497 include/linux/rhashtable.h pos = head; \ head 530 include/linux/rhashtable.h #define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \ head 532 include/linux/rhashtable.h pos = head; \ head 715 include/linux/rhashtable.h struct rhash_head *head; head 739 include/linux/rhashtable.h rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { head 746 include/linux/rhashtable.h params.obj_cmpfn(&arg, rht_obj(ht, head)) : head 747 include/linux/rhashtable.h rhashtable_compare(&arg, rht_obj(ht, head)))) { head 748 include/linux/rhashtable.h pprev = &head->next; head 752 include/linux/rhashtable.h data = rht_obj(ht, head); head 759 include/linux/rhashtable.h plist = container_of(head, struct rhlist_head, rhead); head 762 include/linux/rhashtable.h head = rht_dereference_bucket(head->next, tbl, hash); head 763 include/linux/rhashtable.h RCU_INIT_POINTER(list->rhead.next, head); head 784 include/linux/rhashtable.h head = rht_ptr(bkt, tbl, hash); head 786 include/linux/rhashtable.h RCU_INIT_POINTER(obj->next, head); head 107 include/linux/rtnetlink.h void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); head 48 include/linux/sched/wake_q.h static inline void wake_q_init(struct wake_q_head *head) head 50 include/linux/sched/wake_q.h head->first = WAKE_Q_TAIL; head 51 include/linux/sched/wake_q.h head->lastp = &head->first; head 54 include/linux/sched/wake_q.h static inline bool wake_q_empty(struct wake_q_head *head) head 56 include/linux/sched/wake_q.h return head->first == WAKE_Q_TAIL; head 59 include/linux/sched/wake_q.h extern void wake_q_add(struct wake_q_head *head, struct task_struct *task); head 60 include/linux/sched/wake_q.h extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task); head 61 include/linux/sched/wake_q.h extern void wake_up_q(struct wake_q_head *head); head 213 include/linux/seq_file.h extern struct list_head *seq_list_start(struct list_head *head, head 215 include/linux/seq_file.h extern struct list_head *seq_list_start_head(struct list_head *head, head 217 include/linux/seq_file.h extern struct list_head *seq_list_next(void *v, struct list_head *head, head 224 include/linux/seq_file.h extern struct hlist_node *seq_hlist_start(struct hlist_head *head, head 226 include/linux/seq_file.h extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head, head 228 include/linux/seq_file.h extern struct hlist_node 
*seq_hlist_next(void *v, struct hlist_head *head, head 231 include/linux/seq_file.h extern struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head, head 233 include/linux/seq_file.h extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head, head 236 include/linux/seq_file.h struct hlist_head *head, head 240 include/linux/seq_file.h extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos); head 242 include/linux/seq_file.h extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos); head 422 include/linux/serial_core.h #define uart_circ_empty(circ) ((circ)->head == (circ)->tail) head 423 include/linux/serial_core.h #define uart_circ_clear(circ) ((circ)->head = (circ)->tail = 0) head 426 include/linux/serial_core.h (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE)) head 429 include/linux/serial_core.h (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) head 880 include/linux/skbuff.h unsigned char *head, head 1385 include/linux/skbuff.h return skb->head + skb->end; head 1400 include/linux/skbuff.h return skb->end - skb->head; head 1896 include/linux/skbuff.h struct sk_buff_head *head) head 1899 include/linux/skbuff.h __skb_queue_splice(list, (struct sk_buff *) head, head->next); head 1900 include/linux/skbuff.h head->qlen += list->qlen; head 1912 include/linux/skbuff.h struct sk_buff_head *head) head 1915 include/linux/skbuff.h __skb_queue_splice(list, (struct sk_buff *) head, head->next); head 1916 include/linux/skbuff.h head->qlen += list->qlen; head 1927 include/linux/skbuff.h struct sk_buff_head *head) head 1930 include/linux/skbuff.h __skb_queue_splice(list, head->prev, (struct sk_buff *) head); head 1931 include/linux/skbuff.h head->qlen += list->qlen; head 1944 include/linux/skbuff.h struct sk_buff_head *head) head 1947 include/linux/skbuff.h __skb_queue_splice(list, head->prev, (struct sk_buff *) head); head 1948 include/linux/skbuff.h head->qlen += list->qlen; head 2155 include/linux/skbuff.h return skb->head + skb->tail; head 2160 include/linux/skbuff.h skb->tail = skb->data - skb->head; head 2303 include/linux/skbuff.h return skb->data - skb->head; head 2402 include/linux/skbuff.h return skb->head + skb->inner_transport_header; head 2412 include/linux/skbuff.h skb->inner_transport_header = skb->data - skb->head; head 2424 include/linux/skbuff.h return skb->head + skb->inner_network_header; head 2429 include/linux/skbuff.h skb->inner_network_header = skb->data - skb->head; head 2441 include/linux/skbuff.h return skb->head + skb->inner_mac_header; head 2446 include/linux/skbuff.h skb->inner_mac_header = skb->data - skb->head; head 2462 include/linux/skbuff.h return skb->head + skb->transport_header; head 2467 include/linux/skbuff.h skb->transport_header = skb->data - skb->head; head 2479 include/linux/skbuff.h return skb->head + skb->network_header; head 2484 include/linux/skbuff.h skb->network_header = skb->data - skb->head; head 2495 include/linux/skbuff.h return skb->head + skb->mac_header; head 2515 include/linux/skbuff.h skb->mac_header = skb->data - skb->head; head 2558 include/linux/skbuff.h return skb->head + skb->csum_start; head 4028 include/linux/skbuff.h skb->csum_start = ((unsigned char *)ptr + start) - skb->head; head 4340 include/linux/skbuff.h return (skb_mac_header(inner_skb) - inner_skb->head) - head 4366 include/linux/skbuff.h SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head; head 4380 include/linux/skbuff.h int 
plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start; head 4384 include/linux/skbuff.h SKB_GSO_CB(skb)->csum_start = csum_start - skb->head; head 57 include/linux/srcu.h void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, head 58 include/linux/srcu.h void (*func)(struct rcu_head *head)); head 346 include/linux/sunrpc/svc.h struct kvec *vec = &rqstp->rq_arg.head[0]; head 354 include/linux/sunrpc/svc.h struct kvec *vec = &rqstp->rq_res.head[0]; head 172 include/linux/sunrpc/svc_rdma.h struct svc_rdma_recv_ctxt *head, __be32 *p); head 53 include/linux/sunrpc/xdr.h struct kvec head[1], /* RPC header + non-page data */ head 72 include/linux/sunrpc/xdr.h buf->head[0].iov_base = start; head 73 include/linux/sunrpc/xdr.h buf->head[0].iov_len = len; head 129 include/linux/sunrpc/xprt.h #define rq_svec rq_snd_buf.head head 228 include/linux/swap.h struct swap_cluster_info head; head 335 include/linux/swap.h struct lruvec *lruvec, struct list_head *head); head 584 include/linux/syscalls.h asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, head 177 include/linux/sysctl.h void (*set_ownership)(struct ctl_table_header *head, head 180 include/linux/sysctl.h int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); head 485 include/linux/thunderbolt.h int head; head 19 include/linux/timerqueue.h extern bool timerqueue_add(struct timerqueue_head *head, head 21 include/linux/timerqueue.h extern bool timerqueue_del(struct timerqueue_head *head, head 34 include/linux/timerqueue.h struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) head 36 include/linux/timerqueue.h struct rb_node *leftmost = rb_first_cached(&head->rb_root); head 56 include/linux/timerqueue.h static inline void timerqueue_init_head(struct timerqueue_head *head) head 58 include/linux/timerqueue.h head->rb_root = RB_ROOT_CACHED; head 634 include/linux/trace_events.h struct pt_regs *regs, struct hlist_head *head, head 639 include/linux/trace_events.h u64 count, struct pt_regs *regs, void *head, head 642 include/linux/trace_events.h perf_tp_event(type, count, raw_data, size, regs, head, rctx, task); head 86 include/linux/tty.h struct tty_buffer *head; /* Queue head */ head 221 include/linux/types.h void (*func)(struct callback_head *head); head 225 include/linux/types.h typedef void (*rcu_callback_t)(struct rcu_head *head); head 226 include/linux/types.h typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); head 81 include/linux/udp.h struct list_head *head, head 70 include/linux/usb/hcd.h struct list_head head; head 123 include/linux/usb/phy.h struct list_head head; head 168 include/linux/visorbus.h u32 head; head 856 include/linux/vmw_vmci_defs.h u64 head; head 860 include/linux/vmw_vmci_defs.h head = vmci_q_header_consumer_head(consume_q_header); head 862 include/linux/vmw_vmci_defs.h if (tail >= produce_q_size || head >= produce_q_size) head 870 include/linux/vmw_vmci_defs.h if (tail >= head) head 871 include/linux/vmw_vmci_defs.h free_space = produce_q_size - (tail - head) - 1; head 873 include/linux/vmw_vmci_defs.h free_space = head - tail - 1; head 136 include/linux/vringh.h u16 *head); head 146 include/linux/vringh.h int vringh_complete_user(struct vringh *vrh, u16 head, u32 len); head 195 include/linux/vringh.h u16 *head, head 202 include/linux/vringh.h int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len); head 36 include/linux/wait.h struct list_head head;
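The vmw_vmci_defs.h lines above compute free space in a byte ring where head is the consumer offset and tail the producer offset, both bounded by produce_q_size, with one byte deliberately left unused so a full ring never looks empty. A small sketch of that arithmetic (the function name and the -1 error return are illustrative):

```c
/* Ring free space with one reserved byte: branch on whether the
 * producer (tail) has wrapped past the consumer (head). */
#include <assert.h>
#include <stdint.h>

static int64_t free_space(uint64_t head, uint64_t tail, uint64_t size)
{
	if (tail >= size || head >= size)
		return -1;			/* corrupt index */
	if (tail >= head)
		return size - (tail - head) - 1;
	return head - tail - 1;
}

int main(void)
{
	assert(free_space(0, 0, 100) == 99);	/* empty */
	assert(free_space(0, 99, 100) == 0);	/* full, 1 byte unused */
	assert(free_space(10, 5, 100) == 4);	/* producer wrapped */
	assert(free_space(0, 200, 100) == -1);	/* bad index */
	return 0;
}
```

head 56 include/linux/wait.h .head = { &(name).head, &(name).head }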
} head 126 include/linux/wait.h return !list_empty(&wq_head->head); head 139 include/linux/wait.h return list_is_singular(&wq_head->head); head 169 include/linux/wait.h list_add(&wq_entry->entry, &wq_head->head); head 184 include/linux/wait.h list_add_tail(&wq_entry->entry, &wq_head->head); head 149 include/linux/wimax/debug.h void __d_head(char *head, size_t head_size, head 153 include/linux/wimax/debug.h head[0] = 0; head 158 include/linux/wimax/debug.h snprintf(head, head_size, "%s %s: ", head 178 include/linux/wimax/debug.h char head[64]; \ head 181 include/linux/wimax/debug.h __d_head(head, sizeof(head), dev); \ head 182 include/linux/wimax/debug.h printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a); \ head 393 include/linux/wimax/debug.h char head[64]; \ head 396 include/linux/wimax/debug.h __d_head(head, sizeof(head), dev); \ head 397 include/linux/wimax/debug.h print_hex_dump(KERN_ERR, head, 0, 16, 1, \ head 68 include/linux/xattr.h struct list_head head; head 84 include/linux/xattr.h INIT_LIST_HEAD(&xattrs->head); head 95 include/linux/xattr.h list_for_each_entry_safe(xattr, node, &xattrs->head, list) { head 85 include/net/act_api.h struct list_head head; head 259 include/net/bluetooth/bluetooth.h struct hlist_head head; head 488 include/net/bluetooth/l2cap.h __u16 head; head 116 include/net/bond_alb.h u32 head; /* Index to the head of the bi-directional clients head 897 include/net/cfg80211.h const u8 *head, *tail; head 67 include/net/fq_impl.h struct list_head *head; head 73 include/net/fq_impl.h head = &tin->new_flows; head 74 include/net/fq_impl.h if (list_empty(head)) { head 75 include/net/fq_impl.h head = &tin->old_flows; head 76 include/net/fq_impl.h if (list_empty(head)) head 80 include/net/fq_impl.h flow = list_first_entry(head, struct fq_flow, flowchain); head 92 include/net/fq_impl.h if ((head == &tin->new_flows) && head 271 include/net/fq_impl.h struct list_head *head; head 275 include/net/fq_impl.h head = &tin->new_flows; head 276 include/net/fq_impl.h if (list_empty(head)) { head 277 include/net/fq_impl.h head = &tin->old_flows; head 278 include/net/fq_impl.h if (list_empty(head)) head 282 include/net/fq_impl.h flow = list_first_entry(head, struct fq_flow, flowchain); head 49 include/net/inet_common.h struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb); head 60 include/net/inet_common.h #define indirect_call_gro_receive(f2, f1, cb, head, skb) \ head 64 include/net/inet_common.h INDIRECT_CALL_2(cb, f2, f1, head, skb); \ head 173 include/net/inet_frag.h void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, head 98 include/net/inet_hashtables.h #define inet_bind_bucket_for_each(tb, head) \ head 99 include/net/inet_hashtables.h hlist_for_each_entry(tb, head, node) head 116 include/net/inet_hashtables.h struct hlist_head head; head 213 include/net/inet_hashtables.h struct inet_bind_hashbucket *head, head 156 include/net/ip.h void ip_list_rcv(struct list_head *head, struct packet_type *pt, head 70 include/net/ip6_checksum.h skb->csum_start = skb_transport_header(skb) - skb->head; head 288 include/net/ip6_fib.h void fib6_info_destroy_rcu(struct rcu_head *head); head 259 include/net/ip_tunnels.h void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); head 1474 include/net/ip_vs.h void ip_vs_dest_dst_rcu_free(struct rcu_head *head); head 975 include/net/ipv6.h void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, head 67 include/net/ipv6_frag.h struct sk_buff *head; head 94 
include/net/ipv6_frag.h head = inet_frag_pull_head(&fq->q); head 95 include/net/ipv6_frag.h if (!head) head 98 include/net/ipv6_frag.h head->dev = dev; head 101 include/net/ipv6_frag.h icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0); head 102 include/net/ipv6_frag.h kfree_skb(head); head 156 include/net/iucv/af_iucv.h struct hlist_head head; head 4047 include/net/mac80211.h struct sk_buff *head, head 13 include/net/netfilter/nf_conntrack_count.h struct list_head head; /* connections with the same filtering key */ head 144 include/net/netfilter/nf_conntrack_helper.h struct list_head head; head 21 include/net/netfilter/nf_conntrack_timeout.h struct list_head head; head 687 include/net/netfilter/nf_tables.h struct nft_set_gc_batch_head head; head 698 include/net/netfilter/nf_tables.h call_rcu(&gcb->head.rcu, nft_set_gc_batch_release); head 706 include/net/netfilter/nf_tables.h if (gcb->head.cnt + 1 < ARRAY_SIZE(gcb->elems)) head 716 include/net/netfilter/nf_tables.h gcb->elems[gcb->head.cnt++] = elem; head 434 include/net/netlink.h int __nla_validate(const struct nlattr *head, int len, int maxtype, head 437 include/net/netlink.h int __nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, head 441 include/net/netlink.h struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype); head 584 include/net/netlink.h const struct nlattr *head, int len, head 588 include/net/netlink.h return __nla_parse(tb, maxtype, head, len, policy, head 609 include/net/netlink.h const struct nlattr *head, int len, head 613 include/net/netlink.h return __nla_parse(tb, maxtype, head, len, policy, head 634 include/net/netlink.h const struct nlattr *head, head 639 include/net/netlink.h return __nla_parse(tb, maxtype, head, len, policy, head 760 include/net/netlink.h static inline int nla_validate_deprecated(const struct nlattr *head, int len, head 765 include/net/netlink.h return __nla_validate(head, len, maxtype, policy, NL_VALIDATE_LIBERAL, head 784 include/net/netlink.h static inline int nla_validate(const struct nlattr *head, int len, int maxtype, head 788 include/net/netlink.h return __nla_validate(head, len, maxtype, policy, NL_VALIDATE_STRICT, head 999 include/net/netlink.h #define nlmsg_for_each_msg(pos, head, len, rem) \ head 1000 include/net/netlink.h for (pos = head, rem = len; \ head 1840 include/net/netlink.h #define nla_for_each_attr(pos, head, len, rem) \ head 1841 include/net/netlink.h for (pos = head, rem = len; \ head 111 include/net/netns/ipv6.h struct hlist_head head; head 104 include/net/nexthop.h void nexthop_free_rcu(struct rcu_head *head); head 480 include/net/pkt_cls.h ptr >= skb->head && head 83 include/net/rtnetlink.h struct list_head *head); head 167 include/net/rtnetlink.h int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, head 51 include/net/sch_generic.h struct sk_buff *head; head 291 include/net/sch_generic.h struct list_head head; head 637 include/net/sch_generic.h void dev_deactivate_many(struct list_head *head); head 948 include/net/sch_generic.h qh->head = NULL; head 964 include/net/sch_generic.h qh->head = skb; head 979 include/net/sch_generic.h skb->next = qh->head; head 981 include/net/sch_generic.h if (!qh->head) head 983 include/net/sch_generic.h qh->head = skb; head 989 include/net/sch_generic.h struct sk_buff *skb = qh->head; head 992 include/net/sch_generic.h qh->head = skb->next; head 994 include/net/sch_generic.h if (qh->head == NULL) head 1060 include/net/sch_generic.h return qh->head;
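nla_for_each_attr() above walks netlink attributes as a type/length/value stream, advancing by each attribute's padded length while a remaining-bytes counter guards against truncation. A simplified, self-contained sketch of that walk; real netlink uses struct nlattr with NLA_ALIGNTO padding and the validation policies listed above, so the struct, macros, and bounds here are illustrative only:

```c
/* TLV walk: stop when fewer bytes remain than a header, or when the
 * claimed length is shorter than a header or longer than what's left. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct attr { uint16_t len; uint16_t type; /* payload follows */ };

#define ALIGN4(n)	(((n) + 3u) & ~3u)
#define attr_ok(a, rem)						\
	((rem) >= (int)sizeof(struct attr) &&			\
	 (a)->len >= sizeof(struct attr) && (a)->len <= (unsigned)(rem))
#define attr_next(a, rem)					\
	((rem) -= ALIGN4((a)->len),				\
	 (struct attr *)((char *)(a) + ALIGN4((a)->len)))

int main(void)
{
	uint32_t buf[16];		/* 4-byte aligned backing store */
	struct attr *a = (struct attr *)buf, *b;
	int rem;

	a->type = 1; a->len = sizeof(*a) + 2;
	memcpy(a + 1, "hi", 2);
	b = (struct attr *)((char *)buf + ALIGN4(a->len));
	b->type = 2; b->len = sizeof(*b) + 5;
	memcpy(b + 1, "world", 5);
	rem = ALIGN4(a->len) + ALIGN4(b->len);

	for (struct attr *pos = a; attr_ok(pos, rem); pos = attr_next(pos, rem))
		printf("type %u, %u payload bytes\n", (unsigned)pos->type,
		       (unsigned)(pos->len - sizeof(struct attr)));
	return 0;
}
```

head 1138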
include/net/sch_generic.h rtnl_kfree_skbs(qh->head, qh->tail); head 1140 include/net/sch_generic.h qh->head = NULL; head 372 include/net/sctp/sctp.h #define sctp_skb_for_each(pos, head, tmp) \ head 373 include/net/sctp/sctp.h skb_queue_walk_safe(head, pos, tmp) head 413 include/net/sctp/sctp.h static inline int sctp_list_single_entry(struct list_head *head) head 415 include/net/sctp/sctp.h return (head->next != head) && (head->next == head->prev); head 509 include/net/sctp/sctp.h #define sctp_for_each_hentry(epb, head) \ head 510 include/net/sctp/sctp.h hlist_for_each_entry(epb, head, node) head 381 include/net/sock.h struct sk_buff *head; head 572 include/net/sock.h static inline struct sock *__sk_head(const struct hlist_head *head) head 574 include/net/sock.h return hlist_entry(head->first, struct sock, sk_node); head 577 include/net/sock.h static inline struct sock *sk_head(const struct hlist_head *head) head 579 include/net/sock.h return hlist_empty(head) ? NULL : __sk_head(head); head 582 include/net/sock.h static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head) head 584 include/net/sock.h return hlist_nulls_entry(head->first, struct sock, sk_nulls_node); head 587 include/net/sock.h static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head) head 589 include/net/sock.h return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head); head 774 include/net/sock.h #define sk_for_each_entry_offset_rcu(tpos, pos, head, offset) \ head 775 include/net/sock.h for (pos = rcu_dereference(hlist_first_rcu(head)); \ head 908 include/net/sock.h sk->sk_backlog.head = skb; head 1550 include/net/tcp.h struct hlist_head head; head 1926 include/net/tcp.h struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb); head 58 include/net/udp.h struct hlist_head head; head 169 include/net/udp.h struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, head 83 include/rdma/rdmavt_cq.h u32 head; /* index of next entry to fill */ head 212 include/rdma/rdmavt_qp.h u32 head; /* new work requests posted to the head */ head 957 include/rdma/rdmavt_qp.h RDMA_READ_UAPI_ATOMIC(cq->queue->head) : head 958 include/rdma/rdmavt_qp.h ibcq_to_rvtcq(send_cq)->kqueue->head; head 101 include/trace/events/jbd2.h __field( int, head ) head 108 include/trace/events/jbd2.h __entry->head = journal->j_tail_sequence; head 113 include/trace/events/jbd2.h __entry->transaction, __entry->sync_commit, __entry->head) head 576 include/trace/events/rpcrdma.h __entry->headlen = rqst->rq_snd_buf.head[0].iov_len; head 1066 include/trace/events/rpcrdma.h __entry->base = rqst->rq_rcv_buf.head[0].iov_base; head 385 include/trace/events/sunrpc.h __entry->head_base = xdr->buf->head[0].iov_base, head 386 include/trace/events/sunrpc.h __entry->head_len = xdr->buf->head[0].iov_len, head 444 include/trace/events/sunrpc.h __entry->head_base = xdr->buf->head[0].iov_base, head 445 include/trace/events/sunrpc.h __entry->head_len = xdr->buf->head[0].iov_len, head 485 include/trace/events/sunrpc.h __entry->head_base = req->rq_rcv_buf.head[0].iov_base; head 486 include/trace/events/sunrpc.h __entry->head_len = req->rq_rcv_buf.head[0].iov_len; head 41 include/trace/perf.h struct hlist_head *head; \ head 48 include/trace/perf.h head = this_cpu_ptr(event_call->perf_events); \ head 51 include/trace/perf.h hlist_empty(head)) \ head 70 include/trace/perf.h head, __task); \ head 173 include/uapi/drm/mga_drm.h unsigned int head; /* Position of head pointer */ head 17 include/uapi/linux/fd.h 
head, /* nr of heads */ head 74 include/uapi/linux/fd.h unsigned int device,head,track; head 82 include/uapi/linux/fsmap.h struct fsmap_head *head) head 84 include/uapi/linux/fsmap.h head->fmh_keys[0] = head->fmh_recs[head->fmh_entries - 1]; head 92 include/uapi/linux/io_uring.h __u32 head; head 109 include/uapi/linux/io_uring.h __u32 head; head 44 include/uapi/linux/seg6_iptunnel.h int head = 0; head 50 include/uapi/linux/seg6_iptunnel.h head = sizeof(struct ipv6hdr); head 56 include/uapi/linux/seg6_iptunnel.h return ((tuninfo->srh->hdrlen + 1) << 3) + head; head 70 include/uapi/linux/virtio_iommu.h struct virtio_iommu_req_head head; head 78 include/uapi/linux/virtio_iommu.h struct virtio_iommu_req_head head; head 94 include/uapi/linux/virtio_iommu.h struct virtio_iommu_req_head head; head 104 include/uapi/linux/virtio_iommu.h struct virtio_iommu_req_head head; head 126 include/uapi/linux/virtio_iommu.h struct virtio_iommu_probe_property head; head 134 include/uapi/linux/virtio_iommu.h struct virtio_iommu_req_head head; head 30 include/uapi/rdma/rvt-abi.h RDMA_ATOMIC_UAPI(__u32, head); head 61 include/uapi/rdma/rvt-abi.h RDMA_ATOMIC_UAPI(__u32, head); head 118 include/xen/grant_table.h void gnttab_free_grant_references(grant_ref_t head); head 276 include/xen/interface/event_channel.h event_word_t head[EVTCHN_FIFO_MAX_QUEUES]; head 53 init/initramfs.c } *head[32]; head 66 init/initramfs.c for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) { head 93 init/initramfs.c for (p = head; p < head + 32; p++) { head 117 ipc/msg.c static void msg_rcu_free(struct rcu_head *head) head 119 ipc/msg.c struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); head 298 ipc/sem.c static void sem_rcu_free(struct rcu_head *head) head 300 ipc/sem.c struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); head 218 ipc/shm.c static void shm_rcu_free(struct rcu_head *head) head 220 ipc/shm.c struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm, head 500 ipc/util.c void (*func)(struct rcu_head *head)) head 172 ipc/util.h void (*func)(struct rcu_head *head)); head 238 kernel/audit.h extern void audit_free_rule_rcu(struct rcu_head *head); head 21 kernel/audit_tree.c struct rcu_head head; head 32 kernel/audit_tree.c struct rcu_head head; head 119 kernel/audit_tree.c kfree_rcu(tree, head); head 147 kernel/audit_tree.c struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head); head 158 kernel/audit_tree.c call_rcu(&chunk->head, __put_chunk); head 97 kernel/auditfilter.c void audit_free_rule_rcu(struct rcu_head *head) head 99 kernel/auditfilter.c struct audit_entry *e = container_of(head, struct audit_entry, rcu); head 91 kernel/bpf/cgroup.c static u32 prog_list_length(struct list_head *head) head 96 kernel/bpf/cgroup.c list_for_each_entry(pl, head, node) { head 884 kernel/bpf/cgroup.c int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, head 891 kernel/bpf/cgroup.c .head = head, head 1185 kernel/bpf/cgroup.c if (!ctx->head) head 1187 kernel/bpf/cgroup.c tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len); head 71 kernel/bpf/core.c if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) head 239 kernel/bpf/devmap.c struct hlist_head *head; head 242 kernel/bpf/devmap.c head = dev_map_index_hash(dtab, i); head 244 kernel/bpf/devmap.c hlist_for_each_entry_safe(dev, next, head, index_hlist) { head 293 kernel/bpf/devmap.c struct hlist_head *head = dev_map_index_hash(dtab, key);
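The init/initramfs.c lines above show a fixed 32-bucket hash whose chains are searched and extended through a pointer-to-pointer cursor, so the empty-bucket and append cases collapse into one store. A sketch of that find-or-insert idiom; the kernel hashes major/minor/ino, while the key & 31 here is a stand-in:

```c
/* Find-or-insert through struct entry **: when the loop falls off the
 * chain, *p is exactly the NULL link that the new node must fill. */
#include <stdio.h>
#include <stdlib.h>

struct entry { struct entry *next; unsigned key; const char *name; };
static struct entry *head[32];

static struct entry *find_or_insert(unsigned key, const char *name)
{
	struct entry **p;

	for (p = head + (key & 31); *p; p = &(*p)->next)
		if ((*p)->key == key)
			return *p;		/* already present */

	*p = calloc(1, sizeof(**p));
	(*p)->key = key;
	(*p)->name = name;
	return *p;
}

int main(void)
{
	find_or_insert(7, "seven");
	find_or_insert(39, "thirty-nine");	/* 39 & 31 == 7: chains */
	printf("%s\n", find_or_insert(7, "dup")->name);	/* "seven" */
	return 0;
}
```

head 296 kernel/bpf/devmap.c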
hlist_for_each_entry_rcu(dev, head, index_hlist, head 310 kernel/bpf/devmap.c struct hlist_head *head; head 335 kernel/bpf/devmap.c head = dev_map_index_hash(dtab, i); head 337 kernel/bpf/devmap.c next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), head 750 kernel/bpf/devmap.c struct hlist_head *head; head 753 kernel/bpf/devmap.c head = dev_map_index_hash(dtab, i); head 755 kernel/bpf/devmap.c hlist_for_each_entry_safe(dev, next, head, index_hlist) { head 21 kernel/bpf/hashtab.c struct hlist_nulls_head head; head 373 kernel/bpf/hashtab.c INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); head 417 kernel/bpf/hashtab.c return &__select_bucket(htab, hash)->head; head 421 kernel/bpf/hashtab.c static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash, head 427 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) head 438 kernel/bpf/hashtab.c static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head, head 446 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) head 464 kernel/bpf/hashtab.c struct hlist_nulls_head *head; head 475 kernel/bpf/hashtab.c head = select_bucket(htab, hash); head 477 kernel/bpf/hashtab.c l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); head 574 kernel/bpf/hashtab.c struct hlist_nulls_head *head; head 581 kernel/bpf/hashtab.c head = &b->head; head 585 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) head 600 kernel/bpf/hashtab.c struct hlist_nulls_head *head; head 614 kernel/bpf/hashtab.c head = select_bucket(htab, hash); head 617 kernel/bpf/hashtab.c l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); head 639 kernel/bpf/hashtab.c head = select_bucket(htab, i); head 642 kernel/bpf/hashtab.c next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)), head 662 kernel/bpf/hashtab.c static void htab_elem_free_rcu(struct rcu_head *head) head 664 kernel/bpf/hashtab.c struct htab_elem *l = container_of(head, struct htab_elem, rcu); head 824 kernel/bpf/hashtab.c struct hlist_nulls_head *head; head 841 kernel/bpf/hashtab.c head = &b->head; head 847 kernel/bpf/hashtab.c l_old = lookup_nulls_elem_raw(head, hash, key, key_size, head 868 kernel/bpf/hashtab.c l_old = lookup_elem_raw(head, hash, key, key_size); head 899 kernel/bpf/hashtab.c hlist_nulls_add_head_rcu(&l_new->hash_node, head); head 916 kernel/bpf/hashtab.c struct hlist_nulls_head *head; head 933 kernel/bpf/hashtab.c head = &b->head; head 948 kernel/bpf/hashtab.c l_old = lookup_elem_raw(head, hash, key, key_size); head 957 kernel/bpf/hashtab.c hlist_nulls_add_head_rcu(&l_new->hash_node, head); head 981 kernel/bpf/hashtab.c struct hlist_nulls_head *head; head 998 kernel/bpf/hashtab.c head = &b->head; head 1003 kernel/bpf/hashtab.c l_old = lookup_elem_raw(head, hash, key, key_size); head 1020 kernel/bpf/hashtab.c hlist_nulls_add_head_rcu(&l_new->hash_node, head); head 1034 kernel/bpf/hashtab.c struct hlist_nulls_head *head; head 1051 kernel/bpf/hashtab.c head = &b->head; head 1067 kernel/bpf/hashtab.c l_old = lookup_elem_raw(head, hash, key, key_size); head 1082 kernel/bpf/hashtab.c hlist_nulls_add_head_rcu(&l_new->hash_node, head); head 1110 kernel/bpf/hashtab.c struct hlist_nulls_head *head; head 1123 kernel/bpf/hashtab.c head = &b->head; head 1127 kernel/bpf/hashtab.c l = lookup_elem_raw(head, hash, key, key_size); head 1142 kernel/bpf/hashtab.c struct hlist_nulls_head *head; head 1155 kernel/bpf/hashtab.c head = &b->head; head 
1159 kernel/bpf/hashtab.c l = lookup_elem_raw(head, hash, key, key_size); head 1177 kernel/bpf/hashtab.c struct hlist_nulls_head *head = select_bucket(htab, i); head 1181 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { head 1395 kernel/bpf/hashtab.c struct hlist_nulls_head *head; head 1400 kernel/bpf/hashtab.c head = select_bucket(htab, i); head 1402 kernel/bpf/hashtab.c hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { head 15 kernel/bpf/percpu_freelist.c struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu); head 17 kernel/bpf/percpu_freelist.c raw_spin_lock_init(&head->lock); head 18 kernel/bpf/percpu_freelist.c head->first = NULL; head 28 kernel/bpf/percpu_freelist.c static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head, head 31 kernel/bpf/percpu_freelist.c raw_spin_lock(&head->lock); head 32 kernel/bpf/percpu_freelist.c node->next = head->first; head 33 kernel/bpf/percpu_freelist.c head->first = node; head 34 kernel/bpf/percpu_freelist.c raw_spin_unlock(&head->lock); head 40 kernel/bpf/percpu_freelist.c struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); head 42 kernel/bpf/percpu_freelist.c ___pcpu_freelist_push(head, node); head 58 kernel/bpf/percpu_freelist.c struct pcpu_freelist_head *head; head 72 kernel/bpf/percpu_freelist.c head = per_cpu_ptr(s->freelist, cpu); head 73 kernel/bpf/percpu_freelist.c ___pcpu_freelist_push(head, buf); head 86 kernel/bpf/percpu_freelist.c struct pcpu_freelist_head *head; head 92 kernel/bpf/percpu_freelist.c head = per_cpu_ptr(s->freelist, cpu); head 93 kernel/bpf/percpu_freelist.c raw_spin_lock(&head->lock); head 94 kernel/bpf/percpu_freelist.c node = head->first; head 96 kernel/bpf/percpu_freelist.c head->first = node->next; head 97 kernel/bpf/percpu_freelist.c raw_spin_unlock(&head->lock); head 100 kernel/bpf/percpu_freelist.c raw_spin_unlock(&head->lock); head 19 kernel/bpf/queue_stack_maps.c u32 head, tail; head 32 kernel/bpf/queue_stack_maps.c return qs->head == qs->tail; head 37 kernel/bpf/queue_stack_maps.c u32 head = qs->head + 1; head 39 kernel/bpf/queue_stack_maps.c if (unlikely(head >= qs->size)) head 40 kernel/bpf/queue_stack_maps.c head = 0; head 42 kernel/bpf/queue_stack_maps.c return head == qs->tail; head 159 kernel/bpf/queue_stack_maps.c index = qs->head - 1; head 167 kernel/bpf/queue_stack_maps.c qs->head = index; head 228 kernel/bpf/queue_stack_maps.c dst = &qs->elements[qs->head * qs->map.value_size]; head 231 kernel/bpf/queue_stack_maps.c if (unlikely(++qs->head >= qs->size)) head 232 kernel/bpf/queue_stack_maps.c qs->head = 0; head 779 kernel/bpf/verifier.c struct bpf_verifier_stack_elem *elem, *head = env->head; head 782 kernel/bpf/verifier.c if (env->head == NULL) head 786 kernel/bpf/verifier.c err = copy_verifier_state(cur, &head->st); head 791 kernel/bpf/verifier.c *insn_idx = head->insn_idx; head 793 kernel/bpf/verifier.c *prev_insn_idx = head->prev_insn_idx; head 794 kernel/bpf/verifier.c elem = head->next; head 795 kernel/bpf/verifier.c free_verifier_state(&head->st, false); head 796 kernel/bpf/verifier.c kfree(head); head 797 kernel/bpf/verifier.c env->head = elem; head 816 kernel/bpf/verifier.c elem->next = env->head; head 817 kernel/bpf/verifier.c env->head = elem; head 48 kernel/events/callchain.c static void release_callchain_buffers_rcu(struct rcu_head *head) head 53 kernel/events/callchain.c entries = container_of(head, struct callchain_cpus_entries, rcu_head);
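The queue_stack_maps.c lines above encode the classic one-slot-sacrifice ring test: head == tail means empty, and the ring is full when advancing head with wrap-around would land on tail. A runnable sketch of both predicates plus the enqueue/dequeue steps they protect (the struct and sizes are illustrative):

```c
/* Ring of size N holding at most N - 1 elements so that full and empty
 * remain distinguishable without a separate count. */
#include <assert.h>
#include <stdbool.h>

struct qs { unsigned head, tail, size; int elem[4]; };

static bool queue_empty(const struct qs *q) { return q->head == q->tail; }

static bool queue_full(const struct qs *q)
{
	unsigned head = q->head + 1;

	if (head >= q->size)
		head = 0;
	return head == q->tail;
}

static bool enqueue(struct qs *q, int v)
{
	if (queue_full(q))
		return false;
	q->elem[q->head] = v;
	if (++q->head >= q->size)
		q->head = 0;
	return true;
}

static bool dequeue(struct qs *q, int *v)
{
	if (queue_empty(q))
		return false;
	*v = q->elem[q->tail];
	if (++q->tail >= q->size)
		q->tail = 0;
	return true;
}

int main(void)
{
	struct qs q = { 0, 0, 4, { 0 } };
	int v;

	assert(enqueue(&q, 1) && enqueue(&q, 2) && enqueue(&q, 3));
	assert(queue_full(&q));		/* size 4 stores only 3 */
	assert(dequeue(&q, &v) && v == 1);
	return 0;
}
```

head 1160 kernel/events/core.c struct list_head *head =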
this_cpu_ptr(&active_ctx_list); head 1166 kernel/events/core.c list_add(&ctx->active_ctx_list, head); head 1183 kernel/events/core.c static void free_ctx(struct rcu_head *head) head 1187 kernel/events/core.c ctx = container_of(head, struct perf_event_context, rcu_head); head 3856 kernel/events/core.c struct list_head *head = this_cpu_ptr(&active_ctx_list); head 3866 kernel/events/core.c list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) head 4323 kernel/events/core.c static void free_event_rcu(struct rcu_head *head) head 4327 kernel/events/core.c event = container_of(head, struct perf_event, rcu_head); head 4539 kernel/events/core.c struct list_head *head); head 7698 kernel/events/core.c void perf_event_aux_event(struct perf_event *event, unsigned long head, head 7714 kernel/events/core.c .offset = head, head 8419 kernel/events/core.c struct hlist_head *head; head 8422 kernel/events/core.c head = find_swevent_head_rcu(swhash, type, event_id); head 8423 kernel/events/core.c if (!head) head 8426 kernel/events/core.c hlist_for_each_entry_rcu(event, head, hlist_entry) { head 8486 kernel/events/core.c struct hlist_head *head; head 8495 kernel/events/core.c head = find_swevent_head(swhash, event); head 8496 kernel/events/core.c if (WARN_ON_ONCE(!head)) head 8499 kernel/events/core.c hlist_add_head_rcu(&event->hlist_entry, head); head 8707 kernel/events/core.c struct pt_regs *regs, struct hlist_head *head, head 8712 kernel/events/core.c if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) { head 8717 kernel/events/core.c perf_tp_event(call->event.type, count, raw_data, size, regs, head, head 8723 kernel/events/core.c struct pt_regs *regs, struct hlist_head *head, int rctx, head 8741 kernel/events/core.c hlist_for_each_entry_rcu(event, head, hlist_entry) { head 9186 kernel/events/core.c struct list_head *head) head 9201 kernel/events/core.c if (head) head 9202 kernel/events/core.c list_splice(head, &event->addr_filters.list); head 26 kernel/events/internal.h local_t head; /* write position */ head 92 kernel/events/internal.h void perf_event_aux_event(struct perf_event *event, unsigned long head, head 53 kernel/events/ring_buffer.c unsigned long head; head 76 kernel/events/ring_buffer.c head = local_read(&rb->head); head 110 kernel/events/ring_buffer.c WRITE_ONCE(rb->user_page->data_head, head); head 125 kernel/events/ring_buffer.c if (unlikely(head != local_read(&rb->head))) { head 138 kernel/events/ring_buffer.c ring_buffer_has_space(unsigned long head, unsigned long tail, head 143 kernel/events/ring_buffer.c return CIRC_SPACE(head, tail, data_size) >= size; head 145 kernel/events/ring_buffer.c return CIRC_SPACE(tail, head, data_size) >= size; head 154 kernel/events/ring_buffer.c unsigned long tail, offset, head; head 193 kernel/events/ring_buffer.c offset = head = local_read(&rb->head); head 195 kernel/events/ring_buffer.c if (unlikely(!ring_buffer_has_space(head, tail, head 214 kernel/events/ring_buffer.c head += size; head 216 kernel/events/ring_buffer.c head -= size; head 217 kernel/events/ring_buffer.c } while (local_cmpxchg(&rb->head, offset, head) != offset); head 220 kernel/events/ring_buffer.c offset = head; head 221 kernel/events/ring_buffer.c head = (u64)(-head); head 229 kernel/events/ring_buffer.c if (unlikely(head - local_read(&rb->wakeup) > rb->watermark)) head 410 kernel/events/ring_buffer.c handle->head = aux_head; head 485 kernel/events/ring_buffer.c aux_head = handle->head; head 548 kernel/events/ring_buffer.c handle->head = rb->aux_head;
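The ring_buffer.c lines above are perf's multi-writer reservation loop: snapshot head, add the record size, and publish with a compare-and-swap; losing the race simply retries from the new head. A sketch of the same loop using C11 atomics in place of the kernel's local_t operations, with the CIRC_SPACE() check against the reader's tail elided:

```c
/* CAS-based space reservation: on success the writer owns
 * [offset, offset + size) exclusively and can fill it racelessly. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long rb_head;

static unsigned long reserve(unsigned long size)
{
	unsigned long offset, head;

	do {
		offset = atomic_load(&rb_head);
		head = offset + size;
		/* The real code checks ring_buffer_has_space(head, tail)
		 * here before trying to commit. */
	} while (!atomic_compare_exchange_weak(&rb_head, &offset, head));

	return offset;
}

int main(void)
{
	printf("%lu\n", reserve(16));	/* 0  */
	printf("%lu\n", reserve(8));	/* 16 */
	return 0;
}
```

head 1302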
kernel/events/uprobes.c struct list_head *head) head 1308 kernel/events/uprobes.c INIT_LIST_HEAD(head); head 1319 kernel/events/uprobes.c list_add(&u->pending_list, head); head 1326 kernel/events/uprobes.c list_add(&u->pending_list, head); head 918 kernel/futex.c struct list_head *next, *head = &curr->pi_state_list; head 931 kernel/futex.c while (!list_empty(head)) { head 932 kernel/futex.c next = head->next; head 962 kernel/futex.c if (head->next != next) { head 3497 kernel/futex.c SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, head 3505 kernel/futex.c if (unlikely(len != sizeof(*head))) head 3508 kernel/futex.c current->robust_list = head; head 3523 kernel/futex.c struct robust_list_head __user *head; head 3545 kernel/futex.c head = p->robust_list; head 3548 kernel/futex.c if (put_user(sizeof(*head), len_ptr)) head 3550 kernel/futex.c return put_user(head, head_ptr); head 3674 kernel/futex.c struct robust_list __user * __user *head, head 3679 kernel/futex.c if (get_user(uentry, (unsigned long __user *)head)) head 3696 kernel/futex.c struct robust_list_head __user *head = curr->robust_list; head 3710 kernel/futex.c if (fetch_robust_entry(&entry, &head->list.next, &pi)) head 3715 kernel/futex.c if (get_user(futex_offset, &head->futex_offset)) head 3721 kernel/futex.c if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) head 3725 kernel/futex.c while (entry != &head->list) { head 3968 kernel/futex.c compat_uptr_t __user *head, unsigned int *pi) head 3970 kernel/futex.c if (get_user(*uentry, head)) head 3996 kernel/futex.c struct compat_robust_list_head __user *head = curr->compat_robust_list; head 4011 kernel/futex.c if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) head 4016 kernel/futex.c if (get_user(futex_offset, &head->futex_offset)) head 4023 kernel/futex.c &head->list_op_pending, &pip)) head 4027 kernel/futex.c while (entry != (struct robust_list __user *) &head->list) { head 4066 kernel/futex.c struct compat_robust_list_head __user *, head, head 4072 kernel/futex.c if (unlikely(len != sizeof(*head))) head 4075 kernel/futex.c current->compat_robust_list = head; head 4084 kernel/futex.c struct compat_robust_list_head __user *head; head 4106 kernel/futex.c head = p->compat_robust_list; head 4109 kernel/futex.c if (put_user(sizeof(*head), len_ptr)) head 4111 kernel/futex.c return put_user(ptr_to_compat(head), head_ptr); head 59 kernel/gcov/clang.c struct list_head head; head 69 kernel/gcov/clang.c struct list_head head; head 92 kernel/gcov/clang.c INIT_LIST_HEAD(&info->head); head 97 kernel/gcov/clang.c list_add_tail(&info->head, &clang_gcov_list); head 125 kernel/gcov/clang.c INIT_LIST_HEAD(&info->head); head 133 kernel/gcov/clang.c list_add_tail(&info->head, ¤t_info->functions); head 140 kernel/gcov/clang.c struct gcov_fn_info, head); head 186 kernel/gcov/clang.c struct gcov_info, head); head 187 kernel/gcov/clang.c if (list_is_last(&info->head, &clang_gcov_list)) head 189 kernel/gcov/clang.c return list_next_entry(info, head); head 198 kernel/gcov/clang.c list_add_tail(&info->head, &clang_gcov_list); head 209 kernel/gcov/clang.c __list_del_entry(&info->head); head 238 kernel/gcov/clang.c list_for_each_entry(fn, &info->functions, head) head 253 kernel/gcov/clang.c &info1->functions, struct gcov_fn_info, head); head 255 kernel/gcov/clang.c &info2->functions, struct gcov_fn_info, head); head 261 kernel/gcov/clang.c while (!list_is_last(&fn_ptr1->head, &info1->functions) && head 262 kernel/gcov/clang.c 
!list_is_last(&fn_ptr2->head, &info2->functions)) { head 270 kernel/gcov/clang.c fn_ptr1 = list_next_entry(fn_ptr1, head); head 271 kernel/gcov/clang.c fn_ptr2 = list_next_entry(fn_ptr2, head); head 273 kernel/gcov/clang.c return list_is_last(&fn_ptr1->head, &info1->functions) && head 274 kernel/gcov/clang.c list_is_last(&fn_ptr2->head, &info2->functions); head 288 kernel/gcov/clang.c struct gcov_fn_info, head); head 290 kernel/gcov/clang.c list_for_each_entry(dfn_ptr, &dst->functions, head) { head 305 kernel/gcov/clang.c INIT_LIST_HEAD(&fn_dup->head); head 340 kernel/gcov/clang.c INIT_LIST_HEAD(&dup->head); head 346 kernel/gcov/clang.c list_for_each_entry(fn, &info->functions, head) { head 351 kernel/gcov/clang.c list_add_tail(&fn_dup->head, &dup->functions); head 369 kernel/gcov/clang.c list_for_each_entry_safe(fn, tmp, &info->functions, head) { head 372 kernel/gcov/clang.c list_del(&fn->head); head 461 kernel/gcov/clang.c list_for_each_entry(fi_ptr, &info->functions, head) { head 263 kernel/kexec_core.c image->head = 0; head 264 kernel/kexec_core.c image->entry = &image->head; head 265 kernel/kexec_core.c image->last_entry = &image->head; head 601 kernel/kexec_core.c for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ head 325 kernel/kprobes.c struct hlist_head *head; head 328 kernel/kprobes.c head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; head 329 kernel/kprobes.c hlist_for_each_entry_rcu(p, head, hlist) { head 839 kernel/kprobes.c struct hlist_head *head; head 851 kernel/kprobes.c head = &kprobe_table[i]; head 852 kernel/kprobes.c hlist_for_each_entry_rcu(p, head, hlist) head 864 kernel/kprobes.c struct hlist_head *head; head 878 kernel/kprobes.c head = &kprobe_table[i]; head 879 kernel/kprobes.c hlist_for_each_entry_rcu(p, head, hlist) { head 1179 kernel/kprobes.c struct hlist_head *head) head 1192 kernel/kprobes.c hlist_add_head(&ri->hlist, head); head 1197 kernel/kprobes.c struct hlist_head **head, unsigned long *flags) head 1203 kernel/kprobes.c *head = &kretprobe_inst_table[hash]; head 1248 kernel/kprobes.c struct hlist_head *head, empty_rp; head 1258 kernel/kprobes.c head = &kretprobe_inst_table[hash]; head 1260 kernel/kprobes.c hlist_for_each_entry_safe(ri, tmp, head, hlist) { head 1288 kernel/kprobes.c struct hlist_head *head; head 1293 kernel/kprobes.c head = &kretprobe_inst_table[hash]; head 1294 kernel/kprobes.c hlist_for_each_entry_safe(ri, next, head, hlist) { head 2223 kernel/kprobes.c struct hlist_head *head; head 2239 kernel/kprobes.c head = &kprobe_table[i]; head 2240 kernel/kprobes.c hlist_for_each_entry_rcu(p, head, hlist) head 2379 kernel/kprobes.c struct hlist_head *head; head 2386 kernel/kprobes.c head = &kprobe_table[i]; head 2388 kernel/kprobes.c hlist_for_each_entry_rcu(p, head, hlist) { head 2470 kernel/kprobes.c struct hlist_head *head; head 2489 kernel/kprobes.c head = &kprobe_table[i]; head 2491 kernel/kprobes.c hlist_for_each_entry_rcu(p, head, hlist) { head 2516 kernel/kprobes.c struct hlist_head *head; head 2532 kernel/kprobes.c head = &kprobe_table[i]; head 2534 kernel/kprobes.c hlist_for_each_entry_rcu(p, head, hlist) { head 983 kernel/locking/lockdep.c struct hlist_head *head; head 1010 kernel/locking/lockdep.c head = chainhash_table + i; head 1011 kernel/locking/lockdep.c hlist_for_each_entry_rcu(chain, head, entry) { head 1303 kernel/locking/lockdep.c struct lock_class *links_to, struct list_head *head, head 1325 kernel/locking/lockdep.c list_add_tail_rcu(&entry->entry, head); head 1468 kernel/locking/lockdep.c struct 
list_head *head; head 1478 kernel/locking/lockdep.c head = get_dep_list(source_entry, offset); head 1479 kernel/locking/lockdep.c if (list_empty(head)) head 1492 kernel/locking/lockdep.c head = get_dep_list(lock, offset); head 1496 kernel/locking/lockdep.c list_for_each_entry_rcu(entry, head, entry) { head 4837 kernel/locking/lockdep.c struct hlist_head *head; head 4841 kernel/locking/lockdep.c head = chainhash_table + i; head 4842 kernel/locking/lockdep.c hlist_for_each_entry_rcu(chain, head, entry) { head 4996 kernel/locking/lockdep.c struct hlist_head *head; head 5001 kernel/locking/lockdep.c head = classhash_table + i; head 5002 kernel/locking/lockdep.c hlist_for_each_entry_rcu(class, head, hash_entry) { head 5080 kernel/locking/lockdep.c struct hlist_head *head; head 5084 kernel/locking/lockdep.c head = classhash_table + i; head 5085 kernel/locking/lockdep.c hlist_for_each_entry_rcu(class, head, hash_entry) { head 130 kernel/notifier.c ret = notifier_chain_register(&nh->head, n); head 152 kernel/notifier.c ret = notifier_chain_unregister(&nh->head, n); head 185 kernel/notifier.c ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); head 226 kernel/notifier.c return notifier_chain_register(&nh->head, n); head 229 kernel/notifier.c ret = notifier_chain_register(&nh->head, n); head 252 kernel/notifier.c ret = notifier_chain_cond_register(&nh->head, n); head 279 kernel/notifier.c return notifier_chain_unregister(&nh->head, n); head 282 kernel/notifier.c ret = notifier_chain_unregister(&nh->head, n); head 317 kernel/notifier.c if (rcu_access_pointer(nh->head)) { head 319 kernel/notifier.c ret = notifier_call_chain(&nh->head, val, v, nr_to_call, head 352 kernel/notifier.c return notifier_chain_register(&nh->head, n); head 369 kernel/notifier.c return notifier_chain_unregister(&nh->head, n); head 396 kernel/notifier.c return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); head 434 kernel/notifier.c return notifier_chain_register(&nh->head, n); head 437 kernel/notifier.c ret = notifier_chain_register(&nh->head, n); head 464 kernel/notifier.c return notifier_chain_unregister(&nh->head, n); head 467 kernel/notifier.c ret = notifier_chain_unregister(&nh->head, n); head 500 kernel/notifier.c ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); head 530 kernel/notifier.c nh->head = NULL; head 23 kernel/power/console.c struct list_head head; head 51 kernel/power/console.c list_for_each_entry(tmp, &pm_vt_switch_list, head) { head 66 kernel/power/console.c list_add(&entry->head, &pm_vt_switch_list); head 83 kernel/power/console.c list_for_each_entry(tmp, &pm_vt_switch_list, head) { head 85 kernel/power/console.c list_del(&tmp->head); head 119 kernel/power/console.c list_for_each_entry(entry, &pm_vt_switch_list, head) { head 172 kernel/rcu/rcu.h static inline int debug_rcu_head_queue(struct rcu_head *head) head 176 kernel/rcu/rcu.h r1 = debug_object_activate(head, &rcuhead_debug_descr); head 177 kernel/rcu/rcu.h debug_object_active_state(head, &rcuhead_debug_descr, head 183 kernel/rcu/rcu.h static inline void debug_rcu_head_unqueue(struct rcu_head *head) head 185 kernel/rcu/rcu.h debug_object_active_state(head, &rcuhead_debug_descr, head 188 kernel/rcu/rcu.h debug_object_deactivate(head, &rcuhead_debug_descr); head 191 kernel/rcu/rcu.h static inline int debug_rcu_head_queue(struct rcu_head *head) head 196 kernel/rcu/rcu.h static inline void debug_rcu_head_unqueue(struct rcu_head *head)
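The __rcu_reclaim() lines that follow show how kfree_rcu() avoids a real callback: the offset of the rcu_head inside its enclosing object is smuggled through the function-pointer field, and reclaim distinguishes it from a genuine callback because function addresses are never that small. A sketch under that assumption (KFREE_OFFSET_MAX stands in for the kernel's actual small-offset test, and casting an integer through a function pointer is the same implementation-defined trick the kernel relies on):

```c
/* Offset-in-callback encoding: tiny "function pointers" are really
 * offsetof() values, letting reclaim recover and free the container. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct rcu_head { void (*func)(struct rcu_head *); };
struct widget { int payload; struct rcu_head rcu; };

#define KFREE_OFFSET_MAX 4096	/* assumed "cannot be code" bound */

static void reclaim(struct rcu_head *head)
{
	unsigned long offset = (unsigned long)head->func;

	if (offset < KFREE_OFFSET_MAX) {
		printf("kfree path, offset %lu\n", offset);
		free((char *)head - offset);	/* back to the widget */
	} else {
		head->func(head);		/* ordinary callback */
	}
}

int main(void)
{
	struct widget *w = malloc(sizeof(*w));

	/* What a kfree_rcu(w, rcu)-style call would store: */
	w->rcu.func = (void (*)(struct rcu_head *))
			offsetof(struct widget, rcu);
	reclaim(&w->rcu);			/* frees w */
	return 0;
}
```

head 207 kernel/rcu/rcu.h static inline bool __rcu_reclaim(const char *rn,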
struct rcu_head *head) head 210 kernel/rcu/rcu.h unsigned long offset = (unsigned long)head->func; head 214 kernel/rcu/rcu.h trace_rcu_invoke_kfree_callback(rn, head, offset); head 215 kernel/rcu/rcu.h kfree((void *)head - offset); head 219 kernel/rcu/rcu.h trace_rcu_invoke_callback(rn, head); head 220 kernel/rcu/rcu.h f = head->func; head 221 kernel/rcu/rcu.h WRITE_ONCE(head->func, (rcu_callback_t)0L); head 222 kernel/rcu/rcu.h f(head); head 20 kernel/rcu/rcu_segcblist.c rclp->head = NULL; head 21 kernel/rcu/rcu_segcblist.c rclp->tail = &rclp->head; head 51 kernel/rcu/rcu_segcblist.c drclp->head = srclp->head; head 52 kernel/rcu/rcu_segcblist.c if (drclp->head) head 55 kernel/rcu/rcu_segcblist.c drclp->tail = &drclp->head; head 62 kernel/rcu/rcu_segcblist.c srclp->head = rhp; head 80 kernel/rcu/rcu_segcblist.c rhp = rclp->head; head 84 kernel/rcu/rcu_segcblist.c rclp->head = rhp->next; head 85 kernel/rcu/rcu_segcblist.c if (!rclp->head) head 86 kernel/rcu/rcu_segcblist.c rclp->tail = &rclp->head; head 160 kernel/rcu/rcu_segcblist.c rsclp->head = NULL; head 162 kernel/rcu/rcu_segcblist.c rsclp->tails[i] = &rsclp->head; head 196 kernel/rcu/rcu_segcblist.c &rsclp->head != rsclp->tails[RCU_DONE_TAIL]; head 216 kernel/rcu/rcu_segcblist.c return rsclp->head; head 327 kernel/rcu/rcu_segcblist.c *rclp->tail = rsclp->head; head 328 kernel/rcu/rcu_segcblist.c WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]); head 333 kernel/rcu/rcu_segcblist.c WRITE_ONCE(rsclp->tails[i], &rsclp->head); head 379 kernel/rcu/rcu_segcblist.c if (!rclp->head) head 381 kernel/rcu/rcu_segcblist.c *rclp->tail = rsclp->head; head 382 kernel/rcu/rcu_segcblist.c WRITE_ONCE(rsclp->head, rclp->head); head 384 kernel/rcu/rcu_segcblist.c if (&rsclp->head == rsclp->tails[i]) head 388 kernel/rcu/rcu_segcblist.c rclp->head = NULL; head 389 kernel/rcu/rcu_segcblist.c rclp->tail = &rclp->head; head 399 kernel/rcu/rcu_segcblist.c if (!rclp->head) head 401 kernel/rcu/rcu_segcblist.c WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rclp->head); head 403 kernel/rcu/rcu_segcblist.c rclp->head = NULL; head 404 kernel/rcu/rcu_segcblist.c rclp->tail = &rclp->head; head 49 kernel/rcu/rcu_segcblist.h return !READ_ONCE(rsclp->head); head 137 kernel/rcu/rcuperf.c void (*async)(struct rcu_head *head, rcu_callback_t func); head 207 kernel/rcu/rcuperf.c static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func) head 209 kernel/rcu/rcuperf.c call_srcu(srcu_ctlp, head, func); head 482 kernel/rcu/rcutorture.c call_rcu_busted(struct rcu_head *head, rcu_callback_t func) head 485 kernel/rcu/rcutorture.c func(head); head 558 kernel/rcu/rcutorture.c static void srcu_torture_call(struct rcu_head *head, head 561 kernel/rcu/rcutorture.c call_srcu(srcu_ctlp, head, func); head 750 kernel/rcu/rcutorture.c static void rcu_torture_boost_cb(struct rcu_head *head) head 753 kernel/rcu/rcutorture.c container_of(head, struct rcu_boost_inflight, rcu); head 185 kernel/rcu/srcutiny.c init_rcu_head_on_stack(&rs.head); head 187 kernel/rcu/srcutiny.c call_srcu(ssp, &rs.head, wakeme_after_rcu); head 189 kernel/rcu/srcutiny.c destroy_rcu_head_on_stack(&rs.head); head 920 kernel/rcu/srcutree.c init_rcu_head_on_stack(&rcu.head); head 921 kernel/rcu/srcutree.c __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm); head 923 kernel/rcu/srcutree.c destroy_rcu_head_on_stack(&rcu.head); head 133 kernel/rcu/tiny.c void call_rcu(struct rcu_head *head, rcu_callback_t func) head 137 kernel/rcu/tiny.c debug_rcu_head_queue(head); head 138 kernel/rcu/tiny.c head->func = func; head 
head 139 kernel/rcu/tiny.c head->next = NULL; head 142 kernel/rcu/tiny.c *rcu_ctrlblk.curtail = head; head 143 kernel/rcu/tiny.c rcu_ctrlblk.curtail = &head->next; head 2188 kernel/rcu/tree.c trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), head 2500 kernel/rcu/tree.c static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, head 2534 kernel/rcu/tree.c rcu_segcblist_first_pend_cb(&rdp->cblist) != head) head 2556 kernel/rcu/tree.c __call_rcu(struct rcu_head *head, rcu_callback_t func, bool lazy) head 2563 kernel/rcu/tree.c WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); head 2565 kernel/rcu/tree.c if (debug_rcu_head_queue(head)) { head 2572 kernel/rcu/tree.c head, head->func); head 2573 kernel/rcu/tree.c WRITE_ONCE(head->func, rcu_leak_callback); head 2576 kernel/rcu/tree.c head->func = func; head 2577 kernel/rcu/tree.c head->next = NULL; head 2591 kernel/rcu/tree.c if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags)) head 2594 kernel/rcu/tree.c rcu_segcblist_enqueue(&rdp->cblist, head, lazy); head 2596 kernel/rcu/tree.c trace_rcu_kfree_callback(rcu_state.name, head, head 2601 kernel/rcu/tree.c trace_rcu_callback(rcu_state.name, head, head 2610 kernel/rcu/tree.c __call_rcu_core(rdp, head, flags); head 2650 kernel/rcu/tree.c void call_rcu(struct rcu_head *head, rcu_callback_t func) head 2652 kernel/rcu/tree.c __call_rcu(head, func, 0); head 2663 kernel/rcu/tree.c void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) head 2665 kernel/rcu/tree.c __call_rcu(head, func, 1); head 417 kernel/rcu/tree.h void call_rcu(struct rcu_head *head, rcu_callback_t func); head 334 kernel/rcu/update.c void wakeme_after_rcu(struct rcu_head *head) head 338 kernel/rcu/update.c rcu = container_of(head, struct rcu_synchronize, head); head 356 kernel/rcu/update.c init_rcu_head_on_stack(&rs_array[i].head); head 362 kernel/rcu/update.c (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu); head 375 kernel/rcu/update.c destroy_rcu_head_on_stack(&rs_array[i].head); head 381 kernel/rcu/update.c void init_rcu_head(struct rcu_head *head) head 383 kernel/rcu/update.c debug_object_init(head, &rcuhead_debug_descr); head 387 kernel/rcu/update.c void destroy_rcu_head(struct rcu_head *head) head 389 kernel/rcu/update.c debug_object_free(head, &rcuhead_debug_descr); head 408 kernel/rcu/update.c void init_rcu_head_on_stack(struct rcu_head *head) head 410 kernel/rcu/update.c debug_object_init_on_stack(head, &rcuhead_debug_descr); head 425 kernel/rcu/update.c void destroy_rcu_head_on_stack(struct rcu_head *head) head 427 kernel/rcu/update.c debug_object_free(head, &rcuhead_debug_descr); head 858 kernel/rcu/update.c static struct rcu_head head; head 861 kernel/rcu/update.c call_rcu(&head, test_callback); head 1637 kernel/resource.c void resource_list_free(struct list_head *head) head 1641 kernel/resource.c list_for_each_entry_safe(entry, tmp, head, node) head 413 kernel/sched/core.c static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) head 432 kernel/sched/core.c *head->lastp = node; head 433 kernel/sched/core.c head->lastp = &node->next; head 449 kernel/sched/core.c void wake_q_add(struct wake_q_head *head, struct task_struct *task) head 451 kernel/sched/core.c if (__wake_q_add(head, task)) head 472 kernel/sched/core.c void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) head 474 kernel/sched/core.c if (!__wake_q_add(head, task)) head 478 kernel/sched/core.c void wake_up_q(struct wake_q_head *head) head 480 kernel/sched/core.c struct 
wake_q_node *node = head->first; head 3265 kernel/sched/core.c struct callback_head *head, *next; head 3270 kernel/sched/core.c head = rq->balance_callback; head 3272 kernel/sched/core.c while (head) { head 3273 kernel/sched/core.c func = (void (*)(struct rq *))head->func; head 3274 kernel/sched/core.c next = head->next; head 3275 kernel/sched/core.c head->next = NULL; head 3276 kernel/sched/core.c head = next; head 1357 kernel/sched/rt.c requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) head 1363 kernel/sched/rt.c if (head) head 1370 kernel/sched/rt.c static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) head 1377 kernel/sched/rt.c requeue_rt_entity(rt_rq, rt_se, head); head 1619 kernel/sched/rt.c struct plist_head *head = &rq->rt.pushable_tasks; head 1625 kernel/sched/rt.c plist_for_each_entry(p, head, pushable_tasks) { head 1316 kernel/sched/sched.h struct callback_head *head, head 1321 kernel/sched/sched.h if (unlikely(head->next)) head 1324 kernel/sched/sched.h head->func = (void (*)(struct callback_head *))func; head 1325 kernel/sched/sched.h head->next = rq->balance_callback; head 1326 kernel/sched/sched.h rq->balance_callback = head; head 13 kernel/sched/wait.c INIT_LIST_HEAD(&wq_head->head); head 81 kernel/sched/wait.c curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry); head 83 kernel/sched/wait.c if (&curr->entry == &wq_head->head) head 86 kernel/sched/wait.c list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) { head 100 kernel/sched/wait.c (&next->entry != &wq_head->head)) { head 212 kernel/smp.c struct llist_head *head; head 219 kernel/smp.c head = this_cpu_ptr(&call_single_queue); head 220 kernel/smp.c entry = llist_del_all(head); head 225 kernel/smp.c !warned && !llist_empty(head))) { head 464 kernel/softirq.c struct tasklet_struct *head; head 475 kernel/softirq.c struct tasklet_head *head; head 479 kernel/softirq.c head = this_cpu_ptr(headp); head 481 kernel/softirq.c *head->tail = t; head 482 kernel/softirq.c head->tail = &(t->next); head 508 kernel/softirq.c list = tl_head->head; head 509 kernel/softirq.c tl_head->head = NULL; head 510 kernel/softirq.c tl_head->tail = &tl_head->head; head 581 kernel/softirq.c &per_cpu(tasklet_vec, cpu).head; head 583 kernel/softirq.c &per_cpu(tasklet_hi_vec, cpu).head; head 632 kernel/softirq.c for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { head 650 kernel/softirq.c if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { head 651 kernel/softirq.c *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; head 653 kernel/softirq.c per_cpu(tasklet_vec, cpu).head = NULL; head 654 kernel/softirq.c per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; head 658 kernel/softirq.c if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) { head 659 kernel/softirq.c *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head; head 661 kernel/softirq.c per_cpu(tasklet_hi_vec, cpu).head = NULL; head 662 kernel/softirq.c per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head; head 30 kernel/task_work.c struct callback_head *head; head 33 kernel/task_work.c head = READ_ONCE(task->task_works); head 34 kernel/task_work.c if (unlikely(head == &work_exited)) head 36 kernel/task_work.c work->next = head; head 37 kernel/task_work.c } while (cmpxchg(&task->task_works, head, work) != head); head 93 kernel/task_work.c struct callback_head *work, *head, *next;
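
The kernel/task_work.c hits above show the canonical lock-free push: snapshot the current head, point the new node at it, and retry the cmpxchg() until no other CPU has moved the head in the meantime. A sketch of that loop over a hypothetical shared stack (this is the shape of task_work_add(), not its actual code):

	#include <linux/compiler.h>
	#include <linux/atomic.h>

	struct lf_node {			/* hypothetical node type */
		struct lf_node *next;
	};

	static struct lf_node *lf_stack;	/* shared head pointer */

	static void lf_push(struct lf_node *node)
	{
		struct lf_node *head;

		do {
			head = READ_ONCE(lf_stack);	/* snapshot */
			node->next = head;		/* link in front */
			/* retry if another CPU won the race */
		} while (cmpxchg(&lf_stack, head, node) != head);
	}
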
head 103 kernel/task_work.c head = !work && (task->flags & PF_EXITING) ? head 105 kernel/task_work.c } while (cmpxchg(&task->task_works, work, head) != work); head 427 kernel/time/posix-cpu-timers.c WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node)); head 443 kernel/time/posix-cpu-timers.c static void cleanup_timerqueue(struct timerqueue_head *head) head 448 kernel/time/posix-cpu-timers.c while ((node = timerqueue_getnext(head))) { head 449 kernel/time/posix-cpu-timers.c timerqueue_del(head, node); head 451 kernel/time/posix-cpu-timers.c ctmr->head = NULL; head 763 kernel/time/posix-cpu-timers.c static u64 collect_timerqueue(struct timerqueue_head *head, head 769 kernel/time/posix-cpu-timers.c while ((next = timerqueue_getnext(head))) { head 117 kernel/time/posix-timers.c static struct k_itimer *__posix_timers_find(struct hlist_head *head, head 123 kernel/time/posix-timers.c hlist_for_each_entry_rcu(timer, head, t_hash) { head 133 kernel/time/posix-timers.c struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)]; head 135 kernel/time/posix-timers.c return __posix_timers_find(head, sig, id); head 142 kernel/time/posix-timers.c struct hlist_head *head; head 147 kernel/time/posix-timers.c head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)]; head 148 kernel/time/posix-timers.c if (!__posix_timers_find(head, sig, sig->posix_timer_id)) { head 149 kernel/time/posix-timers.c hlist_add_head_rcu(&timer->t_hash, head); head 443 kernel/time/posix-timers.c static void k_itimer_rcu_free(struct rcu_head *head) head 445 kernel/time/posix-timers.c struct k_itimer *tmr = container_of(head, struct k_itimer, rcu); head 1422 kernel/time/timer.c static void expire_timers(struct timer_base *base, struct hlist_head *head) head 1431 kernel/time/timer.c while (!hlist_empty(head)) { head 1435 kernel/time/timer.c timer = hlist_entry(head->first, struct timer_list, entry); head 1945 kernel/time/timer.c static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head) head 1950 kernel/time/timer.c while (!hlist_empty(head)) { head 1951 kernel/time/timer.c timer = hlist_entry(head->first, struct timer_list, entry); head 1231 kernel/trace/ftrace.c static void clear_ftrace_mod_list(struct list_head *head) head 1236 kernel/trace/ftrace.c if (!head) head 1240 kernel/trace/ftrace.c list_for_each_entry_safe(p, n, head, list) head 3930 kernel/trace/ftrace.c struct list_head *head = enable ? 
&tr->mod_trace : &tr->mod_notrace; head 3941 kernel/trace/ftrace.c list_for_each_entry_safe(ftrace_mod, n, head, list) { head 3975 kernel/trace/ftrace.c static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, head 3998 kernel/trace/ftrace.c list_for_each_entry_safe(ftrace_mod, n, head, list) { head 4030 kernel/trace/ftrace.c if (enable && list_empty(head)) head 5130 kernel/trace/ftrace.c struct hlist_head *head; head 5146 kernel/trace/ftrace.c head = &fgd->hash->buckets[i]; head 5147 kernel/trace/ftrace.c hlist_for_each_entry(entry, head, hlist) { head 507 kernel/trace/ring_buffer.c unsigned long head; head 914 kernel/trace/ring_buffer.c struct buffer_page *head; head 916 kernel/trace/ring_buffer.c head = cpu_buffer->head_page; head 917 kernel/trace/ring_buffer.c if (!head) head 923 kernel/trace/ring_buffer.c rb_set_list_to_head(cpu_buffer, head->list.prev); head 949 kernel/trace/ring_buffer.c struct buffer_page *head, head 954 kernel/trace/ring_buffer.c unsigned long val = (unsigned long)&head->list; head 972 kernel/trace/ring_buffer.c struct buffer_page *head, head 976 kernel/trace/ring_buffer.c return rb_head_page_set(cpu_buffer, head, prev, head 981 kernel/trace/ring_buffer.c struct buffer_page *head, head 985 kernel/trace/ring_buffer.c return rb_head_page_set(cpu_buffer, head, prev, head 990 kernel/trace/ring_buffer.c struct buffer_page *head, head 994 kernel/trace/ring_buffer.c return rb_head_page_set(cpu_buffer, head, prev, head 1009 kernel/trace/ring_buffer.c struct buffer_page *head; head 1022 kernel/trace/ring_buffer.c page = head = cpu_buffer->head_page; head 1036 kernel/trace/ring_buffer.c } while (page != head); head 1156 kernel/trace/ring_buffer.c struct list_head *head = cpu_buffer->pages; head 1165 kernel/trace/ring_buffer.c if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) head 1167 kernel/trace/ring_buffer.c if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) head 1170 kernel/trace/ring_buffer.c if (rb_check_list(cpu_buffer, head)) head 1173 kernel/trace/ring_buffer.c list_for_each_entry_safe(bpage, tmp, head, list) { head 1348 kernel/trace/ring_buffer.c struct list_head *head = cpu_buffer->pages; head 1355 kernel/trace/ring_buffer.c if (head) { head 1356 kernel/trace/ring_buffer.c list_for_each_entry_safe(bpage, tmp, head, list) { head 1360 kernel/trace/ring_buffer.c bpage = list_entry(head, struct buffer_page, list); head 1920 kernel/trace/ring_buffer.c return __rb_page_index(iter->head_page, iter->head); head 1964 kernel/trace/ring_buffer.c iter->head = 0; head 3174 kernel/trace/ring_buffer.c struct buffer_page *head = rb_set_head_page(cpu_buffer); head 3178 kernel/trace/ring_buffer.c if (unlikely(!head)) head 3183 kernel/trace/ring_buffer.c (commit == head && head 3184 kernel/trace/ring_buffer.c head->read == rb_page_commit(commit))); head 3550 kernel/trace/ring_buffer.c iter->head = cpu_buffer->reader_page->read; head 3555 kernel/trace/ring_buffer.c if (iter->head) head 3604 kernel/trace/ring_buffer.c return ((iter->head_page == commit_page && iter->head == commit) || head 3607 kernel/trace/ring_buffer.c iter->head == rb_page_commit(cpu_buffer->reader_page))); head 3840 kernel/trace/ring_buffer.c if (iter->head >= rb_page_size(iter->head_page)) { head 3858 kernel/trace/ring_buffer.c (iter->head + length > rb_commit_index(cpu_buffer)))) head 3863 kernel/trace/ring_buffer.c iter->head += length; head 3866 kernel/trace/ring_buffer.c if ((iter->head >= rb_page_size(iter->head_page)) && head 3990 kernel/trace/ring_buffer.c if (iter->head >= 
rb_page_size(iter->head_page)) { head 155 kernel/trace/trace.c struct trace_eval_map_head head; head 5353 kernel/trace/trace.c return ptr + ptr->head.length + 1; head 5393 kernel/trace/trace.c map_array->head.mod = mod; head 5394 kernel/trace/trace.c map_array->head.length = len; head 8738 kernel/trace/trace.c if (map->head.mod == mod) head 434 kernel/trace/trace_event_perf.c struct hlist_head head; head 449 kernel/trace/trace_event_perf.c head.first = &event->hlist_entry; head 466 kernel/trace/trace_event_perf.c 1, &regs, &head, NULL); head 75 kernel/trace/trace_events.c __find_event_field(struct list_head *head, char *name) head 79 kernel/trace/trace_events.c list_for_each_entry(field, head, link) { head 91 kernel/trace/trace_events.c struct list_head *head; head 93 kernel/trace/trace_events.c head = trace_get_fields(call); head 94 kernel/trace/trace_events.c field = __find_event_field(head, name); head 105 kernel/trace/trace_events.c static int __trace_define_field(struct list_head *head, const char *type, head 127 kernel/trace/trace_events.c list_add(&field->link, head); head 136 kernel/trace/trace_events.c struct list_head *head; head 141 kernel/trace/trace_events.c head = trace_get_fields(call); head 142 kernel/trace/trace_events.c return __trace_define_field(head, type, name, offset, size, head 191 kernel/trace/trace_events.c struct list_head *head; head 193 kernel/trace/trace_events.c head = trace_get_fields(call); head 194 kernel/trace/trace_events.c list_for_each_entry_safe(field, next, head, link) { head 207 kernel/trace/trace_events.c struct list_head *head; head 209 kernel/trace/trace_events.c head = trace_get_fields(call); head 214 kernel/trace/trace_events.c tail = list_first_entry(head, struct ftrace_event_field, link); head 1187 kernel/trace/trace_events.c struct list_head *head = trace_get_fields(call); head 1198 kernel/trace/trace_events.c node = head; head 1209 kernel/trace/trace_events.c else if (node == head) head 1953 kernel/trace/trace_events.c struct list_head *head; head 1991 kernel/trace/trace_events.c head = trace_get_fields(call); head 1992 kernel/trace/trace_events.c if (list_empty(head)) { head 1378 kernel/trace/trace_kprobe.c struct hlist_head *head; head 1399 kernel/trace/trace_kprobe.c head = this_cpu_ptr(call->perf_events); head 1400 kernel/trace/trace_kprobe.c if (hlist_empty(head)) head 1416 kernel/trace/trace_kprobe.c head, NULL); head 1428 kernel/trace/trace_kprobe.c struct hlist_head *head; head 1435 kernel/trace/trace_kprobe.c head = this_cpu_ptr(call->perf_events); head 1436 kernel/trace/trace_kprobe.c if (hlist_empty(head)) head 1452 kernel/trace/trace_kprobe.c head, NULL); head 587 kernel/trace/trace_syscalls.c struct hlist_head *head; head 604 kernel/trace/trace_syscalls.c head = this_cpu_ptr(sys_data->enter_event->perf_events); head 606 kernel/trace/trace_syscalls.c if (!valid_prog_array && hlist_empty(head)) head 624 kernel/trace/trace_syscalls.c hlist_empty(head)) { head 631 kernel/trace/trace_syscalls.c head, NULL); head 687 kernel/trace/trace_syscalls.c struct hlist_head *head; head 703 kernel/trace/trace_syscalls.c head = this_cpu_ptr(sys_data->exit_event->perf_events); head 705 kernel/trace/trace_syscalls.c if (!valid_prog_array && hlist_empty(head)) head 721 kernel/trace/trace_syscalls.c hlist_empty(head)) { head 727 kernel/trace/trace_syscalls.c 1, regs, head, NULL); head 1331 kernel/trace/trace_uprobe.c struct hlist_head *head; head 1347 kernel/trace/trace_uprobe.c head = this_cpu_ptr(call->perf_events);
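
The trace_kprobe/trace_syscalls/trace_uprobe handlers above all start the same way: fetch the per-CPU hlist bucket with this_cpu_ptr() and return early when it is empty, so unregistered events cost almost nothing. A sketch with a hypothetical statically declared per-CPU list (the real perf_events lists are allocated dynamically):

	#include <linux/percpu.h>
	#include <linux/list.h>

	static DEFINE_PER_CPU(struct hlist_head, my_events);

	static void my_handler(void)
	{
		struct hlist_head *head;

		/* bucket belonging to the CPU we are running on;
		 * the caller is assumed to have preemption disabled */
		head = this_cpu_ptr(&my_events);
		if (hlist_empty(head))
			return;		/* nothing registered on this CPU */

		/* ... walk the list as the perf handlers above do ... */
	}
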
head 1348 kernel/trace/trace_uprobe.c if (hlist_empty(head)) head 1373 kernel/trace/trace_uprobe.c head, NULL); head 63 kernel/tracepoint.c static void srcu_free_old_probes(struct rcu_head *head) head 65 kernel/tracepoint.c kfree(container_of(head, struct tp_probes, rcu)); head 68 kernel/tracepoint.c static void rcu_free_old_probes(struct rcu_head *head) head 70 kernel/tracepoint.c call_srcu(&tracepoint_srcu, head, srcu_free_old_probes); head 34 kernel/ucount.c static int set_permissions(struct ctl_table_header *head, head 38 kernel/ucount.c container_of(head->set, struct user_namespace, set); head 39 kernel/user-return-notifier.c struct hlist_head *head; head 41 kernel/user-return-notifier.c head = &get_cpu_var(return_notifier_list); head 42 kernel/user-return-notifier.c hlist_for_each_entry_safe(urn, tmp2, head, link) head 1059 kernel/workqueue.c static void move_linked_works(struct work_struct *work, struct list_head *head, head 1069 kernel/workqueue.c list_move_tail(&work->entry, head); head 1326 kernel/workqueue.c struct list_head *head, unsigned int extra_flags) head 1332 kernel/workqueue.c list_add_tail(&work->entry, head); head 2657 kernel/workqueue.c struct list_head *head; head 2678 kernel/workqueue.c head = worker->scheduled.next; head 2682 kernel/workqueue.c head = target->entry.next; head 2689 kernel/workqueue.c insert_work(pwq, &barr->work, head, head 1301 lib/assoc_array.c static void assoc_array_rcu_cleanup(struct rcu_head *head) head 1304 lib/assoc_array.c container_of(head, struct assoc_array_edit, rcu); head 93 lib/btree.c static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp) head 97 lib/btree.c node = mempool_alloc(head->mempool, gfp); head 176 lib/btree.c static inline void __btree_init(struct btree_head *head) head 178 lib/btree.c head->node = NULL; head 179 lib/btree.c head->height = 0; head 182 lib/btree.c void btree_init_mempool(struct btree_head *head, mempool_t *mempool) head 184 lib/btree.c __btree_init(head); head 185 lib/btree.c head->mempool = mempool; head 189 lib/btree.c int btree_init(struct btree_head *head) head 191 lib/btree.c __btree_init(head); head 192 lib/btree.c head->mempool = mempool_create(0, btree_alloc, btree_free, NULL); head 193 lib/btree.c if (!head->mempool) head 199 lib/btree.c void btree_destroy(struct btree_head *head) head 201 lib/btree.c mempool_free(head->node, head->mempool); head 202 lib/btree.c mempool_destroy(head->mempool); head 203 lib/btree.c head->mempool = NULL; head 207 lib/btree.c void *btree_last(struct btree_head *head, struct btree_geo *geo, head 210 lib/btree.c int height = head->height; head 211 lib/btree.c unsigned long *node = head->node; head 241 lib/btree.c void *btree_lookup(struct btree_head *head, struct btree_geo *geo, head 244 lib/btree.c int i, height = head->height; head 245 lib/btree.c unsigned long *node = head->node; head 271 lib/btree.c int btree_update(struct btree_head *head, struct btree_geo *geo, head 274 lib/btree.c int i, height = head->height; head 275 lib/btree.c unsigned long *node = head->node; head 311 lib/btree.c void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, head 321 lib/btree.c if (head->height == 0) head 327 lib/btree.c node = head->node; head 328 lib/btree.c for (height = head->height ; height > 1; height--) { head 388 lib/btree.c static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo, head 391 lib/btree.c unsigned long *node = head->node; head 394 lib/btree.c for (height = head->height; height > level; height--) {
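
lib/btree.c, whose entry points are indexed above, is a small in-memory B+tree: btree_init() sets up the node mempool, and lookup/insert/remove take a btree_geo describing the key width alongside a pointer to the key. A hedged module-context sketch, assuming btree_geo32 (keys occupying a single machine word) suits the key; check include/linux/btree.h and its typed wrappers before relying on this:

	#include <linux/btree.h>
	#include <linux/gfp.h>

	static struct btree_head tree;
	static int vdata = 1234;		/* arbitrary stored value */

	static int btree_demo(void)
	{
		unsigned long key = 42;
		void *val;
		int err;

		err = btree_init(&tree);	/* allocates the mempool */
		if (err)
			return err;

		/* keys are passed by address; the geo picks their width */
		err = btree_insert(&tree, &btree_geo32, &key, &vdata,
				   GFP_KERNEL);
		if (!err) {
			val = btree_lookup(&tree, &btree_geo32, &key);
			/* val == &vdata on success, NULL if absent */
		}

		btree_destroy(&tree);		/* frees nodes and mempool */
		return err;
	}
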
head 413 lib/btree.c static int btree_grow(struct btree_head *head, struct btree_geo *geo, head 419 lib/btree.c node = btree_node_alloc(head, gfp); head 422 lib/btree.c if (head->node) { head 423 lib/btree.c fill = getfill(geo, head->node, 0); head 424 lib/btree.c setkey(geo, node, 0, bkey(geo, head->node, fill - 1)); head 425 lib/btree.c setval(geo, node, 0, head->node); head 427 lib/btree.c head->node = node; head 428 lib/btree.c head->height++; head 432 lib/btree.c static void btree_shrink(struct btree_head *head, struct btree_geo *geo) head 437 lib/btree.c if (head->height <= 1) head 440 lib/btree.c node = head->node; head 443 lib/btree.c head->node = bval(geo, node, 0); head 444 lib/btree.c head->height--; head 445 lib/btree.c mempool_free(node, head->mempool); head 448 lib/btree.c static int btree_insert_level(struct btree_head *head, struct btree_geo *geo, head 456 lib/btree.c if (head->height < level) { head 457 lib/btree.c err = btree_grow(head, geo, gfp); head 463 lib/btree.c node = find_level(head, geo, key, level); head 473 lib/btree.c new = btree_node_alloc(head, gfp); head 476 lib/btree.c err = btree_insert_level(head, geo, head 480 lib/btree.c mempool_free(new, head->mempool); head 510 lib/btree.c int btree_insert(struct btree_head *head, struct btree_geo *geo, head 514 lib/btree.c return btree_insert_level(head, geo, key, val, 1, gfp); head 518 lib/btree.c static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, head 520 lib/btree.c static void merge(struct btree_head *head, struct btree_geo *geo, int level, head 536 lib/btree.c btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1); head 537 lib/btree.c mempool_free(right, head->mempool); head 540 lib/btree.c static void rebalance(struct btree_head *head, struct btree_geo *geo, head 551 lib/btree.c btree_remove_level(head, geo, key, level + 1); head 552 lib/btree.c mempool_free(child, head->mempool); head 556 lib/btree.c parent = find_level(head, geo, key, level + 1); head 564 lib/btree.c merge(head, geo, level, head 575 lib/btree.c merge(head, geo, level, head 591 lib/btree.c static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, head 598 lib/btree.c if (level > head->height) { head 600 lib/btree.c head->height = 0; head 601 lib/btree.c head->node = NULL; head 605 lib/btree.c node = find_level(head, geo, key, level); head 620 lib/btree.c if (level < head->height) head 621 lib/btree.c rebalance(head, geo, key, level, node, fill - 1); head 623 lib/btree.c btree_shrink(head, geo); head 629 lib/btree.c void *btree_remove(struct btree_head *head, struct btree_geo *geo, head 632 lib/btree.c if (head->height == 0) head 635 lib/btree.c return btree_remove_level(head, geo, key, 1); head 676 lib/btree.c static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo, head 691 lib/btree.c count = __btree_for_each(head, geo, child, opaque, head 698 lib/btree.c mempool_free(node, head->mempool); head 746 lib/btree.c size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, head 757 lib/btree.c if (head->node) head 758 lib/btree.c count = __btree_for_each(head, geo, head->node, opaque, func, head 759 lib/btree.c func2, 0, head->height, 0); head 764 lib/btree.c size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, head 775 lib/btree.c if (head->node) head 776 lib/btree.c count = __btree_for_each(head, geo, head->node, opaque, func, head 777 lib/btree.c func2, 1, head->height, 0); head 778 lib/btree.c __btree_init(head); head 75 lib/digsig.c 
int head, i; head 161 lib/digsig.c head = len - l; head 162 lib/digsig.c memset(out1, 0, head); head 163 lib/digsig.c memcpy(out1 + head, p, l); head 865 lib/iov_iter.c struct page *head; head 878 lib/iov_iter.c head = compound_head(page); head 879 lib/iov_iter.c v += (page - head) << PAGE_SHIFT; head 881 lib/iov_iter.c if (likely(n <= v && v <= (page_size(head)))) head 22 lib/list_sort.c struct list_head *head, **tail = &head; head 44 lib/list_sort.c return head; head 55 lib/list_sort.c static void merge_final(void *priv, cmp_func cmp, struct list_head *head, head 58 lib/list_sort.c struct list_head *tail = head; head 100 lib/list_sort.c tail->next = head; head 101 lib/list_sort.c head->prev = tail; head 188 lib/list_sort.c void list_sort(void *priv, struct list_head *head, head 192 lib/list_sort.c struct list_head *list = head->next, *pending = NULL; head 195 lib/list_sort.c if (list == head->prev) /* Zero or one elements */ head 199 lib/list_sort.c head->prev->next = NULL; head 256 lib/list_sort.c merge_final(priv, (cmp_func)cmp, head, pending, list); head 27 lib/llist.c struct llist_head *head) head 32 lib/llist.c new_last->next = first = READ_ONCE(head->first); head 33 lib/llist.c } while (cmpxchg(&head->first, first, new_first) != first); head 53 lib/llist.c struct llist_node *llist_del_first(struct llist_head *head) head 57 lib/llist.c entry = smp_load_acquire(&head->first); head 63 lib/llist.c entry = cmpxchg(&head->first, old_entry, next); head 79 lib/llist.c struct llist_node *llist_reverse_order(struct llist_node *head) head 83 lib/llist.c while (head) { head 84 lib/llist.c struct llist_node *tmp = head; head 85 lib/llist.c head = head->next; head 70 lib/nlattr.c static int nla_validate_array(const struct nlattr *head, int len, int maxtype, head 78 lib/nlattr.c nla_for_each_attr(entry, head, len, rem) { head 357 lib/nlattr.c static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype, head 369 lib/nlattr.c nla_for_each_attr(nla, head, len, rem) { head 419 lib/nlattr.c int __nla_validate(const struct nlattr *head, int len, int maxtype, head 423 lib/nlattr.c return __nla_validate_parse(head, len, maxtype, policy, validate, head 474 lib/nlattr.c const struct nlattr *head, int len, head 478 lib/nlattr.c return __nla_validate_parse(head, len, maxtype, policy, validate, head 491 lib/nlattr.c struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype) head 496 lib/nlattr.c nla_for_each_attr(nla, head, len, rem) head 56 lib/plist.c static void plist_check_head(struct plist_head *head) head 58 lib/plist.c if (!plist_head_empty(head)) head 59 lib/plist.c plist_check_list(&plist_first(head)->prio_list); head 60 lib/plist.c plist_check_list(&head->node_list); head 73 lib/plist.c void plist_add(struct plist_node *node, struct plist_head *head) head 76 lib/plist.c struct list_head *node_next = &head->node_list; head 78 lib/plist.c plist_check_head(head); head 82 lib/plist.c if (plist_head_empty(head)) head 85 lib/plist.c first = iter = plist_first(head); head 103 lib/plist.c plist_check_head(head); head 112 lib/plist.c void plist_del(struct plist_node *node, struct plist_head *head) head 114 lib/plist.c plist_check_head(head); head 117 lib/plist.c if (node->node_list.next != &head->node_list) { head 132 lib/plist.c plist_check_head(head); head 145 lib/plist.c void plist_requeue(struct plist_node *node, struct plist_head *head) head 148 lib/plist.c struct list_head *node_next = &head->node_list; head 150 lib/plist.c plist_check_head(head);
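
lib/list_sort.c and its test above fix the calling convention for sorting intrusive lists: the caller's cmp() receives two list_head pointers, maps them back to the containing elements, and returns a positive value when a should sort after b; list_sort() then relinks the nodes in place (a stable merge sort). A sketch with a hypothetical element type, matching the non-const prototype shown above:

	#include <linux/list.h>
	#include <linux/list_sort.h>

	struct item {				/* hypothetical element */
		int weight;
		struct list_head list;
	};

	/* >0 means a sorts after b; subtraction is fine for small keys */
	static int item_cmp(void *priv, struct list_head *a,
			    struct list_head *b)
	{
		struct item *ia = container_of(a, struct item, list);
		struct item *ib = container_of(b, struct item, list);

		return ia->weight - ib->weight;
	}

	static void sort_items(struct list_head *head)
	{
		/* priv is handed through to the callback; unused here */
		list_sort(NULL, head, item_cmp);
	}
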
head 151 lib/plist.c BUG_ON(plist_head_empty(head)); head 154 lib/plist.c if (node == plist_last(head)) head 162 lib/plist.c plist_del(node, head); head 164 lib/plist.c plist_for_each_continue(iter, head) { head 172 lib/plist.c plist_check_head(head); head 299 lib/radix-tree.c void radix_tree_node_rcu_free(struct rcu_head *head) head 302 lib/radix-tree.c container_of(head, struct radix_tree_node, rcu_head); head 108 lib/rhashtable.c static void bucket_table_free_rcu(struct rcu_head *head) head 110 lib/rhashtable.c bucket_table_free(container_of(head, struct bucket_table, rcu)); head 222 lib/rhashtable.c struct rhash_head *head, *next, *entry; head 249 lib/rhashtable.c head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash); head 251 lib/rhashtable.c RCU_INIT_POINTER(entry->next, head); head 488 lib/rhashtable.c struct rhash_head *head; head 492 lib/rhashtable.c rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { head 499 lib/rhashtable.c ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) : head 500 lib/rhashtable.c rhashtable_compare(&arg, rht_obj(ht, head)))) { head 501 lib/rhashtable.c pprev = &head->next; head 506 lib/rhashtable.c return rht_obj(ht, head); head 509 lib/rhashtable.c plist = container_of(head, struct rhlist_head, rhead); head 512 lib/rhashtable.c head = rht_dereference_bucket(head->next, tbl, hash); head 513 lib/rhashtable.c RCU_INIT_POINTER(list->rhead.next, head); head 537 lib/rhashtable.c struct rhash_head *head; head 558 lib/rhashtable.c head = rht_ptr(bkt, tbl, hash); head 560 lib/rhashtable.c RCU_INIT_POINTER(obj->next, head); head 75 lib/test_list_sort.c LIST_HEAD(head); head 94 lib/test_list_sort.c list_add_tail(&el->list, &head); head 97 lib/test_list_sort.c list_sort(NULL, &head, cmp); head 100 lib/test_list_sort.c for (cur = head.next; cur->next != &head; cur = cur->next) { head 129 lib/test_list_sort.c if (head.prev != cur) { head 27 lib/timerqueue.c bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) head 29 lib/timerqueue.c struct rb_node **p = &head->rb_root.rb_root.rb_node; head 48 lib/timerqueue.c rb_insert_color_cached(&node->node, &head->rb_root, leftmost); head 63 lib/timerqueue.c bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) head 67 lib/timerqueue.c rb_erase_cached(&node->node, &head->rb_root); head 70 lib/timerqueue.c return !RB_EMPTY_ROOT(&head->rb_root.rb_root); head 250 lib/xarray.c extern void radix_tree_node_rcu_free(struct rcu_head *head); head 555 lib/xarray.c static int xas_expand(struct xa_state *xas, void *head) head 562 lib/xarray.c if (!head) { head 568 lib/xarray.c } else if (xa_is_node(head)) { head 569 lib/xarray.c node = xa_to_node(head); head 574 lib/xarray.c while (max > max_index(head)) { head 583 lib/xarray.c if (xa_is_value(head)) head 585 lib/xarray.c RCU_INIT_POINTER(node->slots[0], head); head 607 lib/xarray.c if (xa_is_node(head)) { head 608 lib/xarray.c xa_to_node(head)->offset = 0; head 609 lib/xarray.c rcu_assign_pointer(xa_to_node(head)->parent, node); head 611 lib/xarray.c head = xa_mk_node(node); head 612 lib/xarray.c rcu_assign_pointer(xa->xa_head, head); head 155 lib/zlib_deflate/deflate.c s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \ head 156 lib/zlib_deflate/deflate.c s->head[s->ins_h] = (Pos)(str)) head 163 lib/zlib_deflate/deflate.c s->head[s->hash_size-1] = NIL; \ head 164 lib/zlib_deflate/deflate.c memset((char *)s->head, 0, (unsigned)(s->hash_size-1)*sizeof(*s->head)); head 234 lib/zlib_deflate/deflate.c s->head = (Pos *) mem->head_memory; head 
773 lib/zlib_deflate/deflate.c p = &s->head[n]; head 110 lib/zlib_deflate/defutil.h Pos *head; /* Heads of the hash chains or NIL. */ head 1465 mm/gup.c struct page *head = compound_head(pages[i]); head 1471 mm/gup.c step = compound_nr(head) - (pages[i] - head); head 1477 mm/gup.c if (is_migrate_cma_page(head)) { head 1478 mm/gup.c if (PageHuge(head)) head 1479 mm/gup.c isolate_huge_page(head, &cma_page_list); head 1481 mm/gup.c if (!PageLRU(head) && drain_allow) { head 1486 mm/gup.c if (!isolate_lru_page(head)) { head 1487 mm/gup.c list_add_tail(&head->lru, &cma_page_list); head 1488 mm/gup.c mod_node_page_state(page_pgdat(head), head 1490 mm/gup.c page_is_file_cache(head), head 1491 mm/gup.c hpage_nr_pages(head)); head 1817 mm/gup.c struct page *head = compound_head(page); head 1818 mm/gup.c if (WARN_ON_ONCE(page_ref_count(head) < 0)) head 1820 mm/gup.c if (unlikely(!page_cache_add_speculative(head, refs))) head 1822 mm/gup.c return head; head 1836 mm/gup.c struct page *head, *page; head 1863 mm/gup.c head = try_get_compound_head(page, 1); head 1864 mm/gup.c if (!head) head 1868 mm/gup.c put_page(head); head 1872 mm/gup.c VM_BUG_ON_PAGE(compound_head(page) != head, page); head 1995 mm/gup.c struct page *head, *page; head 2012 mm/gup.c head = pte_page(pte); head 2014 mm/gup.c page = head + ((addr & (sz-1)) >> PAGE_SHIFT); head 2016 mm/gup.c VM_BUG_ON(compound_head(page) != head); head 2023 mm/gup.c head = try_get_compound_head(head, refs); head 2024 mm/gup.c if (!head) { head 2033 mm/gup.c put_page(head); head 2037 mm/gup.c SetPageReferenced(head); head 2071 mm/gup.c struct page *head, *page; head 2092 mm/gup.c head = try_get_compound_head(pmd_page(orig), refs); head 2093 mm/gup.c if (!head) { head 2101 mm/gup.c put_page(head); head 2105 mm/gup.c SetPageReferenced(head); head 2112 mm/gup.c struct page *head, *page; head 2133 mm/gup.c head = try_get_compound_head(pud_page(orig), refs); head 2134 mm/gup.c if (!head) { head 2142 mm/gup.c put_page(head); head 2146 mm/gup.c SetPageReferenced(head); head 2155 mm/gup.c struct page *head, *page; head 2170 mm/gup.c head = try_get_compound_head(pgd_page(orig), refs); head 2171 mm/gup.c if (!head) { head 2179 mm/gup.c put_page(head); head 2183 mm/gup.c SetPageReferenced(head); head 2436 mm/huge_memory.c static void __split_huge_page_tail(struct page *head, int tail, head 2439 mm/huge_memory.c struct page *page_tail = head + tail; head 2450 mm/huge_memory.c page_tail->flags |= (head->flags & head 2465 mm/huge_memory.c page_tail->mapping = head->mapping; head 2466 mm/huge_memory.c page_tail->index = head->index + tail; head 2480 mm/huge_memory.c page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || head 2481 mm/huge_memory.c PageSwapCache(head))); head 2483 mm/huge_memory.c if (page_is_young(head)) head 2485 mm/huge_memory.c if (page_is_idle(head)) head 2488 mm/huge_memory.c page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); head 2495 mm/huge_memory.c lru_add_page_tail(head, page_tail, lruvec, list); head 2501 mm/huge_memory.c struct page *head = compound_head(page); head 2502 mm/huge_memory.c pg_data_t *pgdat = page_pgdat(head); head 2508 mm/huge_memory.c lruvec = mem_cgroup_page_lruvec(head, pgdat); head 2511 mm/huge_memory.c mem_cgroup_split_huge_fixup(head); head 2513 mm/huge_memory.c if (PageAnon(head) && PageSwapCache(head)) { head 2514 mm/huge_memory.c swp_entry_t entry = { .val = page_private(head) }; head 2522 mm/huge_memory.c __split_huge_page_tail(head, i, lruvec, list); head 2524 mm/huge_memory.c if (head[i].index >= end) { head 2525 
mm/huge_memory.c ClearPageDirty(head + i); head 2526 mm/huge_memory.c __delete_from_page_cache(head + i, NULL); head 2527 mm/huge_memory.c if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) head 2528 mm/huge_memory.c shmem_uncharge(head->mapping->host, 1); head 2529 mm/huge_memory.c put_page(head + i); head 2531 mm/huge_memory.c __xa_store(&head->mapping->i_pages, head[i].index, head 2532 mm/huge_memory.c head + i, 0); head 2535 mm/huge_memory.c head + i, 0); head 2539 mm/huge_memory.c ClearPageCompound(head); head 2541 mm/huge_memory.c split_page_owner(head, HPAGE_PMD_ORDER); head 2544 mm/huge_memory.c if (PageAnon(head)) { head 2546 mm/huge_memory.c if (PageSwapCache(head)) { head 2547 mm/huge_memory.c page_ref_add(head, 2); head 2550 mm/huge_memory.c page_ref_inc(head); head 2554 mm/huge_memory.c page_ref_add(head, 2); head 2555 mm/huge_memory.c xa_unlock(&head->mapping->i_pages); head 2560 mm/huge_memory.c remap_page(head); head 2563 mm/huge_memory.c struct page *subpage = head + i; head 2696 mm/huge_memory.c struct page *head = compound_head(page); head 2697 mm/huge_memory.c struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); head 2706 mm/huge_memory.c VM_BUG_ON_PAGE(is_huge_zero_page(head), head); head 2713 mm/huge_memory.c if (PageAnon(head)) { head 2722 mm/huge_memory.c anon_vma = page_get_anon_vma(head); head 2731 mm/huge_memory.c mapping = head->mapping; head 2756 mm/huge_memory.c if (!can_split_huge_page(head, &extra_pins)) { head 2762 mm/huge_memory.c unmap_page(head); head 2763 mm/huge_memory.c VM_BUG_ON_PAGE(compound_mapcount(head), head); head 2773 mm/huge_memory.c XA_STATE(xas, &mapping->i_pages, page_index(head)); head 2780 mm/huge_memory.c if (xas_load(&xas) != head) head 2786 mm/huge_memory.c count = page_count(head); head 2787 mm/huge_memory.c mapcount = total_mapcount(head); head 2788 mm/huge_memory.c if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) { head 2789 mm/huge_memory.c if (!list_empty(page_deferred_list(head))) { head 2791 mm/huge_memory.c list_del(page_deferred_list(head)); head 2802 mm/huge_memory.c if (PageSwapCache(head)) { head 2803 mm/huge_memory.c swp_entry_t entry = { .val = page_private(head) }; head 2813 mm/huge_memory.c dump_page(head, NULL); head 2821 mm/huge_memory.c remap_page(head); head 264 mm/hugetlb.c struct list_head *head = &resv->regions; head 270 mm/hugetlb.c list_for_each_entry(rg, head, link) head 280 mm/hugetlb.c if (&rg->link == head || t < rg->from) { head 303 mm/hugetlb.c if (&rg->link == head) head 360 mm/hugetlb.c struct list_head *head = &resv->regions; head 394 mm/hugetlb.c list_for_each_entry(rg, head, link) head 401 mm/hugetlb.c if (&rg->link == head || t < rg->from) { head 427 mm/hugetlb.c if (&rg->link == head) head 487 mm/hugetlb.c struct list_head *head = &resv->regions; head 494 mm/hugetlb.c list_for_each_entry_safe(rg, trg, head, link) { head 594 mm/hugetlb.c struct list_head *head = &resv->regions; head 600 mm/hugetlb.c list_for_each_entry(rg, head, link) { head 726 mm/hugetlb.c struct list_head *head = &resv_map->region_cache; head 733 mm/hugetlb.c list_for_each_entry_safe(rg, trg, head, link) { head 1616 mm/hugetlb.c struct page *head = compound_head(page); head 1617 mm/hugetlb.c struct hstate *h = page_hstate(head); head 1618 mm/hugetlb.c int nid = page_to_nid(head); head 1625 mm/hugetlb.c if (PageHWPoison(head) && page != head) { head 1627 mm/hugetlb.c ClearPageHWPoison(head); head 1629 mm/hugetlb.c list_del(&head->lru); head 1633 mm/hugetlb.c update_and_free_page(h, head); head 43 
mm/kasan/quarantine.c struct qlist_node *head; head 52 mm/kasan/quarantine.c return !q->head; head 57 mm/kasan/quarantine.c q->head = q->tail = NULL; head 65 mm/kasan/quarantine.c q->head = qlink; head 84 mm/kasan/quarantine.c to->tail->next = from->head; head 161 mm/kasan/quarantine.c qlink = q->head; head 273 mm/kasan/quarantine.c curr = from->head; head 159 mm/ksm.c struct list_head *head; head 209 mm/ksm.c struct stable_node *head; head 345 mm/ksm.c return dup->head == STABLE_NODE_DUP_HEAD; head 352 mm/ksm.c dup->head = STABLE_NODE_DUP_HEAD; head 373 mm/ksm.c dup->head = NULL; head 662 mm/ksm.c if (stable_node->head == &migrate_nodes) head 779 mm/ksm.c stable_node = rmap_item->head; head 1561 mm/ksm.c if (page_node && page_node->head != &migrate_nodes) { head 1641 mm/ksm.c VM_BUG_ON(page_node->head != &migrate_nodes); head 1726 mm/ksm.c VM_BUG_ON(page_node->head != &migrate_nodes); head 1744 mm/ksm.c VM_BUG_ON(page_node->head != &migrate_nodes); head 1756 mm/ksm.c stable_node_dup->head = &migrate_nodes; head 1757 mm/ksm.c list_add(&stable_node_dup->list, stable_node_dup->head); head 1789 mm/ksm.c VM_BUG_ON(page_node->head != &migrate_nodes); head 2016 mm/ksm.c rmap_item->head = stable_node; head 2048 mm/ksm.c if (stable_node->head != &migrate_nodes && head 2052 mm/ksm.c stable_node->head = &migrate_nodes; head 2053 mm/ksm.c list_add(&stable_node->list, stable_node->head); head 2055 mm/ksm.c if (stable_node->head != &migrate_nodes && head 2056 mm/ksm.c rmap_item->head == stable_node) head 2068 mm/ksm.c if (kpage == page && rmap_item->head == stable_node) { head 177 mm/list_lru.c struct list_head *head) head 179 mm/list_lru.c list_move(item, head); head 393 mm/list_lru.c static void kvfree_rcu(struct rcu_head *head) head 397 mm/list_lru.c mlru = container_of(head, struct list_lru_memcg, rcu); head 326 mm/memcontrol.c static void memcg_free_shrinker_map_rcu(struct rcu_head *head) head 328 mm/memcontrol.c kvfree(container_of(head, struct memcg_shrinker_map, rcu)); head 3116 mm/memcontrol.c void mem_cgroup_split_huge_fixup(struct page *head) head 3124 mm/memcontrol.c head[i].mem_cgroup = head->mem_cgroup; head 3126 mm/memcontrol.c __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR); head 854 mm/memory-failure.c { head, head, MF_MSG_HUGE, me_huge_page }, head 932 mm/memory-failure.c struct page *head = compound_head(page); head 934 mm/memory-failure.c if (!PageHuge(head) && PageTransHuge(head)) { head 941 mm/memory-failure.c if (!PageAnon(head)) { head 948 mm/memory-failure.c if (get_page_unless_zero(head)) { head 949 mm/memory-failure.c if (head == compound_head(page)) head 954 mm/memory-failure.c put_page(head); head 1087 mm/memory-failure.c struct page *head = compound_head(p); head 1091 mm/memory-failure.c if (TestSetPageHWPoison(head)) { head 1103 mm/memory-failure.c lock_page(head); head 1104 mm/memory-failure.c if (PageHWPoison(head)) { head 1106 mm/memory-failure.c || (p != head && TestSetPageHWPoison(head))) { head 1108 mm/memory-failure.c unlock_page(head); head 1112 mm/memory-failure.c unlock_page(head); head 1118 mm/memory-failure.c lock_page(head); head 1119 mm/memory-failure.c page_flags = head->flags; head 1121 mm/memory-failure.c if (!PageHWPoison(head)) { head 1124 mm/memory-failure.c unlock_page(head); head 1125 mm/memory-failure.c put_hwpoison_page(head); head 1138 mm/memory-failure.c if (huge_page_size(page_hstate(head)) > PMD_SIZE) { head 1144 mm/memory-failure.c if (!hwpoison_user_mappings(p, pfn, flags, &head)) { head 1152 mm/memory-failure.c 
unlock_page(head); head 1271 mm/memory_hotplug.c struct page *page, *head; head 1284 mm/memory_hotplug.c head = compound_head(page); head 1285 mm/memory_hotplug.c if (page_huge_active(head)) head 1287 mm/memory_hotplug.c skip = compound_nr(head) - (page - head); head 1324 mm/memory_hotplug.c struct page *head = compound_head(page); head 1325 mm/memory_hotplug.c pfn = page_to_pfn(head) + compound_nr(head) - 1; head 1326 mm/memory_hotplug.c isolate_huge_page(head, &source); head 974 mm/mempolicy.c struct page *head = compound_head(page); head 978 mm/mempolicy.c if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) { head 979 mm/mempolicy.c if (!isolate_lru_page(head)) { head 980 mm/mempolicy.c list_add_tail(&head->lru, pagelist); head 981 mm/mempolicy.c mod_node_page_state(page_pgdat(head), head 982 mm/mempolicy.c NR_ISOLATED_ANON + page_is_file_cache(head), head 983 mm/mempolicy.c hpage_nr_pages(head)); head 699 mm/migrate.c static bool buffer_migrate_lock_buffers(struct buffer_head *head, head 702 mm/migrate.c struct buffer_head *bh = head; head 710 mm/migrate.c } while (bh != head); head 723 mm/migrate.c bh = head; head 732 mm/migrate.c } while (bh != head); head 740 mm/migrate.c struct buffer_head *bh, *head; head 752 mm/migrate.c head = page_buffers(page); head 753 mm/migrate.c if (!buffer_migrate_lock_buffers(head, mode)) head 763 mm/migrate.c bh = head; head 770 mm/migrate.c } while (bh != head); head 793 mm/migrate.c bh = head; head 798 mm/migrate.c } while (bh != head); head 811 mm/migrate.c bh = head; head 816 mm/migrate.c } while (bh != head); head 1565 mm/migrate.c struct page *head; head 1567 mm/migrate.c head = compound_head(page); head 1568 mm/migrate.c err = isolate_lru_page(head); head 1573 mm/migrate.c list_add_tail(&head->lru, pagelist); head 1574 mm/migrate.c mod_node_page_state(page_pgdat(head), head 1575 mm/migrate.c NR_ISOLATED_ANON + page_is_file_cache(head), head 1576 mm/migrate.c hpage_nr_pages(head)); head 133 mm/mmu_gather.c static void tlb_remove_table_rcu(struct rcu_head *head) head 138 mm/mmu_gather.c batch = container_of(head, struct mmu_table_batch, rcu); head 1257 mm/page_alloc.c LIST_HEAD(head); head 1289 mm/page_alloc.c list_add_tail(&page->lru, &head); head 1312 mm/page_alloc.c list_for_each_entry_safe(page, tmp, &head, lru) { head 8245 mm/page_alloc.c struct page *head = compound_head(page); head 8248 mm/page_alloc.c if (!hugepage_migration_supported(page_hstate(head))) head 8251 mm/page_alloc.c skip_pages = compound_nr(head) - (page - head); head 335 mm/readahead.c pgoff_t head; head 338 mm/readahead.c head = page_cache_prev_miss(mapping, offset - 1, max); head 341 mm/readahead.c return offset - 1 - head; head 1925 mm/shmem.c struct page *head = compound_head(page); head 1928 mm/shmem.c for (i = 0; i < compound_nr(head); i++) { head 1929 mm/shmem.c clear_highpage(head + i); head 1930 mm/shmem.c flush_dcache_page(head + i); head 1932 mm/shmem.c SetPageUptodate(head); head 2504 mm/shmem.c struct page *head = compound_head(page); head 2509 mm/shmem.c if (head + i == page) head 2511 mm/shmem.c clear_highpage(head + i); head 2512 mm/shmem.c flush_dcache_page(head + i); head 2520 mm/shmem.c SetPageUptodate(head); head 2768 mm/shmem.c WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); head 1404 mm/slab.c static void kmem_rcu_free(struct rcu_head *head) head 1409 mm/slab.c page = container_of(head, struct page, rcu_head); head 726 mm/slab_common.c static void kmemcg_rcufn(struct rcu_head *head)
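
The mm/migrate.c hits above walk page buffers with the classic circular-ring idiom: the buffer_heads on a page are linked in a cycle (via b_this_page), so the loop starts at head, uses do/while so that head itself is visited, and stops when the walk comes back around. The bare shape of that loop, with a hypothetical ring node standing in for struct buffer_head:

	struct ring_node {		/* hypothetical circular ring element */
		struct ring_node *next;	/* last node points back at the first */
	};

	static void ring_for_each(struct ring_node *head)
	{
		struct ring_node *n = head;

		do {
			/* visit n here; the do/while shape guarantees
			 * the head node is processed exactly once */
			n = n->next;
		} while (n != head);
	}
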
head 728 mm/slab_common.c struct kmem_cache *s = container_of(head, struct kmem_cache, head 134 mm/slob.c struct rcu_head head; head 650 mm/slob.c static void kmem_rcu_free(struct rcu_head *head) head 652 mm/slob.c struct slob_rcu *slob_rcu = (struct slob_rcu *)head; head 665 mm/slob.c call_rcu(&slob_rcu->head, kmem_rcu_free); head 1170 mm/slub.c void *head, void *tail, int bulk_cnt, head 1174 mm/slub.c void *object = head; head 1352 mm/slub.c void *head, void *tail, int bulk_cnt, head 1428 mm/slub.c void **head, void **tail) head 1432 mm/slub.c void *next = *head; head 1433 mm/slub.c void *old_tail = *tail ? *tail : *head; head 1437 mm/slub.c *head = NULL; head 1459 mm/slub.c set_freepointer(s, object, *head); head 1460 mm/slub.c *head = object; head 1466 mm/slub.c if (*head == *tail) head 1469 mm/slub.c return *head != NULL; head 2837 mm/slub.c void *head, void *tail, int cnt, head 2851 mm/slub.c !free_debug_processing(s, page, head, tail, cnt, addr)) head 2895 mm/slub.c head, new.counters, head 2965 mm/slub.c struct page *page, void *head, void *tail, head 2968 mm/slub.c void *tail_obj = tail ? : head; head 2995 mm/slub.c head, next_tid(tid)))) { head 3002 mm/slub.c __slab_free(s, page, head, tail_obj, cnt, addr); head 3007 mm/slub.c void *head, void *tail, int cnt, head 3014 mm/slub.c if (slab_free_freelist_hook(s, &head, &tail)) head 3015 mm/slub.c do_slab_free(s, page, head, tail, cnt, addr); head 386 mm/swapfile.c return cluster_is_null(&list->head); head 391 mm/swapfile.c return cluster_next(&list->head); head 396 mm/swapfile.c cluster_set_null(&list->head); head 405 mm/swapfile.c cluster_set_next_flag(&list->head, idx, 0); head 428 mm/swapfile.c idx = cluster_next(&list->head); head 430 mm/swapfile.c cluster_set_null(&list->head); head 433 mm/swapfile.c cluster_set_next_flag(&list->head, head 611 mm/swapfile.c cluster->index = si->free_clusters.head; head 3525 mm/swapfile.c struct page *head; head 3573 mm/swapfile.c head = vmalloc_to_page(si->swap_map + offset); head 3581 mm/swapfile.c if (!page_private(head)) { head 3583 mm/swapfile.c INIT_LIST_HEAD(&head->lru); head 3584 mm/swapfile.c set_page_private(head, SWP_CONTINUED); head 3588 mm/swapfile.c list_for_each_entry(list_page, &head->lru, lru) { head 3610 mm/swapfile.c list_add_tail(&page->lru, &head->lru); head 3636 mm/swapfile.c struct page *head; head 3641 mm/swapfile.c head = vmalloc_to_page(si->swap_map + offset); head 3642 mm/swapfile.c if (page_private(head) != SWP_CONTINUED) { head 3649 mm/swapfile.c page = list_entry(head->lru.next, struct page, lru); head 3662 mm/swapfile.c BUG_ON(page == head); head 3668 mm/swapfile.c if (page == head) { head 3678 mm/swapfile.c while (page != head) { head 3694 mm/swapfile.c BUG_ON(page == head); head 3703 mm/swapfile.c while (page != head) { head 3726 mm/swapfile.c struct page *head; head 3727 mm/swapfile.c head = vmalloc_to_page(si->swap_map + offset); head 3728 mm/swapfile.c if (page_private(head)) { head 3731 mm/swapfile.c list_for_each_entry_safe(page, next, &head->lru, lru) { head 502 mm/vmalloc.c struct rb_node *parent, struct rb_node **link, struct list_head *head) head 509 mm/vmalloc.c head = &rb_entry(parent, struct vmap_area, rb_node)->list; head 511 mm/vmalloc.c head = head->prev; head 536 mm/vmalloc.c list_add(&va->list, head); head 654 mm/vmalloc.c struct rb_root *root, struct list_head *head) head 660 mm/vmalloc.c link_va(va, root, parent, link, head); head 666 mm/vmalloc.c struct list_head *head) head 676 mm/vmalloc.c link_va(va, root, parent, link, head); head 688 mm/vmalloc.c struct rb_root *root, struct list_head 
*head) head 716 mm/vmalloc.c if (next != head) { head 740 mm/vmalloc.c if (next->prev != head) { head 759 mm/vmalloc.c link_va(va, root, parent, link, head); head 3476 mm/vmalloc.c struct llist_node *head; head 3479 mm/vmalloc.c head = READ_ONCE(vmap_purge_list.first); head 3480 mm/vmalloc.c if (head == NULL) head 3483 mm/vmalloc.c llist_for_each_entry(va, head, purge_list) { head 724 mm/zsmalloc.c struct zspage *head; head 727 mm/zsmalloc.c head = list_first_entry_or_null(&class->fullness_list[fullness], head 733 mm/zsmalloc.c if (head) { head 734 mm/zsmalloc.c if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) { head 735 mm/zsmalloc.c list_add(&zspage->list, &head->list); head 1655 mm/zsmalloc.c unsigned long head; head 1665 mm/zsmalloc.c head = obj_to_head(page, addr + offset); head 1666 mm/zsmalloc.c if (head & OBJ_ALLOCATED_TAG) { head 1667 mm/zsmalloc.c handle = head & ~OBJ_ALLOCATED_TAG; head 1982 mm/zsmalloc.c unsigned long handle, head; head 2019 mm/zsmalloc.c head = obj_to_head(page, s_addr + pos); head 2020 mm/zsmalloc.c if (head & OBJ_ALLOCATED_TAG) { head 2021 mm/zsmalloc.c handle = head & ~OBJ_ALLOCATED_TAG; head 2037 mm/zsmalloc.c head = obj_to_head(page, addr); head 2038 mm/zsmalloc.c if (head & OBJ_ALLOCATED_TAG) { head 2039 mm/zsmalloc.c handle = head & ~OBJ_ALLOCATED_TAG; head 2085 mm/zsmalloc.c head = obj_to_head(page, addr); head 2086 mm/zsmalloc.c if (head & OBJ_ALLOCATED_TAG) { head 2087 mm/zsmalloc.c handle = head & ~OBJ_ALLOCATED_TAG; head 1136 net/6lowpan/iphc.c u8 head[LOWPAN_IPHC_MAX_HC_BUF_LEN] = {}; head 1144 net/6lowpan/iphc.c hc_ptr = head + 2; head 1299 net/6lowpan/iphc.c head[0] = iphc0; head 1300 net/6lowpan/iphc.c head[1] = iphc1; head 1304 net/6lowpan/iphc.c memcpy(skb_push(skb, hc_ptr - head), head, hc_ptr - head); head 1307 net/6lowpan/iphc.c pr_debug("header len %d skb %u\n", (int)(hc_ptr - head), skb->len); head 82 net/8021q/vlan.c void unregister_vlan_dev(struct net_device *dev, struct list_head *head) head 111 net/8021q/vlan.c unregister_netdevice_queue(dev, head); head 128 net/8021q/vlan.h void unregister_vlan_dev(struct net_device *dev, struct list_head *head); head 457 net/8021q/vlan_core.c static struct sk_buff *vlan_gro_receive(struct list_head *head, head 486 net/8021q/vlan_core.c list_for_each_entry(p, head, list) { head 499 net/8021q/vlan_core.c pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); head 52 net/atm/addr.c struct list_head *head; head 56 net/atm/addr.c head = &dev->lecs; head 58 net/atm/addr.c head = &dev->local; head 59 net/atm/addr.c list_for_each_entry_safe(this, p, head, entry) { head 64 net/atm/addr.c if (head == &dev->local) head 73 net/atm/addr.c struct list_head *head; head 81 net/atm/addr.c head = &dev->lecs; head 83 net/atm/addr.c head = &dev->local; head 84 net/atm/addr.c list_for_each_entry(this, head, entry) { head 96 net/atm/addr.c list_add(&this->entry, head); head 98 net/atm/addr.c if (head == &dev->local) head 108 net/atm/addr.c struct list_head *head; head 116 net/atm/addr.c head = &dev->lecs; head 118 net/atm/addr.c head = &dev->local; head 119 net/atm/addr.c list_for_each_entry(this, head, entry) { head 124 net/atm/addr.c if (head == &dev->local) head 138 net/atm/addr.c struct list_head *head; head 144 net/atm/addr.c head = &dev->lecs; head 146 net/atm/addr.c head = &dev->local; head 147 net/atm/addr.c list_for_each_entry(this, head, entry) head 154 net/atm/addr.c list_for_each_entry(this, head, entry) head 46 net/atm/common.c struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 
1)]; head 48 net/atm/common.c sk_add_node(sk, head); head 268 net/atm/common.c struct hlist_head *head = &vcc_hash[i]; head 273 net/atm/common.c sk_for_each_safe(s, tmp, head) { head 315 net/atm/common.c struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)]; head 319 net/atm/common.c sk_for_each(s, head) { head 226 net/atm/lec.c (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb), head 1532 net/atm/lec.c struct hlist_head *head; head 1537 net/atm/lec.c head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])]; head 1538 net/atm/lec.c hlist_for_each_entry(entry, head, next) { head 84 net/atm/proc.c struct hlist_head *head = &vcc_hash[*bucket]; head 86 net/atm/proc.c sk = hlist_empty(head) ? NULL : __sk_head(head); head 210 net/atm/signaling.c struct hlist_head *head = &vcc_hash[i]; head 212 net/atm/signaling.c sk_for_each(s, head) { head 744 net/batman-adv/bat_iv_ogm.c struct hlist_head *head; head 752 net/batman-adv/bat_iv_ogm.c head = &hash->table[i]; head 755 net/batman-adv/bat_iv_ogm.c hlist_for_each_entry_rcu(orig_node, head, hash_entry) { head 1827 net/batman-adv/bat_iv_ogm.c struct hlist_head *head; head 1835 net/batman-adv/bat_iv_ogm.c head = &hash->table[i]; head 1838 net/batman-adv/bat_iv_ogm.c hlist_for_each_entry_rcu(orig_node, head, hash_entry) { head 2049 net/batman-adv/bat_iv_ogm.c struct hlist_head *head, int *idx_s, int *sub) head 2055 net/batman-adv/bat_iv_ogm.c hlist_for_each_entry_rcu(orig_node, head, hash_entry) { head 2087 net/batman-adv/bat_iv_ogm.c struct hlist_head *head; head 2094 net/batman-adv/bat_iv_ogm.c head = &hash->table[bucket]; head 2098 net/batman-adv/bat_iv_ogm.c bat_priv, if_outgoing, head, head 357 net/batman-adv/bat_v.c struct hlist_head *head; head 365 net/batman-adv/bat_v.c head = &hash->table[i]; head 368 net/batman-adv/bat_v.c hlist_for_each_entry_rcu(orig_node, head, hash_entry) { head 545 net/batman-adv/bat_v.c struct hlist_head *head, int *idx_s, int *sub) head 551 net/batman-adv/bat_v.c hlist_for_each_entry_rcu(orig_node, head, hash_entry) { head 582 net/batman-adv/bat_v.c struct hlist_head *head; head 589 net/batman-adv/bat_v.c head = &hash->table[bucket]; head 593 net/batman-adv/bat_v.c bat_priv, if_outgoing, head, &idx, head 215 net/batman-adv/bridge_loop_avoidance.c struct hlist_head *head; head 224 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[index]; head 227 net/batman-adv/bridge_loop_avoidance.c hlist_for_each_entry_rcu(claim, head, hash_entry) { head 255 net/batman-adv/bridge_loop_avoidance.c struct hlist_head *head; head 267 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[index]; head 270 net/batman-adv/bridge_loop_avoidance.c hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { head 295 net/batman-adv/bridge_loop_avoidance.c struct hlist_head *head; head 305 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; head 310 net/batman-adv/bridge_loop_avoidance.c head, hash_entry) { head 588 net/batman-adv/bridge_loop_avoidance.c struct hlist_head *head; head 606 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; head 609 net/batman-adv/bridge_loop_avoidance.c hlist_for_each_entry_rcu(claim, head, hash_entry) { head 1217 net/batman-adv/bridge_loop_avoidance.c struct hlist_head *head; head 1227 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; head 1232 net/batman-adv/bridge_loop_avoidance.c head, hash_entry) { head 1272 net/batman-adv/bridge_loop_avoidance.c struct hlist_head *head; head 1281 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; 
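
Nearly every batman-adv entry above follows one pattern, which the net/atm vcc_hash lines share: hash the key to pick a bucket out of a table of hlist_heads, then walk that bucket under rcu_read_lock() with hlist_for_each_entry_rcu(). A minimal sketch with a hypothetical entry type; a real lookup would also take a reference before dropping the RCU read lock, as the batadv *_get() helpers do:

	#include <linux/types.h>
	#include <linux/list.h>
	#include <linux/rcupdate.h>

	#define MY_HASH_SIZE	64	/* hypothetical bucket count */

	struct my_entry {
		u32 key;
		struct hlist_node hash_entry;	/* bucket linkage */
	};

	static struct hlist_head my_table[MY_HASH_SIZE];

	static struct my_entry *my_lookup(u32 key)
	{
		struct hlist_head *head = &my_table[key % MY_HASH_SIZE];
		struct my_entry *e, *found = NULL;

		rcu_read_lock();
		hlist_for_each_entry_rcu(e, head, hash_entry) {
			if (e->key != key)
				continue;
			/* real code takes a refcount here, before
			 * rcu_read_unlock() */
			found = e;
			break;
		}
		rcu_read_unlock();
		return found;
	}
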
head 1284 net/batman-adv/bridge_loop_avoidance.c hlist_for_each_entry_rcu(claim, head, hash_entry) { head 1327 net/batman-adv/bridge_loop_avoidance.c struct hlist_head *head; head 1351 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; head 1354 net/batman-adv/bridge_loop_avoidance.c hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { head 1424 net/batman-adv/bridge_loop_avoidance.c struct hlist_head *head; head 1465 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; head 1468 net/batman-adv/bridge_loop_avoidance.c hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { head 1664 net/batman-adv/bridge_loop_avoidance.c struct hlist_head *head; head 1675 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; head 1678 net/batman-adv/bridge_loop_avoidance.c hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { head 2042 net/batman-adv/bridge_loop_avoidance.c struct hlist_head *head; head 2060 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; head 2063 net/batman-adv/bridge_loop_avoidance.c hlist_for_each_entry_rcu(claim, head, hash_entry) { head 2275 net/batman-adv/bridge_loop_avoidance.c struct hlist_head *head; head 2293 net/batman-adv/bridge_loop_avoidance.c head = &hash->table[i]; head 2296 net/batman-adv/bridge_loop_avoidance.c hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { head 163 net/batman-adv/distributed-arp-table.c struct hlist_head *head; head 170 net/batman-adv/distributed-arp-table.c head = &bat_priv->dat.hash->table[i]; head 174 net/batman-adv/distributed-arp-table.c hlist_for_each_entry_safe(dat_entry, node_tmp, head, head 326 net/batman-adv/distributed-arp-table.c struct hlist_head *head; head 338 net/batman-adv/distributed-arp-table.c head = &hash->table[index]; head 341 net/batman-adv/distributed-arp-table.c hlist_for_each_entry_rcu(dat_entry, head, hash_entry) { head 566 net/batman-adv/distributed-arp-table.c struct hlist_head *head; head 578 net/batman-adv/distributed-arp-table.c head = &hash->table[i]; head 581 net/batman-adv/distributed-arp-table.c hlist_for_each_entry_rcu(orig_node, head, hash_entry) { head 858 net/batman-adv/distributed-arp-table.c struct hlist_head *head; head 872 net/batman-adv/distributed-arp-table.c head = &hash->table[i]; head 875 net/batman-adv/distributed-arp-table.c hlist_for_each_entry_rcu(dat_entry, head, hash_entry) { head 39 net/batman-adv/fragmentation.c static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped) head 44 net/batman-adv/fragmentation.c hlist_for_each_entry_safe(entry, node, head, list) { head 316 net/batman-adv/fragmentation.c struct hlist_head head = HLIST_HEAD_INIT; head 320 net/batman-adv/fragmentation.c if (!batadv_frag_insert_packet(orig_node_src, *skb, &head)) head 324 net/batman-adv/fragmentation.c if (hlist_empty(&head)) head 327 net/batman-adv/fragmentation.c skb_out = batadv_frag_merge_packets(&head); head 83 net/batman-adv/hash.h struct hlist_head *head; head 91 net/batman-adv/hash.h head = &hash->table[index]; head 96 net/batman-adv/hash.h hlist_for_each(node, head) { head 105 net/batman-adv/hash.h hlist_add_head_rcu(data_node, head); head 136 net/batman-adv/hash.h struct hlist_head *head; head 140 net/batman-adv/hash.h head = &hash->table[index]; head 143 net/batman-adv/hash.h hlist_for_each(node, head) { head 1754 net/batman-adv/multicast.c struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list; head 1767 net/batman-adv/multicast.c hlist_add_head_rcu(node, head); head 1799 net/batman-adv/multicast.c struct 
hlist_head *head = &bat_priv->mcast.want_all_ipv4_list; head 1812 net/batman-adv/multicast.c hlist_add_head_rcu(node, head); head 1844 net/batman-adv/multicast.c struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list; head 1857 net/batman-adv/multicast.c hlist_add_head_rcu(node, head); head 1889 net/batman-adv/multicast.c struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list; head 1902 net/batman-adv/multicast.c hlist_add_head_rcu(node, head); head 1934 net/batman-adv/multicast.c struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list; head 1947 net/batman-adv/multicast.c hlist_add_head_rcu(node, head); head 2112 net/batman-adv/multicast.c struct hlist_head *head; head 2123 net/batman-adv/multicast.c head = &hash->table[i]; head 2126 net/batman-adv/multicast.c hlist_for_each_entry_rcu(orig_node, head, hash_entry) { head 396 net/batman-adv/network-coding.c struct hlist_head *head; head 405 net/batman-adv/network-coding.c head = &hash->table[i]; head 408 net/batman-adv/network-coding.c hlist_for_each_entry_rcu(orig_node, head, hash_entry) head 430 net/batman-adv/network-coding.c struct hlist_head *head; head 437 net/batman-adv/network-coding.c head = &hash->table[i]; head 442 net/batman-adv/network-coding.c hlist_for_each_entry_safe(nc_path, node_tmp, head, hash_entry) { head 542 net/batman-adv/network-coding.c struct hlist_head *head; head 550 net/batman-adv/network-coding.c head = &hash->table[index]; head 553 net/batman-adv/network-coding.c hlist_for_each_entry_rcu(nc_path, head, hash_entry) { head 672 net/batman-adv/network-coding.c struct hlist_head *head; head 683 net/batman-adv/network-coding.c head = &hash->table[i]; head 687 net/batman-adv/network-coding.c hlist_for_each_entry_rcu(nc_path, head, hash_entry) { head 1894 net/batman-adv/network-coding.c struct hlist_head *head; head 1905 net/batman-adv/network-coding.c head = &hash->table[i]; head 1909 net/batman-adv/network-coding.c hlist_for_each_entry_rcu(orig_node, head, hash_entry) { head 61 net/batman-adv/originator.c struct hlist_head *head; head 69 net/batman-adv/originator.c head = &hash->table[index]; head 72 net/batman-adv/originator.c hlist_for_each_entry_rcu(orig_node, head, hash_entry) { head 969 net/batman-adv/originator.c struct hlist_head *head; head 982 net/batman-adv/originator.c head = &hash->table[i]; head 987 net/batman-adv/originator.c head, hash_entry) { head 1337 net/batman-adv/originator.c struct hlist_head *head; head 1347 net/batman-adv/originator.c head = &hash->table[i]; head 1352 net/batman-adv/originator.c head, hash_entry) { head 644 net/batman-adv/send.c static void batadv_forw_packet_list_free(struct hlist_head *head) head 649 net/batman-adv/send.c hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head, head 676 net/batman-adv/send.c spinlock_t *lock, struct hlist_head *head, head 694 net/batman-adv/send.c hlist_add_head(&forw_packet->list, head); head 978 net/batman-adv/send.c struct hlist_head head = HLIST_HEAD_INIT; head 990 net/batman-adv/send.c batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head, head 996 net/batman-adv/send.c batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head, head 1001 net/batman-adv/send.c batadv_forw_packet_list_free(&head); head 1106 net/batman-adv/soft-interface.c struct list_head *head) head 1126 net/batman-adv/soft-interface.c unregister_netdevice_queue(soft_iface, head); head 130 net/batman-adv/translation-table.c struct hlist_head *head; head 141 net/batman-adv/translation-table.c head = &hash->table[index]; head 144 
net/batman-adv/translation-table.c hlist_for_each_entry_rcu(tt, head, hash_entry) { head 651 net/batman-adv/translation-table.c struct hlist_head *head; head 772 net/batman-adv/translation-table.c head = &tt_global->orig_list; head 774 net/batman-adv/translation-table.c hlist_for_each_entry_rcu(orig_entry, head, list) { head 1082 net/batman-adv/translation-table.c struct hlist_head *head; head 1101 net/batman-adv/translation-table.c head = &hash->table[i]; head 1105 net/batman-adv/translation-table.c head, hash_entry) { head 1399 net/batman-adv/translation-table.c struct hlist_head *head, head 1406 net/batman-adv/translation-table.c hlist_for_each_entry_safe(tt_common_entry, node_tmp, head, head 1436 net/batman-adv/translation-table.c struct hlist_head *head; head 1441 net/batman-adv/translation-table.c head = &hash->table[i]; head 1445 net/batman-adv/translation-table.c batadv_tt_local_purge_list(bat_priv, head, timeout); head 1457 net/batman-adv/translation-table.c struct hlist_head *head; head 1466 net/batman-adv/translation-table.c head = &hash->table[i]; head 1471 net/batman-adv/translation-table.c head, hash_entry) { head 1535 net/batman-adv/translation-table.c const struct hlist_head *head; head 1538 net/batman-adv/translation-table.c head = &entry->orig_list; head 1539 net/batman-adv/translation-table.c hlist_for_each_entry_rcu(tmp_orig_entry, head, list) { head 1597 net/batman-adv/translation-table.c const struct hlist_head *head; head 1601 net/batman-adv/translation-table.c head = &tt_global->orig_list; head 1602 net/batman-adv/translation-table.c hlist_for_each_entry_rcu(orig_entry, head, list) head 1851 net/batman-adv/translation-table.c struct hlist_head *head; head 1854 net/batman-adv/translation-table.c head = &tt_global_entry->orig_list; head 1855 net/batman-adv/translation-table.c hlist_for_each_entry_rcu(orig_entry, head, list) { head 1900 net/batman-adv/translation-table.c struct hlist_head *head; head 1935 net/batman-adv/translation-table.c head = &tt_global_entry->orig_list; head 1937 net/batman-adv/translation-table.c hlist_for_each_entry_rcu(orig_entry, head, list) { head 1982 net/batman-adv/translation-table.c struct hlist_head *head; head 1996 net/batman-adv/translation-table.c head = &hash->table[i]; head 2000 net/batman-adv/translation-table.c head, hash_entry) { head 2096 net/batman-adv/translation-table.c struct hlist_head *head; head 2102 net/batman-adv/translation-table.c head = &global->orig_list; head 2104 net/batman-adv/translation-table.c hlist_for_each_entry_rcu(orig_entry, head, list) { head 2136 net/batman-adv/translation-table.c struct hlist_head *head, int *idx_s, int *sub) head 2142 net/batman-adv/translation-table.c hlist_for_each_entry_rcu(common, head, hash_entry) { head 2174 net/batman-adv/translation-table.c struct hlist_head *head; head 2203 net/batman-adv/translation-table.c head = &hash->table[bucket]; head 2207 net/batman-adv/translation-table.c head, &idx, &sub)) head 2259 net/batman-adv/translation-table.c struct hlist_head *head; head 2264 net/batman-adv/translation-table.c head = &tt_global_entry->orig_list; head 2265 net/batman-adv/translation-table.c hlist_for_each_entry_safe(orig_entry, safe, head, list) head 2286 net/batman-adv/translation-table.c struct hlist_head *head; head 2292 net/batman-adv/translation-table.c head = &tt_global_entry->orig_list; head 2293 net/batman-adv/translation-table.c hlist_for_each_entry_safe(orig_entry, safe, head, list) { head 2319 net/batman-adv/translation-table.c struct hlist_head *head; head 2327 
net/batman-adv/translation-table.c head = &tt_global_entry->orig_list; head 2328 net/batman-adv/translation-table.c hlist_for_each_entry_rcu(orig_entry, head, list) { head 2434 net/batman-adv/translation-table.c struct hlist_head *head; head 2442 net/batman-adv/translation-table.c head = &hash->table[i]; head 2447 net/batman-adv/translation-table.c head, hash_entry) { head 2499 net/batman-adv/translation-table.c struct hlist_head *head; head 2508 net/batman-adv/translation-table.c head = &hash->table[i]; head 2512 net/batman-adv/translation-table.c hlist_for_each_entry_safe(tt_common, node_tmp, head, head 2542 net/batman-adv/translation-table.c struct hlist_head *head; head 2551 net/batman-adv/translation-table.c head = &hash->table[i]; head 2556 net/batman-adv/translation-table.c head, hash_entry) { head 2679 net/batman-adv/translation-table.c struct hlist_head *head; head 2685 net/batman-adv/translation-table.c head = &hash->table[i]; head 2688 net/batman-adv/translation-table.c hlist_for_each_entry_rcu(tt_common, head, hash_entry) { head 2757 net/batman-adv/translation-table.c struct hlist_head *head; head 2763 net/batman-adv/translation-table.c head = &hash->table[i]; head 2766 net/batman-adv/translation-table.c hlist_for_each_entry_rcu(tt_common, head, hash_entry) { head 2992 net/batman-adv/translation-table.c struct hlist_head *head; head 3006 net/batman-adv/translation-table.c head = &hash->table[i]; head 3009 net/batman-adv/translation-table.c head, hash_entry) { head 3845 net/batman-adv/translation-table.c struct hlist_head *head; head 3852 net/batman-adv/translation-table.c head = &hash->table[i]; head 3856 net/batman-adv/translation-table.c head, hash_entry) { head 3884 net/batman-adv/translation-table.c struct hlist_head *head; head 3892 net/batman-adv/translation-table.c head = &hash->table[i]; head 3896 net/batman-adv/translation-table.c hlist_for_each_entry_safe(tt_common, node_tmp, head, head 144 net/bluetooth/af_bluetooth.c sk_add_node(sk, &l->head); head 616 net/bluetooth/af_bluetooth.c return seq_hlist_start_head(&l->head, *pos); head 623 net/bluetooth/af_bluetooth.c return seq_hlist_next(v, &l->head, pos); head 200 net/bluetooth/hci_sock.c sk_for_each(sk, &hci_sk_list.head) { head 261 net/bluetooth/hci_sock.c sk_for_each(sk, &hci_sk_list.head) { head 364 net/bluetooth/hci_sock.c sk_for_each(sk, &hci_sk_list.head) { head 686 net/bluetooth/hci_sock.c sk_for_each(sk, &hci_sk_list.head) { head 757 net/bluetooth/hci_sock.c sk_for_each(sk, &hci_sk_list.head) { head 305 net/bluetooth/l2cap_core.c static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head, head 310 net/bluetooth/l2cap_core.c skb_queue_walk(head, skb) { head 344 net/bluetooth/l2cap_core.c seq_list->head = L2CAP_SEQ_LIST_CLEAR; head 366 net/bluetooth/l2cap_core.c u16 seq = seq_list->head; head 369 net/bluetooth/l2cap_core.c seq_list->head = seq_list->list[seq & mask]; head 372 net/bluetooth/l2cap_core.c if (seq_list->head == L2CAP_SEQ_LIST_TAIL) { head 373 net/bluetooth/l2cap_core.c seq_list->head = L2CAP_SEQ_LIST_CLEAR; head 384 net/bluetooth/l2cap_core.c if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) head 390 net/bluetooth/l2cap_core.c seq_list->head = L2CAP_SEQ_LIST_CLEAR; head 404 net/bluetooth/l2cap_core.c seq_list->head = seq; head 1995 net/bluetooth/l2cap_core.c while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { head 2655 net/bluetooth/l2cap_core.c initial_head = chan->srej_list.head; head 2665 net/bluetooth/l2cap_core.c } while (chan->srej_list.head != initial_head); head 6157 
net/bluetooth/l2cap_core.c if (chan->srej_list.head == txseq) { head 113 net/bluetooth/rfcomm/sock.c sk_for_each(sk, &rfcomm_sk_list.head) { head 136 net/bluetooth/rfcomm/sock.c sk_for_each(sk, &rfcomm_sk_list.head) { head 1012 net/bluetooth/rfcomm/sock.c sk_for_each(sk, &rfcomm_sk_list.head) { head 220 net/bluetooth/rfcomm/tty.c struct list_head *head = &rfcomm_dev_list; head 237 net/bluetooth/rfcomm/tty.c head = &entry->list; head 251 net/bluetooth/rfcomm/tty.c head = &entry->list; head 262 net/bluetooth/rfcomm/tty.c list_add(&dev->list, head); head 329 net/bluetooth/sco.c sk_for_each(sk, &sco_sk_list.head) { head 349 net/bluetooth/sco.c sk_for_each(sk, &sco_sk_list.head) { head 1091 net/bluetooth/sco.c sk_for_each(sk, &sco_sk_list.head) { head 1166 net/bluetooth/sco.c sk_for_each(sk, &sco_sk_list.head) { head 82 net/bridge/br_fdb.c static void fdb_rcu_free(struct rcu_head *head) head 85 net/bridge/br_fdb.c = container_of(head, struct net_bridge_fdb_entry, rcu); head 282 net/bridge/br_if.c static void destroy_nbp_rcu(struct rcu_head *head) head 285 net/bridge/br_if.c container_of(head, struct net_bridge_port, rcu); head 367 net/bridge/br_if.c void br_dev_delete(struct net_device *dev, struct list_head *head) head 383 net/bridge/br_if.c unregister_netdevice_queue(br->dev, head); head 308 net/bridge/netfilter/ebtables.c find_inlist_lock_noload(struct list_head *head, const char *name, int *error, head 317 net/bridge/netfilter/ebtables.c list_for_each_entry(e, head, list) { head 327 net/bridge/netfilter/ebtables.c find_inlist_lock(struct list_head *head, const char *name, const char *prefix, head 331 net/bridge/netfilter/ebtables.c find_inlist_lock_noload(head, name, error, mutex), head 25 net/caif/cfpkt_skbuff.c struct sk_buff_head head; head 217 net/caif/cfrfml.c u8 head[6]; head 234 net/caif/cfrfml.c err = cfpkt_peek_head(pkt, head, 6); head 268 net/caif/cfrfml.c if (cfpkt_add_head(frontpkt, head, 6) < 0) head 313 net/can/bcm.c static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, head 320 net/can/bcm.c unsigned int datalen = head->nframes * op->cfsiz; head 323 net/can/bcm.c skb = alloc_skb(sizeof(*head) + datalen, gfp_any()); head 327 net/can/bcm.c skb_put_data(skb, head, sizeof(*head)); head 329 net/can/bcm.c if (head->nframes) { head 341 net/can/bcm.c if (head->nframes == 1) head 430 net/can/bcm.c struct bcm_msg_head head; head 442 net/can/bcm.c head.opcode = RX_CHANGED; head 443 net/can/bcm.c head.flags = op->flags; head 444 net/can/bcm.c head.count = op->count; head 445 net/can/bcm.c head.ival1 = op->ival1; head 446 net/can/bcm.c head.ival2 = op->ival2; head 447 net/can/bcm.c head.can_id = op->can_id; head 448 net/can/bcm.c head.nframes = 1; head 450 net/can/bcm.c bcm_send_to_user(op, &head, data, 1); head 488 net/ceph/auth_x.c struct ceph_x_request_header *head = buf; head 502 net/ceph/auth_x.c struct ceph_x_authenticate *auth = (void *)(head + 1); head 513 net/ceph/auth_x.c head->op = cpu_to_le16(CEPHX_GET_AUTH_SESSION_KEY); head 541 net/ceph/auth_x.c void *p = head + 1; head 546 net/ceph/auth_x.c head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY); head 567 net/ceph/auth_x.c struct ceph_x_reply_header *head = buf; head 590 net/ceph/auth_x.c op = le16_to_cpu(head->op); head 591 net/ceph/auth_x.c result = le32_to_cpu(head->result); head 597 net/ceph/auth_x.c buf + sizeof(*head), end); head 605 net/ceph/auth_x.c buf + sizeof(*head), end); head 633 net/ceph/messenger.c static void ceph_msg_remove_list(struct list_head *head) head 635 net/ceph/messenger.c 
while (!list_empty(head)) { head 636 net/ceph/messenger.c struct ceph_msg *msg = list_first_entry(head, struct ceph_msg, head 1020 net/ceph/messenger.c BUG_ON(list_empty(&pagelist->head)); head 1021 net/ceph/messenger.c page = list_first_entry(&pagelist->head, struct page, lru); head 1081 net/ceph/messenger.c BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head)); head 2140 net/ceph/osd_client.c } __packed head; head 2152 net/ceph/osd_client.c memcpy(&head, p, sizeof(head)); head 2153 net/ceph/osd_client.c p += sizeof(head); head 2172 net/ceph/osd_client.c ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc)); head 2173 net/ceph/osd_client.c ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch)); head 2174 net/ceph/osd_client.c ceph_encode_copy(&p, &head.flags, sizeof(head.flags)); head 2175 net/ceph/osd_client.c ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime)); head 2185 net/ceph/osd_client.c pgid.seed = le32_to_cpu(head.hash); head 17 net/ceph/pagelist.c INIT_LIST_HEAD(&pl->head); head 32 net/ceph/pagelist.c struct page *page = list_entry(pl->head.prev, struct page, lru); head 43 net/ceph/pagelist.c while (!list_empty(&pl->head)) { head 44 net/ceph/pagelist.c struct page *page = list_first_entry(&pl->head, struct page, head 69 net/ceph/pagelist.c list_add_tail(&page->lru, &pl->head); head 140 net/ceph/pagelist.c c->page_lru = pl->head.prev; head 158 net/ceph/pagelist.c while (pl->head.prev != c->page_lru) { head 159 net/ceph/pagelist.c page = list_entry(pl->head.prev, struct page, lru); head 165 net/ceph/pagelist.c if (!list_empty(&pl->head)) { head 166 net/ceph/pagelist.c page = list_entry(pl->head.prev, struct page, lru); head 326 net/core/dev.c struct list_head *head = ptype_head(pt); head 329 net/core/dev.c list_add_rcu(&pt->list, head); head 349 net/core/dev.c struct list_head *head = ptype_head(pt); head 354 net/core/dev.c list_for_each_entry(pt1, head, list) { head 429 net/core/dev.c struct list_head *head = &offload_base; head 434 net/core/dev.c list_for_each_entry(po1, head, list) { head 655 net/core/dev.c struct hlist_head *head = dev_name_hash(net, name); head 657 net/core/dev.c hlist_for_each_entry(dev, head, name_hlist) head 680 net/core/dev.c struct hlist_head *head = dev_name_hash(net, name); head 682 net/core/dev.c hlist_for_each_entry_rcu(dev, head, name_hlist) head 730 net/core/dev.c struct hlist_head *head = dev_index_hash(net, ifindex); head 732 net/core/dev.c hlist_for_each_entry(dev, head, index_hlist) head 754 net/core/dev.c struct hlist_head *head = dev_index_hash(net, ifindex); head 756 net/core/dev.c hlist_for_each_entry_rcu(dev, head, index_hlist) head 1370 net/core/dev.c static void __dev_close_many(struct list_head *head) head 1377 net/core/dev.c list_for_each_entry(dev, head, close_list) { head 1394 net/core/dev.c dev_deactivate_many(head); head 1396 net/core/dev.c list_for_each_entry(dev, head, close_list) { head 1423 net/core/dev.c void dev_close_many(struct list_head *head, bool unlink) head 1428 net/core/dev.c list_for_each_entry_safe(dev, tmp, head, close_list) head 1432 net/core/dev.c __dev_close_many(head); head 1434 net/core/dev.c list_for_each_entry_safe(dev, tmp, head, close_list) { head 3311 net/core/dev.c struct sk_buff *next, *head = NULL, *tail; head 3324 net/core/dev.c if (!head) head 3325 net/core/dev.c head = skb; head 3333 net/core/dev.c return head; head 4512 net/core/dev.c struct Qdisc *head; head 4515 net/core/dev.c head = sd->output_queue; head 4520 net/core/dev.c while (head) { head 4521 net/core/dev.c struct Qdisc *q 
= head; head 4524 net/core/dev.c head = head->next_sched; head 4955 net/core/dev.c static inline void __netif_receive_skb_list_ptype(struct list_head *head, head 4963 net/core/dev.c if (list_empty(head)) head 4967 net/core/dev.c ip_list_rcv, head, pt_prev, orig_dev); head 4969 net/core/dev.c list_for_each_entry_safe(skb, next, head, list) { head 4975 net/core/dev.c static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) head 4994 net/core/dev.c list_for_each_entry_safe(skb, next, head, list) { head 5042 net/core/dev.c static void __netif_receive_skb_list(struct list_head *head) head 5048 net/core/dev.c list_for_each_entry_safe(skb, next, head, list) { head 5053 net/core/dev.c list_cut_before(&sublist, head, &skb->list); head 5065 net/core/dev.c if (!list_empty(head)) head 5066 net/core/dev.c __netif_receive_skb_list_core(head, pfmemalloc); head 5132 net/core/dev.c static void netif_receive_skb_list_internal(struct list_head *head) head 5138 net/core/dev.c list_for_each_entry_safe(skb, next, head, list) { head 5144 net/core/dev.c list_splice_init(&sublist, head); head 5149 net/core/dev.c list_for_each_entry_safe(skb, next, head, list) { head 5161 net/core/dev.c __netif_receive_skb_list(head); head 5203 net/core/dev.c void netif_receive_skb_list(struct list_head *head) head 5207 net/core/dev.c if (list_empty(head)) head 5210 net/core/dev.c list_for_each_entry(skb, head, list) head 5213 net/core/dev.c netif_receive_skb_list_internal(head); head 5293 net/core/dev.c struct list_head *head = &offload_base; head 5304 net/core/dev.c list_for_each_entry_rcu(ptype, head, list) { head 5316 net/core/dev.c WARN_ON(&ptype->list == head); head 5329 net/core/dev.c struct list_head *head = &napi->gro_hash[index].list; head 5332 net/core/dev.c list_for_each_entry_safe_reverse(skb, p, head, list) { head 5366 net/core/dev.c struct list_head *head; head 5369 net/core/dev.c head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list; head 5370 net/core/dev.c list_for_each_entry(p, head, list) { head 5396 net/core/dev.c return head; head 5439 net/core/dev.c static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) head 5443 net/core/dev.c oldest = list_last_entry(head, struct sk_buff, list); head 5465 net/core/dev.c struct list_head *head = &offload_base; head 5480 net/core/dev.c list_for_each_entry_rcu(ptype, head, list) { head 5518 net/core/dev.c if (&ptype->list == head) head 8478 net/core/dev.c static void rollback_registered_many(struct list_head *head) head 8486 net/core/dev.c list_for_each_entry_safe(dev, tmp, head, unreg_list) { head 8504 net/core/dev.c list_for_each_entry(dev, head, unreg_list) head 8508 net/core/dev.c list_for_each_entry(dev, head, unreg_list) { head 8518 net/core/dev.c list_for_each_entry(dev, head, unreg_list) { head 8562 net/core/dev.c list_for_each_entry(dev, head, unreg_list) head 9648 net/core/dev.c void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) head 9652 net/core/dev.c if (head) { head 9653 net/core/dev.c list_move_tail(&dev->unreg_list, head); head 9669 net/core/dev.c void unregister_netdevice_many(struct list_head *head) head 9673 net/core/dev.c if (!list_empty(head)) { head 9674 net/core/dev.c rollback_registered_many(head); head 9675 net/core/dev.c list_for_each_entry(dev, head, unreg_list) head 9677 net/core/dev.c list_del(head); head 138 net/core/dst.c static void dst_destroy_rcu(struct rcu_head *head) head 140 net/core/dst.c struct dst_entry *dst = container_of(head, struct dst_entry, 
rcu_head); head 7548 net/core/filter.c *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head), head 7550 net/core/filter.c offsetof(struct sk_buff, head)); head 463 net/core/neighbour.c static void neigh_hash_free_rcu(struct rcu_head *head) head 465 net/core/neighbour.c struct neigh_hash_table *nht = container_of(head, head 1642 net/core/neighbour.c static void neigh_rcu_free_parms(struct rcu_head *head) head 1645 net/core/neighbour.c container_of(head, struct neigh_parms, rcu_head); head 2961 net/core/pktgen.c skb->csum_start = skb_transport_header(skb) - skb->head; head 83 net/core/rtnetlink.c void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail) head 85 net/core/rtnetlink.c if (head && tail) { head 87 net/core/rtnetlink.c defer_kfree_skb_list = head; head 94 net/core/rtnetlink.c struct sk_buff *head = defer_kfree_skb_list; head 100 net/core/rtnetlink.c while (head) { head 101 net/core/rtnetlink.c struct sk_buff *next = head->next; head 103 net/core/rtnetlink.c kfree_skb(head); head 105 net/core/rtnetlink.c head = next; head 1936 net/core/rtnetlink.c struct hlist_head *head; head 1993 net/core/rtnetlink.c head = &tgt_net->dev_index_head[h]; head 1994 net/core/rtnetlink.c hlist_for_each_entry(dev, head, index_hlist) { head 2029 net/core/rtnetlink.c int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, head 2032 net/core/rtnetlink.c return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, head 4030 net/core/rtnetlink.c struct hlist_head *head; head 4060 net/core/rtnetlink.c head = &net->dev_index_head[h]; head 4061 net/core/rtnetlink.c hlist_for_each_entry(dev, head, index_hlist) { head 5096 net/core/rtnetlink.c struct hlist_head *head; head 5121 net/core/rtnetlink.c head = &net->dev_index_head[h]; head 5122 net/core/rtnetlink.c hlist_for_each_entry(dev, head, index_hlist) { head 105 net/core/skbuff.c msg, addr, skb->len, sz, skb->head, skb->data, head 229 net/core/skbuff.c skb->head = data; head 272 net/core/skbuff.c skb->head = data; head 586 net/core/skbuff.c unsigned char *head = skb->head; head 589 net/core/skbuff.c skb_free_frag(head); head 591 net/core/skbuff.c kfree(head); head 664 net/core/skbuff.c if (likely(skb->head)) head 770 net/core/skbuff.c 16, 1, skb->head, headroom, false); head 1000 net/core/skbuff.c C(head); head 1345 net/core/skbuff.c struct page *page, *head = NULL; head 1359 net/core/skbuff.c while (head) { head 1360 net/core/skbuff.c struct page *next = (struct page *)page_private(head); head 1361 net/core/skbuff.c put_page(head); head 1362 net/core/skbuff.c head = next; head 1366 net/core/skbuff.c set_page_private(page, (unsigned long)head); head 1367 net/core/skbuff.c head = page; head 1370 net/core/skbuff.c page = head; head 1404 net/core/skbuff.c __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); head 1405 net/core/skbuff.c head = (struct page *)page_private(head); head 1407 net/core/skbuff.c __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); head 1524 net/core/skbuff.c BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); head 1637 net/core/skbuff.c memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); head 1663 net/core/skbuff.c off = (data + nhead) - skb->head; head 1665 net/core/skbuff.c skb->head = data; head 1672 net/core/skbuff.c skb->end = skb->head + size; head 1767 net/core/skbuff.c BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, head 1884 net/core/skbuff.c if (unlikely(skb->data < skb->head)) head 2454 net/core/skbuff.c struct sk_buff *head = 
skb; head 2519 net/core/skbuff.c if (skb == head) { head 2960 net/core/skbuff.c page = virt_to_head_page(from->head); head 3633 net/core/skbuff.c page = virt_to_head_page(frag_skb->head); head 4036 net/core/skbuff.c struct page *page = virt_to_head_page(skb->head); head 5064 net/core/skbuff.c page = virt_to_head_page(from->head); head 5752 net/core/skbuff.c skb->head = data; head 5758 net/core/skbuff.c skb->end = skb->head + size; head 5891 net/core/skbuff.c skb->head = data; head 5897 net/core/skbuff.c skb->end = skb->head + size; head 1691 net/core/sock.c static void __sk_destruct(struct rcu_head *head) head 1693 net/core/sock.c struct sock *sk = container_of(head, struct sock, sk_rcu); head 1814 net/core/sock.c newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; head 2432 net/core/sock.c while ((skb = sk->sk_backlog.head) != NULL) { head 2433 net/core/sock.c sk->sk_backlog.head = sk->sk_backlog.tail = NULL; head 527 net/core/sock_map.c struct hlist_head head; head 552 net/core/sock_map.c sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key, head 557 net/core/sock_map.c hlist_for_each_entry_rcu(elem, head, node) { head 577 net/core/sock_map.c elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size); head 604 net/core/sock_map.c elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash, head 626 net/core/sock_map.c elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size); head 696 net/core/sock_map.c elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size); head 715 net/core/sock_map.c hlist_add_head_rcu(&elem_new->node, &bucket->head); head 769 net/core/sock_map.c struct hlist_head *head; head 775 net/core/sock_map.c head = &sock_hash_select_bucket(htab, hash)->head; head 776 net/core/sock_map.c elem = sock_hash_lookup_elem_raw(head, hash, key, key_size); head 791 net/core/sock_map.c head = &sock_hash_select_bucket(htab, i)->head; head 792 net/core/sock_map.c elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), head 850 net/core/sock_map.c INIT_HLIST_HEAD(&htab->buckets[i].head); head 875 net/core/sock_map.c hlist_for_each_entry_safe(elem, node, &bucket->head, node) { head 132 net/core/sock_reuseport.c static void reuseport_free_rcu(struct rcu_head *head) head 136 net/core/sock_reuseport.c reuse = container_of(head, struct sock_reuseport, rcu); head 347 net/dccp/ackvec.c int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce) head 357 net/dccp/ackvec.c list_add_tail(&new->node, head); head 134 net/dccp/ackvec.h int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce); head 64 net/dccp/ccids/lib/packet_history.c struct tfrc_tx_hist_entry *head = *headp; head 66 net/dccp/ccids/lib/packet_history.c while (head != NULL) { head 67 net/dccp/ccids/lib/packet_history.c struct tfrc_tx_hist_entry *next = head->next; head 69 net/dccp/ccids/lib/packet_history.c kmem_cache_free(tfrc_tx_hist_slab, head); head 70 net/dccp/ccids/lib/packet_history.c head = next; head 43 net/dccp/ccids/lib/packet_history.h tfrc_tx_hist_find_entry(struct tfrc_tx_hist_entry *head, u64 seqno) head 45 net/dccp/ccids/lib/packet_history.h while (head != NULL && head->seqno != seqno) head 46 net/dccp/ccids/lib/packet_history.h head = head->next; head 47 net/dccp/ccids/lib/packet_history.h return head; head 449 net/dccp/feat.c dccp_feat_entry_new(struct list_head *head, u8 feat, bool local) head 453 net/dccp/feat.c list_for_each_entry(entry, head, node) head 458 net/dccp/feat.c head = &entry->node; head 
466 net/dccp/feat.c list_add_tail(&entry->node, head); head 40 net/dsa/tag_ksz.c skb_network_header(skb) - skb->head); head 42 net/dsa/tag_ksz.c skb_transport_header(skb) - skb->head); head 36 net/dsa/tag_trailer.c skb_set_network_header(nskb, skb_network_header(skb) - skb->head); head 37 net/dsa/tag_trailer.c skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head); head 453 net/ethernet/eth.c struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb) head 474 net/ethernet/eth.c list_for_each_entry(p, head, list) { head 496 net/ethernet/eth.c pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb); head 175 net/ieee802154/6lowpan/core.c static void lowpan_dellink(struct net_device *ldev, struct list_head *head) head 1373 net/ipv4/af_inet.c skb->head - (unsigned char *)iph; head 1385 net/ipv4/af_inet.c skb->network_header = (u8 *)iph - skb->head; head 1407 net/ipv4/af_inet.c struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) head 1448 net/ipv4/af_inet.c list_for_each_entry(p, head, list) { head 1517 net/ipv4/af_inet.c ops->callbacks.gro_receive, head, skb); head 1529 net/ipv4/af_inet.c static struct sk_buff *ipip_gro_receive(struct list_head *head, head 1539 net/ipv4/af_inet.c return inet_gro_receive(head, skb); head 221 net/ipv4/devinet.c static void inet_rcu_free_ifa(struct rcu_head *head) head 223 net/ipv4/devinet.c struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head); head 298 net/ipv4/devinet.c static void in_dev_rcu_put(struct rcu_head *head) head 300 net/ipv4/devinet.c struct in_device *idev = container_of(head, struct in_device, rcu_head); head 1818 net/ipv4/devinet.c struct hlist_head *head; head 1850 net/ipv4/devinet.c head = &tgt_net->dev_index_head[h]; head 1854 net/ipv4/devinet.c hlist_for_each_entry_rcu(dev, head, index_hlist) { head 2228 net/ipv4/devinet.c struct hlist_head *head; head 2250 net/ipv4/devinet.c head = &net->dev_index_head[h]; head 2254 net/ipv4/devinet.c hlist_for_each_entry_rcu(dev, head, index_hlist) { head 28 net/ipv4/esp4_offload.c static struct sk_buff *esp4_gro_receive(struct list_head *head, head 119 net/ipv4/fib_frontend.c struct hlist_head *head; head 126 net/ipv4/fib_frontend.c head = &net->ipv4.fib_table_hash[h]; head 127 net/ipv4/fib_frontend.c hlist_for_each_entry_rcu(tb, head, tb_hlist, head 199 net/ipv4/fib_frontend.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; head 203 net/ipv4/fib_frontend.c hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) head 983 net/ipv4/fib_frontend.c struct hlist_head *head; head 1023 net/ipv4/fib_frontend.c head = &net->ipv4.fib_table_hash[h]; head 1024 net/ipv4/fib_frontend.c hlist_for_each_entry_rcu(tb, head, tb_hlist) { head 1564 net/ipv4/fib_frontend.c struct hlist_head *head = &net->ipv4.fib_table_hash[i]; head 1568 net/ipv4/fib_frontend.c hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { head 231 net/ipv4/fib_semantics.c static void free_fib_info_rcu(struct rcu_head *head) head 233 net/ipv4/fib_semantics.c struct fib_info *fi = container_of(head, struct fib_info, rcu); head 370 net/ipv4/fib_semantics.c struct hlist_head *head; head 379 net/ipv4/fib_semantics.c head = &fib_info_hash[hash]; head 381 net/ipv4/fib_semantics.c hlist_for_each_entry(fi, head, fib_hash) { head 401 net/ipv4/fib_semantics.c struct hlist_head *head; head 406 net/ipv4/fib_semantics.c head = &fib_info_hash[hash]; head 408 net/ipv4/fib_semantics.c hlist_for_each_entry(fi, head, fib_hash) { head 433 net/ipv4/fib_semantics.c struct hlist_head *head; 
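Interleaved with the hash-bucket hits, this stretch shows the other recurring role of "head": a struct rcu_head embedded in an object so it can be freed after a grace period (inet_rcu_free_ifa, in_dev_rcu_put and free_fib_info_rcu above; fdb_rcu_free and dst_destroy_rcu earlier). A hedged sketch of that deferred-free shape, with a made-up my_obj type:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	int data;
	struct rcu_head rcu;	/* storage for the grace-period callback */
};

/* Runs once every pre-existing RCU reader is done, so the memory
 * can no longer be reached through an RCU-protected pointer. */
static void my_obj_free_rcu(struct rcu_head *head)
{
	struct my_obj *obj = container_of(head, struct my_obj, rcu);

	kfree(obj);
}

static void my_obj_release(struct my_obj *obj)
{
	/* Caller must already have unlinked obj from all RCU-visible
	 * lists (e.g. with hlist_del_rcu()) before deferring the free. */
	call_rcu(&obj->rcu, my_obj_free_rcu);
}

The container_of() line is the whole trick: the callback receives only the embedded rcu_head and recovers the enclosing object from it, which is exactly what the listed callbacks do.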
head 440 net/ipv4/fib_semantics.c head = &fib_info_devhash[hash]; head 441 net/ipv4/fib_semantics.c hlist_for_each_entry(nh, head, nh_hash) { head 1242 net/ipv4/fib_semantics.c struct hlist_head *head = &fib_info_hash[i]; head 1246 net/ipv4/fib_semantics.c hlist_for_each_entry_safe(fi, n, head, fib_hash) { head 1550 net/ipv4/fib_semantics.c struct hlist_head *head; head 1552 net/ipv4/fib_semantics.c head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)]; head 1553 net/ipv4/fib_semantics.c hlist_add_head(&fi->fib_lhash, head); head 1559 net/ipv4/fib_semantics.c struct hlist_head *head; head 1565 net/ipv4/fib_semantics.c head = &fib_info_devhash[hash]; head 1566 net/ipv4/fib_semantics.c hlist_add_head(&nexthop_nh->nh_hash, head); head 1816 net/ipv4/fib_semantics.c struct hlist_head *head = &fib_info_laddrhash[hash]; head 1824 net/ipv4/fib_semantics.c hlist_for_each_entry(fi, head, fib_lhash) { head 1905 net/ipv4/fib_semantics.c struct hlist_head *head = &fib_info_devhash[hash]; head 1908 net/ipv4/fib_semantics.c hlist_for_each_entry(nh, head, nh_hash) { head 1928 net/ipv4/fib_semantics.c struct hlist_head *head = &fib_info_devhash[hash]; head 1934 net/ipv4/fib_semantics.c hlist_for_each_entry(nh, head, nh_hash) { head 2075 net/ipv4/fib_semantics.c struct hlist_head *head; head 2091 net/ipv4/fib_semantics.c head = &fib_info_devhash[hash]; head 2094 net/ipv4/fib_semantics.c hlist_for_each_entry(nh, head, nh_hash) { head 294 net/ipv4/fib_trie.c static void __alias_free_mem(struct rcu_head *head) head 296 net/ipv4/fib_trie.c struct fib_alias *fa = container_of(head, struct fib_alias, rcu); head 310 net/ipv4/fib_trie.c static void __node_free_rcu(struct rcu_head *head) head 312 net/ipv4/fib_trie.c struct tnode *n = container_of(head, struct tnode, rcu); head 493 net/ipv4/fib_trie.c struct callback_head *head = &tn_info(tn)->rcu; head 495 net/ipv4/fib_trie.c while (head) { head 496 net/ipv4/fib_trie.c head = head->next; head 500 net/ipv4/fib_trie.c tn = container_of(head, struct tnode, rcu)->kv; head 2010 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; head 2013 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(tb, head, tb_hlist) head 2062 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; head 2065 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(tb, head, tb_hlist) head 2070 net/ipv4/fib_trie.c static void __trie_free_rcu(struct rcu_head *head) head 2072 net/ipv4/fib_trie.c struct fib_table *tb = container_of(head, struct fib_table, rcu); head 2460 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; head 2463 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(tb, head, tb_hlist) { head 2493 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; head 2496 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(tb, head, tb_hlist) { head 2545 net/ipv4/fib_trie.c struct hlist_head *head = &net->ipv4.fib_table_hash[h]; head 2546 net/ipv4/fib_trie.c hlist_for_each_entry_rcu(tb, head, tb_hlist) { head 230 net/ipv4/fou.c struct list_head *head, head 255 net/ipv4/fou.c pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); head 311 net/ipv4/fou.c struct list_head *head, head 402 net/ipv4/fou.c list_for_each_entry(p, head, list) { head 447 net/ipv4/fou.c pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); head 107 net/ipv4/gre_offload.c static struct sk_buff *gre_gro_receive(struct list_head *head, head 181 net/ipv4/gre_offload.c list_for_each_entry(p, head, list) { head 216 net/ipv4/gre_offload.c pp = 
call_gro_receive(ptype->callbacks.gro_receive, head, skb); head 601 net/ipv4/icmp.c if ((u8 *)iph < skb_in->head || head 182 net/ipv4/inet_connection_sock.c struct inet_bind_hashbucket *head; head 221 net/ipv4/inet_connection_sock.c head = &hinfo->bhash[inet_bhashfn(net, port, head 223 net/ipv4/inet_connection_sock.c spin_lock_bh(&head->lock); head 224 net/ipv4/inet_connection_sock.c inet_bind_bucket_for_each(tb, &head->chain) head 234 net/ipv4/inet_connection_sock.c spin_unlock_bh(&head->lock); head 251 net/ipv4/inet_connection_sock.c return head; head 296 net/ipv4/inet_connection_sock.c struct inet_bind_hashbucket *head; head 305 net/ipv4/inet_connection_sock.c head = inet_csk_find_open_port(sk, &tb, &port); head 306 net/ipv4/inet_connection_sock.c if (!head) head 312 net/ipv4/inet_connection_sock.c head = &hinfo->bhash[inet_bhashfn(net, port, head 314 net/ipv4/inet_connection_sock.c spin_lock_bh(&head->lock); head 315 net/ipv4/inet_connection_sock.c inet_bind_bucket_for_each(tb, &head->chain) head 321 net/ipv4/inet_connection_sock.c net, head, port, l3mdev); head 385 net/ipv4/inet_connection_sock.c spin_unlock_bh(&head->lock); head 960 net/ipv4/inet_diag.c struct inet_ehash_bucket *head = &hashinfo->ehash[i]; head 967 net/ipv4/inet_diag.c if (hlist_nulls_empty(&head->chain)) head 977 net/ipv4/inet_diag.c sk_nulls_for_each(sk, node, &head->chain) { head 221 net/ipv4/inet_fragment.c static void inet_frag_destroy_rcu(struct rcu_head *head) head 223 net/ipv4/inet_fragment.c struct inet_frag_queue *q = container_of(head, struct inet_frag_queue, head 411 net/ipv4/inet_fragment.c struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments); head 415 net/ipv4/inet_fragment.c if (head != skb) { head 427 net/ipv4/inet_fragment.c skb_morph(skb, head); head 428 net/ipv4/inet_fragment.c FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag; head 429 net/ipv4/inet_fragment.c rb_replace_node(&head->rbnode, &skb->rbnode, head 431 net/ipv4/inet_fragment.c consume_skb(head); head 432 net/ipv4/inet_fragment.c head = skb; head 434 net/ipv4/inet_fragment.c WARN_ON(head->ip_defrag_offset != 0); head 436 net/ipv4/inet_fragment.c delta = -head->truesize; head 439 net/ipv4/inet_fragment.c if (skb_unclone(head, GFP_ATOMIC)) head 442 net/ipv4/inet_fragment.c delta += head->truesize; head 450 net/ipv4/inet_fragment.c if (skb_has_frag_list(head)) { head 457 net/ipv4/inet_fragment.c skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; head 458 net/ipv4/inet_fragment.c skb_frag_list_init(head); head 459 net/ipv4/inet_fragment.c for (i = 0; i < skb_shinfo(head)->nr_frags; i++) head 460 net/ipv4/inet_fragment.c plen += skb_frag_size(&skb_shinfo(head)->frags[i]); head 461 net/ipv4/inet_fragment.c clone->data_len = head->data_len - plen; head 463 net/ipv4/inet_fragment.c head->truesize += clone->truesize; head 465 net/ipv4/inet_fragment.c clone->ip_summed = head->ip_summed; head 467 net/ipv4/inet_fragment.c skb_shinfo(head)->frag_list = clone; head 470 net/ipv4/inet_fragment.c nextp = &skb_shinfo(head)->frag_list; head 477 net/ipv4/inet_fragment.c void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, head 485 net/ipv4/inet_fragment.c skb_push(head, head->data - skb_network_header(head)); head 488 net/ipv4/inet_fragment.c fp = FRAG_CB(head)->next_frag; head 489 net/ipv4/inet_fragment.c rbn = rb_next(&head->rbnode); head 490 net/ipv4/inet_fragment.c rb_erase(&head->rbnode, &q->rb_fragments); head 492 net/ipv4/inet_fragment.c sum_truesize = head->truesize; head 504 net/ipv4/inet_fragment.c if 
(head->ip_summed != fp->ip_summed) head 505 net/ipv4/inet_fragment.c head->ip_summed = CHECKSUM_NONE; head 506 net/ipv4/inet_fragment.c else if (head->ip_summed == CHECKSUM_COMPLETE) head 507 net/ipv4/inet_fragment.c head->csum = csum_add(head->csum, fp->csum); head 509 net/ipv4/inet_fragment.c if (try_coalesce && skb_try_coalesce(head, fp, &stolen, head 517 net/ipv4/inet_fragment.c head->data_len += fp->len; head 518 net/ipv4/inet_fragment.c head->len += fp->len; head 519 net/ipv4/inet_fragment.c head->truesize += fp->truesize; head 539 net/ipv4/inet_fragment.c skb_mark_not_on_list(head); head 540 net/ipv4/inet_fragment.c head->prev = NULL; head 541 net/ipv4/inet_fragment.c head->tstamp = q->stamp; head 547 net/ipv4/inet_fragment.c struct sk_buff *head, *skb; head 549 net/ipv4/inet_fragment.c head = skb_rb_first(&q->rb_fragments); head 550 net/ipv4/inet_fragment.c if (!head) head 552 net/ipv4/inet_fragment.c skb = FRAG_CB(head)->next_frag; head 554 net/ipv4/inet_fragment.c rb_replace_node(&head->rbnode, &skb->rbnode, head 557 net/ipv4/inet_fragment.c rb_erase(&head->rbnode, &q->rb_fragments); head 558 net/ipv4/inet_fragment.c memset(&head->rbnode, 0, sizeof(head->rbnode)); head 561 net/ipv4/inet_fragment.c if (head == q->fragments_tail) head 564 net/ipv4/inet_fragment.c sub_frag_mem_limit(q->fqdir, head->truesize); head 566 net/ipv4/inet_fragment.c return head; head 63 net/ipv4/inet_hashtables.c struct inet_bind_hashbucket *head, head 76 net/ipv4/inet_hashtables.c hlist_add_head(&tb->node, &head->chain); head 108 net/ipv4/inet_hashtables.c struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; head 111 net/ipv4/inet_hashtables.c spin_lock(&head->lock); head 117 net/ipv4/inet_hashtables.c spin_unlock(&head->lock); head 134 net/ipv4/inet_hashtables.c struct inet_bind_hashbucket *head = &table->bhash[bhash]; head 138 net/ipv4/inet_hashtables.c spin_lock(&head->lock); head 141 net/ipv4/inet_hashtables.c spin_unlock(&head->lock); head 152 net/ipv4/inet_hashtables.c inet_bind_bucket_for_each(tb, &head->chain) { head 159 net/ipv4/inet_hashtables.c sock_net(sk), head, port, head 162 net/ipv4/inet_hashtables.c spin_unlock(&head->lock); head 168 net/ipv4/inet_hashtables.c spin_unlock(&head->lock); head 204 net/ipv4/inet_hashtables.c &ilb2->head); head 207 net/ipv4/inet_hashtables.c &ilb2->head); head 270 net/ipv4/inet_hashtables.c inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) { head 361 net/ipv4/inet_hashtables.c struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; head 364 net/ipv4/inet_hashtables.c sk_nulls_for_each_rcu(sk, node, &head->chain) { head 410 net/ipv4/inet_hashtables.c struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); head 418 net/ipv4/inet_hashtables.c sk_nulls_for_each(sk2, node, &head->chain) { head 440 net/ipv4/inet_hashtables.c __sk_nulls_add_node_rcu(sk, &head->chain); head 477 net/ipv4/inet_hashtables.c struct inet_ehash_bucket *head; head 484 net/ipv4/inet_hashtables.c head = inet_ehash_bucket(hashinfo, sk->sk_hash); head 485 net/ipv4/inet_hashtables.c list = &head->chain; head 626 net/ipv4/inet_hashtables.c struct inet_bind_hashbucket *head; head 636 net/ipv4/inet_hashtables.c head = &hinfo->bhash[inet_bhashfn(net, port, head 639 net/ipv4/inet_hashtables.c spin_lock_bh(&head->lock); head 642 net/ipv4/inet_hashtables.c spin_unlock_bh(&head->lock); head 645 net/ipv4/inet_hashtables.c spin_unlock(&head->lock); head 672 net/ipv4/inet_hashtables.c head = &hinfo->bhash[inet_bhashfn(net, port, head 674 net/ipv4/inet_hashtables.c spin_lock_bh(&head->lock); 
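The inet_connection_sock.c and inet_hashtables.c hits show the locked variant of the bucket pattern: each bind bucket pairs a spinlock with its chain, and port allocation scans the chain only while holding head->lock. A simplified sketch of that shape (my_bucket and my_node are illustrative, not the kernel's inet_bind_hashbucket/inet_bind_bucket):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_bucket {
	spinlock_t lock;	/* serializes all access to chain */
	struct hlist_head chain;
};

struct my_node {
	unsigned short port;
	struct hlist_node node;
};

/* Bind a port unless the bucket already holds it; mirrors the
 * "spin_lock_bh, scan chain, add or bail" shape in the hits above. */
static bool my_bucket_bind(struct my_bucket *head, struct my_node *nn)
{
	struct my_node *n;

	spin_lock_bh(&head->lock);
	hlist_for_each_entry(n, &head->chain, node) {
		if (n->port == nn->port) {
			spin_unlock_bh(&head->lock);
			return false;	/* port already taken */
		}
	}
	hlist_add_head(&nn->node, &head->chain);
	spin_unlock_bh(&head->lock);
	return true;
}

Unlike the RCU walks earlier, readers and writers both take the _bh lock here, because bind state has to be checked and changed atomically.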
head 679 net/ipv4/inet_hashtables.c inet_bind_bucket_for_each(tb, &head->chain) { head 694 net/ipv4/inet_hashtables.c net, head, port, l3mdev); head 696 net/ipv4/inet_hashtables.c spin_unlock_bh(&head->lock); head 703 net/ipv4/inet_hashtables.c spin_unlock_bh(&head->lock); head 724 net/ipv4/inet_hashtables.c spin_unlock(&head->lock); head 767 net/ipv4/inet_hashtables.c INIT_HLIST_HEAD(&h->lhash2[i].head); head 265 net/ipv4/inet_timewait_sock.c struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; head 270 net/ipv4/inet_timewait_sock.c sk_nulls_for_each_rcu(sk, node, &head->chain) { head 141 net/ipv4/inetpeer.c static void inetpeer_free_rcu(struct rcu_head *head) head 143 net/ipv4/inetpeer.c kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu)); head 137 net/ipv4/ip_fragment.c struct sk_buff *head = NULL; head 166 net/ipv4/ip_fragment.c head = inet_frag_pull_head(&qp->q); head 167 net/ipv4/ip_fragment.c if (!head) head 169 net/ipv4/ip_fragment.c head->dev = dev_get_by_index_rcu(net, qp->iif); head 170 net/ipv4/ip_fragment.c if (!head->dev) head 175 net/ipv4/ip_fragment.c iph = ip_hdr(head); head 176 net/ipv4/ip_fragment.c err = ip_route_input_noref(head, iph->daddr, iph->saddr, head 177 net/ipv4/ip_fragment.c iph->tos, head->dev); head 185 net/ipv4/ip_fragment.c (skb_rtable(head)->rt_type != RTN_LOCAL)) head 189 net/ipv4/ip_fragment.c icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); head 196 net/ipv4/ip_fragment.c kfree_skb(head); head 528 net/ipv4/ip_input.c static void ip_sublist_rcv_finish(struct list_head *head) head 532 net/ipv4/ip_input.c list_for_each_entry_safe(skb, next, head, list) { head 539 net/ipv4/ip_input.c struct list_head *head) head 546 net/ipv4/ip_input.c list_for_each_entry_safe(skb, next, head, list) { head 575 net/ipv4/ip_input.c static void ip_sublist_rcv(struct list_head *head, struct net_device *dev, head 579 net/ipv4/ip_input.c head, dev, NULL, ip_rcv_finish); head 580 net/ipv4/ip_input.c ip_list_rcv_finish(net, NULL, head); head 584 net/ipv4/ip_input.c void ip_list_rcv(struct list_head *head, struct packet_type *pt, head 593 net/ipv4/ip_input.c list_for_each_entry_safe(skb, next, head, list) { head 327 net/ipv4/ip_sockglue.c static void ip_ra_destroy_rcu(struct rcu_head *head) head 329 net/ipv4/ip_sockglue.c struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu); head 90 net/ipv4/ip_tunnel.c struct hlist_head *head; head 93 net/ipv4/ip_tunnel.c head = &itn->tunnels[hash]; head 95 net/ipv4/ip_tunnel.c hlist_for_each_entry_rcu(t, head, hash_node) { head 110 net/ipv4/ip_tunnel.c hlist_for_each_entry_rcu(t, head, hash_node) { head 126 net/ipv4/ip_tunnel.c head = &itn->tunnels[hash]; head 128 net/ipv4/ip_tunnel.c hlist_for_each_entry_rcu(t, head, hash_node) { head 145 net/ipv4/ip_tunnel.c hlist_for_each_entry_rcu(t, head, hash_node) { head 193 net/ipv4/ip_tunnel.c struct hlist_head *head = ip_bucket(itn, &t->parms); head 197 net/ipv4/ip_tunnel.c hlist_add_head_rcu(&t->hash_node, head); head 217 net/ipv4/ip_tunnel.c struct hlist_head *head = ip_bucket(itn, parms); head 219 net/ipv4/ip_tunnel.c hlist_for_each_entry_rcu(t, head, hash_node) { head 1001 net/ipv4/ip_tunnel.c void ip_tunnel_dellink(struct net_device *dev, struct list_head *head) head 1010 net/ipv4/ip_tunnel.c unregister_netdevice_queue(dev, head); head 1073 net/ipv4/ip_tunnel.c struct list_head *head, head 1081 net/ipv4/ip_tunnel.c unregister_netdevice_queue(dev, head); head 1093 net/ipv4/ip_tunnel.c unregister_netdevice_queue(t->dev, head); head 668 
net/ipv4/ipmr.c struct list_head *head) head 720 net/ipv4/ipmr.c unregister_netdevice_queue(dev, head); head 726 net/ipv4/ipmr.c static void ipmr_cache_free_rcu(struct rcu_head *head) head 728 net/ipv4/ipmr.c struct mr_mfc *c = container_of(head, struct mr_mfc, rcu); head 88 net/ipv4/netfilter/ipt_CLUSTERIP.c static void clusterip_config_rcu_free(struct rcu_head *head) head 93 net/ipv4/netfilter/ipt_CLUSTERIP.c config = container_of(head, struct clusterip_config, rcu); head 94 net/ipv4/netfilter/nf_reject_ipv4.c nskb->csum_start = (unsigned char *)tcph - nskb->head; head 51 net/ipv4/nexthop.c struct hlist_head *head; head 57 net/ipv4/nexthop.c head = &net->nexthop.devhash[hash]; head 58 net/ipv4/nexthop.c hlist_add_head(&nhi->dev_hash, head); head 96 net/ipv4/nexthop.c void nexthop_free_rcu(struct rcu_head *head) head 98 net/ipv4/nexthop.c struct nexthop *nh = container_of(head, struct nexthop, rcu); head 1072 net/ipv4/nexthop.c struct hlist_head *head = &net->nexthop.devhash[hash]; head 1076 net/ipv4/nexthop.c hlist_for_each_entry_safe(nhi, n, head, dev_hash) { head 1769 net/ipv4/nexthop.c struct hlist_head *head = &net->nexthop.devhash[hash]; head 1773 net/ipv4/nexthop.c hlist_for_each_entry_safe(nhi, n, head, dev_hash) { head 96 net/ipv4/raw.c struct hlist_head *head; head 98 net/ipv4/raw.c head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)]; head 101 net/ipv4/raw.c sk_add_node(sk, head); head 174 net/ipv4/raw.c struct hlist_head *head; head 179 net/ipv4/raw.c head = &raw_v4_hashinfo.ht[hash]; head 180 net/ipv4/raw.c if (hlist_empty(head)) head 184 net/ipv4/raw.c sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol, head 1498 net/ipv4/route.c struct list_head head; head 1510 net/ipv4/route.c list_add_tail(&rt->rt_uncached, &ul->head); head 1542 net/ipv4/route.c list_for_each_entry(rt, &ul->head, rt_uncached) { head 3470 net/ipv4/route.c INIT_LIST_HEAD(&ul->head); head 63 net/ipv4/tcp_diag.c hlist_for_each_entry_rcu(key, &md5sig->head, node) head 75 net/ipv4/tcp_diag.c hlist_for_each_entry_rcu(key, &md5sig->head, node) { head 158 net/ipv4/tcp_diag.c hlist_for_each_entry_rcu(key, &md5sig->head, node) head 36 net/ipv4/tcp_fastopen.c static void tcp_fastopen_ctx_free(struct rcu_head *head) head 39 net/ipv4/tcp_fastopen.c container_of(head, struct tcp_fastopen_context, rcu); head 1961 net/ipv4/tcp_input.c struct sk_buff *skb, *head; head 1964 net/ipv4/tcp_input.c head = tcp_rtx_queue_head(sk); head 1965 net/ipv4/tcp_input.c is_reneg = head && (TCP_SKB_CB(head)->sacked & TCPCB_SACKED_ACKED); head 1975 net/ipv4/tcp_input.c skb = head; head 1979 net/ipv4/tcp_input.c else if (tcp_is_rack(sk) && skb != head && head 3277 net/ipv4/tcp_input.c struct sk_buff *head = tcp_send_head(sk); head 3281 net/ipv4/tcp_input.c if (!head) head 3283 net/ipv4/tcp_input.c if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) { head 4914 net/ipv4/tcp_input.c struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end) head 4916 net/ipv4/tcp_input.c struct sk_buff *skb = head, *n; head 5020 net/ipv4/tcp_input.c struct sk_buff *skb, *head; head 5033 net/ipv4/tcp_input.c for (head = skb;;) { head 5043 net/ipv4/tcp_input.c if (range_truesize != head->truesize || head 5046 net/ipv4/tcp_input.c head, skb, start, end); head 625 net/ipv4/tcp_ipv4.c skb->csum_start = skb_transport_header(skb) - skb->head; head 1005 net/ipv4/tcp_ipv4.c hlist_for_each_entry_rcu(key, &md5sig->head, node) { head 1048 net/ipv4/tcp_ipv4.c hlist_for_each_entry_rcu(key, &md5sig->head, node) { head 1094 net/ipv4/tcp_ipv4.c 
INIT_HLIST_HEAD(&md5sig->head); head 1113 net/ipv4/tcp_ipv4.c hlist_add_head_rcu(&key->node, &md5sig->head); head 1142 net/ipv4/tcp_ipv4.c hlist_for_each_entry_safe(key, n, &md5sig->head, node) { head 180 net/ipv4/tcp_offload.c struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb) head 220 net/ipv4/tcp_offload.c list_for_each_entry(p, head, list) { head 292 net/ipv4/tcp_offload.c skb->csum_start = (unsigned char *)th - skb->head; head 306 net/ipv4/tcp_offload.c struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb) head 316 net/ipv4/tcp_offload.c return tcp_gro_receive(head, skb); head 786 net/ipv4/tcp_output.c struct list_head head; /* queue of tcp sockets */ head 833 net/ipv4/tcp_output.c list_splice_init(&tsq->head, &list); head 909 net/ipv4/tcp_output.c INIT_LIST_HEAD(&tsq->head); head 957 net/ipv4/tcp_output.c empty = list_empty(&tsq->head); head 958 net/ipv4/tcp_output.c list_add(&tp->tsq_node, &tsq->head); head 1947 net/ipv4/tcp_output.c struct sk_buff *head; head 2004 net/ipv4/tcp_output.c head = tcp_rtx_queue_head(sk); head 2005 net/ipv4/tcp_output.c if (!head) head 2007 net/ipv4/tcp_output.c delta = tp->tcp_clock_cache - head->tstamp; head 90 net/ipv4/tunnel4.c #define for_each_tunnel_rcu(head, handler) \ head 91 net/ipv4/tunnel4.c for (handler = rcu_dereference(head); \ head 136 net/ipv4/udp.c sk_for_each(sk2, &hslot->head) { head 173 net/ipv4/udp.c udp_portaddr_for_each_entry(sk2, &hslot2->head) { head 201 net/ipv4/udp.c sk_for_each(sk2, &hslot->head) { head 316 net/ipv4/udp.c sk_add_node_rcu(sk, &hslot->head); head 325 net/ipv4/udp.c &hslot2->head); head 328 net/ipv4/udp.c &hslot2->head); head 422 net/ipv4/udp.c udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { head 761 net/ipv4/udp.c skb->csum_start = skb_transport_header(skb) - skb->head; head 807 net/ipv4/udp.c skb->csum_start = skb_transport_header(skb) - skb->head; head 1942 net/ipv4/udp.c &nhslot2->head); head 2168 net/ipv4/udp.c sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { head 2395 net/ipv4/udp.c sk_for_each_rcu(sk, &hslot->head) { head 2424 net/ipv4/udp.c udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { head 2823 net/ipv4/udp.c if (hlist_empty(&hslot->head)) head 2827 net/ipv4/udp.c sk_for_each(sk, &hslot->head) { head 3015 net/ipv4/udp.c INIT_HLIST_HEAD(&table->hash[i].head); head 3020 net/ipv4/udp.c INIT_HLIST_HEAD(&table->hash2[i].head); head 112 net/ipv4/udp_diag.c if (hlist_empty(&hslot->head)) head 116 net/ipv4/udp_diag.c sk_for_each(sk, &hslot->head) { head 123 net/ipv4/udp_offload.c skb->head - (unsigned char *)uh); head 140 net/ipv4/udp_offload.c skb->csum_start = skb_transport_header(skb) - skb->head; head 349 net/ipv4/udp_offload.c static struct sk_buff *udp_gro_receive_segment(struct list_head *head, head 374 net/ipv4/udp_offload.c list_for_each_entry(p, head, list) { head 406 net/ipv4/udp_offload.c struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, head 423 net/ipv4/udp_offload.c pp = call_gro_receive(udp_gro_receive_segment, head, skb); head 440 net/ipv4/udp_offload.c list_for_each_entry(p, head, list) { head 458 net/ipv4/udp_offload.c pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb); head 468 net/ipv4/udp_offload.c struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) head 487 net/ipv4/udp_offload.c return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb); head 498 net/ipv4/udp_offload.c skb->csum_start = (unsigned char *)uh - skb->head; head 40 
net/ipv4/xfrm4_protocol.c #define for_each_protocol_rcu(head, handler) \ head 41 net/ipv4/xfrm4_protocol.c for (handler = rcu_dereference(head); \ head 49 net/ipv4/xfrm4_protocol.c struct xfrm4_protocol __rcu **head = proto_handlers(protocol); head 51 net/ipv4/xfrm4_protocol.c if (!head) head 54 net/ipv4/xfrm4_protocol.c for_each_protocol_rcu(*head, handler) head 66 net/ipv4/xfrm4_protocol.c struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr); head 72 net/ipv4/xfrm4_protocol.c if (!head) head 75 net/ipv4/xfrm4_protocol.c for_each_protocol_rcu(*head, handler) head 709 net/ipv6/addrconf.c struct hlist_head *head; head 731 net/ipv6/addrconf.c head = &net->dev_index_head[h]; head 735 net/ipv6/addrconf.c hlist_for_each_entry_rcu(dev, head, index_hlist) { head 5169 net/ipv6/addrconf.c struct hlist_head *head; head 5204 net/ipv6/addrconf.c head = &tgt_net->dev_index_head[h]; head 5205 net/ipv6/addrconf.c hlist_for_each_entry_rcu(dev, head, index_hlist) { head 5853 net/ipv6/addrconf.c struct hlist_head *head; head 5871 net/ipv6/addrconf.c head = &net->dev_index_head[h]; head 5872 net/ipv6/addrconf.c hlist_for_each_entry_rcu(dev, head, index_hlist) { head 229 net/ipv6/addrconf_core.c static void in6_dev_finish_destroy_rcu(struct rcu_head *head) head 231 net/ipv6/addrconf_core.c struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu); head 133 net/ipv6/addrlabel.c hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) { head 213 net/ipv6/addrlabel.c hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) { head 234 net/ipv6/addrlabel.c hlist_add_head_rcu(&newp->list, &net->ipv6.ip6addrlbl_table.head); head 276 net/ipv6/addrlabel.c hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) { head 315 net/ipv6/addrlabel.c INIT_HLIST_HEAD(&net->ipv6.ip6addrlbl_table.head); head 337 net/ipv6/addrlabel.c hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) { head 501 net/ipv6/addrlabel.c hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) { head 49 net/ipv6/esp6_offload.c static struct sk_buff *esp6_gro_receive(struct list_head *head, head 442 net/ipv6/icmp.c if ((u8 *)hdr < skb->head || head 207 net/ipv6/ila/ila_xlat.c struct ila_map *ila, *head; head 235 net/ipv6/ila/ila_xlat.c head = rhashtable_lookup_fast(&ilan->xlat.rhash_table, head 238 net/ipv6/ila/ila_xlat.c if (!head) { head 243 net/ipv6/ila/ila_xlat.c struct ila_map *tila = head, *prev = NULL; head 265 net/ipv6/ila/ila_xlat.c RCU_INIT_POINTER(ila->next, head); head 267 net/ipv6/ila/ila_xlat.c &head->node, head 286 net/ipv6/ila/ila_xlat.c struct ila_map *ila, *head, *prev; head 292 net/ipv6/ila/ila_xlat.c head = rhashtable_lookup_fast(&ilan->xlat.rhash_table, head 294 net/ipv6/ila/ila_xlat.c ila = head; head 315 net/ipv6/ila/ila_xlat.c head = rcu_dereference_protected(ila->next, head 317 net/ipv6/ila/ila_xlat.c if (head) { head 323 net/ipv6/ila/ila_xlat.c &head->node, rht_params); head 65 net/ipv6/inet6_hashtables.c struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; head 69 net/ipv6/inet6_hashtables.c sk_nulls_for_each_rcu(sk, node, &head->chain) { head 128 net/ipv6/inet6_hashtables.c inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) { head 214 net/ipv6/inet6_hashtables.c struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); head 222 net/ipv6/inet6_hashtables.c sk_nulls_for_each(sk2, node, &head->chain) { head 244 net/ipv6/inet6_hashtables.c __sk_nulls_add_node_rcu(sk, &head->chain); head 132 net/ipv6/ip6_checksum.c skb->csum_start = 
skb_transport_header(skb) - skb->head; head 165 net/ipv6/ip6_fib.c void fib6_info_destroy_rcu(struct rcu_head *head) head 167 net/ipv6/ip6_fib.c struct fib6_info *f6i = container_of(head, struct fib6_info, rcu); head 198 net/ipv6/ip6_fib.c static void node_free_rcu(struct rcu_head *head) head 200 net/ipv6/ip6_fib.c struct fib6_node *fn = container_of(head, struct fib6_node, rcu); head 274 net/ipv6/ip6_fib.c struct hlist_head *head; head 281 net/ipv6/ip6_fib.c head = &net->ipv6.fib_table_hash[h]; head 282 net/ipv6/ip6_fib.c hlist_for_each_entry_rcu(tb, head, tb6_hlist) { head 349 net/ipv6/ip6_fib.c struct hlist_head *head = &net->ipv6.fib_table_hash[h]; head 352 net/ipv6/ip6_fib.c hlist_for_each_entry_rcu(tb, head, tb6_hlist) head 449 net/ipv6/ip6_fib.c struct hlist_head *head = &net->ipv6.fib_table_hash[h]; head 452 net/ipv6/ip6_fib.c hlist_for_each_entry_rcu(tb, head, tb6_hlist) head 573 net/ipv6/ip6_fib.c struct hlist_head *head; head 637 net/ipv6/ip6_fib.c head = &net->ipv6.fib_table_hash[h]; head 638 net/ipv6/ip6_fib.c hlist_for_each_entry_rcu(tb, head, tb6_hlist) { head 2139 net/ipv6/ip6_fib.c struct hlist_head *head; head 2144 net/ipv6/ip6_fib.c head = &net->ipv6.fib_table_hash[h]; head 2145 net/ipv6/ip6_fib.c hlist_for_each_entry_rcu(table, head, tb6_hlist) { head 2314 net/ipv6/ip6_fib.c struct hlist_head *head = &net->ipv6.fib_table_hash[i]; head 2318 net/ipv6/ip6_fib.c hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) { head 104 net/ipv6/ip6_flowlabel.c static void fl_free_rcu(struct rcu_head *head) head 106 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu); head 1525 net/ipv6/ip6_gre.c static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head) head 1535 net/ipv6/ip6_gre.c unregister_netdevice_queue(dev, head); head 1550 net/ipv6/ip6_gre.c head); head 2053 net/ipv6/ip6_gre.c static void ip6gre_dellink(struct net_device *dev, struct list_head *head) head 2059 net/ipv6/ip6_gre.c unregister_netdevice_queue(dev, head); head 79 net/ipv6/ip6_input.c static void ip6_sublist_rcv_finish(struct list_head *head) head 83 net/ipv6/ip6_input.c list_for_each_entry_safe(skb, next, head, list) { head 90 net/ipv6/ip6_input.c struct list_head *head) head 97 net/ipv6/ip6_input.c list_for_each_entry_safe(skb, next, head, list) { head 289 net/ipv6/ip6_input.c static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev, head 293 net/ipv6/ip6_input.c head, dev, NULL, ip6_rcv_finish); head 294 net/ipv6/ip6_input.c ip6_list_rcv_finish(net, NULL, head); head 298 net/ipv6/ip6_input.c void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, head 307 net/ipv6/ip6_input.c list_for_each_entry_safe(skb, next, head, list) { head 29 net/ipv6/ip6_offload.c #define indirect_call_gro_receive_l4(f2, f1, cb, head, skb) \ head 33 net/ipv6/ip6_offload.c INDIRECT_CALL_L4(cb, f2, f1, head, skb); \ head 126 net/ipv6/ip6_offload.c skb->head - (unsigned char *)(ipv6h + 1); head 130 net/ipv6/ip6_offload.c skb->network_header = (u8 *)ipv6h - skb->head; head 184 net/ipv6/ip6_offload.c INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, head 235 net/ipv6/ip6_offload.c list_for_each_entry(p, head, list) { head 281 net/ipv6/ip6_offload.c ops->callbacks.gro_receive, head, skb); head 292 net/ipv6/ip6_offload.c static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head, head 304 net/ipv6/ip6_offload.c return ipv6_gro_receive(head, skb); head 307 net/ipv6/ip6_offload.c static struct sk_buff 
*ip4ip6_gro_receive(struct list_head *head, head 319 net/ipv6/ip6_offload.c return inet_gro_receive(head, skb); head 2063 net/ipv6/ip6_tunnel.c static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head) head 2069 net/ipv6/ip6_tunnel.c unregister_netdevice_queue(dev, head); head 1012 net/ipv6/ip6_vti.c static void vti6_dellink(struct net_device *dev, struct list_head *head) head 1018 net/ipv6/ip6_vti.c unregister_netdevice_queue(dev, head); head 692 net/ipv6/ip6mr.c struct list_head *head) head 744 net/ipv6/ip6mr.c unregister_netdevice_queue(dev, head); head 750 net/ipv6/ip6mr.c static inline void ip6mr_cache_free_rcu(struct rcu_head *head) head 752 net/ipv6/ip6mr.c struct mr_mfc *c = container_of(head, struct mr_mfc, rcu); head 24 net/ipv6/netfilter/ip6t_eui64.c if (!(skb_mac_header(skb) >= skb->head && head 344 net/ipv6/netfilter/nf_conntrack_reasm.c memmove(skb->head + sizeof(struct frag_hdr), skb->head, head 345 net/ipv6/netfilter/nf_conntrack_reasm.c (skb->data - skb->head) - sizeof(struct frag_hdr)); head 319 net/ipv6/netfilter/nf_log_ipv6.c if (p < skb->head) head 277 net/ipv6/reassembly.c memmove(skb->head + sizeof(struct frag_hdr), skb->head, head 278 net/ipv6/reassembly.c (skb->data - skb->head) - sizeof(struct frag_hdr)); head 128 net/ipv6/route.c struct list_head head; head 140 net/ipv6/route.c list_add_tail(&rt->rt6i_uncached, &ul->head); head 170 net/ipv6/route.c list_for_each_entry(rt, &ul->head, rt6i_uncached) { head 4282 net/ipv6/route.c struct hlist_head *head; head 4288 net/ipv6/route.c head = &net->ipv6.fib_table_hash[h]; head 4289 net/ipv6/route.c hlist_for_each_entry_rcu(table, head, tb6_hlist) { head 6424 net/ipv6/route.c INIT_LIST_HEAD(&ul->head); head 399 net/ipv6/sit.c static void prl_list_destroy_rcu(struct rcu_head *head) head 403 net/ipv6/sit.c p = container_of(head, struct ip_tunnel_prl_entry, rcu_head); head 1766 net/ipv6/sit.c static void ipip6_dellink(struct net_device *dev, struct list_head *head) head 1772 net/ipv6/sit.c unregister_netdevice_queue(dev, head); head 1811 net/ipv6/sit.c struct list_head *head) head 1819 net/ipv6/sit.c unregister_netdevice_queue(dev, head); head 1833 net/ipv6/sit.c head); head 16 net/ipv6/tcpv6_offload.c struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb) head 26 net/ipv6/tcpv6_offload.c return tcp_gro_receive(head, skb); head 84 net/ipv6/tunnel6.c #define for_each_tunnel_rcu(head, handler) \ head 85 net/ipv6/tunnel6.c for (handler = rcu_dereference(head); \ head 157 net/ipv6/udp.c udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { head 767 net/ipv6/udp.c sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { head 962 net/ipv6/udp.c udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { head 1072 net/ipv6/udp.c skb->csum_start = skb_transport_header(skb) - skb->head; head 92 net/ipv6/udp_offload.c packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset; head 115 net/ipv6/udp_offload.c struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb) head 135 net/ipv6/udp_offload.c return udp_gro_receive(head, skb, uh, udp6_lib_lookup_skb); head 40 net/ipv6/xfrm6_protocol.c #define for_each_protocol_rcu(head, handler) \ head 41 net/ipv6/xfrm6_protocol.c for (handler = rcu_dereference(head); \ head 49 net/ipv6/xfrm6_protocol.c struct xfrm6_protocol __rcu **head = proto_handlers(protocol); head 51 net/ipv6/xfrm6_protocol.c if (!head) head 181 net/ipv6/xfrm6_tunnel.c static void x6spi_destroy_rcu(struct rcu_head *head) head 184 net/ipv6/xfrm6_tunnel.c 
container_of(head, struct xfrm6_tunnel_spi, rcu_head)); head 160 net/iucv/af_iucv.c sk_for_each(sk, &iucv_sk_list.head) { head 196 net/iucv/af_iucv.c sk_for_each(sk, &iucv_sk_list.head) { head 399 net/iucv/af_iucv.c sk_for_each(sk, &iucv_sk_list.head) head 645 net/iucv/af_iucv.c sk_add_node(sk, &l->head); head 1219 net/iucv/af_iucv.c struct iucv_array *iba = (struct iucv_array *)skb->head; head 1320 net/iucv/af_iucv.c struct iucv_array *iba = (struct iucv_array *)skb->head; head 1752 net/iucv/af_iucv.c sk_for_each(sk, &iucv_sk_list.head) head 2192 net/iucv/af_iucv.c sk_for_each(sk, &iucv_sk_list.head) { head 2286 net/iucv/af_iucv.c sk_for_each(sk, &iucv_sk_list.head) head 2356 net/iucv/af_iucv.c sk_for_each(sk, &iucv_sk_list.head) { head 218 net/kcm/kcmsock.c static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head) head 223 net/kcm/kcmsock.c while ((skb = __skb_dequeue(head))) { head 251 net/kcm/kcmsock.c struct sk_buff *head) head 271 net/kcm/kcmsock.c psock->ready_rx_msg = head; head 579 net/kcm/kcmsock.c struct sk_buff *skb, *head; head 602 net/kcm/kcmsock.c head = skb_peek(&sk->sk_write_queue); head 603 net/kcm/kcmsock.c txm = kcm_tx_msg(head); head 625 net/kcm/kcmsock.c skb = head; head 626 net/kcm/kcmsock.c txm = kcm_tx_msg(head); head 691 net/kcm/kcmsock.c if (skb == head) { head 703 net/kcm/kcmsock.c kfree_skb(head); head 707 net/kcm/kcmsock.c } while ((head = skb_peek(&sk->sk_write_queue))); head 709 net/kcm/kcmsock.c if (!head) { head 763 net/kcm/kcmsock.c struct sk_buff *skb = NULL, *head = NULL; head 785 net/kcm/kcmsock.c head = kcm->seq_skb; head 786 net/kcm/kcmsock.c skb = kcm_tx_msg(head)->last_skb; head 806 net/kcm/kcmsock.c if (head == skb) head 807 net/kcm/kcmsock.c skb_shinfo(head)->frag_list = tskb; head 825 net/kcm/kcmsock.c head = alloc_skb(0, sk->sk_allocation); head 826 net/kcm/kcmsock.c while (!head) { head 833 net/kcm/kcmsock.c skb = head; head 848 net/kcm/kcmsock.c if (head != skb) { head 849 net/kcm/kcmsock.c head->len += size; head 850 net/kcm/kcmsock.c head->data_len += size; head 851 net/kcm/kcmsock.c head->truesize += size; head 858 net/kcm/kcmsock.c __skb_queue_tail(&sk->sk_write_queue, head); head 878 net/kcm/kcmsock.c kcm->seq_skb = head; head 879 net/kcm/kcmsock.c kcm_tx_msg(head)->last_skb = skb; head 904 net/kcm/kcmsock.c struct sk_buff *skb = NULL, *head = NULL; head 921 net/kcm/kcmsock.c head = kcm->seq_skb; head 922 net/kcm/kcmsock.c skb = kcm_tx_msg(head)->last_skb; head 937 net/kcm/kcmsock.c head = alloc_skb(0, sk->sk_allocation); head 938 net/kcm/kcmsock.c while (!head) { head 944 net/kcm/kcmsock.c head = alloc_skb(0, sk->sk_allocation); head 947 net/kcm/kcmsock.c skb = head; head 973 net/kcm/kcmsock.c if (head == skb) head 974 net/kcm/kcmsock.c skb_shinfo(head)->frag_list = tskb; head 1009 net/kcm/kcmsock.c if (head != skb) { head 1010 net/kcm/kcmsock.c head->len += copy; head 1011 net/kcm/kcmsock.c head->data_len += copy; head 1026 net/kcm/kcmsock.c if (head) { head 1028 net/kcm/kcmsock.c __skb_queue_tail(&sk->sk_write_queue, head); head 1050 net/kcm/kcmsock.c if (head) { head 1051 net/kcm/kcmsock.c kcm->seq_skb = head; head 1052 net/kcm/kcmsock.c kcm_tx_msg(head)->last_skb = skb; head 1071 net/kcm/kcmsock.c if (head != kcm->seq_skb) head 1072 net/kcm/kcmsock.c kfree_skb(head); head 1337 net/kcm/kcmsock.c struct list_head *head; head 1350 net/kcm/kcmsock.c head = &mux->kcm_socks; head 1354 net/kcm/kcmsock.c head = &tkcm->kcm_sock_list; head 1358 net/kcm/kcmsock.c list_add(&kcm->kcm_sock_list, head); head 1378 
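The net/kcm/kcmsock.c entries above assemble a message as a head buffer plus a chain of fragments, keeping aggregate byte counts on the head (head->len += size; head->data_len += size). Here is a toy version of that accounting with hypothetical msg_head and frag types; skb truesize and socket locking are left out.

    #include <stddef.h>
    #include <stdio.h>

    struct frag {
        size_t len;
        struct frag *next;
    };

    struct msg_head {
        size_t len;           /* total bytes across head + fragments */
        size_t data_len;      /* bytes held in fragments only */
        struct frag *frag_list, *last;
    };

    static void msg_append(struct msg_head *m, struct frag *f)
    {
        if (!m->frag_list)            /* first fragment */
            m->frag_list = f;
        else
            m->last->next = f;
        m->last = f;

        /* mirrors: head->len += size; head->data_len += size; */
        m->len += f->len;
        m->data_len += f->len;
    }

    int main(void)
    {
        struct msg_head m = { 0 };
        struct frag a = { .len = 100 }, b = { .len = 50 };

        msg_append(&m, &a);
        msg_append(&m, &b);
        printf("len=%zu data_len=%zu\n", m.len, m.data_len);  /* 150 150 */
        return 0;
    }
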
net/kcm/kcmsock.c struct list_head *head; head 1450 net/kcm/kcmsock.c head = &mux->psocks; head 1454 net/kcm/kcmsock.c head = &tpsock->psock_list; head 1458 net/kcm/kcmsock.c list_add(&psock->psock_list, head); head 301 net/l2tp/l2tp_core.c struct hlist_head *head; head 305 net/l2tp/l2tp_core.c head = l2tp_session_id_hash(tunnel, session->session_id); head 313 net/l2tp/l2tp_core.c hlist_for_each_entry(session_walk, head, hlist) head 344 net/l2tp/l2tp_core.c hlist_add_head(&session->hlist, head); head 403 net/l2tp/l2tp_ppp.c static void pppol2tp_put_sk(struct rcu_head *head) head 407 net/l2tp/l2tp_ppp.c ps = container_of(head, typeof(*ps), rcu); head 43 net/llc/llc_proc.c struct hlist_nulls_head *head = &sap->sk_laddr_hash[i]; head 46 net/llc/llc_proc.c sk_nulls_for_each(sk, node, head) { head 884 net/mac80211/cfg.c if (!params->head && !old) head 888 net/mac80211/cfg.c if (params->head) head 912 net/mac80211/cfg.c new->head = ((u8 *) new) + sizeof(*new); head 913 net/mac80211/cfg.c new->tail = new->head + new_head_len; head 925 net/mac80211/cfg.c if (params->head) head 926 net/mac80211/cfg.c memcpy(new->head, params->head, new_head_len); head 928 net/mac80211/cfg.c memcpy(new->head, old->head, new_head_len); head 2997 net/mac80211/cfg.c new_beacon->head = pos; head 2998 net/mac80211/cfg.c memcpy(pos, beacon->head, beacon->head_len); head 1223 net/mac80211/driver-ops.h struct sk_buff *head, head 1229 net/mac80211/driver-ops.h return local->ops->can_aggregate_in_amsdu(&local->hw, head, skb); head 74 net/mac80211/ibss.c presp->head = (void *)(presp + 1); head 76 net/mac80211/ibss.c mgmt = (void *) presp->head; head 148 net/mac80211/ibss.c presp->csa_counter_offsets[0] = (pos - presp->head); head 208 net/mac80211/ibss.c presp->head_len = pos - presp->head; head 324 net/mac80211/ibss.c mgmt = (void *)presp->head; head 1579 net/mac80211/ibss.c skb_put_data(skb, presp->head, presp->head_len); head 258 net/mac80211/ieee80211_i.h u8 *head, *tail; head 782 net/mac80211/mesh.c bcn->head = ((u8 *) bcn) + sizeof(*bcn); head 871 net/mac80211/mesh.c memcpy(bcn->head, skb->data, bcn->head_len); head 875 net/mac80211/mesh.c bcn->tail = bcn->head + bcn->head_len; head 1198 net/mac80211/mesh.c skb_put_data(presp, bcn->head, bcn->head_len); head 3238 net/mac80211/tx.c struct sk_buff **frag_tail, *head; head 3284 net/mac80211/tx.c head = skb_peek_tail(&flow->queue); head 3285 net/mac80211/tx.c if (!head || skb_is_gso(head)) head 3288 net/mac80211/tx.c orig_truesize = head->truesize; head 3289 net/mac80211/tx.c orig_len = head->len; head 3291 net/mac80211/tx.c if (skb->len + head->len > max_amsdu_len) head 3295 net/mac80211/tx.c nfrags += 1 + skb_shinfo(head)->nr_frags; head 3296 net/mac80211/tx.c frag_tail = &skb_shinfo(head)->frag_list; head 3309 net/mac80211/tx.c if (!drv_can_aggregate_in_amsdu(local, head, skb)) head 3312 net/mac80211/tx.c if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) head 3323 net/mac80211/tx.c if ((head->len - hdrlen) & 3) head 3324 net/mac80211/tx.c pad = 4 - ((head->len - hdrlen) & 3); head 3341 net/mac80211/tx.c head->len += skb->len; head 3342 net/mac80211/tx.c head->data_len += skb->len; head 3346 net/mac80211/tx.c fq->memory_usage += head->truesize - orig_truesize; head 3347 net/mac80211/tx.c if (head->len != orig_len) { head 3348 net/mac80211/tx.c flow->backlog += head->len - orig_len; head 3349 net/mac80211/tx.c tin->backlog_bytes += head->len - orig_len; head 4313 net/mac80211/tx.c beacon_data = beacon->head; head 4317 net/mac80211/tx.c beacon_data = beacon->head; head 
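The net/mac80211/cfg.c beacon entries above carve one allocation into a descriptor plus its head and tail byte ranges (new->head = ((u8 *) new) + sizeof(*new); new->tail = new->head + new_head_len), so a single free releases the whole template. A plain-C sketch of that layout follows, assuming a hypothetical beacon_data type; malloc stands in for kzalloc.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct beacon_data {
        unsigned char *head, *tail;      /* point into this same allocation */
        size_t head_len, tail_len;
    };

    /* one malloc holds the struct, then head bytes, then tail bytes */
    static struct beacon_data *beacon_alloc(const void *head, size_t head_len,
                                            const void *tail, size_t tail_len)
    {
        struct beacon_data *new = malloc(sizeof(*new) + head_len + tail_len);

        if (!new)
            return NULL;
        new->head = (unsigned char *)new + sizeof(*new);
        new->tail = new->head + head_len;
        new->head_len = head_len;
        new->tail_len = tail_len;
        memcpy(new->head, head, head_len);
        memcpy(new->tail, tail, tail_len);
        return new;
    }

    int main(void)
    {
        struct beacon_data *b = beacon_alloc("HEAD", 4, "TAIL", 4);

        printf("%.4s %.4s\n", (char *)b->head, (char *)b->tail);
        free(b);                         /* one free releases everything */
        return 0;
    }
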
4432 net/mac80211/tx.c beacon_data = beacon->head; head 4441 net/mac80211/tx.c beacon_data = beacon->head; head 4514 net/mac80211/tx.c skb_put_data(skb, beacon->head, beacon->head_len); head 4552 net/mac80211/tx.c skb_put_data(skb, beacon->head, beacon->head_len); head 4587 net/mac80211/tx.c skb_put_data(skb, beacon->head, beacon->head_len); head 4916 net/mac80211/tx.c if (!beacon || !beacon->head) head 1304 net/mpls/af_mpls.c struct hlist_head *head; head 1330 net/mpls/af_mpls.c head = &net->dev_index_head[h]; head 1333 net/mpls/af_mpls.c hlist_for_each_entry_rcu(dev, head, index_hlist) { head 1486 net/mpls/af_mpls.c static void mpls_dev_destroy_rcu(struct rcu_head *head) head 1488 net/mpls/af_mpls.c struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu); head 69 net/netfilter/core.c struct nf_hook_entries_rcu_head *head; head 71 net/netfilter/core.c head = container_of(h, struct nf_hook_entries_rcu_head, head); head 72 net/netfilter/core.c kvfree(head->allocation); head 77 net/netfilter/core.c struct nf_hook_entries_rcu_head *head; head 86 net/netfilter/core.c head = (void *)&ops[num]; head 87 net/netfilter/core.c head->allocation = e; head 88 net/netfilter/core.c call_rcu(&head->head, __nf_hook_entries_free); head 301 net/netfilter/ipset/ip_set_bitmap_gen.h .head = mtype_head, head 223 net/netfilter/ipset/ip_set_bitmap_ipmac.c if (skb_mac_header(skb) < skb->head || head 1447 net/netfilter/ipset/ip_set_core.c ret = set->variant->head(set, skb); head 1441 net/netfilter/ipset/ip_set_hash_gen.h .head = mtype_head, head 92 net/netfilter/ipset/ip_set_hash_ipmac.c if (skb_mac_header(skb) < skb->head || head 208 net/netfilter/ipset/ip_set_hash_ipmac.c if (skb_mac_header(skb) < skb->head || head 80 net/netfilter/ipset/ip_set_hash_mac.c if (skb_mac_header(skb) < skb->head || head 558 net/netfilter/ipset/ip_set_list_set.c .head = list_set_head, head 62 net/netfilter/ipvs/ip_vs_app.c static void ip_vs_app_inc_rcu_free(struct rcu_head *head) head 64 net/netfilter/ipvs/ip_vs_app.c struct ip_vs_app *inc = container_of(head, struct ip_vs_app, rcu_head); head 800 net/netfilter/ipvs/ip_vs_conn.c static void ip_vs_conn_rcu_free(struct rcu_head *head) head 802 net/netfilter/ipvs/ip_vs_conn.c struct ip_vs_conn *cp = container_of(head, struct ip_vs_conn, head 856 net/netfilter/ipvs/ip_vs_core.c skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset; head 465 net/netfilter/ipvs/ip_vs_ctl.c static void ip_vs_service_rcu_free(struct rcu_head *head) head 469 net/netfilter/ipvs/ip_vs_ctl.c svc = container_of(head, struct ip_vs_service, rcu_head); head 698 net/netfilter/ipvs/ip_vs_ctl.c void ip_vs_dest_dst_rcu_free(struct rcu_head *head) head 700 net/netfilter/ipvs/ip_vs_ctl.c struct ip_vs_dest_dst *dest_dst = container_of(head, head 130 net/netfilter/ipvs/ip_vs_lblc.c static void ip_vs_lblc_rcu_free(struct rcu_head *head) head 132 net/netfilter/ipvs/ip_vs_lblc.c struct ip_vs_lblc_entry *en = container_of(head, head 124 net/netfilter/ipvs/ip_vs_lblcr.c static void ip_vs_lblcr_elem_rcu_free(struct rcu_head *head) head 128 net/netfilter/ipvs/ip_vs_lblcr.c e = container_of(head, struct ip_vs_dest_set_elem, rcu_head); head 373 net/netfilter/ipvs/ip_vs_mh.c static void ip_vs_mh_state_free(struct rcu_head *head) head 377 net/netfilter/ipvs/ip_vs_mh.c s = container_of(head, struct ip_vs_mh_state, rcu_head); head 279 net/netfilter/ipvs/ip_vs_sync.c unsigned char *head; head 352 net/netfilter/ipvs/ip_vs_sync.c sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg); head 438 
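The net/netfilter/core.c entries use the inverse trick: a small trailer struct sits after the hook array in the same allocation, and it stores a pointer back to the start so the deferred free callback can release the whole block (head->allocation = e; kvfree(head->allocation)). A sketch under those assumptions, with free() standing in for call_rcu() plus kvfree():

    #include <stdio.h>
    #include <stdlib.h>

    struct op { void (*hook)(void); };

    struct trailer {
        void *allocation;        /* points back at the start of the block */
    };

    /* one malloc: num ops followed by the trailer */
    static struct op *alloc_ops(unsigned int num)
    {
        struct op *e = malloc(num * sizeof(*e) + sizeof(struct trailer));
        struct trailer *t;

        if (!e)
            return NULL;
        t = (struct trailer *)&e[num];
        t->allocation = e;       /* remembered for the free path */
        return e;
    }

    /* what the deferred callback does: free via the trailer */
    static void free_ops(struct op *e, unsigned int num)
    {
        struct trailer *t = (struct trailer *)&e[num];

        free(t->allocation);
    }

    int main(void)
    {
        struct op *ops = alloc_ops(4);

        ops[0].hook = NULL;
        free_ops(ops, 4);
        return 0;
    }
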
net/netfilter/ipvs/ip_vs_sync.c sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0); head 576 net/netfilter/ipvs/ip_vs_sync.c if (buff->head + len > buff->end || !m->nr_conns) { head 593 net/netfilter/ipvs/ip_vs_sync.c s = (struct ip_vs_sync_conn_v0 *) buff->head; head 614 net/netfilter/ipvs/ip_vs_sync.c buff->head += len; head 694 net/netfilter/ipvs/ip_vs_sync.c pad = (4 - (size_t) buff->head) & 3; head 696 net/netfilter/ipvs/ip_vs_sync.c if (buff->head + len + pad > buff->end || m->reserved) { head 715 net/netfilter/ipvs/ip_vs_sync.c p = buff->head; head 716 net/netfilter/ipvs/ip_vs_sync.c buff->head += pad + len; head 136 net/netfilter/nf_conncount.c list_for_each_entry_safe(conn, conn_n, &list->head, node) { head 191 net/netfilter/nf_conncount.c list_add_tail(&conn->node, &list->head); head 215 net/netfilter/nf_conncount.c INIT_LIST_HEAD(&list->head); head 234 net/netfilter/nf_conncount.c list_for_each_entry_safe(conn, conn_n, &list->head, node) { head 373 net/netfilter/nf_conncount.c list_add(&conn->node, &rbconn->list.head); head 554 net/netfilter/nf_conncount.c list_for_each_entry_safe(conn, conn_n, &list->head, node) head 1119 net/netfilter/nf_conntrack_core.c struct hlist_nulls_head *head) head 1126 net/netfilter/nf_conntrack_core.c hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) { head 354 net/netfilter/nf_conntrack_expect.c static void nf_ct_expect_free_rcu(struct rcu_head *head) head 358 net/netfilter/nf_conntrack_expect.c exp = container_of(head, struct nf_conntrack_expect, rcu); head 564 net/netfilter/nf_conntrack_expect.c struct hlist_node *head) head 568 net/netfilter/nf_conntrack_expect.c head = rcu_dereference(hlist_next_rcu(head)); head 569 net/netfilter/nf_conntrack_expect.c while (head == NULL) { head 572 net/netfilter/nf_conntrack_expect.c head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket])); head 574 net/netfilter/nf_conntrack_expect.c return head; head 579 net/netfilter/nf_conntrack_expect.c struct hlist_node *head = ct_expect_get_first(seq); head 581 net/netfilter/nf_conntrack_expect.c if (head) head 582 net/netfilter/nf_conntrack_expect.c while (pos && (head = ct_expect_get_next(seq, head))) head 584 net/netfilter/nf_conntrack_expect.c return pos ? NULL : head; head 322 net/netfilter/nf_conntrack_helper.c list_add_rcu(&n->head, &nf_ct_helper_expectfn_list); head 330 net/netfilter/nf_conntrack_helper.c list_del_rcu(&n->head); head 342 net/netfilter/nf_conntrack_helper.c list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { head 359 net/netfilter/nf_conntrack_helper.c list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { head 122 net/netfilter/nf_conntrack_standalone.c struct hlist_nulls_node *head) head 126 net/netfilter/nf_conntrack_standalone.c head = rcu_dereference(hlist_nulls_next_rcu(head)); head 127 net/netfilter/nf_conntrack_standalone.c while (is_a_nulls(head)) { head 128 net/netfilter/nf_conntrack_standalone.c if (likely(get_nulls_value(head) == st->bucket)) { head 132 net/netfilter/nf_conntrack_standalone.c head = rcu_dereference( head 135 net/netfilter/nf_conntrack_standalone.c return head; head 140 net/netfilter/nf_conntrack_standalone.c struct hlist_nulls_node *head = ct_get_first(seq); head 142 net/netfilter/nf_conntrack_standalone.c if (head) head 143 net/netfilter/nf_conntrack_standalone.c while (pos && (head = ct_get_next(seq, head))) head 145 net/netfilter/nf_conntrack_standalone.c return pos ? 
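In the net/netfilter/ipvs/ip_vs_sync.c entries, head is a fill cursor into a fixed message buffer: pad to the next 4-byte boundary ((4 - (size_t) buff->head) & 3), bounds-check against end, then bump the cursor past pad + len. A self-contained sketch of that cursor logic; sync_buff and sb_reserve are hypothetical names.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct sync_buff {
        unsigned char data[64];
        unsigned char *head;     /* fill cursor */
        unsigned char *end;      /* first byte past the buffer */
    };

    static unsigned char *sb_reserve(struct sync_buff *b, size_t len)
    {
        size_t pad = (4 - (uintptr_t)b->head) & 3;   /* 0..3 pad bytes */
        unsigned char *p;

        if (b->head + pad + len > b->end)
            return NULL;                             /* message full */
        p = b->head + pad;
        b->head = p + len;
        return p;
    }

    int main(void)
    {
        struct sync_buff b;
        unsigned char *p;

        b.head = b.data + 1;                 /* deliberately misaligned */
        b.end = b.data + sizeof(b.data);
        p = sb_reserve(&b, 8);
        printf("reserved at offset %td\n", p ? p - b.data : -1);  /* 4 */
        return 0;
    }
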
NULL : head; head 444 net/netfilter/nf_synproxy_core.c nskb->csum_start = (unsigned char *)nth - nskb->head; head 843 net/netfilter/nf_synproxy_core.c nskb->csum_start = (unsigned char *)nth - nskb->head; head 4967 net/netfilter/nf_tables_api.c gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); head 4968 net/netfilter/nf_tables_api.c for (i = 0; i < gcb->head.cnt; i++) head 4969 net/netfilter/nf_tables_api.c nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true); head 4982 net/netfilter/nf_tables_api.c gcb->head.set = set; head 6591 net/netfilter/nf_tables_api.c LIST_HEAD(head); head 6594 net/netfilter/nf_tables_api.c list_splice_init(&nf_tables_destroy_list, &head); head 6597 net/netfilter/nf_tables_api.c if (list_empty(&head)) head 6602 net/netfilter/nf_tables_api.c list_for_each_entry_safe(trans, next, &head, list) { head 243 net/netfilter/nfnetlink.c struct list_head head; head 261 net/netfilter/nfnetlink.c list_add_tail(&nfnl_err->head, list); head 268 net/netfilter/nfnetlink.c list_del(&nfnl_err->head); head 276 net/netfilter/nfnetlink.c list_for_each_entry_safe(nfnl_err, next, err_list, head) head 284 net/netfilter/nfnetlink.c list_for_each_entry_safe(nfnl_err, next, err_list, head) { head 32 net/netfilter/nfnetlink_acct.c struct list_head head; head 64 net/netfilter/nfnetlink_acct.c list_for_each_entry(nfacct, &net->nfnl_acct_list, head) { head 126 net/netfilter/nfnetlink_acct.c list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list); head 202 net/netfilter/nfnetlink_acct.c list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) { head 291 net/netfilter/nfnetlink_acct.c list_for_each_entry(cur, &net->nfnl_acct_list, head) { head 332 net/netfilter/nfnetlink_acct.c list_del_rcu(&cur->head); head 350 net/netfilter/nfnetlink_acct.c list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) head 357 net/netfilter/nfnetlink_acct.c list_for_each_entry(cur, &net->nfnl_acct_list, head) { head 408 net/netfilter/nfnetlink_acct.c list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) { head 500 net/netfilter/nfnetlink_acct.c list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) { head 501 net/netfilter/nfnetlink_acct.c list_del_rcu(&cur->head); head 97 net/netfilter/nfnetlink_cttimeout.c list_for_each_entry(timeout, &net->nfct_timeout_list, head) { head 149 net/netfilter/nfnetlink_cttimeout.c list_add_tail_rcu(&timeout->head, &net->nfct_timeout_list); head 220 net/netfilter/nfnetlink_cttimeout.c list_for_each_entry_rcu(cur, &net->nfct_timeout_list, head) { head 262 net/netfilter/nfnetlink_cttimeout.c list_for_each_entry(cur, &net->nfct_timeout_list, head) { head 303 net/netfilter/nfnetlink_cttimeout.c list_del_rcu(&timeout->head); head 324 net/netfilter/nfnetlink_cttimeout.c head) head 331 net/netfilter/nfnetlink_cttimeout.c list_for_each_entry(cur, &net->nfct_timeout_list, head) { head 518 net/netfilter/nfnetlink_cttimeout.c list_for_each_entry_rcu(timeout, &net->nfct_timeout_list, head) { head 588 net/netfilter/nfnetlink_cttimeout.c list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) { head 589 net/netfilter/nfnetlink_cttimeout.c list_del_rcu(&cur->head); head 103 net/netfilter/nfnetlink_log.c struct hlist_head *head; head 106 net/netfilter/nfnetlink_log.c head = &log->instance_table[instance_hashfn(group_num)]; head 107 net/netfilter/nfnetlink_log.c hlist_for_each_entry_rcu(inst, head, hlist) { head 134 net/netfilter/nfnetlink_log.c static void nfulnl_instance_free_rcu(struct rcu_head *head) head 137 net/netfilter/nfnetlink_log.c container_of(head, 
struct nfulnl_instance, rcu); head 585 net/netfilter/nfnetlink_log.c if (hwhdrp >= skb->head && head 832 net/netfilter/nfnetlink_log.c struct hlist_head *head = &log->instance_table[i]; head 834 net/netfilter/nfnetlink_log.c hlist_for_each_entry_safe(inst, t2, head, hlist) { head 1025 net/netfilter/nfnetlink_log.c struct hlist_head *head = &log->instance_table[st->bucket]; head 1027 net/netfilter/nfnetlink_log.c if (!hlist_empty(head)) head 1028 net/netfilter/nfnetlink_log.c return rcu_dereference_bh(hlist_first_rcu(head)); head 1039 net/netfilter/nfnetlink_log.c struct hlist_head *head; head 1045 net/netfilter/nfnetlink_log.c head = &log->instance_table[st->bucket]; head 1046 net/netfilter/nfnetlink_log.c h = rcu_dereference_bh(hlist_first_rcu(head)); head 1054 net/netfilter/nfnetlink_log.c struct hlist_node *head; head 1055 net/netfilter/nfnetlink_log.c head = get_first(net, st); head 1057 net/netfilter/nfnetlink_log.c if (head) head 1058 net/netfilter/nfnetlink_log.c while (pos && (head = get_next(net, st, head))) head 1060 net/netfilter/nfnetlink_log.c return pos ? NULL : head; head 104 net/netfilter/nfnetlink_queue.c struct hlist_head *head; head 107 net/netfilter/nfnetlink_queue.c head = &q->instance_table[instance_hashfn(queue_num)]; head 108 net/netfilter/nfnetlink_queue.c hlist_for_each_entry_rcu(inst, head, hlist) { head 165 net/netfilter/nfnetlink_queue.c instance_destroy_rcu(struct rcu_head *head) head 167 net/netfilter/nfnetlink_queue.c struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance, head 934 net/netfilter/nfnetlink_queue.c struct hlist_head *head = &q->instance_table[i]; head 936 net/netfilter/nfnetlink_queue.c hlist_for_each_entry_rcu(inst, head, hlist) head 966 net/netfilter/nfnetlink_queue.c struct hlist_head *head = &q->instance_table[i]; head 968 net/netfilter/nfnetlink_queue.c hlist_for_each_entry_rcu(inst, head, hlist) head 988 net/netfilter/nfnetlink_queue.c struct hlist_head *head = &q->instance_table[i]; head 990 net/netfilter/nfnetlink_queue.c hlist_for_each_entry_safe(inst, t2, head, hlist) { head 1446 net/netfilter/nfnetlink_queue.c struct hlist_node *head; head 1447 net/netfilter/nfnetlink_queue.c head = get_first(seq); head 1449 net/netfilter/nfnetlink_queue.c if (head) head 1450 net/netfilter/nfnetlink_queue.c while (pos && (head = get_next(seq, head))) head 1452 net/netfilter/nfnetlink_queue.c return pos ? NULL : head; head 16 net/netfilter/nft_set_bitmap.c struct list_head head; head 95 net/netfilter/nft_set_bitmap.c list_for_each_entry_rcu(be, &priv->list, head) { head 113 net/netfilter/nft_set_bitmap.c list_for_each_entry_rcu(be, &priv->list, head) { head 141 net/netfilter/nft_set_bitmap.c list_add_tail_rcu(&new->head, &priv->list); head 158 net/netfilter/nft_set_bitmap.c list_del_rcu(&be->head); head 222 net/netfilter/nft_set_bitmap.c list_for_each_entry_rcu(be, &priv->list, head) { head 278 net/netfilter/nft_set_bitmap.c list_for_each_entry_safe(be, n, &priv->list, head) head 1533 net/netfilter/x_tables.c struct list_head *head, *curr; head 1561 net/netfilter/x_tables.c trav->head = trav->curr = is_target ? head 1566 net/netfilter/x_tables.c if (trav->curr != trav->head) head 1570 net/netfilter/x_tables.c trav->head = trav->curr = is_target ? 
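Several of the seq_file helpers indexed above (ct_get_first/ct_get_next in nf_conntrack_standalone.c, get_first/get_next in nfnetlink_log.c and nfnetlink_queue.c) share the same bucket-rolling iterator, and their callers all end with "return pos ? NULL : head;". A userspace rendering of the idiom, using plain pointers instead of RCU-protected hlists:

    #include <stdio.h>
    #include <stddef.h>

    #define NBUCKETS 4

    struct node { struct node *next; int val; };

    static struct node *table[NBUCKETS];

    static struct node *get_first(unsigned int *bucket)
    {
        for (*bucket = 0; *bucket < NBUCKETS; (*bucket)++)
            if (table[*bucket])
                return table[*bucket];
        return NULL;
    }

    static struct node *get_next(unsigned int *bucket, struct node *n)
    {
        n = n->next;
        while (!n) {                     /* chain ended: roll buckets */
            if (++*bucket >= NBUCKETS)
                return NULL;
            n = table[*bucket];
        }
        return n;
    }

    /* seq_start style: skip pos entries, NULL once past the end */
    static struct node *get_idx(unsigned int *bucket, size_t pos)
    {
        struct node *head = get_first(bucket);

        if (head)
            while (pos && (head = get_next(bucket, head)))
                pos--;
        return pos ? NULL : head;
    }

    int main(void)
    {
        struct node a = { .val = 1 }, b = { .val = 2 };
        unsigned int bkt;

        table[1] = &a;
        table[3] = &b;
        printf("%d\n", get_idx(&bkt, 1)->val);   /* 2 */
        return 0;
    }
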
head 1576 net/netfilter/x_tables.c if (trav->curr != trav->head) head 1631 net/netfilter/x_tables.c if (trav->curr == trav->head) head 1665 net/netfilter/x_tables.c if (trav->curr == trav->head) head 259 net/netfilter/xt_hashlimit.c static void dsthash_free_rcu(struct rcu_head *head) head 261 net/netfilter/xt_hashlimit.c struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu); head 32 net/netfilter/xt_mac.c if (skb_mac_header(skb) < skb->head) head 493 net/netfilter/xt_recent.c const struct list_head *head = e->list.next; head 496 net/netfilter/xt_recent.c while (head == &t->iphash[st->bucket]) { head 499 net/netfilter/xt_recent.c head = t->iphash[st->bucket].next; head 501 net/netfilter/xt_recent.c return list_entry(head, struct recent_entry, list); head 47 net/netlabel/netlabel_addrlist.c struct list_head *head) head 51 net/netlabel/netlabel_addrlist.c list_for_each_entry_rcu(iter, head, list) head 72 net/netlabel/netlabel_addrlist.c struct list_head *head) head 76 net/netlabel/netlabel_addrlist.c list_for_each_entry_rcu(iter, head, list) head 97 net/netlabel/netlabel_addrlist.c struct list_head *head) head 101 net/netlabel/netlabel_addrlist.c list_for_each_entry_rcu(iter, head, list) head 123 net/netlabel/netlabel_addrlist.c struct list_head *head) head 127 net/netlabel/netlabel_addrlist.c list_for_each_entry_rcu(iter, head, list) head 148 net/netlabel/netlabel_addrlist.c int netlbl_af4list_add(struct netlbl_af4list *entry, struct list_head *head) head 152 net/netlabel/netlabel_addrlist.c iter = netlbl_af4list_search(entry->addr, head); head 161 net/netlabel/netlabel_addrlist.c list_for_each_entry_rcu(iter, head, list) head 169 net/netlabel/netlabel_addrlist.c list_add_tail_rcu(&entry->list, head); head 185 net/netlabel/netlabel_addrlist.c int netlbl_af6list_add(struct netlbl_af6list *entry, struct list_head *head) head 189 net/netlabel/netlabel_addrlist.c iter = netlbl_af6list_search(&entry->addr, head); head 199 net/netlabel/netlabel_addrlist.c list_for_each_entry_rcu(iter, head, list) head 207 net/netlabel/netlabel_addrlist.c list_add_tail_rcu(&entry->list, head); head 240 net/netlabel/netlabel_addrlist.c struct list_head *head) head 244 net/netlabel/netlabel_addrlist.c entry = netlbl_af4list_search_exact(addr, mask, head); head 281 net/netlabel/netlabel_addrlist.c struct list_head *head) head 285 net/netlabel/netlabel_addrlist.c entry = netlbl_af6list_search_exact(addr, mask, head); head 82 net/netlabel/netlabel_addrlist.h #define netlbl_af4list_foreach(iter, head) \ head 83 net/netlabel/netlabel_addrlist.h for (iter = __af4list_valid((head)->next, head); \ head 84 net/netlabel/netlabel_addrlist.h &iter->list != (head); \ head 85 net/netlabel/netlabel_addrlist.h iter = __af4list_valid(iter->list.next, head)) head 87 net/netlabel/netlabel_addrlist.h #define netlbl_af4list_foreach_rcu(iter, head) \ head 88 net/netlabel/netlabel_addrlist.h for (iter = __af4list_valid_rcu((head)->next, head); \ head 89 net/netlabel/netlabel_addrlist.h &iter->list != (head); \ head 90 net/netlabel/netlabel_addrlist.h iter = __af4list_valid_rcu(iter->list.next, head)) head 92 net/netlabel/netlabel_addrlist.h #define netlbl_af4list_foreach_safe(iter, tmp, head) \ head 93 net/netlabel/netlabel_addrlist.h for (iter = __af4list_valid((head)->next, head), \ head 94 net/netlabel/netlabel_addrlist.h tmp = __af4list_valid(iter->list.next, head); \ head 95 net/netlabel/netlabel_addrlist.h &iter->list != (head); \ head 96 net/netlabel/netlabel_addrlist.h iter = tmp, tmp = 
__af4list_valid(iter->list.next, head)) head 99 net/netlabel/netlabel_addrlist.h struct list_head *head); head 101 net/netlabel/netlabel_addrlist.h struct list_head *head); head 104 net/netlabel/netlabel_addrlist.h struct list_head *head); head 107 net/netlabel/netlabel_addrlist.h struct list_head *head); head 149 net/netlabel/netlabel_addrlist.h #define netlbl_af6list_foreach(iter, head) \ head 150 net/netlabel/netlabel_addrlist.h for (iter = __af6list_valid((head)->next, head); \ head 151 net/netlabel/netlabel_addrlist.h &iter->list != (head); \ head 152 net/netlabel/netlabel_addrlist.h iter = __af6list_valid(iter->list.next, head)) head 154 net/netlabel/netlabel_addrlist.h #define netlbl_af6list_foreach_rcu(iter, head) \ head 155 net/netlabel/netlabel_addrlist.h for (iter = __af6list_valid_rcu((head)->next, head); \ head 156 net/netlabel/netlabel_addrlist.h &iter->list != (head); \ head 157 net/netlabel/netlabel_addrlist.h iter = __af6list_valid_rcu(iter->list.next, head)) head 159 net/netlabel/netlabel_addrlist.h #define netlbl_af6list_foreach_safe(iter, tmp, head) \ head 160 net/netlabel/netlabel_addrlist.h for (iter = __af6list_valid((head)->next, head), \ head 161 net/netlabel/netlabel_addrlist.h tmp = __af6list_valid(iter->list.next, head); \ head 162 net/netlabel/netlabel_addrlist.h &iter->list != (head); \ head 163 net/netlabel/netlabel_addrlist.h iter = tmp, tmp = __af6list_valid(iter->list.next, head)) head 166 net/netlabel/netlabel_addrlist.h struct list_head *head); head 169 net/netlabel/netlabel_addrlist.h struct list_head *head); head 172 net/netlabel/netlabel_addrlist.h struct list_head *head); head 175 net/netlabel/netlabel_addrlist.h struct list_head *head); head 284 net/netlink/af_netlink.c if (is_vmalloc_addr(skb->head)) head 363 net/netlink/af_netlink.c if (is_vmalloc_addr(skb->head)) { head 366 net/netlink/af_netlink.c vfree(skb->head); head 368 net/netlink/af_netlink.c skb->head = NULL; head 712 net/netlink/af_netlink.c static void deferred_put_nlk_sk(struct rcu_head *head) head 714 net/netlink/af_netlink.c struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu); head 1274 net/netlink/af_netlink.c if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize) head 34 net/nfc/llcp.h struct hlist_head head; head 180 net/nfc/llcp_commands.c void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head) head 185 net/nfc/llcp_commands.c hlist_for_each_entry_safe(sdp, n, head, node) { head 26 net/nfc/llcp_core.c sk_add_node(sk, &l->head); head 77 net/nfc/llcp_core.c sk_for_each_safe(sk, tmp, &local->sockets.head) { head 126 net/nfc/llcp_core.c sk_for_each_safe(sk, tmp, &local->raw_sockets.head) { head 201 net/nfc/llcp_core.c sk_for_each(sk, &local->sockets.head) { head 342 net/nfc/llcp_core.c sk_for_each(sk, &local->sockets.head) { head 683 net/nfc/llcp_core.c sk_for_each(sk, &local->raw_sockets.head) { head 786 net/nfc/llcp_core.c sk_for_each(sk, &local->connecting_sockets.head) { head 36 net/nfc/nfc.h struct hlist_head head; head 58 net/nfc/nfc.h void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head); head 25 net/nfc/rawsock.c sk_add_node(sk, &l->head); head 367 net/nfc/rawsock.c sk_for_each(sk, &raw_sk_list.head) { head 64 net/openvswitch/actions.c int head; head 97 net/openvswitch/actions.c fifo->head = 0; head 103 net/openvswitch/actions.c return (fifo->head == fifo->tail); head 116 net/openvswitch/actions.c if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1) head 119 net/openvswitch/actions.c return &fifo->fifo[fifo->head++]; head 1096 
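The net/openvswitch/actions.c entries use head as a fill index into a fixed, non-wrapping FIFO: empty means head == tail, and the put path hands out the next slot or NULL once the array is exhausted (the fifo is reset per packet, so it never needs to wrap). A sketch with a hypothetical element type:

    #include <stdio.h>

    #define FIFO_SIZE 10

    struct deferred_action { int port; };

    struct action_fifo {
        int head, tail;
        struct deferred_action fifo[FIFO_SIZE];
    };

    static int fifo_is_empty(const struct action_fifo *f)
    {
        return f->head == f->tail;
    }

    /* next free slot, or NULL when exhausted; like the kernel check,
     * the last slot is never handed out */
    static struct deferred_action *fifo_put(struct action_fifo *f)
    {
        if (f->head >= FIFO_SIZE - 1)
            return NULL;
        return &f->fifo[f->head++];
    }

    static struct deferred_action *fifo_get(struct action_fifo *f)
    {
        if (fifo_is_empty(f))
            return NULL;
        return &f->fifo[f->tail++];
    }

    int main(void)
    {
        struct action_fifo f = { 0 };
        struct deferred_action *da = fifo_put(&f);

        da->port = 7;
        printf("%d\n", fifo_get(&f)->port);   /* 7 */
        return 0;
    }
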
net/openvswitch/conntrack.c struct hlist_head *head; head 1098 net/openvswitch/conntrack.c head = ct_limit_hash_bucket(info, new_ct_limit->zone); head 1099 net/openvswitch/conntrack.c hlist_for_each_entry_rcu(ct_limit, head, hlist_node) { head 1108 net/openvswitch/conntrack.c hlist_add_head_rcu(&new_ct_limit->hlist_node, head); head 1115 net/openvswitch/conntrack.c struct hlist_head *head; head 1118 net/openvswitch/conntrack.c head = ct_limit_hash_bucket(info, zone); head 1119 net/openvswitch/conntrack.c hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) { head 1132 net/openvswitch/conntrack.c struct hlist_head *head; head 1134 net/openvswitch/conntrack.c head = ct_limit_hash_bucket(info, zone); head 1135 net/openvswitch/conntrack.c hlist_for_each_entry_rcu(ct_limit, head, hlist_node) { head 1890 net/openvswitch/conntrack.c struct hlist_head *head = &info->limits[i]; head 1893 net/openvswitch/conntrack.c hlist_for_each_entry_rcu(ct_limit, head, hlist_node, head 2094 net/openvswitch/conntrack.c struct hlist_head *head; head 2103 net/openvswitch/conntrack.c head = &info->limits[i]; head 2104 net/openvswitch/conntrack.c hlist_for_each_entry_rcu(ct_limit, head, hlist_node) { head 179 net/openvswitch/datapath.c struct hlist_head *head; head 181 net/openvswitch/datapath.c head = vport_hash_bucket(dp, port_no); head 182 net/openvswitch/datapath.c hlist_for_each_entry_rcu(vport, head, dp_hash_node) { head 197 net/openvswitch/datapath.c struct hlist_head *head = vport_hash_bucket(dp, vport->port_no); head 199 net/openvswitch/datapath.c hlist_add_head_rcu(&vport->dp_hash_node, head); head 2403 net/openvswitch/datapath.c struct list_head *head) head 2419 net/openvswitch/datapath.c list_add(&vport->detach_list, head); head 2431 net/openvswitch/datapath.c LIST_HEAD(head); head 2442 net/openvswitch/datapath.c list_vports_from_net(net, dnet, &head); head 2446 net/openvswitch/datapath.c list_for_each_entry_safe(vport, vport_next, &head, detach_list) { head 2304 net/openvswitch/flow_netlink.c static void __ovs_nla_free_flow_actions(struct rcu_head *head) head 2306 net/openvswitch/flow_netlink.c ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu)); head 215 net/openvswitch/flow_table.c struct hlist_head *head = &ti->buckets[i]; head 220 net/openvswitch/flow_table.c hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) { head 253 net/openvswitch/flow_table.c struct hlist_head *head; head 260 net/openvswitch/flow_table.c head = &ti->buckets[*bucket]; head 261 net/openvswitch/flow_table.c hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) { head 285 net/openvswitch/flow_table.c struct hlist_head *head; head 287 net/openvswitch/flow_table.c head = find_bucket(ti, flow->flow_table.hash); head 288 net/openvswitch/flow_table.c hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head); head 294 net/openvswitch/flow_table.c struct hlist_head *head; head 296 net/openvswitch/flow_table.c head = find_bucket(ti, flow->ufid_table.hash); head 297 net/openvswitch/flow_table.c hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head); head 312 net/openvswitch/flow_table.c struct hlist_head *head = &old->buckets[i]; head 315 net/openvswitch/flow_table.c hlist_for_each_entry(flow, head, head 319 net/openvswitch/flow_table.c hlist_for_each_entry(flow, head, head 431 net/openvswitch/flow_table.c struct hlist_head *head; head 437 net/openvswitch/flow_table.c head = find_bucket(ti, hash); head 438 net/openvswitch/flow_table.c hlist_for_each_entry_rcu(flow, head, 
flow_table.node[ti->node_ver]) { head 516 net/openvswitch/flow_table.c struct hlist_head *head; head 520 net/openvswitch/flow_table.c head = find_bucket(ti, hash); head 521 net/openvswitch/flow_table.c hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) { head 61 net/openvswitch/meter.c struct hlist_head *head; head 63 net/openvswitch/meter.c head = meter_hash_bucket(dp, meter_id); head 64 net/openvswitch/meter.c hlist_for_each_entry_rcu(meter, head, dp_hash_node) { head 73 net/openvswitch/meter.c struct hlist_head *head = meter_hash_bucket(dp, meter->id); head 75 net/openvswitch/meter.c hlist_add_head_rcu(&meter->dp_hash_node, head); head 591 net/openvswitch/meter.c struct hlist_head *head = &dp->meters[i]; head 595 net/openvswitch/meter.c hlist_for_each_entry_safe(meter, n, head, dp_hash_node) head 487 net/packet/af_packet.c return packet_lookup_frame(po, rb, rb->head, status); head 1075 net/packet/af_packet.c po->rx_ring.head, status); head 1147 net/packet/af_packet.c unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max; head 1153 net/packet/af_packet.c buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; head 1207 net/packet/af_packet.c idx = READ_ONCE(po->rx_ring.head); head 2279 net/packet/af_packet.c slot_id = po->rx_ring.head; head 4414 net/packet/af_packet.c rb->head = 0; head 62 net/packet/internal.h unsigned int head; head 83 net/rds/connection.c struct hlist_head *head, head 91 net/rds/connection.c hlist_for_each_entry_rcu(conn, head, c_hash_node) { head 169 net/rds/connection.c struct hlist_head *head = rds_conn_bucket(laddr, faddr); head 176 net/rds/connection.c conn = rds_conn_lookup(net, head, laddr, faddr, trans, tos, dev_if); head 302 net/rds/connection.c found = rds_conn_lookup(net, head, laddr, faddr, trans, head 323 net/rds/connection.c hlist_add_head_rcu(&conn->c_hash_node, head); head 527 net/rds/connection.c struct hlist_head *head; head 543 net/rds/connection.c for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); head 544 net/rds/connection.c i++, head++) { head 545 net/rds/connection.c hlist_for_each_entry_rcu(conn, head, c_hash_node) { head 647 net/rds/connection.c struct hlist_head *head; head 656 net/rds/connection.c for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); head 657 net/rds/connection.c i++, head++) { head 658 net/rds/connection.c hlist_for_each_entry_rcu(conn, head, c_hash_node) { head 685 net/rds/connection.c struct hlist_head *head; head 694 net/rds/connection.c for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash); head 695 net/rds/connection.c i++, head++) { head 696 net/rds/connection.c hlist_for_each_entry_rcu(conn, head, c_hash_node) { head 103 net/rds/ib_recv.c struct rds_ib_cache_head *head; head 111 net/rds/ib_recv.c head = per_cpu_ptr(cache->percpu, cpu); head 112 net/rds/ib_recv.c head->first = NULL; head 113 net/rds/ib_recv.c head->count = 0; head 138 net/rds/ib_recv.c struct rds_ib_cache_head *head; head 142 net/rds/ib_recv.c head = per_cpu_ptr(cache->percpu, cpu); head 143 net/rds/ib_recv.c if (head->first) { head 144 net/rds/ib_recv.c list_splice_entire_tail(head->first, caller_list); head 145 net/rds/ib_recv.c head->first = NULL; head 519 net/rds/ib_recv.c struct list_head *head = cache->ready; head 521 net/rds/ib_recv.c if (head) { head 522 net/rds/ib_recv.c if (!list_empty(head)) { head 523 net/rds/ib_recv.c cache->ready = head->next; head 524 net/rds/ib_recv.c list_del_init(head); head 529 net/rds/ib_recv.c return head; head 99 net/rds/message.c struct 
list_head *head; head 105 net/rds/message.c head = &q->zcookie_head; head 106 net/rds/message.c if (!list_empty(head)) { head 107 net/rds/message.c info = list_entry(head, struct rds_msg_zcopy_info, head 41 net/rxrpc/call_accept.c unsigned int head, tail, call_head, call_tail; head 64 net/rxrpc/call_accept.c head = b->peer_backlog_head; head 66 net/rxrpc/call_accept.c if (CIRC_CNT(head, tail, size) < max) { head 70 net/rxrpc/call_accept.c b->peer_backlog[head] = peer; head 72 net/rxrpc/call_accept.c (head + 1) & (size - 1)); head 75 net/rxrpc/call_accept.c head = b->conn_backlog_head; head 77 net/rxrpc/call_accept.c if (CIRC_CNT(head, tail, size) < max) { head 83 net/rxrpc/call_accept.c b->conn_backlog[head] = conn; head 85 net/rxrpc/call_accept.c (head + 1) & (size - 1)); head 189 net/rxrpc/call_accept.c unsigned int size = RXRPC_BACKLOG_MAX, head, tail; head 201 net/rxrpc/call_accept.c head = b->peer_backlog_head; head 203 net/rxrpc/call_accept.c while (CIRC_CNT(head, tail, size) > 0) { head 209 net/rxrpc/call_accept.c head = b->conn_backlog_head; head 211 net/rxrpc/call_accept.c while (CIRC_CNT(head, tail, size) > 0) { head 223 net/rxrpc/call_accept.c head = b->call_backlog_head; head 225 net/rxrpc/call_accept.c while (CIRC_CNT(head, tail, size) > 0) { head 349 net/rxrpc/output.c iov[1].iov_base = skb->head; head 452 net/rxrpc/sendmsg.c call, skb, skb->mark, skb->head); head 572 net/sched/act_api.c list_for_each_entry(a, &act_base, head) { head 579 net/sched/act_api.c list_add_tail(&act->head, &act_base); head 593 net/sched/act_api.c list_for_each_entry(a, &act_base, head) { head 595 net/sched/act_api.c list_del(&act->head); head 614 net/sched/act_api.c list_for_each_entry(a, &act_base, head) { head 633 net/sched/act_api.c list_for_each_entry(a, &act_base, head) { head 205 net/sched/act_ct.c static void tcf_ct_params_free(struct rcu_head *head) head 207 net/sched/act_ct.c struct tcf_ct_params *params = container_of(head, head 123 net/sched/act_tunnel_key.c const struct nlattr *attr, *head = nla_data(nla); head 125 net/sched/act_tunnel_key.c err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX, head 130 net/sched/act_tunnel_key.c nla_for_each_attr(attr, head, len, rem) { head 115 net/sched/cls_api.c list_for_each_entry(t, &tcf_proto_base, head) { head 165 net/sched/cls_api.c list_for_each_entry(t, &tcf_proto_base, head) head 169 net/sched/cls_api.c list_add_tail(&ops->head, &tcf_proto_base); head 191 net/sched/cls_api.c list_for_each_entry(t, &tcf_proto_base, head) { head 193 net/sched/cls_api.c list_del(&t->head); head 43 net/sched/cls_basic.c struct basic_head *head = rcu_dereference_bh(tp->root); head 46 net/sched/cls_basic.c list_for_each_entry_rcu(f, &head->flist, link) { head 62 net/sched/cls_basic.c struct basic_head *head = rtnl_dereference(tp->root); head 65 net/sched/cls_basic.c list_for_each_entry(f, &head->flist, link) { head 76 net/sched/cls_basic.c struct basic_head *head; head 78 net/sched/cls_basic.c head = kzalloc(sizeof(*head), GFP_KERNEL); head 79 net/sched/cls_basic.c if (head == NULL) head 81 net/sched/cls_basic.c INIT_LIST_HEAD(&head->flist); head 82 net/sched/cls_basic.c idr_init(&head->handle_idr); head 83 net/sched/cls_basic.c rcu_assign_pointer(tp->root, head); head 109 net/sched/cls_basic.c struct basic_head *head = rtnl_dereference(tp->root); head 112 net/sched/cls_basic.c list_for_each_entry_safe(f, n, &head->flist, link) { head 115 net/sched/cls_basic.c idr_remove(&head->handle_idr, f->handle); head 121 net/sched/cls_basic.c 
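The net/rxrpc/call_accept.c entries manage their backlog rings with CIRC_CNT(head, tail, size) and a power-of-two mask ((head + 1) & (size - 1)). The two macros below are the standard linux/circ_buf.h definitions; the producer/consumer driver around them is made up for illustration.

    #include <stdio.h>

    /* kernel circ_buf.h macros for a power-of-two sized ring */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    #define BACKLOG_SIZE 8   /* must be a power of two */

    int main(void)
    {
        unsigned int head = 0, tail = 0;
        int slots[BACKLOG_SIZE];

        /* producer: fill while there is space (one slot stays unused) */
        while (CIRC_SPACE(head, tail, BACKLOG_SIZE) > 0) {
            slots[head] = (int)head;
            head = (head + 1) & (BACKLOG_SIZE - 1);   /* wrap via mask */
        }
        printf("queued %u\n", CIRC_CNT(head, tail, BACKLOG_SIZE)); /* 7 */

        /* consumer: drain */
        while (CIRC_CNT(head, tail, BACKLOG_SIZE) > 0)
            tail = (tail + 1) & (BACKLOG_SIZE - 1);
        return 0;
    }
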
idr_destroy(&head->handle_idr); head 122 net/sched/cls_basic.c kfree_rcu(head, rcu); head 128 net/sched/cls_basic.c struct basic_head *head = rtnl_dereference(tp->root); head 133 net/sched/cls_basic.c idr_remove(&head->handle_idr, f->handle); head 136 net/sched/cls_basic.c *last = list_empty(&head->flist); head 176 net/sched/cls_basic.c struct basic_head *head = rtnl_dereference(tp->root); head 204 net/sched/cls_basic.c err = idr_alloc_u32(&head->handle_idr, fnew, &handle, head 207 net/sched/cls_basic.c err = idr_alloc_u32(&head->handle_idr, fnew, &handle, head 223 net/sched/cls_basic.c idr_remove(&head->handle_idr, fnew->handle); head 230 net/sched/cls_basic.c idr_replace(&head->handle_idr, fnew, fnew->handle); head 236 net/sched/cls_basic.c list_add_rcu(&fnew->link, &head->flist); head 250 net/sched/cls_basic.c struct basic_head *head = rtnl_dereference(tp->root); head 253 net/sched/cls_basic.c list_for_each_entry(f, &head->flist, link) { head 83 net/sched/cls_bpf.c struct cls_bpf_head *head = rcu_dereference_bh(tp->root); head 90 net/sched/cls_bpf.c list_for_each_entry_rcu(prog, &head->plist, link) { head 244 net/sched/cls_bpf.c struct cls_bpf_head *head; head 246 net/sched/cls_bpf.c head = kzalloc(sizeof(*head), GFP_KERNEL); head 247 net/sched/cls_bpf.c if (head == NULL) head 250 net/sched/cls_bpf.c INIT_LIST_HEAD_RCU(&head->plist); head 251 net/sched/cls_bpf.c idr_init(&head->handle_idr); head 252 net/sched/cls_bpf.c rcu_assign_pointer(tp->root, head); head 290 net/sched/cls_bpf.c struct cls_bpf_head *head = rtnl_dereference(tp->root); head 292 net/sched/cls_bpf.c idr_remove(&head->handle_idr, prog->handle); head 305 net/sched/cls_bpf.c struct cls_bpf_head *head = rtnl_dereference(tp->root); head 308 net/sched/cls_bpf.c *last = list_empty(&head->plist); head 315 net/sched/cls_bpf.c struct cls_bpf_head *head = rtnl_dereference(tp->root); head 318 net/sched/cls_bpf.c list_for_each_entry_safe(prog, tmp, &head->plist, link) head 321 net/sched/cls_bpf.c idr_destroy(&head->handle_idr); head 322 net/sched/cls_bpf.c kfree_rcu(head, rcu); head 327 net/sched/cls_bpf.c struct cls_bpf_head *head = rtnl_dereference(tp->root); head 330 net/sched/cls_bpf.c list_for_each_entry(prog, &head->plist, link) { head 464 net/sched/cls_bpf.c struct cls_bpf_head *head = rtnl_dereference(tp->root); head 495 net/sched/cls_bpf.c ret = idr_alloc_u32(&head->handle_idr, prog, &handle, head 498 net/sched/cls_bpf.c ret = idr_alloc_u32(&head->handle_idr, prog, &handle, head 519 net/sched/cls_bpf.c idr_replace(&head->handle_idr, prog, handle); head 525 net/sched/cls_bpf.c list_add_rcu(&prog->link, &head->plist); head 535 net/sched/cls_bpf.c idr_remove(&head->handle_idr, prog->handle); head 650 net/sched/cls_bpf.c struct cls_bpf_head *head = rtnl_dereference(tp->root); head 653 net/sched/cls_bpf.c list_for_each_entry(prog, &head->plist, link) { head 668 net/sched/cls_bpf.c struct cls_bpf_head *head = rtnl_dereference(tp->root); head 674 net/sched/cls_bpf.c list_for_each_entry(prog, &head->plist, link) { head 28 net/sched/cls_cgroup.c struct cls_cgroup_head *head = rcu_dereference_bh(tp->root); head 31 net/sched/cls_cgroup.c if (unlikely(!head)) head 35 net/sched/cls_cgroup.c if (!tcf_em_tree_match(skb, &head->ematches, NULL)) head 41 net/sched/cls_cgroup.c return tcf_exts_exec(skb, &head->exts, res); head 58 net/sched/cls_cgroup.c static void __cls_cgroup_destroy(struct cls_cgroup_head *head) head 60 net/sched/cls_cgroup.c tcf_exts_destroy(&head->exts); head 61 net/sched/cls_cgroup.c tcf_em_tree_destroy(&head->ematches); 
head 62 net/sched/cls_cgroup.c tcf_exts_put_net(&head->exts); head 63 net/sched/cls_cgroup.c kfree(head); head 68 net/sched/cls_cgroup.c struct cls_cgroup_head *head = container_of(to_rcu_work(work), head 72 net/sched/cls_cgroup.c __cls_cgroup_destroy(head); head 83 net/sched/cls_cgroup.c struct cls_cgroup_head *head = rtnl_dereference(tp->root); head 90 net/sched/cls_cgroup.c if (!head && !handle) head 93 net/sched/cls_cgroup.c if (head && handle != head->handle) head 96 net/sched/cls_cgroup.c new = kzalloc(sizeof(*head), GFP_KERNEL); head 121 net/sched/cls_cgroup.c if (head) { head 122 net/sched/cls_cgroup.c tcf_exts_get_net(&head->exts); head 123 net/sched/cls_cgroup.c tcf_queue_work(&head->rwork, cls_cgroup_destroy_work); head 135 net/sched/cls_cgroup.c struct cls_cgroup_head *head = rtnl_dereference(tp->root); head 138 net/sched/cls_cgroup.c if (head) { head 139 net/sched/cls_cgroup.c if (tcf_exts_get_net(&head->exts)) head 140 net/sched/cls_cgroup.c tcf_queue_work(&head->rwork, cls_cgroup_destroy_work); head 142 net/sched/cls_cgroup.c __cls_cgroup_destroy(head); head 155 net/sched/cls_cgroup.c struct cls_cgroup_head *head = rtnl_dereference(tp->root); head 160 net/sched/cls_cgroup.c if (!head) head 162 net/sched/cls_cgroup.c if (arg->fn(tp, head, arg) < 0) { head 173 net/sched/cls_cgroup.c struct cls_cgroup_head *head = rtnl_dereference(tp->root); head 176 net/sched/cls_cgroup.c t->tcm_handle = head->handle; head 182 net/sched/cls_cgroup.c if (tcf_exts_dump(skb, &head->exts) < 0 || head 183 net/sched/cls_cgroup.c tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0) head 188 net/sched/cls_cgroup.c if (tcf_exts_dump_stats(skb, &head->exts) < 0) head 298 net/sched/cls_flow.c struct flow_head *head = rcu_dereference_bh(tp->root); head 305 net/sched/cls_flow.c list_for_each_entry_rcu(f, &head->filters, list) { head 393 net/sched/cls_flow.c struct flow_head *head = rtnl_dereference(tp->root); head 546 net/sched/cls_flow.c list_add_tail_rcu(&fnew->list, &head->filters); head 569 net/sched/cls_flow.c struct flow_head *head = rtnl_dereference(tp->root); head 575 net/sched/cls_flow.c *last = list_empty(&head->filters); head 581 net/sched/cls_flow.c struct flow_head *head; head 583 net/sched/cls_flow.c head = kzalloc(sizeof(*head), GFP_KERNEL); head 584 net/sched/cls_flow.c if (head == NULL) head 586 net/sched/cls_flow.c INIT_LIST_HEAD(&head->filters); head 587 net/sched/cls_flow.c rcu_assign_pointer(tp->root, head); head 594 net/sched/cls_flow.c struct flow_head *head = rtnl_dereference(tp->root); head 597 net/sched/cls_flow.c list_for_each_entry_safe(f, next, &head->filters, list) { head 604 net/sched/cls_flow.c kfree_rcu(head, rcu); head 609 net/sched/cls_flow.c struct flow_head *head = rtnl_dereference(tp->root); head 612 net/sched/cls_flow.c list_for_each_entry(f, &head->filters, list) head 682 net/sched/cls_flow.c struct flow_head *head = rtnl_dereference(tp->root); head 685 net/sched/cls_flow.c list_for_each_entry(f, &head->filters, list) { head 299 net/sched/cls_flower.c struct cls_fl_head *head = rcu_dereference_bh(tp->root); head 305 net/sched/cls_flower.c list_for_each_entry_rcu(mask, &head->masks, list) { head 333 net/sched/cls_flower.c struct cls_fl_head *head; head 335 net/sched/cls_flower.c head = kzalloc(sizeof(*head), GFP_KERNEL); head 336 net/sched/cls_flower.c if (!head) head 339 net/sched/cls_flower.c spin_lock_init(&head->masks_lock); head 340 net/sched/cls_flower.c INIT_LIST_HEAD_RCU(&head->masks); head 341 net/sched/cls_flower.c 
INIT_LIST_HEAD(&head->hw_filters); head 342 net/sched/cls_flower.c rcu_assign_pointer(tp->root, head); head 343 net/sched/cls_flower.c idr_init(&head->handle_idr); head 345 net/sched/cls_flower.c return rhashtable_init(&head->ht, &mask_ht_params); head 374 net/sched/cls_flower.c static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask) head 379 net/sched/cls_flower.c rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params); head 381 net/sched/cls_flower.c spin_lock(&head->masks_lock); head 383 net/sched/cls_flower.c spin_unlock(&head->masks_lock); head 508 net/sched/cls_flower.c static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle) head 513 net/sched/cls_flower.c f = idr_find(&head->handle_idr, handle); head 525 net/sched/cls_flower.c struct cls_fl_head *head = fl_head_dereference(tp); head 538 net/sched/cls_flower.c idr_remove(&head->handle_idr, f->handle); head 542 net/sched/cls_flower.c *last = fl_mask_put(head, f->mask); head 553 net/sched/cls_flower.c struct cls_fl_head *head = container_of(to_rcu_work(work), head 557 net/sched/cls_flower.c rhashtable_destroy(&head->ht); head 558 net/sched/cls_flower.c kfree(head); head 565 net/sched/cls_flower.c struct cls_fl_head *head = fl_head_dereference(tp); head 570 net/sched/cls_flower.c list_for_each_entry_safe(mask, next_mask, &head->masks, list) { head 577 net/sched/cls_flower.c idr_destroy(&head->handle_idr); head 580 net/sched/cls_flower.c tcf_queue_work(&head->rwork, fl_destroy_sleepable); head 592 net/sched/cls_flower.c struct cls_fl_head *head = fl_head_dereference(tp); head 594 net/sched/cls_flower.c return __fl_get(head, handle); head 1372 net/sched/cls_flower.c static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, head 1399 net/sched/cls_flower.c err = rhashtable_replace_fast(&head->ht, &mask->ht_node, head 1404 net/sched/cls_flower.c spin_lock(&head->masks_lock); head 1405 net/sched/cls_flower.c list_add_tail_rcu(&newmask->list, &head->masks); head 1406 net/sched/cls_flower.c spin_unlock(&head->masks_lock); head 1418 net/sched/cls_flower.c static int fl_check_assign_mask(struct cls_fl_head *head, head 1432 net/sched/cls_flower.c fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht, head 1443 net/sched/cls_flower.c newmask = fl_create_new_mask(head, mask); head 1463 net/sched/cls_flower.c rhashtable_remove_fast(&head->ht, &mask->ht_node, head 1534 net/sched/cls_flower.c struct cls_fl_head *head = fl_head_dereference(tp); head 1595 net/sched/cls_flower.c err = fl_check_assign_mask(head, fnew, fold, mask); head 1647 net/sched/cls_flower.c idr_replace(&head->handle_idr, fnew, fnew->handle); head 1653 net/sched/cls_flower.c fl_mask_put(head, fold->mask); head 1665 net/sched/cls_flower.c err = idr_alloc_u32(&head->handle_idr, fnew, &handle, head 1678 net/sched/cls_flower.c err = idr_alloc_u32(&head->handle_idr, fnew, &handle, head 1707 net/sched/cls_flower.c fl_mask_put(head, fnew->mask); head 1723 net/sched/cls_flower.c struct cls_fl_head *head = fl_head_dereference(tp); head 1729 net/sched/cls_flower.c *last = list_empty(&head->masks); head 1738 net/sched/cls_flower.c struct cls_fl_head *head = fl_head_dereference(tp); head 1744 net/sched/cls_flower.c idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) { head 1762 net/sched/cls_flower.c struct cls_fl_head *head = fl_head_dereference(tp); head 1765 net/sched/cls_flower.c if (list_empty(&head->hw_filters)) { head 1771 net/sched/cls_flower.c f = list_entry(&head->hw_filters, struct cls_fl_filter, head 1773 
net/sched/cls_flower.c list_for_each_entry_continue(f, &head->hw_filters, hw_list) { head 1852 net/sched/cls_flower.c struct cls_fl_head *head = fl_head_dereference(tp); head 1855 net/sched/cls_flower.c list_add(&f->hw_list, &head->hw_filters); head 2531 net/sched/cls_flower.c struct cls_fl_head *head = fl_head_dereference(tp); head 2534 net/sched/cls_flower.c tp->deleting = idr_is_empty(&head->handle_idr); head 53 net/sched/cls_fw.c struct fw_head *head = rcu_dereference_bh(tp->root); head 58 net/sched/cls_fw.c if (head != NULL) { head 59 net/sched/cls_fw.c id &= head->mask; head 61 net/sched/cls_fw.c for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f; head 91 net/sched/cls_fw.c struct fw_head *head = rtnl_dereference(tp->root); head 94 net/sched/cls_fw.c if (head == NULL) head 97 net/sched/cls_fw.c f = rtnl_dereference(head->ht[fw_hash(handle)]); head 133 net/sched/cls_fw.c struct fw_head *head = rtnl_dereference(tp->root); head 137 net/sched/cls_fw.c if (head == NULL) head 141 net/sched/cls_fw.c while ((f = rtnl_dereference(head->ht[h])) != NULL) { head 142 net/sched/cls_fw.c RCU_INIT_POINTER(head->ht[h], head 151 net/sched/cls_fw.c kfree_rcu(head, rcu); head 157 net/sched/cls_fw.c struct fw_head *head = rtnl_dereference(tp->root); head 164 net/sched/cls_fw.c if (head == NULL || f == NULL) head 167 net/sched/cls_fw.c fp = &head->ht[fw_hash(f->id)]; head 183 net/sched/cls_fw.c if (rcu_access_pointer(head->ht[h])) { head 204 net/sched/cls_fw.c struct fw_head *head = rtnl_dereference(tp->root); head 229 net/sched/cls_fw.c if (mask != head->mask) head 231 net/sched/cls_fw.c } else if (head->mask != 0xFFFFFFFF) head 243 net/sched/cls_fw.c struct fw_head *head = rtnl_dereference(tp->root); head 287 net/sched/cls_fw.c fp = &head->ht[fw_hash(fnew->id)]; head 306 net/sched/cls_fw.c if (!head) { head 311 net/sched/cls_fw.c head = kzalloc(sizeof(*head), GFP_KERNEL); head 312 net/sched/cls_fw.c if (!head) head 314 net/sched/cls_fw.c head->mask = mask; head 316 net/sched/cls_fw.c rcu_assign_pointer(tp->root, head); head 333 net/sched/cls_fw.c RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]); head 334 net/sched/cls_fw.c rcu_assign_pointer(head->ht[fw_hash(handle)], f); head 348 net/sched/cls_fw.c struct fw_head *head = rtnl_dereference(tp->root); head 351 net/sched/cls_fw.c if (head == NULL) head 360 net/sched/cls_fw.c for (f = rtnl_dereference(head->ht[h]); f; head 378 net/sched/cls_fw.c struct fw_head *head = rtnl_dereference(tp->root); head 403 net/sched/cls_fw.c if (head->mask != 0xFFFFFFFF && head 404 net/sched/cls_fw.c nla_put_u32(skb, TCA_FW_MASK, head->mask)) head 30 net/sched/cls_matchall.c struct cls_mall_head *head = rcu_dereference_bh(tp->root); head 32 net/sched/cls_matchall.c if (unlikely(!head)) head 35 net/sched/cls_matchall.c if (tc_skip_sw(head->flags)) head 38 net/sched/cls_matchall.c *res = head->res; head 39 net/sched/cls_matchall.c __this_cpu_inc(head->pf->rhit); head 40 net/sched/cls_matchall.c return tcf_exts_exec(skb, &head->exts, res); head 48 net/sched/cls_matchall.c static void __mall_destroy(struct cls_mall_head *head) head 50 net/sched/cls_matchall.c tcf_exts_destroy(&head->exts); head 51 net/sched/cls_matchall.c tcf_exts_put_net(&head->exts); head 52 net/sched/cls_matchall.c free_percpu(head->pf); head 53 net/sched/cls_matchall.c kfree(head); head 58 net/sched/cls_matchall.c struct cls_mall_head *head = container_of(to_rcu_work(work), head 62 net/sched/cls_matchall.c __mall_destroy(head); head 67 net/sched/cls_matchall.c struct cls_mall_head *head, head 74 
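The net/sched/cls_fw.c entries mask the incoming id before hashing (id &= head->mask) and then walk a single chain of a 256-way table. A compact standalone version follows; fw_hash here is a simple byte-fold stand-in, not the kernel's exact hash.

    #include <stdio.h>
    #include <stddef.h>

    #define HTSIZE 256

    struct fw_filter {
        unsigned int id;
        struct fw_filter *next;
    };

    struct fw_head {
        unsigned int mask;                /* applied before hashing */
        struct fw_filter *ht[HTSIZE];
    };

    static unsigned int fw_hash(unsigned int id)
    {
        /* fold all bytes into one bucket index */
        return (id ^ (id >> 8) ^ (id >> 16) ^ (id >> 24)) & (HTSIZE - 1);
    }

    static struct fw_filter *fw_lookup(struct fw_head *head, unsigned int id)
    {
        struct fw_filter *f;

        id &= head->mask;                 /* ignore bits outside the mask */
        for (f = head->ht[fw_hash(id)]; f; f = f->next)
            if (f->id == id)
                return f;
        return NULL;
    }

    int main(void)
    {
        struct fw_head head = { .mask = 0xFF };
        struct fw_filter f = { .id = 0x42 };

        head.ht[fw_hash(f.id)] = &f;
        printf("%s\n", fw_lookup(&head, 0x1342) ? "hit" : "miss"); /* hit */
        return 0;
    }
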
net/sched/cls_matchall.c tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack); head 79 net/sched/cls_matchall.c &head->flags, &head->in_hw_count, true); head 83 net/sched/cls_matchall.c struct cls_mall_head *head, head 89 net/sched/cls_matchall.c bool skip_sw = tc_skip_sw(head->flags); head 92 net/sched/cls_matchall.c cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts)); head 96 net/sched/cls_matchall.c tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack); head 100 net/sched/cls_matchall.c err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts, true); head 103 net/sched/cls_matchall.c mall_destroy_hw_filter(tp, head, cookie, NULL); head 113 net/sched/cls_matchall.c skip_sw, &head->flags, &head->in_hw_count, true); head 118 net/sched/cls_matchall.c mall_destroy_hw_filter(tp, head, cookie, NULL); head 122 net/sched/cls_matchall.c if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW)) head 131 net/sched/cls_matchall.c struct cls_mall_head *head = rtnl_dereference(tp->root); head 133 net/sched/cls_matchall.c if (!head) head 136 net/sched/cls_matchall.c tcf_unbind_filter(tp, &head->res); head 138 net/sched/cls_matchall.c if (!tc_skip_hw(head->flags)) head 139 net/sched/cls_matchall.c mall_destroy_hw_filter(tp, head, (unsigned long) head, extack); head 141 net/sched/cls_matchall.c if (tcf_exts_get_net(&head->exts)) head 142 net/sched/cls_matchall.c tcf_queue_work(&head->rwork, mall_destroy_work); head 144 net/sched/cls_matchall.c __mall_destroy(head); head 149 net/sched/cls_matchall.c struct cls_mall_head *head = rtnl_dereference(tp->root); head 151 net/sched/cls_matchall.c if (head && head->handle == handle) head 152 net/sched/cls_matchall.c return head; head 164 net/sched/cls_matchall.c struct cls_mall_head *head, head 171 net/sched/cls_matchall.c err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true, head 177 net/sched/cls_matchall.c head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]); head 178 net/sched/cls_matchall.c tcf_bind_filter(tp, &head->res, base); head 189 net/sched/cls_matchall.c struct cls_mall_head *head = rtnl_dereference(tp->root); head 198 net/sched/cls_matchall.c if (head) head 245 net/sched/cls_matchall.c *arg = head; head 262 net/sched/cls_matchall.c struct cls_mall_head *head = rtnl_dereference(tp->root); head 264 net/sched/cls_matchall.c head->deleting = true; head 272 net/sched/cls_matchall.c struct cls_mall_head *head = rtnl_dereference(tp->root); head 277 net/sched/cls_matchall.c if (!head || head->deleting) head 279 net/sched/cls_matchall.c if (arg->fn(tp, head, arg) < 0) head 288 net/sched/cls_matchall.c struct cls_mall_head *head = rtnl_dereference(tp->root); head 293 net/sched/cls_matchall.c if (tc_skip_hw(head->flags)) head 296 net/sched/cls_matchall.c cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts)); head 300 net/sched/cls_matchall.c tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack); head 303 net/sched/cls_matchall.c cls_mall.cookie = (unsigned long)head; head 305 net/sched/cls_matchall.c err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts, true); head 308 net/sched/cls_matchall.c if (add && tc_skip_sw(head->flags)) { head 316 net/sched/cls_matchall.c &cls_mall, cb_priv, &head->flags, head 317 net/sched/cls_matchall.c &head->in_hw_count); head 328 net/sched/cls_matchall.c struct cls_mall_head *head, head 334 net/sched/cls_matchall.c tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL); head 340 net/sched/cls_matchall.c 
tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes, head 348 net/sched/cls_matchall.c struct cls_mall_head *head = fh; head 352 net/sched/cls_matchall.c if (!head) head 355 net/sched/cls_matchall.c if (!tc_skip_hw(head->flags)) head 356 net/sched/cls_matchall.c mall_stats_hw_filter(tp, head, (unsigned long)head); head 358 net/sched/cls_matchall.c t->tcm_handle = head->handle; head 364 net/sched/cls_matchall.c if (head->res.classid && head 365 net/sched/cls_matchall.c nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid)) head 368 net/sched/cls_matchall.c if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags)) head 372 net/sched/cls_matchall.c struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu); head 382 net/sched/cls_matchall.c if (tcf_exts_dump(skb, &head->exts)) head 387 net/sched/cls_matchall.c if (tcf_exts_dump_stats(skb, &head->exts) < 0) head 400 net/sched/cls_matchall.c struct cls_mall_head *head = fh; head 402 net/sched/cls_matchall.c if (head && head->res.classid == classid) { head 404 net/sched/cls_matchall.c __tcf_bind_filter(q, &head->res, base); head 406 net/sched/cls_matchall.c __tcf_unbind_filter(q, &head->res); head 68 net/sched/cls_route.c route4_reset_fastmap(struct route4_head *head) head 71 net/sched/cls_route.c memset(head->fastmap, 0, sizeof(head->fastmap)); head 76 net/sched/cls_route.c route4_set_fastmap(struct route4_head *head, u32 id, int iif, head 83 net/sched/cls_route.c head->fastmap[h].id = id; head 84 net/sched/cls_route.c head->fastmap[h].iif = iif; head 85 net/sched/cls_route.c head->fastmap[h].filter = f; head 120 net/sched/cls_route.c route4_set_fastmap(head, id, iif, f); \ head 127 net/sched/cls_route.c struct route4_head *head = rcu_dereference_bh(tp->root); head 145 net/sched/cls_route.c if (id == head->fastmap[h].id && head 146 net/sched/cls_route.c iif == head->fastmap[h].iif && head 147 net/sched/cls_route.c (f = head->fastmap[h].filter) != NULL) { head 162 net/sched/cls_route.c b = rcu_dereference_bh(head->table[h]); head 188 net/sched/cls_route.c route4_set_fastmap(head, id, iif, ROUTE4_FAILURE); head 217 net/sched/cls_route.c struct route4_head *head = rtnl_dereference(tp->root); head 230 net/sched/cls_route.c b = rtnl_dereference(head->table[h1]); head 243 net/sched/cls_route.c struct route4_head *head; head 245 net/sched/cls_route.c head = kzalloc(sizeof(struct route4_head), GFP_KERNEL); head 246 net/sched/cls_route.c if (head == NULL) head 249 net/sched/cls_route.c rcu_assign_pointer(tp->root, head); head 278 net/sched/cls_route.c struct route4_head *head = rtnl_dereference(tp->root); head 281 net/sched/cls_route.c if (head == NULL) head 287 net/sched/cls_route.c b = rtnl_dereference(head->table[h1]); head 304 net/sched/cls_route.c RCU_INIT_POINTER(head->table[h1], NULL); head 308 net/sched/cls_route.c kfree_rcu(head, rcu); head 314 net/sched/cls_route.c struct route4_head *head = rtnl_dereference(tp->root); head 322 net/sched/cls_route.c if (!head || !f) head 339 net/sched/cls_route.c route4_reset_fastmap(head); head 356 net/sched/cls_route.c RCU_INIT_POINTER(head->table[to_hash(h)], NULL); head 365 net/sched/cls_route.c if (rcu_access_pointer(head->table[h1])) { head 383 net/sched/cls_route.c u32 handle, struct route4_head *head, head 428 net/sched/cls_route.c b = rtnl_dereference(head->table[h1]); head 434 net/sched/cls_route.c rcu_assign_pointer(head->table[h1], b); head 470 net/sched/cls_route.c struct route4_head *head = rtnl_dereference(tp->root); head 512 net/sched/cls_route.c err = route4_set_parms(net, 
tp, base, f, handle, head, tb, head 532 net/sched/cls_route.c b = rtnl_dereference(head->table[th]); head 545 net/sched/cls_route.c route4_reset_fastmap(head); head 564 net/sched/cls_route.c struct route4_head *head = rtnl_dereference(tp->root); head 567 net/sched/cls_route.c if (head == NULL || arg->stop) head 571 net/sched/cls_route.c struct route4_bucket *b = rtnl_dereference(head->table[h]); head 130 net/sched/cls_rsvp.h struct rsvp_head *head = rcu_dereference_bh(tp->root); head 170 net/sched/cls_rsvp.h for (s = rcu_dereference_bh(head->ht[h1]); s; head 222 net/sched/cls_rsvp.h struct rsvp_head *head = rtnl_dereference(tp->root); head 229 net/sched/cls_rsvp.h for (s = rtnl_dereference(head->ht[h1]); s; head 249 net/sched/cls_rsvp.h struct rsvp_head *head = rtnl_dereference(tp->root); head 258 net/sched/cls_rsvp.h for (s = rtnl_dereference(head->ht[h1]); s; head 343 net/sched/cls_rsvp.h struct rsvp_head *head = rtnl_dereference(tp->root); head 365 net/sched/cls_rsvp.h sp = &head->ht[h & 0xFF]; head 382 net/sched/cls_rsvp.h if (rcu_access_pointer(head->ht[h1])) { head 659 net/sched/cls_rsvp.h struct rsvp_head *head = rtnl_dereference(tp->root); head 668 net/sched/cls_rsvp.h for (s = rtnl_dereference(head->ht[h]); s; head 127 net/sched/sch_cake.c struct sk_buff *head; head 818 net/sched/sch_cake.c struct sk_buff *skb = flow->head; head 821 net/sched/sch_cake.c flow->head = skb->next; head 832 net/sched/sch_cake.c if (!flow->head) head 833 net/sched/sch_cake.c flow->head = skb; head 1129 net/sched/sch_cake.c if (flow->head == flow->tail) head 1152 net/sched/sch_cake.c for (skb_check = flow->head; head 1260 net/sched/sch_cake.c flow->head = elig_ack->next; head 1856 net/sched/sch_cake.c if (flow->head) { head 1890 net/sched/sch_cake.c struct list_head *head; head 1977 net/sched/sch_cake.c head = &b->decaying_flows; head 1978 net/sched/sch_cake.c if (!first_flow || list_empty(head)) { head 1979 net/sched/sch_cake.c head = &b->new_flows; head 1980 net/sched/sch_cake.c if (list_empty(head)) { head 1981 net/sched/sch_cake.c head = &b->old_flows; head 1982 net/sched/sch_cake.c if (unlikely(list_empty(head))) { head 1983 net/sched/sch_cake.c head = &b->decaying_flows; head 1984 net/sched/sch_cake.c if (unlikely(list_empty(head))) head 1989 net/sched/sch_cake.c flow = list_first_entry(head, struct cake_flow, flowchain); head 2005 net/sched/sch_cake.c if (flow->head) { head 2102 net/sched/sch_cake.c !flow->head) head 2961 net/sched/sch_cake.c if (flow->head) { head 2963 net/sched/sch_cake.c skb = flow->head; head 208 net/sched/sch_cbq.c struct cbq_class *head = &q->link; head 225 net/sched/sch_cbq.c defmap = head->defaults; head 227 net/sched/sch_cbq.c fl = rcu_dereference_bh(head->filter_list); head 245 net/sched/sch_cbq.c if (cl->level >= head->level) head 268 net/sched/sch_cbq.c head = cl; head 272 net/sched/sch_cbq.c cl = head; head 278 net/sched/sch_cbq.c !(cl = head->defaults[prio & TC_PRIO_MAX]) && head 279 net/sched/sch_cbq.c !(cl = head->defaults[TC_PRIO_BESTEFFORT])) head 280 net/sched/sch_cbq.c return head; head 67 net/sched/sch_choke.c unsigned int head; head 78 net/sched/sch_choke.c return (q->tail - q->head) & q->tab_mask; head 97 net/sched/sch_choke.c q->head = (q->head + 1) & q->tab_mask; head 98 net/sched/sch_choke.c if (q->head == q->tail) head 100 net/sched/sch_choke.c } while (q->tab[q->head] == NULL); head 108 net/sched/sch_choke.c if (q->head == q->tail) head 122 net/sched/sch_choke.c if (idx == q->head) head 193 net/sched/sch_choke.c *pidx = (q->head + 
prandom_u32_max(choke_len(q))) & q->tab_mask; head 199 net/sched/sch_choke.c return q->tab[*pidx = q->head]; head 212 net/sched/sch_choke.c if (q->head == q->tail) head 295 net/sched/sch_choke.c if (q->head == q->tail) { head 301 net/sched/sch_choke.c skb = q->tab[q->head]; head 302 net/sched/sch_choke.c q->tab[q->head] = NULL; head 315 net/sched/sch_choke.c while (q->head != q->tail) { head 316 net/sched/sch_choke.c struct sk_buff *skb = q->tab[q->head]; head 318 net/sched/sch_choke.c q->head = (q->head + 1) & q->tab_mask; head 328 net/sched/sch_choke.c q->head = q->tail = 0; head 391 net/sched/sch_choke.c while (q->head != q->tail) { head 392 net/sched/sch_choke.c struct sk_buff *skb = q->tab[q->head]; head 394 net/sched/sch_choke.c q->head = (q->head + 1) & q->tab_mask; head 407 net/sched/sch_choke.c q->head = 0; head 425 net/sched/sch_choke.c if (q->head == q->tail) head 492 net/sched/sch_choke.c return (q->head != q->tail) ? q->tab[q->head] : NULL; head 35 net/sched/sch_etf.c struct rb_root_cached head; head 113 net/sched/sch_etf.c p = rb_first_cached(&q->head); head 166 net/sched/sch_etf.c struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL; head 189 net/sched/sch_etf.c rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost); head 211 net/sched/sch_etf.c rb_erase_cached(&skb->rbnode, &q->head); head 235 net/sched/sch_etf.c rb_erase_cached(&skb->rbnode, &q->head); head 423 net/sched/sch_etf.c struct rb_node *p = rb_first_cached(&q->head); head 430 net/sched/sch_etf.c rb_erase_cached(&skb->rbnode, &q->head); head 70 net/sched/sch_fq.c struct sk_buff *head; /* list of skbs for this flow : first skb */ head 146 net/sched/sch_fq.c static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) head 148 net/sched/sch_fq.c if (head->first) head 149 net/sched/sch_fq.c head->last->next = flow; head 151 net/sched/sch_fq.c head->first = flow; head 152 net/sched/sch_fq.c head->last = flow; head 347 net/sched/sch_fq.c struct sk_buff *head = flow->head; head 350 net/sched/sch_fq.c return head; head 352 net/sched/sch_fq.c if (!head) head 355 net/sched/sch_fq.c if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send) head 357 net/sched/sch_fq.c return head; head 363 net/sched/sch_fq.c if (skb == flow->head) { head 364 net/sched/sch_fq.c flow->head = skb->next; head 389 net/sched/sch_fq.c struct sk_buff *head, *aux; head 393 net/sched/sch_fq.c head = flow->head; head 394 net/sched/sch_fq.c if (!head || head 396 net/sched/sch_fq.c if (!head) head 397 net/sched/sch_fq.c flow->head = skb; head 485 net/sched/sch_fq.c struct fq_flow_head *head; head 502 net/sched/sch_fq.c head = &q->new_flows; head 503 net/sched/sch_fq.c if (!head->first) { head 504 net/sched/sch_fq.c head = &q->old_flows; head 505 net/sched/sch_fq.c if (!head->first) { head 512 net/sched/sch_fq.c f = head->first; head 516 net/sched/sch_fq.c head->first = f->next; head 527 net/sched/sch_fq.c head->first = f->next; head 541 net/sched/sch_fq.c head->first = f->next; head 543 net/sched/sch_fq.c if ((head == &q->new_flows) && q->old_flows.first) { head 613 net/sched/sch_fq.c rtnl_kfree_skbs(flow->head, flow->tail); head 614 net/sched/sch_fq.c flow->head = NULL; head 44 net/sched/sch_fq_codel.c struct sk_buff *head; head 119 net/sched/sch_fq_codel.c struct sk_buff *skb = flow->head; head 121 net/sched/sch_fq_codel.c flow->head = skb->next; head 130 net/sched/sch_fq_codel.c if (flow->head == NULL) head 131 net/sched/sch_fq_codel.c flow->head = skb; head 264 net/sched/sch_fq_codel.c if (flow->head) { head 287 
net/sched/sch_fq_codel.c struct list_head *head; head 290 net/sched/sch_fq_codel.c head = &q->new_flows; head 291 net/sched/sch_fq_codel.c if (list_empty(head)) { head 292 net/sched/sch_fq_codel.c head = &q->old_flows; head 293 net/sched/sch_fq_codel.c if (list_empty(head)) head 296 net/sched/sch_fq_codel.c flow = list_first_entry(head, struct fq_codel_flow, flowchain); head 310 net/sched/sch_fq_codel.c if ((head == &q->new_flows) && !list_empty(&q->old_flows)) head 332 net/sched/sch_fq_codel.c rtnl_kfree_skbs(flow->head, flow->tail); head 333 net/sched/sch_fq_codel.c flow->head = NULL; head 643 net/sched/sch_fq_codel.c if (flow->head) { head 645 net/sched/sch_fq_codel.c skb = flow->head; head 942 net/sched/sch_generic.c static void qdisc_free_cb(struct rcu_head *head) head 944 net/sched/sch_generic.c struct Qdisc *q = container_of(head, struct Qdisc, rcu); head 1195 net/sched/sch_generic.c void dev_deactivate_many(struct list_head *head) head 1199 net/sched/sch_generic.c list_for_each_entry(dev, head, close_list) { head 1216 net/sched/sch_generic.c list_for_each_entry(dev, head, close_list) { head 1353 net/sched/sch_generic.c static void mini_qdisc_rcu_func(struct rcu_head *head) head 1119 net/sched/sch_hfsc.c struct hfsc_class *head, *cl; head 1130 net/sched/sch_hfsc.c head = &q->root; head 1149 net/sched/sch_hfsc.c if (cl->level >= head->level) head 1158 net/sched/sch_hfsc.c head = cl; head 121 net/sched/sch_hhf.c struct sk_buff *head; head 181 net/sched/sch_hhf.c struct list_head *head, head 187 net/sched/sch_hhf.c if (list_empty(head)) head 190 net/sched/sch_hhf.c list_for_each_entry_safe(flow, next, head, flowchain) { head 197 net/sched/sch_hhf.c if (list_is_last(&flow->flowchain, head)) head 212 net/sched/sch_hhf.c static struct hh_flow_state *alloc_new_hh(struct list_head *head, head 218 net/sched/sch_hhf.c if (!list_empty(head)) { head 220 net/sched/sch_hhf.c list_for_each_entry(flow, head, flowchain) { head 239 net/sched/sch_hhf.c list_add_tail(&flow->flowchain, head); head 331 net/sched/sch_hhf.c struct sk_buff *skb = bucket->head; head 333 net/sched/sch_hhf.c bucket->head = skb->next; head 341 net/sched/sch_hhf.c if (bucket->head == NULL) head 342 net/sched/sch_hhf.c bucket->head = skb; head 356 net/sched/sch_hhf.c if (!bucket->head) head 359 net/sched/sch_hhf.c if (bucket->head) { head 423 net/sched/sch_hhf.c struct list_head *head; head 426 net/sched/sch_hhf.c head = &q->new_buckets; head 427 net/sched/sch_hhf.c if (list_empty(head)) { head 428 net/sched/sch_hhf.c head = &q->old_buckets; head 429 net/sched/sch_hhf.c if (list_empty(head)) head 432 net/sched/sch_hhf.c bucket = list_first_entry(head, struct wdrr_bucket, bucketchain); head 443 net/sched/sch_hhf.c if (bucket->head) { head 451 net/sched/sch_hhf.c if ((head == &q->new_buckets) && !list_empty(&q->old_buckets)) head 486 net/sched/sch_hhf.c struct list_head *head = &q->hh_flows[i]; head 488 net/sched/sch_hhf.c if (list_empty(head)) head 490 net/sched/sch_hhf.c list_for_each_entry_safe(flow, next, head, flowchain) { head 353 net/sched/sch_sfq.c struct sk_buff *head; head 430 net/sched/sch_sfq.c head = slot_dequeue_head(slot); head 431 net/sched/sch_sfq.c delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb); head 434 net/sched/sch_sfq.c qdisc_drop(head, sch, to_free); head 111 net/sched/sch_taprio.c static void taprio_free_sched_cb(struct rcu_head *head) head 113 net/sched/sch_taprio.c struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu); head 722 net/sctp/input.c struct sctp_hashbucket *head; head 
727 net/sctp/input.c head = &sctp_ep_hashtable[epb->hashent]; head 738 net/sctp/input.c sctp_for_each_hentry(epb2, &head->chain) { head 765 net/sctp/input.c write_lock(&head->lock); head 766 net/sctp/input.c hlist_add_head(&epb->node, &head->chain); head 767 net/sctp/input.c write_unlock(&head->lock); head 787 net/sctp/input.c struct sctp_hashbucket *head; head 794 net/sctp/input.c head = &sctp_ep_hashtable[epb->hashent]; head 799 net/sctp/input.c write_lock(&head->lock); head 801 net/sctp/input.c write_unlock(&head->lock); head 832 net/sctp/input.c struct sctp_hashbucket *head; head 841 net/sctp/input.c head = &sctp_ep_hashtable[hash]; head 842 net/sctp/input.c read_lock(&head->lock); head 843 net/sctp/input.c sctp_for_each_hentry(epb, &head->chain) { head 862 net/sctp/input.c read_unlock(&head->lock); head 390 net/sctp/output.c static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb) head 392 net/sctp/output.c if (SCTP_OUTPUT_CB(head)->last == head) head 393 net/sctp/output.c skb_shinfo(head)->frag_list = skb; head 395 net/sctp/output.c SCTP_OUTPUT_CB(head)->last->next = skb; head 396 net/sctp/output.c SCTP_OUTPUT_CB(head)->last = skb; head 398 net/sctp/output.c head->truesize += skb->truesize; head 399 net/sctp/output.c head->data_len += skb->len; head 400 net/sctp/output.c head->len += skb->len; head 401 net/sctp/output.c refcount_add(skb->truesize, &head->sk->sk_wmem_alloc); head 407 net/sctp/output.c struct sk_buff *head, int gso, gfp_t gfp) head 413 net/sctp/output.c struct sock *sk = head->sk; head 418 net/sctp/output.c skb_shinfo(head)->gso_type = sk->sk_gso_type; head 419 net/sctp/output.c SCTP_OUTPUT_CB(head)->last = head; head 421 net/sctp/output.c nskb = head; head 501 net/sctp/output.c sctp_packet_gso_append(head, nskb); head 507 net/sctp/output.c memset(head->cb, 0, max(sizeof(struct inet_skb_parm), head 509 net/sctp/output.c skb_shinfo(head)->gso_segs = pkt_count; head 510 net/sctp/output.c skb_shinfo(head)->gso_size = GSO_BY_FRAGS; head 512 net/sctp/output.c if (skb_dst(head) != tp->dst) { head 523 net/sctp/output.c if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) || head 524 net/sctp/output.c dst_xfrm(skb_dst(head)) || packet->ipfragok) { head 526 net/sctp/output.c (struct sctphdr *)skb_transport_header(head); head 528 net/sctp/output.c sh->checksum = sctp_compute_cksum(head, 0); head 531 net/sctp/output.c head->ip_summed = CHECKSUM_PARTIAL; head 532 net/sctp/output.c head->csum_not_inet = 1; head 533 net/sctp/output.c head->csum_start = skb_transport_header(head) - head->head; head 534 net/sctp/output.c head->csum_offset = offsetof(struct sctphdr, checksum); head 552 net/sctp/output.c struct sk_buff *head; head 572 net/sctp/output.c head = alloc_skb((gso ? 
packet->overhead : packet->size) + head 574 net/sctp/output.c if (!head) head 576 net/sctp/output.c skb_reserve(head, packet->overhead + MAX_HEADER); head 577 net/sctp/output.c skb_set_owner_w(head, sk); head 580 net/sctp/output.c sh = skb_push(head, sizeof(struct sctphdr)); head 581 net/sctp/output.c skb_reset_transport_header(head); head 591 net/sctp/output.c kfree_skb(head); head 594 net/sctp/output.c skb_dst_set(head, dst); head 597 net/sctp/output.c pkt_count = sctp_packet_pack(packet, head, gso, gfp); head 599 net/sctp/output.c kfree_skb(head); head 602 net/sctp/output.c pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len); head 623 net/sctp/output.c head->ignore_df = packet->ipfragok; head 625 net/sctp/output.c skb_set_dst_pending_confirm(head, 1); head 629 net/sctp/output.c if (tp->af_specific->sctp_xmit(head, tp) >= 0 && head 318 net/sctp/outqueue.c static void sctp_insert_list(struct list_head *head, struct list_head *new) head 328 net/sctp/outqueue.c list_for_each(pos, head) { head 338 net/sctp/outqueue.c list_add_tail(new, head); head 1087 net/sctp/outqueue.c chunk->skb ? chunk->skb->head : NULL, chunk->skb ? head 163 net/sctp/proc.c struct sctp_hashbucket *head; head 172 net/sctp/proc.c head = &sctp_ep_hashtable[hash]; head 173 net/sctp/proc.c read_lock_bh(&head->lock); head 174 net/sctp/proc.c sctp_for_each_hentry(epb, &head->chain) { head 188 net/sctp/proc.c read_unlock_bh(&head->lock); head 5359 net/sctp/socket.c struct sctp_hashbucket *head; head 5361 net/sctp/socket.c for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; head 5362 net/sctp/socket.c hash++, head++) { head 5363 net/sctp/socket.c read_lock_bh(&head->lock); head 5364 net/sctp/socket.c sctp_for_each_hentry(epb, &head->chain) { head 5369 net/sctp/socket.c read_unlock_bh(&head->lock); head 8163 net/sctp/socket.c struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); head 8169 net/sctp/socket.c struct sctp_bind_hashbucket *head; /* hash list */ head 8198 net/sctp/socket.c head = &sctp_port_hashtable[index]; head 8199 net/sctp/socket.c spin_lock(&head->lock); head 8200 net/sctp/socket.c sctp_for_each_hentry(pp, &head->chain) head 8206 net/sctp/socket.c spin_unlock(&head->lock); head 8226 net/sctp/socket.c head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; head 8227 net/sctp/socket.c spin_lock(&head->lock); head 8228 net/sctp/socket.c sctp_for_each_hentry(pp, &head->chain) { head 8285 net/sctp/socket.c if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum))) head 8327 net/sctp/socket.c spin_unlock(&head->lock); head 8534 net/sctp/socket.c struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum) head 8545 net/sctp/socket.c hlist_add_head(&pp->node, &head->chain); head 8563 net/sctp/socket.c struct sctp_bind_hashbucket *head = head 8568 net/sctp/socket.c spin_lock(&head->lock); head 8574 net/sctp/socket.c spin_unlock(&head->lock); head 9367 net/sctp/socket.c struct sctp_bind_hashbucket *head; head 9385 net/sctp/socket.c head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk), head 9387 net/sctp/socket.c spin_lock_bh(&head->lock); head 9392 net/sctp/socket.c spin_unlock_bh(&head->lock); head 141 net/sctp/transport.c static void sctp_transport_destroy_rcu(struct rcu_head *head) head 145 net/sctp/transport.c transport = container_of(head, struct sctp_transport, rcu); head 78 net/smc/af_smc.c struct hlist_head *head; head 80 net/smc/af_smc.c head = &h->ht; head 83 net/smc/af_smc.c sk_add_node(sk, head); head 198 net/smc/smc_diag.c struct hlist_head *head; 
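
The queueing entries above all repeat one idiom: sch_cake, sch_fq, sch_fq_codel and sch_hhf each hold a flow's packets as a singly linked list addressed only by a head/tail pointer pair, and the sctp_packet_gso_append entries show the matching aggregate-length bookkeeping as fragments are chained on. Below is a minimal userspace sketch of that idiom, not kernel code; struct pkt, struct flow, flow_enqueue and flow_dequeue are hypothetical stand-ins for the sk_buff machinery.

	#include <stdio.h>

	/* Hypothetical stand-ins for sk_buff and the per-flow queue. */
	struct pkt {
		struct pkt *next;
		unsigned int len;
	};

	struct flow {
		struct pkt *head;     /* oldest packet; NULL means the flow is empty */
		struct pkt *tail;     /* newest packet; only valid while head != NULL */
		unsigned int backlog; /* queued bytes, updated on enqueue/dequeue */
	};

	static void flow_enqueue(struct flow *f, struct pkt *p)
	{
		p->next = NULL;
		if (!f->head)            /* empty flow: packet becomes the head */
			f->head = p;
		else                     /* otherwise link after the current tail */
			f->tail->next = p;
		f->tail = p;
		f->backlog += p->len;
	}

	static struct pkt *flow_dequeue(struct flow *f)
	{
		struct pkt *p = f->head;

		if (!p)
			return NULL;
		f->head = p->next;       /* advance head; tail is left stale */
		f->backlog -= p->len;
		return p;
	}

	int main(void)
	{
		struct flow f = { 0 };
		struct pkt a = { .len = 100 }, b = { .len = 200 };
		struct pkt *first;

		flow_enqueue(&f, &a);
		flow_enqueue(&f, &b);
		first = flow_dequeue(&f);
		printf("dequeued %u bytes, %u still queued\n", first->len, f.backlog);
		return 0;
	}

Emptiness is tested via head alone, which is why the kernel dequeue paths in the listing advance flow->head but never bother resetting flow->tail; the stale tail is simply overwritten on the next enqueue.
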
head 203 net/smc/smc_diag.c head = &prot->h.smc_hash->ht; head 204 net/smc/smc_diag.c if (hlist_empty(head)) head 207 net/smc/smc_diag.c sk_for_each(sk, head) { head 103 net/strparser/strparser.c struct sk_buff *head, *skb; head 112 net/strparser/strparser.c head = strp->skb_head; head 113 net/strparser/strparser.c if (head) { head 141 net/strparser/strparser.c err = skb_unclone(head, GFP_ATOMIC); head 148 net/strparser/strparser.c if (unlikely(skb_shinfo(head)->frag_list)) { head 155 net/strparser/strparser.c if (WARN_ON(head->next)) { head 160 net/strparser/strparser.c skb = alloc_skb_for_msg(head); head 167 net/strparser/strparser.c strp->skb_nextp = &head->next; head 169 net/strparser/strparser.c head = skb; head 172 net/strparser/strparser.c &skb_shinfo(head)->frag_list; head 188 net/strparser/strparser.c head = strp->skb_head; head 189 net/strparser/strparser.c if (!head) { head 190 net/strparser/strparser.c head = skb; head 191 net/strparser/strparser.c strp->skb_head = head; head 194 net/strparser/strparser.c stm = _strp_msg(head); head 210 net/strparser/strparser.c stm = _strp_msg(head); head 213 net/strparser/strparser.c head->data_len += skb->len; head 214 net/strparser/strparser.c head->len += skb->len; head 215 net/strparser/strparser.c head->truesize += skb->truesize; head 221 net/strparser/strparser.c len = (*strp->cb.parse_msg)(strp, head); head 248 net/strparser/strparser.c } else if (len <= (ssize_t)head->len - head 309 net/strparser/strparser.c strp->cb.rcv_msg(strp, head); head 376 net/sunrpc/auth.c void rpcauth_destroy_credlist(struct list_head *head) head 380 net/sunrpc/auth.c while (!list_empty(head)) { head 381 net/sunrpc/auth.c cred = list_entry(head->next, struct rpc_cred, cr_lru); head 433 net/sunrpc/auth.c struct hlist_head *head; head 441 net/sunrpc/auth.c head = &cache->hashtable[i]; head 442 net/sunrpc/auth.c while (!hlist_empty(head)) { head 443 net/sunrpc/auth.c cred = hlist_entry(head->first, struct rpc_cred, cr_hash); head 1309 net/sunrpc/auth_gss/auth_gss.c gss_free_ctx_callback(struct rcu_head *head) head 1311 net/sunrpc/auth_gss/auth_gss.c struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu); head 1328 net/sunrpc/auth_gss/auth_gss.c gss_free_cred_callback(struct rcu_head *head) head 1330 net/sunrpc/auth_gss/auth_gss.c struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu); head 1567 net/sunrpc/auth_gss/auth_gss.c iov.iov_base = req->rq_snd_buf.head[0].iov_base; head 1746 net/sunrpc/auth_gss/auth_gss.c offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; head 1862 net/sunrpc/auth_gss/auth_gss.c offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; head 1879 net/sunrpc/auth_gss/auth_gss.c iov = snd_buf->head; head 2031 net/sunrpc/auth_gss/auth_gss.c struct kvec *head = rqstp->rq_rcv_buf.head; head 2040 net/sunrpc/auth_gss/auth_gss.c offset = (u8 *)(p) - (u8 *)head->iov_base; head 480 net/sunrpc/auth_gss/gss_krb5_crypto.c page_pos = desc->pos - outbuf->head[0].iov_len; head 666 net/sunrpc/auth_gss/gss_krb5_crypto.c p = buf->head[0].iov_base + base; head 668 net/sunrpc/auth_gss/gss_krb5_crypto.c memmove(p + shiftlen, p, buf->head[0].iov_len - base); head 670 net/sunrpc/auth_gss/gss_krb5_crypto.c buf->head[0].iov_len += shiftlen; head 765 net/sunrpc/auth_gss/gss_krb5_crypto.c gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen); head 771 net/sunrpc/auth_gss/gss_krb5_crypto.c buf->tail[0].iov_base = buf->head[0].iov_base head 772 net/sunrpc/auth_gss/gss_krb5_crypto.c + buf->head[0].iov_len; 
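
Most of the sunrpc entries revolve around the xdr_buf layout: an RPC message is carried as a head kvec, a run of pages, and a tail kvec, and its total length is always the sum of the three parts, an invariant the entries below keep recomputing (buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len). Here is a trimmed-down sketch of that layout under stated assumptions: struct xdr_buf_model and xdr_buf_recalc_len are illustrative names, and the real struct xdr_buf additionally tracks the page array, page_base, buflen and more.

	#include <stdio.h>
	#include <stddef.h>

	/* Userspace model of sunrpc's kvec: a pointer plus a length. */
	struct kvec {
		void *iov_base;
		size_t iov_len;
	};

	/* Illustrative, trimmed-down model of struct xdr_buf. */
	struct xdr_buf_model {
		struct kvec head[1]; /* RPC header, always in linear memory */
		size_t page_len;     /* bulk payload carried in the page array */
		struct kvec tail[1]; /* padding/checksum space after the pages */
		size_t len;          /* head + pages + tail */
	};

	static void xdr_buf_recalc_len(struct xdr_buf_model *xb)
	{
		/* The invariant the sunrpc entries keep recomputing. */
		xb->len = xb->head[0].iov_len + xb->page_len + xb->tail[0].iov_len;
	}

	int main(void)
	{
		char hdr[128], pad[4];
		struct xdr_buf_model xb = {
			.head = { { hdr, 96 } },
			.page_len = 4096,
			.tail = { { pad, sizeof(pad) } },
		};

		xdr_buf_recalc_len(&xb);
		printf("total xdr length = %zu\n", xb.len); /* 96 + 4096 + 4 */
		return 0;
	}

The gss_krb5 entries just above also show why the tail is a separate kvec: it can be pointed immediately past the head (tail[0].iov_base = head[0].iov_base + head[0].iov_len) so padding or a checksum token can be appended without disturbing the page-backed payload.
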
head 778 net/sunrpc/auth_gss/gss_krb5_crypto.c memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN); head 58 net/sunrpc/auth_gss/gss_krb5_wrap.c iov = &buf->head[0]; head 72 net/sunrpc/auth_gss/gss_krb5_wrap.c if (len <= buf->head[0].iov_len) { head 73 net/sunrpc/auth_gss/gss_krb5_wrap.c pad = *(u8 *)(buf->head[0].iov_base + len - 1); head 74 net/sunrpc/auth_gss/gss_krb5_wrap.c if (pad > buf->head[0].iov_len) head 76 net/sunrpc/auth_gss/gss_krb5_wrap.c buf->head[0].iov_len -= pad; head 79 net/sunrpc/auth_gss/gss_krb5_wrap.c len -= buf->head[0].iov_len; head 186 net/sunrpc/auth_gss/gss_krb5_wrap.c ptr = buf->head[0].iov_base + offset; head 288 net/sunrpc/auth_gss/gss_krb5_wrap.c ptr = (u8 *)buf->head[0].iov_base + offset; head 317 net/sunrpc/auth_gss/gss_krb5_wrap.c (unsigned char *)buf->head[0].iov_base; head 379 net/sunrpc/auth_gss/gss_krb5_wrap.c orig_start = buf->head[0].iov_base + offset; head 380 net/sunrpc/auth_gss/gss_krb5_wrap.c data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; head 382 net/sunrpc/auth_gss/gss_krb5_wrap.c buf->head[0].iov_len -= (data_start - orig_start); head 409 net/sunrpc/auth_gss/gss_krb5_wrap.c char head[LOCAL_BUF_LEN]; head 415 net/sunrpc/auth_gss/gss_krb5_wrap.c read_bytes_from_xdr_buf(buf, 0, head, shift); head 421 net/sunrpc/auth_gss/gss_krb5_wrap.c write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift); head 466 net/sunrpc/auth_gss/gss_krb5_wrap.c ptr = plainhdr = buf->head[0].iov_base + offset; head 516 net/sunrpc/auth_gss/gss_krb5_wrap.c ptr = buf->head[0].iov_base + offset; head 581 net/sunrpc/auth_gss/gss_krb5_wrap.c movelen = min_t(unsigned int, buf->head[0].iov_len, len); head 584 net/sunrpc/auth_gss/gss_krb5_wrap.c buf->head[0].iov_len); head 586 net/sunrpc/auth_gss/gss_krb5_wrap.c buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip; head 94 net/sunrpc/auth_gss/svcauth_gss.c static void rsi_free_rcu(struct rcu_head *head) head 96 net/sunrpc/auth_gss/svcauth_gss.c struct rsi *rsii = container_of(head, struct rsi, rcu_head); head 357 net/sunrpc/auth_gss/svcauth_gss.c static void rsc_free_rcu(struct rcu_head *head) head 359 net/sunrpc/auth_gss/svcauth_gss.c struct rsc *rsci = container_of(head, struct rsc, rcu_head); head 688 net/sunrpc/auth_gss/svcauth_gss.c struct kvec *argv = &rqstp->rq_arg.head[0]; head 731 net/sunrpc/auth_gss/svcauth_gss.c svc_putnl(rqstp->rq_res.head, RPC_AUTH_NULL); head 732 net/sunrpc/auth_gss/svcauth_gss.c p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; head 751 net/sunrpc/auth_gss/svcauth_gss.c svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS); head 760 net/sunrpc/auth_gss/svcauth_gss.c p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; head 875 net/sunrpc/auth_gss/svcauth_gss.c integ_len = svc_getnl(&buf->head[0]); head 897 net/sunrpc/auth_gss/svcauth_gss.c if (svc_getnl(&buf->head[0]) != seq) head 910 net/sunrpc/auth_gss/svcauth_gss.c return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len; head 920 net/sunrpc/auth_gss/svcauth_gss.c buf->head[0].iov_len -= pad; head 932 net/sunrpc/auth_gss/svcauth_gss.c priv_len = svc_getnl(&buf->head[0]); head 957 net/sunrpc/auth_gss/svcauth_gss.c offset = buf->head[0].iov_len % 4; head 966 net/sunrpc/auth_gss/svcauth_gss.c if (svc_getnl(&buf->head[0]) != seq) head 1096 net/sunrpc/auth_gss/svcauth_gss.c struct kvec *argv = &rqstp->rq_arg.head[0]; head 1172 net/sunrpc/auth_gss/svcauth_gss.c struct kvec *argv = &rqstp->rq_arg.head[0]; head 1173 net/sunrpc/auth_gss/svcauth_gss.c struct kvec *resv = 
&rqstp->rq_res.head[0]; head 1285 net/sunrpc/auth_gss/svcauth_gss.c struct kvec *resv = &rqstp->rq_res.head[0]; head 1481 net/sunrpc/auth_gss/svcauth_gss.c struct kvec *argv = &rqstp->rq_arg.head[0]; head 1482 net/sunrpc/auth_gss/svcauth_gss.c struct kvec *resv = &rqstp->rq_res.head[0]; head 1652 net/sunrpc/auth_gss/svcauth_gss.c resbuf->head[0].iov_len -= 2 * 4; head 1675 net/sunrpc/auth_gss/svcauth_gss.c integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base; head 1685 net/sunrpc/auth_gss/svcauth_gss.c if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE) head 1687 net/sunrpc/auth_gss/svcauth_gss.c resbuf->tail[0].iov_base = resbuf->head[0].iov_base head 1688 net/sunrpc/auth_gss/svcauth_gss.c + resbuf->head[0].iov_len; head 1723 net/sunrpc/auth_gss/svcauth_gss.c offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base; head 1737 net/sunrpc/auth_gss/svcauth_gss.c BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base head 1739 net/sunrpc/auth_gss/svcauth_gss.c BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base); head 1740 net/sunrpc/auth_gss/svcauth_gss.c if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len head 1756 net/sunrpc/auth_gss/svcauth_gss.c if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE) head 1758 net/sunrpc/auth_gss/svcauth_gss.c resbuf->tail[0].iov_base = resbuf->head[0].iov_base head 1759 net/sunrpc/auth_gss/svcauth_gss.c + resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE; head 1830 net/sunrpc/auth_gss/svcauth_gss.c svcauth_gss_domain_release_rcu(struct rcu_head *head) head 1832 net/sunrpc/auth_gss/svcauth_gss.c struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head); head 54 net/sunrpc/auth_unix.c unx_free_cred_callback(struct rcu_head *head) head 56 net/sunrpc/auth_unix.c struct rpc_cred *rpc_cred = container_of(head, struct rpc_cred, cr_rcu); head 61 net/sunrpc/backchannel_rqst.c free_page((unsigned long)xbufp->head[0].iov_base); head 63 net/sunrpc/backchannel_rqst.c free_page((unsigned long)xbufp->head[0].iov_base); head 56 net/sunrpc/cache.c static void cache_fresh_unlocked(struct cache_head *head, head 63 net/sunrpc/cache.c struct hlist_head *head = &detail->hash_table[hash]; head 67 net/sunrpc/cache.c hlist_for_each_entry_rcu(tmp, head, cache_list) { head 85 net/sunrpc/cache.c struct hlist_head *head = &detail->hash_table[hash]; head 100 net/sunrpc/cache.c hlist_for_each_entry_rcu(tmp, head, cache_list) { head 115 net/sunrpc/cache.c hlist_add_head_rcu(&new->cache_list, head); head 142 net/sunrpc/cache.c static void cache_fresh_locked(struct cache_head *head, time_t expiry, head 149 net/sunrpc/cache.c head->expiry_time = expiry; head 150 net/sunrpc/cache.c head->last_refresh = now; head 152 net/sunrpc/cache.c set_bit(CACHE_VALID, &head->flags); head 155 net/sunrpc/cache.c static void cache_fresh_unlocked(struct cache_head *head, head 158 net/sunrpc/cache.c if (test_and_clear_bit(CACHE_PENDING, &head->flags)) { head 159 net/sunrpc/cache.c cache_revisit_request(head); head 160 net/sunrpc/cache.c cache_dequeue(detail, head); head 443 net/sunrpc/cache.c struct hlist_head *head; head 450 net/sunrpc/cache.c head = &current_detail->hash_table[current_index]; head 451 net/sunrpc/cache.c hlist_for_each_entry_safe(ch, tmp, head, cache_list) { head 514 net/sunrpc/cache.c struct hlist_head *head = NULL; head 526 net/sunrpc/cache.c head = &detail->hash_table[i]; head 527 net/sunrpc/cache.c hlist_for_each_entry_safe(ch, tmp, head, cache_list) { head 1866 net/sunrpc/clnt.c req->rq_snd_buf.head[0].iov_len = 0; head 1868 net/sunrpc/clnt.c
req->rq_snd_buf.head[0].iov_base, req); head 2529 net/sunrpc/clnt.c req->rq_rcv_buf.head[0].iov_base, req); head 64 net/sunrpc/rpc_pipe.c static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head, head 69 net/sunrpc/rpc_pipe.c if (list_empty(head)) head 72 net/sunrpc/rpc_pipe.c msg = list_entry(head->next, struct rpc_pipe_msg, list); head 76 net/sunrpc/rpc_pipe.c } while (!list_empty(head)); head 703 net/sunrpc/sched.c struct list_head *head; head 706 net/sunrpc/sched.c head = &queue->tasks[queue->maxpriority]; head 708 net/sunrpc/sched.c while (!list_empty(head)) { head 710 net/sunrpc/sched.c task = list_first_entry(head, head 715 net/sunrpc/sched.c if (head == &queue->tasks[0]) head 717 net/sunrpc/sched.c head--; head 732 net/sunrpc/sched.c struct list_head *head; head 735 net/sunrpc/sched.c head = &queue->tasks[queue->maxpriority]; head 737 net/sunrpc/sched.c while (!list_empty(head)) { head 739 net/sunrpc/sched.c task = list_first_entry(head, head 745 net/sunrpc/sched.c if (head == &queue->tasks[0]) head 747 net/sunrpc/sched.c head--; head 81 net/sunrpc/socklib.c len = xdr->head[0].iov_len; head 84 net/sunrpc/socklib.c ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); head 1190 net/sunrpc/svc.c struct kvec *argv = &rqstp->rq_arg.head[0]; head 1191 net/sunrpc/svc.c struct kvec *resv = &rqstp->rq_res.head[0]; head 1502 net/sunrpc/svc.c struct kvec *argv = &rqstp->rq_arg.head[0]; head 1503 net/sunrpc/svc.c struct kvec *resv = &rqstp->rq_res.head[0]; head 1553 net/sunrpc/svc.c struct kvec *argv = &rqstp->rq_arg.head[0]; head 1554 net/sunrpc/svc.c struct kvec *resv = &rqstp->rq_res.head[0]; head 1574 net/sunrpc/svc.c if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { head 1575 net/sunrpc/svc.c rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; head 1577 net/sunrpc/svc.c } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len + head 1580 net/sunrpc/svc.c rqstp->rq_arg.head[0].iov_len; head 1582 net/sunrpc/svc.c rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len + head 492 net/sunrpc/svc_xprt.c space += rqstp->rq_res.head[0].iov_len; head 526 net/sunrpc/svc_xprt.c rqstp->rq_res.head[0].iov_len = 0; head 664 net/sunrpc/svc_xprt.c arg->head[0].iov_base = page_address(rqstp->rq_pages[0]); head 665 net/sunrpc/svc_xprt.c arg->head[0].iov_len = PAGE_SIZE; head 862 net/sunrpc/svc_xprt.c rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]); head 902 net/sunrpc/svc_xprt.c xb->len = xb->head[0].iov_len + head 1200 net/sunrpc/svc_xprt.c skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; head 1201 net/sunrpc/svc_xprt.c memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip, head 1221 net/sunrpc/svc_xprt.c rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2); head 1223 net/sunrpc/svc_xprt.c rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen; head 65 net/sunrpc/svcauth.c flavor = svc_getnl(&rqstp->rq_arg.head[0]); head 166 net/sunrpc/svcauth.c struct hlist_head *head; head 168 net/sunrpc/svcauth.c head = &auth_domain_table[hash_str(name, DN_HASHBITS)]; head 172 net/sunrpc/svcauth.c hlist_for_each_entry(hp, head, hash) { head 180 net/sunrpc/svcauth.c hlist_add_head_rcu(&new->hash, head); head 189 net/sunrpc/svcauth.c struct hlist_head *head; head 191 net/sunrpc/svcauth.c head = &auth_domain_table[hash_str(name, DN_HASHBITS)]; head 194 net/sunrpc/svcauth.c hlist_for_each_entry_rcu(hp, head, hash) { head 41 net/sunrpc/svcauth_unix.c static void svcauth_unix_domain_release_rcu(struct rcu_head *head) head 43 net/sunrpc/svcauth_unix.c 
struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head); head 737 net/sunrpc/svcauth_unix.c struct kvec *argv = &rqstp->rq_arg.head[0]; head 738 net/sunrpc/svcauth_unix.c struct kvec *resv = &rqstp->rq_res.head[0]; head 797 net/sunrpc/svcauth_unix.c struct kvec *argv = &rqstp->rq_arg.head[0]; head 798 net/sunrpc/svcauth_unix.c struct kvec *resv = &rqstp->rq_res.head[0]; head 196 net/sunrpc/svcsock.c if (slen == xdr->head[0].iov_len) head 199 net/sunrpc/svcsock.c xdr->head[0].iov_len, flags); head 200 net/sunrpc/svcsock.c if (len != xdr->head[0].iov_len) head 202 net/sunrpc/svcsock.c slen -= xdr->head[0].iov_len; head 276 net/sunrpc/svcsock.c svsk, xdr->head[0].iov_base, xdr->head[0].iov_len, head 575 net/sunrpc/svcsock.c rqstp->rq_arg.head[0].iov_base = skb->data; head 576 net/sunrpc/svcsock.c rqstp->rq_arg.head[0].iov_len = len; head 583 net/sunrpc/svcsock.c if (len <= rqstp->rq_arg.head[0].iov_len) { head 584 net/sunrpc/svcsock.c rqstp->rq_arg.head[0].iov_len = len; head 588 net/sunrpc/svcsock.c rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; head 876 net/sunrpc/svcsock.c rqstp->rq_arg.head[0].iov_base = page_address(rqstp->rq_pages[0]); head 965 net/sunrpc/svcsock.c __be32 *p = (__be32 *)rqstp->rq_arg.head[0].iov_base; head 985 net/sunrpc/svcsock.c dst = &req->rq_private_buf.head[0]; head 986 net/sunrpc/svcsock.c src = &rqstp->rq_arg.head[0]; head 1089 net/sunrpc/svcsock.c if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { head 1090 net/sunrpc/svcsock.c rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; head 1093 net/sunrpc/svcsock.c rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; head 1102 net/sunrpc/svcsock.c p = (__be32 *)rqstp->rq_arg.head[0].iov_base; head 1149 net/sunrpc/svcsock.c memcpy(xbufp->head[0].iov_base, &reclen, 4); head 180 net/sunrpc/xdr.c struct kvec *head = xdr->head; head 182 net/sunrpc/xdr.c char *buf = (char *)head->iov_base; head 183 net/sunrpc/xdr.c unsigned int buflen = head->iov_len; head 185 net/sunrpc/xdr.c head->iov_len = offset; head 365 net/sunrpc/xdr.c struct kvec *head, *tail; head 372 net/sunrpc/xdr.c head = buf->head; head 374 net/sunrpc/xdr.c WARN_ON_ONCE(len > head->iov_len); head 375 net/sunrpc/xdr.c if (len > head->iov_len) head 376 net/sunrpc/xdr.c len = head->iov_len; head 408 net/sunrpc/xdr.c (char *)head->iov_base + head 409 net/sunrpc/xdr.c head->iov_len - offs, head 425 net/sunrpc/xdr.c (char *)head->iov_base + head->iov_len - len, head 429 net/sunrpc/xdr.c head->iov_len -= len; head 459 net/sunrpc/xdr.c tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len; head 525 net/sunrpc/xdr.c struct kvec *iov = buf->head; head 673 net/sunrpc/xdr.c struct kvec *head = buf->head; head 710 net/sunrpc/xdr.c xdr->end = head->iov_base + head->iov_len; head 713 net/sunrpc/xdr.c head->iov_len = len; head 715 net/sunrpc/xdr.c xdr->p = head->iov_base + head->iov_len; head 716 net/sunrpc/xdr.c xdr->iov = buf->head; head 843 net/sunrpc/xdr.c else if (xdr->iov == xdr->buf->head) { head 864 net/sunrpc/xdr.c if (buf->head[0].iov_len != 0) head 865 net/sunrpc/xdr.c xdr_set_iov(xdr, buf->head, buf->len); head 869 net/sunrpc/xdr.c xdr_set_iov(xdr, buf->head, buf->len); head 994 net/sunrpc/xdr.c iov = buf->head; head 1087 net/sunrpc/xdr.c buf->head[0] = *iov; head 1113 net/sunrpc/xdr.c if (base < buf->head[0].iov_len) { head 1114 net/sunrpc/xdr.c subbuf->head[0].iov_base = buf->head[0].iov_base + base; head 1115 net/sunrpc/xdr.c subbuf->head[0].iov_len = min_t(unsigned int, len, head 1116 
net/sunrpc/xdr.c buf->head[0].iov_len - base); head 1117 net/sunrpc/xdr.c len -= subbuf->head[0].iov_len; head 1120 net/sunrpc/xdr.c base -= buf->head[0].iov_len; head 1121 net/sunrpc/xdr.c subbuf->head[0].iov_len = 0; head 1184 net/sunrpc/xdr.c if (buf->head[0].iov_len) { head 1185 net/sunrpc/xdr.c cur = min_t(size_t, buf->head[0].iov_len, trim); head 1186 net/sunrpc/xdr.c buf->head[0].iov_len -= cur; head 1198 net/sunrpc/xdr.c this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); head 1199 net/sunrpc/xdr.c memcpy(obj, subbuf->head[0].iov_base, this_len); head 1229 net/sunrpc/xdr.c this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); head 1230 net/sunrpc/xdr.c memcpy(subbuf->head[0].iov_base, obj, this_len); head 1301 net/sunrpc/xdr.c boundary = buf->head[0].iov_len; head 1314 net/sunrpc/xdr.c mic->data = subbuf.head[0].iov_base; head 1315 net/sunrpc/xdr.c if (subbuf.head[0].iov_len == mic->len) head 1328 net/sunrpc/xdr.c mic->data = buf->head[0].iov_base + buf->head[0].iov_len; head 1362 net/sunrpc/xdr.c if (todo && base < buf->head->iov_len) { head 1363 net/sunrpc/xdr.c c = buf->head->iov_base + base; head 1365 net/sunrpc/xdr.c buf->head->iov_len - base); head 1391 net/sunrpc/xdr.c base = buf->head->iov_len; /* align to start of pages */ head 1395 net/sunrpc/xdr.c base -= buf->head->iov_len; head 1545 net/sunrpc/xdr.c buf->head->iov_len + buf->page_len + buf->tail->iov_len) head 1562 net/sunrpc/xdr.c if (offset >= buf->head[0].iov_len) { head 1563 net/sunrpc/xdr.c offset -= buf->head[0].iov_len; head 1565 net/sunrpc/xdr.c thislen = buf->head[0].iov_len - offset; head 1568 net/sunrpc/xdr.c sg_set_buf(sg, buf->head[0].iov_base + offset, thislen); head 1844 net/sunrpc/xprt.c xbufp->len = xbufp->head[0].iov_len + xbufp->page_len + head 205 net/sunrpc/xprtmultipath.c struct rpc_xprt *xprt_switch_find_first_entry(struct list_head *head) head 209 net/sunrpc/xprtmultipath.c list_for_each_entry_rcu(pos, head, xprt_switch) { head 227 net/sunrpc/xprtmultipath.c struct rpc_xprt *xprt_switch_find_current_entry(struct list_head *head, head 233 net/sunrpc/xprtmultipath.c list_for_each_entry_rcu(pos, head, xprt_switch) { head 246 net/sunrpc/xprtmultipath.c struct list_head *head; head 250 net/sunrpc/xprtmultipath.c head = &xps->xps_xprt_list; head 252 net/sunrpc/xprtmultipath.c return xprt_switch_find_first_entry(head); head 253 net/sunrpc/xprtmultipath.c return xprt_switch_find_current_entry(head, xpi->xpi_cursor); head 259 net/sunrpc/xprtmultipath.c struct list_head *head; head 265 net/sunrpc/xprtmultipath.c head = &xps->xps_xprt_list; head 266 net/sunrpc/xprtmultipath.c list_for_each_entry_rcu(pos, head, xprt_switch) { head 277 net/sunrpc/xprtmultipath.c struct rpc_xprt *xprt_switch_find_next_entry(struct list_head *head, head 283 net/sunrpc/xprtmultipath.c list_for_each_entry_rcu(pos, head, xprt_switch) { head 318 net/sunrpc/xprtmultipath.c struct rpc_xprt *__xprt_switch_find_next_entry_roundrobin(struct list_head *head, head 323 net/sunrpc/xprtmultipath.c ret = xprt_switch_find_next_entry(head, cur); head 326 net/sunrpc/xprtmultipath.c return xprt_switch_find_first_entry(head); head 333 net/sunrpc/xprtmultipath.c struct list_head *head = &xps->xps_xprt_list; head 340 net/sunrpc/xprtmultipath.c xprt = __xprt_switch_find_next_entry_roundrobin(head, cur); head 248 net/sunrpc/xprtrdma/backchannel.c buf->head[0].iov_base = p; head 249 net/sunrpc/xprtrdma/backchannel.c buf->head[0].iov_len = size; head 182 net/sunrpc/xprtrdma/rpc_rdma.c return (buf->head[0].iov_len + 
buf->tail[0].iov_len) < head 234 net/sunrpc/xprtrdma/rpc_rdma.c seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n); head 399 net/sunrpc/xprtrdma/rpc_rdma.c pos = rqst->rq_snd_buf.head[0].iov_len; head 456 net/sunrpc/xprtrdma/rpc_rdma.c rqst->rq_rcv_buf.head[0].iov_len, head 632 net/sunrpc/xprtrdma/rpc_rdma.c sge[sge_no].length = xdr->head[0].iov_len; head 949 net/sunrpc/xprtrdma/rpc_rdma.c rqst->rq_rcv_buf.head[0].iov_base = srcp; head 950 net/sunrpc/xprtrdma/rpc_rdma.c rqst->rq_private_buf.head[0].iov_base = srcp; head 955 net/sunrpc/xprtrdma/rpc_rdma.c curlen = rqst->rq_rcv_buf.head[0].iov_len; head 1330 net/sunrpc/xprtrdma/rpc_rdma.c rep->rr_hdrbuf.head[0].iov_base, NULL); head 31 net/sunrpc/xprtrdma/svc_rdma_backchannel.c struct kvec *dst, *src = &rcvbuf->head[0]; head 61 net/sunrpc/xprtrdma/svc_rdma_backchannel.c dst = &req->rq_private_buf.head[0]; head 370 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c arg->head[0].iov_base = ctxt->rc_recv_buf; head 371 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c arg->head[0].iov_len = ctxt->rc_byte_len; head 579 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rdma_argp = rq_arg->head[0].iov_base; head 612 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rq_arg->head[0].iov_base = p; head 614 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rq_arg->head[0].iov_len -= hdr_len; head 641 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svc_rdma_recv_ctxt *head) head 648 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c for (page_no = 0; page_no < head->rc_page_count; page_no++) { head 650 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rqstp->rq_pages[page_no] = head->rc_pages[page_no]; head 652 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c head->rc_page_count = 0; head 655 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count]; head 656 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rqstp->rq_arg.page_len = head->rc_arg.page_len; head 663 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rqstp->rq_arg.head[0] = head->rc_arg.head[0]; head 664 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rqstp->rq_arg.tail[0] = head->rc_arg.tail[0]; head 665 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rqstp->rq_arg.len = head->rc_arg.len; head 666 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c rqstp->rq_arg.buflen = head->rc_arg.buflen; head 809 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c p = (__be32 *)rqstp->rq_arg.head[0].iov_base; head 564 net/sunrpc/xprtrdma/svc_rdma_rw.c ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]); head 567 net/sunrpc/xprtrdma/svc_rdma_rw.c consumed = xdr->head[0].iov_len; head 602 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_recv_ctxt *head = info->ri_readctxt; head 620 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.pages[info->ri_pageno] = head 623 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_page_count++; head 717 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_recv_ctxt *head = info->ri_readctxt; head 726 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_hdr_count = 0; head 733 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.tail[0].iov_base = head 734 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.head[0].iov_base + info->ri_position; head 735 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.tail[0].iov_len = head 736 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.head[0].iov_len - info->ri_position; head 737 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.head[0].iov_len = info->ri_position; head 750 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.page_len = info->ri_chunklen; head 751 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.len += info->ri_chunklen; head 752 
net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.buflen += info->ri_chunklen; head 773 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_recv_ctxt *head = info->ri_readctxt; head 782 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.len += info->ri_chunklen; head 783 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.buflen += info->ri_chunklen; head 785 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_hdr_count = 1; head 786 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.head[0].iov_base = page_address(head->rc_pages[0]); head 787 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.head[0].iov_len = min_t(size_t, PAGE_SIZE, head 790 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.page_len = info->ri_chunklen - head 791 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.head[0].iov_len; head 815 net/sunrpc/xprtrdma/svc_rdma_rw.c struct svc_rdma_recv_ctxt *head, __be32 *p) head 824 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.head[0] = rqstp->rq_arg.head[0]; head 825 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.tail[0] = rqstp->rq_arg.tail[0]; head 826 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.pages = head->rc_pages; head 827 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.page_base = 0; head 828 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.page_len = 0; head 829 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.len = rqstp->rq_arg.len; head 830 net/sunrpc/xprtrdma/svc_rdma_rw.c head->rc_arg.buflen = rqstp->rq_arg.buflen; head 835 net/sunrpc/xprtrdma/svc_rdma_rw.c info->ri_readctxt = head; head 588 net/sunrpc/xprtrdma/svc_rdma_sendto.c memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len); head 589 net/sunrpc/xprtrdma/svc_rdma_sendto.c dst += xdr->head[0].iov_len; head 658 net/sunrpc/xprtrdma/svc_rdma_sendto.c xdr->head[0].iov_base, head 659 net/sunrpc/xprtrdma/svc_rdma_sendto.c xdr->head[0].iov_len); head 587 net/sunrpc/xprtrdma/xprt_rdma.h xdr->head[0].iov_len = len; head 408 net/sunrpc/xprtsock.c want = min_t(size_t, count, buf->head[0].iov_len); head 410 net/sunrpc/xprtsock.c ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek); head 471 net/sunrpc/xprtsock.c if (buf->head[0].iov_len >= transport->recv.offset) head 472 net/sunrpc/xprtsock.c memcpy(buf->head[0].iov_base, head 828 net/sunrpc/xprtsock.c want = xdr->head[0].iov_len + rmsize; head 836 net/sunrpc/xprtsock.c &xdr->head[0], base); head 838 net/sunrpc/xprtsock.c err = xs_send_kvec(sock, &msg, &xdr->head[0], base); head 2672 net/sunrpc/xprtsock.c headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK; head 2674 net/sunrpc/xprtsock.c virt_to_page(xbufp->head[0].iov_base), headoff, head 42 net/sysctl_net.c static int net_ctl_permissions(struct ctl_table_header *head, head 45 net/sysctl_net.c struct net *net = container_of(head->set, struct net, sysctls); head 56 net/sysctl_net.c static void net_ctl_set_ownership(struct ctl_table_header *head, head 60 net/sysctl_net.c struct net *net = container_of(head->set, struct net, sysctls); head 2321 net/tipc/link.c u16 head = hskb ? 
msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1; head 2326 net/tipc/link.c skb_queue_len(&l->transmq), head, tail, head 294 net/tipc/monitor.c static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head) head 296 net/tipc/monitor.c struct tipc_peer *peer = peer_nxt(head); head 304 net/tipc/monitor.c if (i++ < head->applied) { head 306 net/tipc/monitor.c if (head == self) head 315 net/tipc/monitor.c head = peer; head 316 net/tipc/monitor.c head->is_head = true; head 326 net/tipc/monitor.c struct tipc_peer *peer, *prev, *head; head 338 net/tipc/monitor.c head = peer_head(prev); head 339 net/tipc/monitor.c if (head == self) head 351 net/tipc/monitor.c mon_assign_roles(mon, head); head 392 net/tipc/monitor.c struct tipc_peer *peer, *head; head 399 net/tipc/monitor.c head = peer_head(peer); head 400 net/tipc/monitor.c if (head == self) head 402 net/tipc/monitor.c mon_assign_roles(mon, head); head 411 net/tipc/monitor.c struct tipc_peer *peer, *head; head 432 net/tipc/monitor.c head = peer_head(peer); head 433 net/tipc/monitor.c if (head == self) head 435 net/tipc/monitor.c mon_assign_roles(mon, head); head 124 net/tipc/msg.c struct sk_buff *head = *headbuf; head 141 net/tipc/msg.c if (unlikely(head)) head 145 net/tipc/msg.c head = *headbuf = frag; head 147 net/tipc/msg.c TIPC_SKB_CB(head)->tail = NULL; head 148 net/tipc/msg.c if (skb_is_nonlinear(head)) { head 149 net/tipc/msg.c skb_walk_frags(head, tail) { head 150 net/tipc/msg.c TIPC_SKB_CB(head)->tail = tail; head 153 net/tipc/msg.c skb_frag_list_init(head); head 158 net/tipc/msg.c if (!head) head 161 net/tipc/msg.c if (skb_try_coalesce(head, frag, &headstolen, &delta)) { head 164 net/tipc/msg.c tail = TIPC_SKB_CB(head)->tail; head 165 net/tipc/msg.c if (!skb_has_frag_list(head)) head 166 net/tipc/msg.c skb_shinfo(head)->frag_list = frag; head 169 net/tipc/msg.c head->truesize += frag->truesize; head 170 net/tipc/msg.c head->data_len += frag->len; head 171 net/tipc/msg.c head->len += frag->len; head 172 net/tipc/msg.c TIPC_SKB_CB(head)->tail = frag; head 176 net/tipc/msg.c TIPC_SKB_CB(head)->validated = false; head 177 net/tipc/msg.c if (unlikely(!tipc_msg_validate(&head))) head 179 net/tipc/msg.c *buf = head; head 180 net/tipc/msg.c TIPC_SKB_CB(head)->tail = NULL; head 705 net/tipc/msg.c struct sk_buff *head = NULL; head 725 net/tipc/msg.c if (tipc_buf_append(&head, &frag)) head 727 net/tipc/msg.c if (!head) head 734 net/tipc/msg.c kfree_skb(head); head 1158 net/tipc/msg.h struct sk_buff_head *head) head 1160 net/tipc/msg.h spin_lock_bh(&head->lock); head 1161 net/tipc/msg.h skb_queue_splice_tail(list, head); head 1162 net/tipc/msg.h spin_unlock_bh(&head->lock); head 1170 net/tipc/msg.h struct sk_buff_head *head) head 1179 net/tipc/msg.h tipc_skb_queue_splice_tail(&tmp, head); head 191 net/tipc/name_distr.c struct sk_buff_head head; head 193 net/tipc/name_distr.c __skb_queue_head_init(&head); head 196 net/tipc/name_distr.c named_distribute(net, &head, dnode, &nt->cluster_scope); head 197 net/tipc/name_distr.c tipc_node_xmit(net, &head, dnode, 0); head 900 net/tipc/name_table.c struct hlist_head *head; head 910 net/tipc/name_table.c head = &tn->nametbl->services[i]; head 918 net/tipc/name_table.c hlist_for_each_entry_rcu(service, head, service_list) head 1493 net/tipc/node.c struct sk_buff_head head; head 1495 net/tipc/node.c __skb_queue_head_init(&head); head 1496 net/tipc/node.c __skb_queue_tail(&head, skb); head 1497 net/tipc/node.c tipc_node_xmit(net, &head, dnode, selector); head 494 net/tipc/socket.c static void 
tipc_sk_callback(struct rcu_head *head) head 496 net/tipc/socket.c struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu); head 3814 net/tipc/socket.c i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i); head 3815 net/tipc/socket.c if (sk->sk_backlog.tail != sk->sk_backlog.head) { head 181 net/tls/tls_device_fallback.c skb->csum_start = skb_transport_header(skb) - skb->head; head 73 net/vmw_vsock/diag.c struct list_head *head = &vsock_bind_table[bucket]; head 76 net/vmw_vsock/diag.c list_for_each_entry(vsk, head, bound_table) { head 103 net/vmw_vsock/diag.c struct list_head *head = &vsock_connected_table[bucket]; head 106 net/vmw_vsock/diag.c list_for_each_entry(vsk, head, connected_table) { head 186 net/vmw_vsock/vmci_transport_notify.c u64 head; head 201 net/vmw_vsock/vmci_transport_notify.c vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair, &tail, &head); head 202 net/vmw_vsock/vmci_transport_notify.c room_left = vmci_trans(vsk)->consume_size - head; head 208 net/vmw_vsock/vmci_transport_notify.c waiting_info.offset = head + room_needed; head 228 net/vmw_vsock/vmci_transport_notify.c u64 head; head 237 net/vmw_vsock/vmci_transport_notify.c vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair, &tail, &head); head 171 net/wimax/op-msg.c struct nlmsghdr *nlh = (void *) msg->head; head 193 net/wimax/op-msg.c struct nlmsghdr *nlh = (void *) msg->head; head 214 net/wimax/op-msg.c struct nlmsghdr *nlh = (void *) msg->head; head 4412 net/wireless/nl80211.c bcn->head = nla_data(attrs[NL80211_ATTR_BEACON_HEAD]); head 562 net/wireless/trace.h __dynamic_array(u8, head, info ? info->head_len : 0) head 575 net/wireless/trace.h if (info->head) head 576 net/wireless/trace.h memcpy(__get_dynamic_array(head), info->head, head 584 net/wireless/util.c frag_page = virt_to_head_page(skb->head); head 677 net/xfrm/xfrm_interface.c static void xfrmi_dellink(struct net_device *dev, struct list_head *head) head 679 net/xfrm/xfrm_interface.c unregister_netdevice_queue(dev, head); head 118 net/xfrm/xfrm_policy.c struct rhash_head head; head 407 net/xfrm/xfrm_policy.c static void xfrm_policy_destroy_rcu(struct rcu_head *head) head 409 net/xfrm/xfrm_policy.c struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu); head 722 net/xfrm/xfrm_policy.c &bin->k, &bin->head, head 1085 net/xfrm/xfrm_policy.c if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head, head 1485 net/xfrm/xfrm_policy.c .head_offset = offsetof(struct xfrm_pol_inexact_bin, head), head 46 samples/bpf/test_lru_dist.c static inline int list_empty(const struct list_head *head) head 48 samples/bpf/test_lru_dist.c return head->next == head; head 61 samples/bpf/test_lru_dist.c static inline void list_add(struct list_head *new, struct list_head *head) head 63 samples/bpf/test_lru_dist.c __list_add(new, head, head->next); head 77 samples/bpf/test_lru_dist.c static inline void list_move(struct list_head *list, struct list_head *head) head 80 samples/bpf/test_lru_dist.c list_add(list, head); head 113 samples/vfio-mdev/mtty.c u8 head, tail; head 311 samples/vfio-mdev/mtty.c mdev_state->s[index].rxtx.head] = data; head 313 samples/vfio-mdev/mtty.c CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head); head 356 samples/vfio-mdev/mtty.c (mdev_state->s[index].rxtx.head == head 377 samples/vfio-mdev/mtty.c mdev_state->s[index].rxtx.head = 0; head 473 samples/vfio-mdev/mtty.c if (mdev_state->s[index].rxtx.head != head 481 samples/vfio-mdev/mtty.c if (mdev_state->s[index].rxtx.head == head 524 samples/vfio-mdev/mtty.c 
(mdev_state->s[index].rxtx.head == head 555 samples/vfio-mdev/mtty.c if (mdev_state->s[index].rxtx.head != head 564 samples/vfio-mdev/mtty.c if (mdev_state->s[index].rxtx.head == head 76 scripts/dtc/livetree.c struct property *head = NULL; head 81 scripts/dtc/livetree.c p->next = head; head 82 scripts/dtc/livetree.c head = p; head 85 scripts/dtc/livetree.c return head; head 49 scripts/kconfig/list.h #define list_for_each_entry(pos, head, member) \ head 50 scripts/kconfig/list.h for (pos = list_entry((head)->next, typeof(*pos), member); \ head 51 scripts/kconfig/list.h &pos->member != (head); \ head 61 scripts/kconfig/list.h #define list_for_each_entry_safe(pos, n, head, member) \ head 62 scripts/kconfig/list.h for (pos = list_entry((head)->next, typeof(*pos), member), \ head 64 scripts/kconfig/list.h &pos->member != (head); \ head 71 scripts/kconfig/list.h static inline int list_empty(const struct list_head *head) head 73 scripts/kconfig/list.h return head->next == head; head 100 scripts/kconfig/list.h static inline void list_add_tail(struct list_head *_new, struct list_head *head) head 102 scripts/kconfig/list.h __list_add(_new, head->prev, head); head 26 scripts/kconfig/lkc_proto.h struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head); head 358 scripts/kconfig/mconf.c struct list_head *head; head 369 scripts/kconfig/mconf.c list_for_each_entry(pos, data->head, entries) { head 433 scripts/kconfig/mconf.c LIST_HEAD(head); head 437 scripts/kconfig/mconf.c .head = &head, head 443 scripts/kconfig/mconf.c res = get_relations_str(sym_arr, &head); head 456 scripts/kconfig/mconf.c list_for_each_entry_safe(pos, tmp, &head, entries) head 702 scripts/kconfig/menu.c struct list_head *head) head 717 scripts/kconfig/menu.c if (head && location) { head 731 scripts/kconfig/menu.c if (list_empty(head)) head 734 scripts/kconfig/menu.c jump->index = list_entry(head->prev, struct jump_key, head 737 scripts/kconfig/menu.c list_add_tail(&jump->entries, head); head 792 scripts/kconfig/menu.c struct list_head *head) head 810 scripts/kconfig/menu.c get_prompt_str(r, prop, head); head 840 scripts/kconfig/menu.c struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head) head 847 scripts/kconfig/menu.c get_symbol_str(&res, sym, head); head 1074 scripts/kconfig/qconf.cc QString head, debug, help; head 1079 scripts/kconfig/qconf.cc head += "<big><b>"; head 1080 scripts/kconfig/qconf.cc head += print_filter(_menu->prompt->text); head 1081 scripts/kconfig/qconf.cc head += "</b></big>"; head 1083 scripts/kconfig/qconf.cc head += " ("; head 1085 scripts/kconfig/qconf.cc head += QString().sprintf("<a href=\"s%p\">", sym); head 1086 scripts/kconfig/qconf.cc head += print_filter(sym->name); head 1088 scripts/kconfig/qconf.cc head += "</a>"; head 1089 scripts/kconfig/qconf.cc head += ")"; head 1092 scripts/kconfig/qconf.cc head += "<big><b>"; head 1094 scripts/kconfig/qconf.cc head += QString().sprintf("<a href=\"s%p\">", sym); head 1095 scripts/kconfig/qconf.cc head += print_filter(sym->name); head 1097 scripts/kconfig/qconf.cc head += "</a>"; head 1098 scripts/kconfig/qconf.cc head += "</b></big>"; head 1100 scripts/kconfig/qconf.cc head += "<br><br>"; head 1110 scripts/kconfig/qconf.cc head += "<big><b>"; head 1111 scripts/kconfig/qconf.cc head += print_filter(_menu->prompt->text); head 1112 scripts/kconfig/qconf.cc head += "</b></big><br><br>"; head 1124 scripts/kconfig/qconf.cc setText(head + debug + help); head 1964 security/apparmor/apparmorfs.c #define 
list_entry_is_head(pos, head, member) (&pos->member == (head)) head 382 security/apparmor/domain.c struct aa_ns *ns, struct list_head *head, head 390 security/apparmor/domain.c AA_BUG(!head); head 394 security/apparmor/domain.c list_for_each_entry_rcu(profile, head, base.list) { head 161 security/apparmor/include/lib.h static inline struct aa_policy *__policy_find(struct list_head *head, head 166 security/apparmor/include/lib.h list_for_each_entry_rcu(policy, head, list) { head 186 security/apparmor/include/lib.h static inline struct aa_policy *__policy_strn_find(struct list_head *head, head 191 security/apparmor/include/lib.h list_for_each_entry_rcu(policy, head, list) { head 90 security/apparmor/include/policy.h struct rhash_head head; head 196 security/apparmor/include/policy.h void __aa_profile_list_release(struct list_head *head); head 141 security/apparmor/include/policy_ns.h static inline struct aa_ns *__aa_findn_ns(struct list_head *head, head 144 security/apparmor/include/policy_ns.h return (struct aa_ns *)__policy_strn_find(head, name, n); head 147 security/apparmor/include/policy_ns.h static inline struct aa_ns *__aa_find_ns(struct list_head *head, head 150 security/apparmor/include/policy_ns.h return __aa_findn_ns(head, name, strlen(name)); head 363 security/apparmor/label.c static void label_free_rcu(struct rcu_head *head) head 365 security/apparmor/label.c struct aa_label *label = container_of(head, struct aa_label, rcu); head 174 security/apparmor/policy.c void __aa_profile_list_release(struct list_head *head) head 177 security/apparmor/policy.c list_for_each_entry_safe(profile, tmp, head, base.list) head 306 security/apparmor/policy.c static struct aa_profile *__strn_find_child(struct list_head *head, head 309 security/apparmor/policy.c return (struct aa_profile *)__policy_strn_find(head, name, len); head 321 security/apparmor/policy.c static struct aa_profile *__find_child(struct list_head *head, const char *name) head 323 security/apparmor/policy.c return __strn_find_child(head, name, strlen(name)); head 324 security/apparmor/policy_ns.c static void __ns_list_release(struct list_head *head); head 374 security/apparmor/policy_ns.c static void __ns_list_release(struct list_head *head) head 378 security/apparmor/policy_ns.c list_for_each_entry_safe(ns, tmp, head, base.list) head 878 security/apparmor/policy_unpack.c params.head_offset = offsetof(struct aa_data, head); head 903 security/apparmor/policy_unpack.c rhashtable_insert_fast(profile->data, &data->head, head 89 security/keys/user_defined.c static void user_free_payload_rcu(struct rcu_head *head) head 93 security/keys/user_defined.c payload = container_of(head, struct user_key_payload, rcu); head 450 security/security.c hlist_add_tail_rcu(&hooks[i].list, hooks[i].head); head 925 security/security.c static void inode_free_by_rcu(struct rcu_head *head) head 930 security/security.c kmem_cache_free(lsm_inode_cache, head); head 154 security/selinux/avc.c struct hlist_head *head; head 161 security/selinux/avc.c head = &avc->avc_cache.slots[i]; head 162 security/selinux/avc.c if (!hlist_empty(head)) { head 165 security/selinux/avc.c hlist_for_each_entry_rcu(node, head, list) head 466 security/selinux/avc.c struct hlist_head *head; head 472 security/selinux/avc.c head = &avc->avc_cache.slots[hvalue]; head 479 security/selinux/avc.c hlist_for_each_entry(node, head, list) { head 528 security/selinux/avc.c struct hlist_head *head; head 531 security/selinux/avc.c head = &avc->avc_cache.slots[hvalue]; head 532 
security/selinux/avc.c hlist_for_each_entry_rcu(node, head, list) { head 621 security/selinux/avc.c struct hlist_head *head; head 637 security/selinux/avc.c head = &avc->avc_cache.slots[hvalue]; head 640 security/selinux/avc.c hlist_for_each_entry(pos, head, list) { head 648 security/selinux/avc.c hlist_add_head_rcu(&node->list, head); head 837 security/selinux/avc.c struct hlist_head *head; head 864 security/selinux/avc.c head = &avc->avc_cache.slots[hvalue]; head 869 security/selinux/avc.c hlist_for_each_entry(pos, head, list) { head 937 security/selinux/avc.c struct hlist_head *head; head 944 security/selinux/avc.c head = &avc->avc_cache.slots[i]; head 953 security/selinux/avc.c hlist_for_each_entry(node, head, list) head 264 security/selinux/ss/conditional.c struct cond_av_list *head; head 336 security/selinux/ss/conditional.c if (!data->head) head 337 security/selinux/ss/conditional.c data->head = list; head 344 security/selinux/ss/conditional.c cond_av_list_destroy(data->head); head 345 security/selinux/ss/conditional.c data->head = NULL; head 368 security/selinux/ss/conditional.c data.head = NULL; head 377 security/selinux/ss/conditional.c *ret_list = data.head; head 38 security/selinux/ss/mls.c int i, l, len, head, prev; head 52 security/selinux/ss/mls.c head = -2; head 58 security/selinux/ss/mls.c if (head != prev) { head 64 security/selinux/ss/mls.c head = i; head 68 security/selinux/ss/mls.c if (prev != head) { head 94 security/selinux/ss/mls.c int i, l, head, prev; head 112 security/selinux/ss/mls.c head = -2; head 118 security/selinux/ss/mls.c if (prev != head) { head 119 security/selinux/ss/mls.c if (prev - head > 1) head 134 security/selinux/ss/mls.c head = i; head 139 security/selinux/ss/mls.c if (prev != head) { head 140 security/selinux/ss/mls.c if (prev - head > 1) head 801 security/selinux/ss/policydb.c c = g->head; head 852 security/selinux/ss/policydb.c struct ocontext *head, *c; head 861 security/selinux/ss/policydb.c head = p->ocontexts[OCON_ISID]; head 862 security/selinux/ss/policydb.c for (c = head; c; c = c->next) { head 2033 security/selinux/ss/policydb.c for (l = NULL, c = genfs->head; c; head 2053 security/selinux/ss/policydb.c genfs->head = newc; head 3230 security/selinux/ss/policydb.c for (c = genfs->head; c; c = c->next) head 3236 security/selinux/ss/policydb.c for (c = genfs->head; c; c = c->next) { head 207 security/selinux/ss/policydb.h struct ocontext *head; head 2698 security/selinux/ss/services.c for (c = genfs->head; c; c = c->next) { head 405 security/smack/smack_access.c struct hlist_head *head; head 408 security/smack/smack_access.c head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)]; head 410 security/smack/smack_access.c hlist_add_head_rcu(&skp->smk_hashed, head); head 424 security/smack/smack_access.c struct hlist_head *head; head 428 security/smack/smack_access.c head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)]; head 430 security/smack/smack_access.c hlist_for_each_entry_rcu(skp, head, smk_hashed) head 532 security/smack/smackfs.c struct list_head *head) head 538 security/smack/smackfs.c for (list = rcu_dereference(list_next_rcu(head)); head 539 security/smack/smackfs.c list != head; head 549 security/smack/smackfs.c struct list_head *head) head 556 security/smack/smackfs.c return (list == head) ? 
NULL : list; head 439 security/tomoyo/audit.c void tomoyo_read_log(struct tomoyo_io_buffer *head) head 443 security/tomoyo/audit.c if (head->r.w_pos) head 445 security/tomoyo/audit.c kfree(head->read_buf); head 446 security/tomoyo/audit.c head->read_buf = NULL; head 456 security/tomoyo/audit.c head->read_buf = ptr->log; head 457 security/tomoyo/audit.c head->r.w[head->r.w_pos++] = head->read_buf; head 213 security/tomoyo/common.c static bool tomoyo_flush(struct tomoyo_io_buffer *head) head 215 security/tomoyo/common.c while (head->r.w_pos) { head 216 security/tomoyo/common.c const char *w = head->r.w[0]; head 220 security/tomoyo/common.c if (len > head->read_user_buf_avail) head 221 security/tomoyo/common.c len = head->read_user_buf_avail; head 224 security/tomoyo/common.c if (copy_to_user(head->read_user_buf, w, len)) head 226 security/tomoyo/common.c head->read_user_buf_avail -= len; head 227 security/tomoyo/common.c head->read_user_buf += len; head 230 security/tomoyo/common.c head->r.w[0] = w; head 234 security/tomoyo/common.c if (head->poll) { head 235 security/tomoyo/common.c if (!head->read_user_buf_avail || head 236 security/tomoyo/common.c copy_to_user(head->read_user_buf, "", 1)) head 238 security/tomoyo/common.c head->read_user_buf_avail--; head 239 security/tomoyo/common.c head->read_user_buf++; head 241 security/tomoyo/common.c head->r.w_pos--; head 242 security/tomoyo/common.c for (len = 0; len < head->r.w_pos; len++) head 243 security/tomoyo/common.c head->r.w[len] = head->r.w[len + 1]; head 245 security/tomoyo/common.c head->r.avail = 0; head 259 security/tomoyo/common.c static void tomoyo_set_string(struct tomoyo_io_buffer *head, const char *string) head 261 security/tomoyo/common.c if (head->r.w_pos < TOMOYO_MAX_IO_READ_QUEUE) { head 262 security/tomoyo/common.c head->r.w[head->r.w_pos++] = string; head 263 security/tomoyo/common.c tomoyo_flush(head); head 268 security/tomoyo/common.c static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, head 277 security/tomoyo/common.c static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, head 282 security/tomoyo/common.c size_t pos = head->r.avail; head 283 security/tomoyo/common.c int size = head->readbuf_size - pos; head 288 security/tomoyo/common.c len = vsnprintf(head->read_buf + pos, size, fmt, args) + 1; head 290 security/tomoyo/common.c if (pos + len >= head->readbuf_size) { head 294 security/tomoyo/common.c head->r.avail += len; head 295 security/tomoyo/common.c tomoyo_set_string(head, head->read_buf + pos); head 305 security/tomoyo/common.c static void tomoyo_set_space(struct tomoyo_io_buffer *head) head 307 security/tomoyo/common.c tomoyo_set_string(head, " "); head 317 security/tomoyo/common.c static bool tomoyo_set_lf(struct tomoyo_io_buffer *head) head 319 security/tomoyo/common.c tomoyo_set_string(head, "\n"); head 320 security/tomoyo/common.c return !head->r.w_pos; head 330 security/tomoyo/common.c static void tomoyo_set_slash(struct tomoyo_io_buffer *head) head 332 security/tomoyo/common.c tomoyo_set_string(head, "/"); head 369 security/tomoyo/common.c static void tomoyo_print_namespace(struct tomoyo_io_buffer *head) head 373 security/tomoyo/common.c tomoyo_set_string(head, head 374 security/tomoyo/common.c container_of(head->r.ns, head 377 security/tomoyo/common.c tomoyo_set_space(head); head 386 security/tomoyo/common.c static void tomoyo_print_name_union(struct tomoyo_io_buffer *head, head 389 security/tomoyo/common.c tomoyo_set_space(head); head 391 
security/tomoyo/common.c tomoyo_set_string(head, "@"); head 392 security/tomoyo/common.c tomoyo_set_string(head, ptr->group->group_name->name); head 394 security/tomoyo/common.c tomoyo_set_string(head, ptr->filename->name); head 406 security/tomoyo/common.c static void tomoyo_print_name_union_quoted(struct tomoyo_io_buffer *head, head 410 security/tomoyo/common.c tomoyo_set_string(head, "@"); head 411 security/tomoyo/common.c tomoyo_set_string(head, ptr->group->group_name->name); head 413 security/tomoyo/common.c tomoyo_set_string(head, "\""); head 414 security/tomoyo/common.c tomoyo_set_string(head, ptr->filename->name); head 415 security/tomoyo/common.c tomoyo_set_string(head, "\""); head 428 security/tomoyo/common.c (struct tomoyo_io_buffer *head, const struct tomoyo_number_union *ptr) head 431 security/tomoyo/common.c tomoyo_set_string(head, "@"); head 432 security/tomoyo/common.c tomoyo_set_string(head, ptr->group->group_name->name); head 463 security/tomoyo/common.c tomoyo_io_printf(head, "%s", buffer); head 475 security/tomoyo/common.c static void tomoyo_print_number_union(struct tomoyo_io_buffer *head, head 478 security/tomoyo/common.c tomoyo_set_space(head); head 479 security/tomoyo/common.c tomoyo_print_number_union_nospace(head, ptr); head 674 security/tomoyo/common.c static int tomoyo_write_profile(struct tomoyo_io_buffer *head) head 676 security/tomoyo/common.c char *data = head->write_buf; head 681 security/tomoyo/common.c if (sscanf(data, "PROFILE_VERSION=%u", &head->w.ns->profile_version) head 688 security/tomoyo/common.c profile = tomoyo_assign_profile(head->w.ns, i); head 729 security/tomoyo/common.c static void tomoyo_print_config(struct tomoyo_io_buffer *head, const u8 config) head 731 security/tomoyo/common.c tomoyo_io_printf(head, "={ mode=%s grant_log=%s reject_log=%s }\n", head 744 security/tomoyo/common.c static void tomoyo_read_profile(struct tomoyo_io_buffer *head) head 748 security/tomoyo/common.c container_of(head->r.ns, typeof(*ns), namespace_list); head 751 security/tomoyo/common.c if (head->r.eof) head 754 security/tomoyo/common.c index = head->r.index; head 756 security/tomoyo/common.c switch (head->r.step) { head 758 security/tomoyo/common.c tomoyo_print_namespace(head); head 759 security/tomoyo/common.c tomoyo_io_printf(head, "PROFILE_VERSION=%u\n", head 761 security/tomoyo/common.c head->r.step++; head 764 security/tomoyo/common.c for ( ; head->r.index < TOMOYO_MAX_PROFILES; head 765 security/tomoyo/common.c head->r.index++) head 766 security/tomoyo/common.c if (ns->profile_ptr[head->r.index]) head 768 security/tomoyo/common.c if (head->r.index == TOMOYO_MAX_PROFILES) { head 769 security/tomoyo/common.c head->r.eof = true; head 772 security/tomoyo/common.c head->r.step++; head 780 security/tomoyo/common.c tomoyo_print_namespace(head); head 781 security/tomoyo/common.c tomoyo_io_printf(head, "%u-COMMENT=", index); head 782 security/tomoyo/common.c tomoyo_set_string(head, comment ? 
comment->name : ""); head 783 security/tomoyo/common.c tomoyo_set_lf(head); head 784 security/tomoyo/common.c tomoyo_print_namespace(head); head 785 security/tomoyo/common.c tomoyo_io_printf(head, "%u-PREFERENCE={ ", index); head 787 security/tomoyo/common.c tomoyo_io_printf(head, "%s=%u ", head 790 security/tomoyo/common.c tomoyo_set_string(head, "}\n"); head 791 security/tomoyo/common.c head->r.step++; head 796 security/tomoyo/common.c tomoyo_print_namespace(head); head 797 security/tomoyo/common.c tomoyo_io_printf(head, "%u-%s", index, "CONFIG"); head 798 security/tomoyo/common.c tomoyo_print_config(head, profile->default_config); head 799 security/tomoyo/common.c head->r.bit = 0; head 800 security/tomoyo/common.c head->r.step++; head 804 security/tomoyo/common.c for ( ; head->r.bit < TOMOYO_MAX_MAC_INDEX head 805 security/tomoyo/common.c + TOMOYO_MAX_MAC_CATEGORY_INDEX; head->r.bit++) { head 806 security/tomoyo/common.c const u8 i = head->r.bit; head 811 security/tomoyo/common.c tomoyo_print_namespace(head); head 813 security/tomoyo/common.c tomoyo_io_printf(head, "%u-CONFIG::%s::%s", head 819 security/tomoyo/common.c tomoyo_io_printf(head, "%u-CONFIG::%s", index, head 821 security/tomoyo/common.c tomoyo_print_config(head, config); head 822 security/tomoyo/common.c head->r.bit++; head 825 security/tomoyo/common.c if (head->r.bit == TOMOYO_MAX_MAC_INDEX head 827 security/tomoyo/common.c head->r.index++; head 828 security/tomoyo/common.c head->r.step = 1; head 832 security/tomoyo/common.c if (tomoyo_flush(head)) head 847 security/tomoyo/common.c return container_of(a, struct tomoyo_manager, head)->manager == head 848 security/tomoyo/common.c container_of(b, struct tomoyo_manager, head)->manager; head 877 security/tomoyo/common.c error = tomoyo_update_policy(&e.head, sizeof(e), ¶m, head 893 security/tomoyo/common.c static int tomoyo_write_manager(struct tomoyo_io_buffer *head) head 895 security/tomoyo/common.c char *data = head->write_buf; head 898 security/tomoyo/common.c tomoyo_manage_by_non_root = !head->w.is_delete; head 901 security/tomoyo/common.c return tomoyo_update_manager_entry(data, head->w.is_delete); head 911 security/tomoyo/common.c static void tomoyo_read_manager(struct tomoyo_io_buffer *head) head 913 security/tomoyo/common.c if (head->r.eof) head 915 security/tomoyo/common.c list_for_each_cookie(head->r.acl, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER]) { head 917 security/tomoyo/common.c list_entry(head->r.acl, typeof(*ptr), head.list); head 919 security/tomoyo/common.c if (ptr->head.is_deleted) head 921 security/tomoyo/common.c if (!tomoyo_flush(head)) head 923 security/tomoyo/common.c tomoyo_set_string(head, ptr->manager->name); head 924 security/tomoyo/common.c tomoyo_set_lf(head); head 926 security/tomoyo/common.c head->r.eof = true; head 954 security/tomoyo/common.c list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list, head 956 security/tomoyo/common.c if (!ptr->head.is_deleted && head 990 security/tomoyo/common.c static bool tomoyo_select_domain(struct tomoyo_io_buffer *head, head 1019 security/tomoyo/common.c head->w.domain = domain; head 1021 security/tomoyo/common.c if (!head->read_buf) head 1023 security/tomoyo/common.c memset(&head->r, 0, sizeof(head->r)); head 1024 security/tomoyo/common.c head->r.print_this_domain_only = true; head 1026 security/tomoyo/common.c head->r.domain = &domain->list; head 1028 security/tomoyo/common.c head->r.eof = 1; head 1029 security/tomoyo/common.c tomoyo_io_printf(head, "# 
select %s\n", data); head 1031 security/tomoyo/common.c tomoyo_io_printf(head, "# This is a deleted domain.\n"); head 1046 security/tomoyo/common.c const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head); head 1047 security/tomoyo/common.c const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head); head 1067 security/tomoyo/common.c .head.type = TOMOYO_TYPE_MANUAL_TASK_ACL, head 1072 security/tomoyo/common.c error = tomoyo_update_domain(&e.head, sizeof(e), param, head 1172 security/tomoyo/common.c static int tomoyo_write_domain(struct tomoyo_io_buffer *head) head 1174 security/tomoyo/common.c char *data = head->write_buf; head 1176 security/tomoyo/common.c struct tomoyo_domain_info *domain = head->w.domain; head 1177 security/tomoyo/common.c const bool is_delete = head->w.is_delete; head 1191 security/tomoyo/common.c head->w.domain = domain; head 1232 security/tomoyo/common.c static bool tomoyo_print_condition(struct tomoyo_io_buffer *head, head 1235 security/tomoyo/common.c switch (head->r.cond_step) { head 1237 security/tomoyo/common.c head->r.cond_index = 0; head 1238 security/tomoyo/common.c head->r.cond_step++; head 1240 security/tomoyo/common.c tomoyo_set_space(head); head 1241 security/tomoyo/common.c tomoyo_set_string(head, cond->transit->name); head 1260 security/tomoyo/common.c for (skip = 0; skip < head->r.cond_index; skip++) { head 1285 security/tomoyo/common.c while (head->r.cond_index < condc) { head 1290 security/tomoyo/common.c if (!tomoyo_flush(head)) head 1293 security/tomoyo/common.c head->r.cond_index++; head 1294 security/tomoyo/common.c tomoyo_set_space(head); head 1297 security/tomoyo/common.c tomoyo_io_printf(head, head 1300 security/tomoyo/common.c tomoyo_set_string(head, head 1302 security/tomoyo/common.c tomoyo_set_string(head, "\""); head 1306 security/tomoyo/common.c tomoyo_set_string(head, head 1308 security/tomoyo/common.c tomoyo_set_string(head, head 1310 security/tomoyo/common.c tomoyo_io_printf(head, "\"]%s=", envp->is_not ? "!" : ""); head 1312 security/tomoyo/common.c tomoyo_set_string(head, "\""); head 1313 security/tomoyo/common.c tomoyo_set_string(head, envp->value->name); head 1314 security/tomoyo/common.c tomoyo_set_string(head, "\""); head 1316 security/tomoyo/common.c tomoyo_set_string(head, head 1323 security/tomoyo/common.c (head, numbers_p++); head 1326 security/tomoyo/common.c tomoyo_set_string(head, head 1330 security/tomoyo/common.c tomoyo_set_string(head, match ? 
"=" : "!="); head 1334 security/tomoyo/common.c (head, names_p++); head 1338 security/tomoyo/common.c (head, numbers_p++); head 1341 security/tomoyo/common.c tomoyo_set_string(head, head 1347 security/tomoyo/common.c head->r.cond_step++; head 1350 security/tomoyo/common.c if (!tomoyo_flush(head)) head 1352 security/tomoyo/common.c head->r.cond_step++; head 1356 security/tomoyo/common.c tomoyo_io_printf(head, " grant_log=%s", head 1359 security/tomoyo/common.c tomoyo_set_lf(head); head 1373 security/tomoyo/common.c static void tomoyo_set_group(struct tomoyo_io_buffer *head, head 1376 security/tomoyo/common.c if (head->type == TOMOYO_EXCEPTIONPOLICY) { head 1377 security/tomoyo/common.c tomoyo_print_namespace(head); head 1378 security/tomoyo/common.c tomoyo_io_printf(head, "acl_group %u ", head 1379 security/tomoyo/common.c head->r.acl_group_index); head 1381 security/tomoyo/common.c tomoyo_set_string(head, category); head 1392 security/tomoyo/common.c static bool tomoyo_print_entry(struct tomoyo_io_buffer *head, head 1399 security/tomoyo/common.c if (head->r.print_cond_part) head 1403 security/tomoyo/common.c if (!tomoyo_flush(head)) head 1407 security/tomoyo/common.c container_of(acl, typeof(*ptr), head); head 1413 security/tomoyo/common.c if (head->r.print_transition_related_only && head 1417 security/tomoyo/common.c tomoyo_set_group(head, "file "); head 1420 security/tomoyo/common.c tomoyo_set_slash(head); head 1422 security/tomoyo/common.c tomoyo_set_string(head, tomoyo_path_keyword[bit]); head 1426 security/tomoyo/common.c tomoyo_print_name_union(head, &ptr->name); head 1429 security/tomoyo/common.c container_of(acl, typeof(*ptr), head); head 1431 security/tomoyo/common.c tomoyo_set_group(head, "task "); head 1432 security/tomoyo/common.c tomoyo_set_string(head, "manual_domain_transition "); head 1433 security/tomoyo/common.c tomoyo_set_string(head, ptr->domainname->name); head 1434 security/tomoyo/common.c } else if (head->r.print_transition_related_only) { head 1438 security/tomoyo/common.c container_of(acl, typeof(*ptr), head); head 1445 security/tomoyo/common.c tomoyo_set_group(head, "file "); head 1448 security/tomoyo/common.c tomoyo_set_slash(head); head 1450 security/tomoyo/common.c tomoyo_set_string(head, tomoyo_mac_keywords head 1455 security/tomoyo/common.c tomoyo_print_name_union(head, &ptr->name1); head 1456 security/tomoyo/common.c tomoyo_print_name_union(head, &ptr->name2); head 1459 security/tomoyo/common.c container_of(acl, typeof(*ptr), head); head 1466 security/tomoyo/common.c tomoyo_set_group(head, "file "); head 1469 security/tomoyo/common.c tomoyo_set_slash(head); head 1471 security/tomoyo/common.c tomoyo_set_string(head, tomoyo_mac_keywords head 1476 security/tomoyo/common.c tomoyo_print_name_union(head, &ptr->name); head 1477 security/tomoyo/common.c tomoyo_print_number_union(head, &ptr->number); head 1480 security/tomoyo/common.c container_of(acl, typeof(*ptr), head); head 1487 security/tomoyo/common.c tomoyo_set_group(head, "file "); head 1490 security/tomoyo/common.c tomoyo_set_slash(head); head 1492 security/tomoyo/common.c tomoyo_set_string(head, tomoyo_mac_keywords head 1497 security/tomoyo/common.c tomoyo_print_name_union(head, &ptr->name); head 1498 security/tomoyo/common.c tomoyo_print_number_union(head, &ptr->mode); head 1499 security/tomoyo/common.c tomoyo_print_number_union(head, &ptr->major); head 1500 security/tomoyo/common.c tomoyo_print_number_union(head, &ptr->minor); head 1503 security/tomoyo/common.c container_of(acl, typeof(*ptr), head); head 
1510 security/tomoyo/common.c tomoyo_set_group(head, "network inet "); head 1511 security/tomoyo/common.c tomoyo_set_string(head, tomoyo_proto_keyword head 1513 security/tomoyo/common.c tomoyo_set_space(head); head 1516 security/tomoyo/common.c tomoyo_set_slash(head); head 1518 security/tomoyo/common.c tomoyo_set_string(head, tomoyo_socket_keyword[bit]); head 1522 security/tomoyo/common.c tomoyo_set_space(head); head 1524 security/tomoyo/common.c tomoyo_set_string(head, "@"); head 1525 security/tomoyo/common.c tomoyo_set_string(head, ptr->address.group->group_name head 1531 security/tomoyo/common.c tomoyo_io_printf(head, "%s", buf); head 1533 security/tomoyo/common.c tomoyo_print_number_union(head, &ptr->port); head 1536 security/tomoyo/common.c container_of(acl, typeof(*ptr), head); head 1543 security/tomoyo/common.c tomoyo_set_group(head, "network unix "); head 1544 security/tomoyo/common.c tomoyo_set_string(head, tomoyo_proto_keyword head 1546 security/tomoyo/common.c tomoyo_set_space(head); head 1549 security/tomoyo/common.c tomoyo_set_slash(head); head 1551 security/tomoyo/common.c tomoyo_set_string(head, tomoyo_socket_keyword[bit]); head 1555 security/tomoyo/common.c tomoyo_print_name_union(head, &ptr->name); head 1558 security/tomoyo/common.c container_of(acl, typeof(*ptr), head); head 1560 security/tomoyo/common.c tomoyo_set_group(head, "file mount"); head 1561 security/tomoyo/common.c tomoyo_print_name_union(head, &ptr->dev_name); head 1562 security/tomoyo/common.c tomoyo_print_name_union(head, &ptr->dir_name); head 1563 security/tomoyo/common.c tomoyo_print_name_union(head, &ptr->fs_type); head 1564 security/tomoyo/common.c tomoyo_print_number_union(head, &ptr->flags); head 1567 security/tomoyo/common.c container_of(acl, typeof(*ptr), head); head 1569 security/tomoyo/common.c tomoyo_set_group(head, "misc env "); head 1570 security/tomoyo/common.c tomoyo_set_string(head, ptr->env->name); head 1573 security/tomoyo/common.c head->r.print_cond_part = true; head 1574 security/tomoyo/common.c head->r.cond_step = 0; head 1575 security/tomoyo/common.c if (!tomoyo_flush(head)) head 1578 security/tomoyo/common.c if (!tomoyo_print_condition(head, acl->cond)) head 1580 security/tomoyo/common.c head->r.print_cond_part = false; head 1582 security/tomoyo/common.c tomoyo_set_lf(head); head 1597 security/tomoyo/common.c static bool tomoyo_read_domain2(struct tomoyo_io_buffer *head, head 1600 security/tomoyo/common.c list_for_each_cookie(head->r.acl, list) { head 1602 security/tomoyo/common.c list_entry(head->r.acl, typeof(*ptr), list); head 1604 security/tomoyo/common.c if (!tomoyo_print_entry(head, ptr)) head 1607 security/tomoyo/common.c head->r.acl = NULL; head 1618 security/tomoyo/common.c static void tomoyo_read_domain(struct tomoyo_io_buffer *head) head 1620 security/tomoyo/common.c if (head->r.eof) head 1622 security/tomoyo/common.c list_for_each_cookie(head->r.domain, &tomoyo_domain_list) { head 1624 security/tomoyo/common.c list_entry(head->r.domain, typeof(*domain), list); head 1627 security/tomoyo/common.c switch (head->r.step) { head 1630 security/tomoyo/common.c !head->r.print_this_domain_only) head 1633 security/tomoyo/common.c tomoyo_set_string(head, domain->domainname->name); head 1634 security/tomoyo/common.c tomoyo_set_lf(head); head 1635 security/tomoyo/common.c tomoyo_io_printf(head, "use_profile %u\n", head 1639 security/tomoyo/common.c tomoyo_set_string(head, tomoyo_dif[i]); head 1640 security/tomoyo/common.c head->r.index = 0; head 1641 security/tomoyo/common.c 
head->r.step++; head 1644 security/tomoyo/common.c while (head->r.index < TOMOYO_MAX_ACL_GROUPS) { head 1645 security/tomoyo/common.c i = head->r.index++; head 1648 security/tomoyo/common.c tomoyo_io_printf(head, "use_group %u\n", i); head 1649 security/tomoyo/common.c if (!tomoyo_flush(head)) head 1652 security/tomoyo/common.c head->r.index = 0; head 1653 security/tomoyo/common.c head->r.step++; head 1654 security/tomoyo/common.c tomoyo_set_lf(head); head 1657 security/tomoyo/common.c if (!tomoyo_read_domain2(head, &domain->acl_info_list)) head 1659 security/tomoyo/common.c head->r.step++; head 1660 security/tomoyo/common.c if (!tomoyo_set_lf(head)) head 1664 security/tomoyo/common.c head->r.step = 0; head 1665 security/tomoyo/common.c if (head->r.print_this_domain_only) head 1670 security/tomoyo/common.c head->r.eof = true; head 1680 security/tomoyo/common.c static int tomoyo_write_pid(struct tomoyo_io_buffer *head) head 1682 security/tomoyo/common.c head->r.eof = false; head 1696 security/tomoyo/common.c static void tomoyo_read_pid(struct tomoyo_io_buffer *head) head 1698 security/tomoyo/common.c char *buf = head->write_buf; head 1706 security/tomoyo/common.c head->r.eof = true; head 1709 security/tomoyo/common.c if (head->r.w_pos || head->r.eof) head 1711 security/tomoyo/common.c head->r.eof = true; head 1726 security/tomoyo/common.c tomoyo_io_printf(head, "%u %u ", pid, domain->profile); head 1727 security/tomoyo/common.c tomoyo_set_string(head, domain->domainname->name); head 1756 security/tomoyo/common.c static int tomoyo_write_exception(struct tomoyo_io_buffer *head) head 1758 security/tomoyo/common.c const bool is_delete = head->w.is_delete; head 1760 security/tomoyo/common.c .ns = head->w.ns, head 1762 security/tomoyo/common.c .data = head->write_buf, head 1781 security/tomoyo/common.c (head->w.ns, &head->w.ns->acl_group[group], head 1797 security/tomoyo/common.c static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx) head 1800 security/tomoyo/common.c container_of(head->r.ns, typeof(*ns), namespace_list); head 1803 security/tomoyo/common.c list_for_each_cookie(head->r.group, list) { head 1805 security/tomoyo/common.c list_entry(head->r.group, typeof(*group), head.list); head 1807 security/tomoyo/common.c list_for_each_cookie(head->r.acl, &group->member_list) { head 1809 security/tomoyo/common.c list_entry(head->r.acl, typeof(*ptr), list); head 1813 security/tomoyo/common.c if (!tomoyo_flush(head)) head 1815 security/tomoyo/common.c tomoyo_print_namespace(head); head 1816 security/tomoyo/common.c tomoyo_set_string(head, tomoyo_group_name[idx]); head 1817 security/tomoyo/common.c tomoyo_set_string(head, group->group_name->name); head 1819 security/tomoyo/common.c tomoyo_set_space(head); head 1820 security/tomoyo/common.c tomoyo_set_string(head, container_of head 1822 security/tomoyo/common.c head)->member_name->name); head 1824 security/tomoyo/common.c tomoyo_print_number_union(head, &container_of head 1827 security/tomoyo/common.c head)->number); head 1832 security/tomoyo/common.c head); head 1836 security/tomoyo/common.c tomoyo_io_printf(head, " %s", buffer); head 1838 security/tomoyo/common.c tomoyo_set_lf(head); head 1840 security/tomoyo/common.c head->r.acl = NULL; head 1842 security/tomoyo/common.c head->r.group = NULL; head 1856 security/tomoyo/common.c static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) head 1859 security/tomoyo/common.c container_of(head->r.ns, typeof(*ns), namespace_list); head 1862 security/tomoyo/common.c 
list_for_each_cookie(head->r.acl, list) { head 1864 security/tomoyo/common.c container_of(head->r.acl, typeof(*acl), list); head 1867 security/tomoyo/common.c if (!tomoyo_flush(head)) head 1873 security/tomoyo/common.c container_of(acl, typeof(*ptr), head); head 1875 security/tomoyo/common.c tomoyo_print_namespace(head); head 1876 security/tomoyo/common.c tomoyo_set_string(head, tomoyo_transition_type head 1878 security/tomoyo/common.c tomoyo_set_string(head, ptr->program ? head 1880 security/tomoyo/common.c tomoyo_set_string(head, " from "); head 1881 security/tomoyo/common.c tomoyo_set_string(head, ptr->domainname ? head 1889 security/tomoyo/common.c container_of(acl, typeof(*ptr), head); head 1891 security/tomoyo/common.c tomoyo_print_namespace(head); head 1892 security/tomoyo/common.c tomoyo_set_string(head, "aggregator "); head 1893 security/tomoyo/common.c tomoyo_set_string(head, head 1895 security/tomoyo/common.c tomoyo_set_space(head); head 1896 security/tomoyo/common.c tomoyo_set_string(head, head 1903 security/tomoyo/common.c tomoyo_set_lf(head); head 1905 security/tomoyo/common.c head->r.acl = NULL; head 1916 security/tomoyo/common.c static void tomoyo_read_exception(struct tomoyo_io_buffer *head) head 1919 security/tomoyo/common.c container_of(head->r.ns, typeof(*ns), namespace_list); head 1921 security/tomoyo/common.c if (head->r.eof) head 1923 security/tomoyo/common.c while (head->r.step < TOMOYO_MAX_POLICY && head 1924 security/tomoyo/common.c tomoyo_read_policy(head, head->r.step)) head 1925 security/tomoyo/common.c head->r.step++; head 1926 security/tomoyo/common.c if (head->r.step < TOMOYO_MAX_POLICY) head 1928 security/tomoyo/common.c while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP && head 1929 security/tomoyo/common.c tomoyo_read_group(head, head->r.step - TOMOYO_MAX_POLICY)) head 1930 security/tomoyo/common.c head->r.step++; head 1931 security/tomoyo/common.c if (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP) head 1933 security/tomoyo/common.c while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP head 1935 security/tomoyo/common.c head->r.acl_group_index = head->r.step - TOMOYO_MAX_POLICY head 1937 security/tomoyo/common.c if (!tomoyo_read_domain2(head, &ns->acl_group head 1938 security/tomoyo/common.c [head->r.acl_group_index])) head 1940 security/tomoyo/common.c head->r.step++; head 1942 security/tomoyo/common.c head->r.eof = true; head 2202 security/tomoyo/common.c static void tomoyo_read_query(struct tomoyo_io_buffer *head) head 2209 security/tomoyo/common.c if (head->r.w_pos) head 2211 security/tomoyo/common.c kfree(head->read_buf); head 2212 security/tomoyo/common.c head->read_buf = NULL; head 2217 security/tomoyo/common.c if (pos++ != head->r.query_index) head 2224 security/tomoyo/common.c head->r.query_index = 0; head 2235 security/tomoyo/common.c if (pos++ != head->r.query_index) head 2248 security/tomoyo/common.c head->read_buf = buf; head 2249 security/tomoyo/common.c head->r.w[head->r.w_pos++] = buf; head 2250 security/tomoyo/common.c head->r.query_index++; head 2263 security/tomoyo/common.c static int tomoyo_write_answer(struct tomoyo_io_buffer *head) head 2265 security/tomoyo/common.c char *data = head->write_buf; head 2302 security/tomoyo/common.c static void tomoyo_read_version(struct tomoyo_io_buffer *head) head 2304 security/tomoyo/common.c if (!head->r.eof) { head 2305 security/tomoyo/common.c tomoyo_io_printf(head, "2.6.0"); head 2306 security/tomoyo/common.c head->r.eof = true; head 2350 security/tomoyo/common.c static void 
tomoyo_read_stat(struct tomoyo_io_buffer *head) head 2355 security/tomoyo/common.c if (head->r.eof) head 2358 security/tomoyo/common.c tomoyo_io_printf(head, "Policy %-30s %10u", head 2365 security/tomoyo/common.c tomoyo_io_printf(head, " (Last: %04u/%02u/%02u %02u:%02u:%02u)", head 2369 security/tomoyo/common.c tomoyo_set_lf(head); head 2375 security/tomoyo/common.c tomoyo_io_printf(head, "Memory used by %-22s %10u", head 2379 security/tomoyo/common.c tomoyo_io_printf(head, " (Quota: %10u)", used); head 2380 security/tomoyo/common.c tomoyo_set_lf(head); head 2382 security/tomoyo/common.c tomoyo_io_printf(head, "Total memory used: %10u\n", head 2384 security/tomoyo/common.c head->r.eof = true; head 2394 security/tomoyo/common.c static int tomoyo_write_stat(struct tomoyo_io_buffer *head) head 2396 security/tomoyo/common.c char *data = head->write_buf; head 2416 security/tomoyo/common.c struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_NOFS); head 2418 security/tomoyo/common.c if (!head) head 2420 security/tomoyo/common.c mutex_init(&head->io_sem); head 2421 security/tomoyo/common.c head->type = type; head 2425 security/tomoyo/common.c head->write = tomoyo_write_domain; head 2426 security/tomoyo/common.c head->read = tomoyo_read_domain; head 2430 security/tomoyo/common.c head->write = tomoyo_write_exception; head 2431 security/tomoyo/common.c head->read = tomoyo_read_exception; head 2435 security/tomoyo/common.c head->poll = tomoyo_poll_log; head 2436 security/tomoyo/common.c head->read = tomoyo_read_log; head 2440 security/tomoyo/common.c head->write = tomoyo_write_pid; head 2441 security/tomoyo/common.c head->read = tomoyo_read_pid; head 2445 security/tomoyo/common.c head->read = tomoyo_read_version; head 2446 security/tomoyo/common.c head->readbuf_size = 128; head 2450 security/tomoyo/common.c head->write = tomoyo_write_stat; head 2451 security/tomoyo/common.c head->read = tomoyo_read_stat; head 2452 security/tomoyo/common.c head->readbuf_size = 1024; head 2456 security/tomoyo/common.c head->write = tomoyo_write_profile; head 2457 security/tomoyo/common.c head->read = tomoyo_read_profile; head 2460 security/tomoyo/common.c head->poll = tomoyo_poll_query; head 2461 security/tomoyo/common.c head->write = tomoyo_write_answer; head 2462 security/tomoyo/common.c head->read = tomoyo_read_query; head 2466 security/tomoyo/common.c head->write = tomoyo_write_manager; head 2467 security/tomoyo/common.c head->read = tomoyo_read_manager; head 2475 security/tomoyo/common.c head->read = NULL; head 2476 security/tomoyo/common.c head->poll = NULL; head 2477 security/tomoyo/common.c } else if (!head->poll) { head 2479 security/tomoyo/common.c if (!head->readbuf_size) head 2480 security/tomoyo/common.c head->readbuf_size = 4096 * 2; head 2481 security/tomoyo/common.c head->read_buf = kzalloc(head->readbuf_size, GFP_NOFS); head 2482 security/tomoyo/common.c if (!head->read_buf) { head 2483 security/tomoyo/common.c kfree(head); head 2492 security/tomoyo/common.c head->write = NULL; head 2493 security/tomoyo/common.c } else if (head->write) { head 2494 security/tomoyo/common.c head->writebuf_size = 4096 * 2; head 2495 security/tomoyo/common.c head->write_buf = kzalloc(head->writebuf_size, GFP_NOFS); head 2496 security/tomoyo/common.c if (!head->write_buf) { head 2497 security/tomoyo/common.c kfree(head->read_buf); head 2498 security/tomoyo/common.c kfree(head); head 2510 security/tomoyo/common.c file->private_data = head; head 2511 security/tomoyo/common.c tomoyo_notify_gc(head, true); head 2526 
security/tomoyo/common.c struct tomoyo_io_buffer *head = file->private_data; head 2528 security/tomoyo/common.c if (head->poll) head 2529 security/tomoyo/common.c return head->poll(file, wait) | EPOLLOUT | EPOLLWRNORM; head 2540 security/tomoyo/common.c static inline void tomoyo_set_namespace_cursor(struct tomoyo_io_buffer *head) head 2544 security/tomoyo/common.c if (head->type != TOMOYO_EXCEPTIONPOLICY && head 2545 security/tomoyo/common.c head->type != TOMOYO_PROFILE) head 2551 security/tomoyo/common.c ns = head->r.ns; head 2552 security/tomoyo/common.c if (!ns || (head->r.eof && ns->next != &tomoyo_namespace_list)) { head 2554 security/tomoyo/common.c memset(&head->r, 0, sizeof(head->r)); head 2555 security/tomoyo/common.c head->r.ns = ns ? ns->next : tomoyo_namespace_list.next; head 2566 security/tomoyo/common.c static inline bool tomoyo_has_more_namespace(struct tomoyo_io_buffer *head) head 2568 security/tomoyo/common.c return (head->type == TOMOYO_EXCEPTIONPOLICY || head 2569 security/tomoyo/common.c head->type == TOMOYO_PROFILE) && head->r.eof && head 2570 security/tomoyo/common.c head->r.ns->next != &tomoyo_namespace_list; head 2582 security/tomoyo/common.c ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, head 2588 security/tomoyo/common.c if (!head->read) head 2590 security/tomoyo/common.c if (mutex_lock_interruptible(&head->io_sem)) head 2592 security/tomoyo/common.c head->read_user_buf = buffer; head 2593 security/tomoyo/common.c head->read_user_buf_avail = buffer_len; head 2595 security/tomoyo/common.c if (tomoyo_flush(head)) head 2598 security/tomoyo/common.c tomoyo_set_namespace_cursor(head); head 2599 security/tomoyo/common.c head->read(head); head 2600 security/tomoyo/common.c } while (tomoyo_flush(head) && head 2601 security/tomoyo/common.c tomoyo_has_more_namespace(head)); head 2603 security/tomoyo/common.c len = head->read_user_buf - buffer; head 2604 security/tomoyo/common.c mutex_unlock(&head->io_sem); head 2618 security/tomoyo/common.c static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line) head 2621 security/tomoyo/common.c head->w.is_delete = !strncmp(line, "delete ", 7); head 2622 security/tomoyo/common.c if (head->w.is_delete) head 2625 security/tomoyo/common.c if (head->type == TOMOYO_EXCEPTIONPOLICY || head 2626 security/tomoyo/common.c head->type == TOMOYO_PROFILE) { head 2632 security/tomoyo/common.c head->w.ns = tomoyo_assign_namespace(line); head 2635 security/tomoyo/common.c head->w.ns = NULL; head 2637 security/tomoyo/common.c head->w.ns = &tomoyo_kernel_namespace; head 2639 security/tomoyo/common.c if (!head->w.ns) head 2643 security/tomoyo/common.c return head->write(head); head 2655 security/tomoyo/common.c ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, head 2660 security/tomoyo/common.c char *cp0 = head->write_buf; head 2663 security/tomoyo/common.c if (!head->write) head 2667 security/tomoyo/common.c if (mutex_lock_interruptible(&head->io_sem)) head 2669 security/tomoyo/common.c head->read_user_buf_avail = 0; head 2675 security/tomoyo/common.c if (head->w.avail >= head->writebuf_size - 1) { head 2676 security/tomoyo/common.c const int len = head->writebuf_size * 2; head 2683 security/tomoyo/common.c memmove(cp, cp0, head->w.avail); head 2685 security/tomoyo/common.c head->write_buf = cp; head 2687 security/tomoyo/common.c head->writebuf_size = len; head 2695 security/tomoyo/common.c cp0[head->w.avail++] = c; head 2698 security/tomoyo/common.c cp0[head->w.avail - 1] = '\0'; head 2699 
security/tomoyo/common.c head->w.avail = 0; head 2702 security/tomoyo/common.c head->w.ns = &tomoyo_kernel_namespace; head 2703 security/tomoyo/common.c head->w.domain = NULL; head 2704 security/tomoyo/common.c memset(&head->r, 0, sizeof(head->r)); head 2708 security/tomoyo/common.c switch (head->type) { head 2713 security/tomoyo/common.c if (tomoyo_select_domain(head, cp0)) head 2718 security/tomoyo/common.c head->r.print_transition_related_only = true; head 2728 security/tomoyo/common.c switch (tomoyo_parse_policy(head, cp0)) { head 2733 security/tomoyo/common.c switch (head->type) { head 2749 security/tomoyo/common.c mutex_unlock(&head->io_sem); head 2758 security/tomoyo/common.c void tomoyo_close_control(struct tomoyo_io_buffer *head) head 2764 security/tomoyo/common.c if (head->type == TOMOYO_QUERY && head 2767 security/tomoyo/common.c tomoyo_notify_gc(head, false); head 2835 security/tomoyo/common.c struct tomoyo_io_buffer head = { }; head 2841 security/tomoyo/common.c head.type = TOMOYO_PROFILE; head 2842 security/tomoyo/common.c head.write = tomoyo_write_profile; head 2846 security/tomoyo/common.c head.type = TOMOYO_EXCEPTIONPOLICY; head 2847 security/tomoyo/common.c head.write = tomoyo_write_exception; head 2851 security/tomoyo/common.c head.type = TOMOYO_DOMAINPOLICY; head 2852 security/tomoyo/common.c head.write = tomoyo_write_domain; head 2856 security/tomoyo/common.c head.type = TOMOYO_MANAGER; head 2857 security/tomoyo/common.c head.write = tomoyo_write_manager; head 2861 security/tomoyo/common.c head.type = TOMOYO_STAT; head 2862 security/tomoyo/common.c head.write = tomoyo_write_stat; head 2872 security/tomoyo/common.c head.write_buf = start; head 2873 security/tomoyo/common.c tomoyo_parse_policy(&head, start); head 514 security/tomoyo/common.h struct tomoyo_shared_acl_head head; head 542 security/tomoyo/common.h struct tomoyo_shared_acl_head head; head 549 security/tomoyo/common.h struct tomoyo_acl_head head; head 555 security/tomoyo/common.h struct tomoyo_acl_head head; head 561 security/tomoyo/common.h struct tomoyo_acl_head head; head 653 security/tomoyo/common.h struct tomoyo_shared_acl_head head; head 699 security/tomoyo/common.h struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MANUAL_TASK_ACL */ head 710 security/tomoyo/common.h struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_ACL */ head 720 security/tomoyo/common.h struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_NUMBER_ACL */ head 729 security/tomoyo/common.h struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MKDEV_ACL */ head 741 security/tomoyo/common.h struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH2_ACL */ head 749 security/tomoyo/common.h struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MOUNT_ACL */ head 758 security/tomoyo/common.h struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_ENV_ACL */ head 764 security/tomoyo/common.h struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_INET_ACL */ head 773 security/tomoyo/common.h struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_UNIX_ACL */ head 794 security/tomoyo/common.h void (*read)(struct tomoyo_io_buffer *head); head 795 security/tomoyo/common.h int (*write)(struct tomoyo_io_buffer *head); head 850 security/tomoyo/common.h struct tomoyo_acl_head head; head 860 security/tomoyo/common.h struct tomoyo_acl_head head; head 867 security/tomoyo/common.h struct tomoyo_acl_head head; head 972 security/tomoyo/common.h void tomoyo_close_control(struct tomoyo_io_buffer *head); head 1026 security/tomoyo/common.h ssize_t tomoyo_read_control(struct 
tomoyo_io_buffer *head, char __user *buffer, head 1028 security/tomoyo/common.h ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, head 1058 security/tomoyo/common.h void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register); head 1065 security/tomoyo/common.h void tomoyo_read_log(struct tomoyo_io_buffer *head); head 1183 security/tomoyo/common.h atomic_dec(&ptr->head.users); head 1197 security/tomoyo/common.h atomic_dec(&cond->head.users); head 1210 security/tomoyo/common.h atomic_dec(&group->head.users); head 1329 security/tomoyo/common.h #define list_for_each_cookie(pos, head) \ head 1331 security/tomoyo/common.h pos = srcu_dereference((head)->next, &tomoyo_ss); \ head 1332 security/tomoyo/common.h for ( ; pos != (head); pos = srcu_dereference(pos->next, &tomoyo_ss)) head 418 security/tomoyo/condition.c list_for_each_entry(ptr, &tomoyo_condition_list, head.list) { head 420 security/tomoyo/condition.c atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) head 423 security/tomoyo/condition.c atomic_inc(&ptr->head.users); head 429 security/tomoyo/condition.c atomic_set(&entry->head.users, 1); head 430 security/tomoyo/condition.c list_add(&entry->head.list, &tomoyo_condition_list); head 439 security/tomoyo/condition.c tomoyo_del_condition(&entry->head.list); head 698 security/tomoyo/condition.c tomoyo_del_condition(&entry->head.list); head 117 security/tomoyo/domain.c container_of(new_entry, struct tomoyo_path_acl, head) head 224 security/tomoyo/domain.c head); head 227 security/tomoyo/domain.c head); head 276 security/tomoyo/domain.c error = tomoyo_update_policy(&e.head, sizeof(e), param, head 304 security/tomoyo/domain.c list_for_each_entry_rcu(ptr, list, head.list, head 306 security/tomoyo/domain.c if (ptr->head.is_deleted || ptr->type != type) head 386 security/tomoyo/domain.c head); head 388 security/tomoyo/domain.c head); head 419 security/tomoyo/domain.c error = tomoyo_update_policy(&e.head, sizeof(e), param, head 742 security/tomoyo/domain.c list_for_each_entry_rcu(ptr, list, head.list, head 744 security/tomoyo/domain.c if (ptr->head.is_deleted || head 22 security/tomoyo/environ.c container_of(ptr, typeof(*acl), head); head 79 security/tomoyo/environ.c const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head); head 80 security/tomoyo/environ.c const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head); head 96 security/tomoyo/environ.c struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL }; head 105 security/tomoyo/environ.c error = tomoyo_update_domain(&e.head, sizeof(e), param, head 256 security/tomoyo/file.c head); head 279 security/tomoyo/file.c container_of(ptr, typeof(*acl), head); head 300 security/tomoyo/file.c container_of(ptr, typeof(*acl), head); head 320 security/tomoyo/file.c container_of(ptr, typeof(*acl), head); head 344 security/tomoyo/file.c const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head); head 345 security/tomoyo/file.c const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head); head 363 security/tomoyo/file.c u16 * const a_perm = &container_of(a, struct tomoyo_path_acl, head) head 366 security/tomoyo/file.c const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm; head 390 security/tomoyo/file.c .head.type = TOMOYO_TYPE_PATH_ACL, head 398 security/tomoyo/file.c error = tomoyo_update_domain(&e.head, sizeof(e), param, head 416 security/tomoyo/file.c const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), head); head 417 security/tomoyo/file.c const 
struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), head); head 439 security/tomoyo/file.c head)->perm; head 441 security/tomoyo/file.c const u8 b_perm = container_of(b, struct tomoyo_mkdev_acl, head) head 466 security/tomoyo/file.c .head.type = TOMOYO_TYPE_MKDEV_ACL, head 477 security/tomoyo/file.c error = tomoyo_update_domain(&e.head, sizeof(e), param, head 498 security/tomoyo/file.c const struct tomoyo_path2_acl *p1 = container_of(a, typeof(*p1), head); head 499 security/tomoyo/file.c const struct tomoyo_path2_acl *p2 = container_of(b, typeof(*p2), head); head 518 security/tomoyo/file.c u8 * const a_perm = &container_of(a, struct tomoyo_path2_acl, head) head 521 security/tomoyo/file.c const u8 b_perm = container_of(b, struct tomoyo_path2_acl, head)->perm; head 545 security/tomoyo/file.c .head.type = TOMOYO_TYPE_PATH2_ACL, head 554 security/tomoyo/file.c error = tomoyo_update_domain(&e.head, sizeof(e), param, head 635 security/tomoyo/file.c head); head 637 security/tomoyo/file.c head); head 657 security/tomoyo/file.c head)->perm; head 659 security/tomoyo/file.c const u8 b_perm = container_of(b, struct tomoyo_path_number_acl, head) head 682 security/tomoyo/file.c .head.type = TOMOYO_TYPE_PATH_NUMBER_ACL, head 691 security/tomoyo/file.c error = tomoyo_update_domain(&e.head, sizeof(e), param, head 965 security/tomoyo/file.c const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head); head 966 security/tomoyo/file.c const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head); head 985 security/tomoyo/file.c struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL }; head 994 security/tomoyo/file.c error = tomoyo_update_domain(&e.head, sizeof(e), param, head 42 security/tomoyo/gc.c struct tomoyo_io_buffer *head; head 46 security/tomoyo/gc.c list_for_each_entry(head, &tomoyo_io_buffer_list, list) { head 47 security/tomoyo/gc.c head->users++; head 49 security/tomoyo/gc.c mutex_lock(&head->io_sem); head 50 security/tomoyo/gc.c if (head->r.domain == element || head->r.group == element || head 51 security/tomoyo/gc.c head->r.acl == element || &head->w.domain->list == element) head 53 security/tomoyo/gc.c mutex_unlock(&head->io_sem); head 55 security/tomoyo/gc.c head->users--; head 73 security/tomoyo/gc.c struct tomoyo_io_buffer *head; head 78 security/tomoyo/gc.c list_for_each_entry(head, &tomoyo_io_buffer_list, list) { head 81 security/tomoyo/gc.c head->users++; head 83 security/tomoyo/gc.c mutex_lock(&head->io_sem); head 85 security/tomoyo/gc.c const char *w = head->r.w[i]; head 92 security/tomoyo/gc.c mutex_unlock(&head->io_sem); head 94 security/tomoyo/gc.c head->users--; head 112 security/tomoyo/gc.c container_of(element, typeof(*ptr), head.list); head 128 security/tomoyo/gc.c container_of(element, typeof(*ptr), head.list); head 144 security/tomoyo/gc.c container_of(element, typeof(*ptr), head.list); head 166 security/tomoyo/gc.c = container_of(acl, typeof(*entry), head); head 173 security/tomoyo/gc.c = container_of(acl, typeof(*entry), head); head 181 security/tomoyo/gc.c = container_of(acl, typeof(*entry), head); head 189 security/tomoyo/gc.c = container_of(acl, typeof(*entry), head); head 199 security/tomoyo/gc.c = container_of(acl, typeof(*entry), head); head 209 security/tomoyo/gc.c container_of(acl, typeof(*entry), head); head 217 security/tomoyo/gc.c container_of(acl, typeof(*entry), head); head 226 security/tomoyo/gc.c container_of(acl, typeof(*entry), head); head 234 security/tomoyo/gc.c container_of(acl, typeof(*entry), head); head 280 
security/tomoyo/gc.c head.list); head 332 security/tomoyo/gc.c container_of(element, typeof(*member), head.list); head 347 security/tomoyo/gc.c container_of(element, typeof(*group), head.list); head 439 security/tomoyo/gc.c head.list)->entry.name)) head 578 security/tomoyo/gc.c list_for_each_entry_safe(group, tmp, list, head.list) { head 581 security/tomoyo/gc.c atomic_read(&group->head.users) > 0) head 583 security/tomoyo/gc.c atomic_set(&group->head.users, head 586 security/tomoyo/gc.c &group->head.list); head 621 security/tomoyo/gc.c struct tomoyo_io_buffer *head; head 625 security/tomoyo/gc.c list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list, head 627 security/tomoyo/gc.c if (head->users) head 629 security/tomoyo/gc.c list_del(&head->list); head 630 security/tomoyo/gc.c kfree(head->read_buf); head 631 security/tomoyo/gc.c kfree(head->write_buf); head 632 security/tomoyo/gc.c kfree(head); head 650 security/tomoyo/gc.c void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register) head 656 security/tomoyo/gc.c head->users = 1; head 657 security/tomoyo/gc.c list_add(&head->list, &tomoyo_io_buffer_list); head 659 security/tomoyo/gc.c is_write = head->write_buf != NULL; head 660 security/tomoyo/gc.c if (!--head->users) { head 661 security/tomoyo/gc.c list_del(&head->list); head 662 security/tomoyo/gc.c kfree(head->read_buf); head 663 security/tomoyo/gc.c kfree(head->write_buf); head 664 security/tomoyo/gc.c kfree(head); head 24 security/tomoyo/group.c return container_of(a, struct tomoyo_path_group, head)->member_name == head 25 security/tomoyo/group.c container_of(b, struct tomoyo_path_group, head)->member_name; head 39 security/tomoyo/group.c return !memcmp(&container_of(a, struct tomoyo_number_group, head) head 41 security/tomoyo/group.c &container_of(b, struct tomoyo_number_group, head) head 43 security/tomoyo/group.c sizeof(container_of(a, struct tomoyo_number_group, head) head 59 security/tomoyo/group.c head); head 61 security/tomoyo/group.c head); head 90 security/tomoyo/group.c error = tomoyo_update_policy(&e.head, sizeof(e), param, head 99 security/tomoyo/group.c error = tomoyo_update_policy(&e.head, sizeof(e), param, head 111 security/tomoyo/group.c error = tomoyo_update_policy(&e.head, sizeof(e), param, head 136 security/tomoyo/group.c list_for_each_entry_rcu(member, &group->member_list, head.list, head 138 security/tomoyo/group.c if (member->head.is_deleted) head 165 security/tomoyo/group.c list_for_each_entry_rcu(member, &group->member_list, head.list, head 167 security/tomoyo/group.c if (member->head.is_deleted) head 196 security/tomoyo/group.c list_for_each_entry_rcu(member, &group->member_list, head.list, head 198 security/tomoyo/group.c if (member->head.is_deleted) head 112 security/tomoyo/memory.c list_for_each_entry(group, list, head.list) { head 114 security/tomoyo/memory.c atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS) head 116 security/tomoyo/memory.c atomic_inc(&group->head.users); head 125 security/tomoyo/memory.c atomic_set(&entry->head.users, 1); head 126 security/tomoyo/memory.c list_add_tail_rcu(&entry->head.list, list); head 157 security/tomoyo/memory.c struct list_head *head; head 163 security/tomoyo/memory.c head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)]; head 166 security/tomoyo/memory.c list_for_each_entry(ptr, head, head.list) { head 168 security/tomoyo/memory.c atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) head 170 security/tomoyo/memory.c atomic_inc(&ptr->head.users); head 177 
security/tomoyo/memory.c atomic_set(&ptr->head.users, 1); head 179 security/tomoyo/memory.c list_add_tail(&ptr->head.list, head); head 51 security/tomoyo/mount.c container_of(ptr, typeof(*acl), head); head 195 security/tomoyo/network.c const struct tomoyo_inet_acl *p1 = container_of(a, typeof(*p1), head); head 196 security/tomoyo/network.c const struct tomoyo_inet_acl *p2 = container_of(b, typeof(*p2), head); head 214 security/tomoyo/network.c const struct tomoyo_unix_acl *p1 = container_of(a, typeof(*p1), head); head 215 security/tomoyo/network.c const struct tomoyo_unix_acl *p2 = container_of(b, typeof(*p2), head); head 235 security/tomoyo/network.c &container_of(a, struct tomoyo_inet_acl, head)->perm; head 237 security/tomoyo/network.c const u8 b_perm = container_of(b, struct tomoyo_inet_acl, head)->perm; head 261 security/tomoyo/network.c &container_of(a, struct tomoyo_unix_acl, head)->perm; head 263 security/tomoyo/network.c const u8 b_perm = container_of(b, struct tomoyo_unix_acl, head)->perm; head 284 security/tomoyo/network.c struct tomoyo_inet_acl e = { .head.type = TOMOYO_TYPE_INET_ACL }; head 311 security/tomoyo/network.c error = tomoyo_update_domain(&e.head, sizeof(e), param, head 329 security/tomoyo/network.c struct tomoyo_unix_acl e = { .head.type = TOMOYO_TYPE_UNIX_ACL }; head 345 security/tomoyo/network.c error = tomoyo_update_domain(&e.head, sizeof(e), param, head 423 security/tomoyo/network.c container_of(ptr, typeof(*acl), head); head 453 security/tomoyo/network.c container_of(ptr, typeof(*acl), head); head 23 security/tomoyo/securityfs_if.c head); head 1041 security/tomoyo/util.c perm = container_of(ptr, struct tomoyo_path_acl, head) head 1045 security/tomoyo/util.c perm = container_of(ptr, struct tomoyo_path2_acl, head) head 1050 security/tomoyo/util.c head)->perm; head 1054 security/tomoyo/util.c head)->perm; head 1058 security/tomoyo/util.c head)->perm; head 1062 security/tomoyo/util.c head)->perm; head 49 sound/core/seq/oss/seq_oss_readq.c q->head = q->tail = 0; head 78 sound/core/seq/oss/seq_oss_readq.c q->head = q->tail = 0; head 173 sound/core/seq/oss/seq_oss_readq.c memcpy(rec, &q->q[q->head], sizeof(*rec)); head 184 sound/core/seq/oss/seq_oss_readq.c (q->qlen > 0 || q->head == q->tail), head 196 sound/core/seq/oss/seq_oss_readq.c q->head = (q->head + 1) % q->maxlen; head 22 sound/core/seq/oss/seq_oss_readq.h int head, tail; head 42 sound/core/seq/seq_fifo.c f->head = NULL; head 125 sound/core/seq/seq_fifo.c if (f->head == NULL) head 126 sound/core/seq/seq_fifo.c f->head = cell; head 146 sound/core/seq/seq_fifo.c if ((cell = f->head) != NULL) { head 147 sound/core/seq/seq_fifo.c f->head = cell->next; head 205 sound/core/seq/seq_fifo.c cell->next = f->head; head 206 sound/core/seq/seq_fifo.c f->head = cell; head 244 sound/core/seq/seq_fifo.c oldhead = f->head; head 247 sound/core/seq/seq_fifo.c f->head = NULL; head 17 sound/core/seq/seq_fifo.h struct snd_seq_event_cell *head; /* pointer to head of fifo */ head 51 sound/core/seq/seq_prioq.c f->head = NULL; head 165 sound/core/seq/seq_prioq.c cur = f->head; /* cursor */ head 193 sound/core/seq/seq_prioq.c if (f->head == cur) /* this is the first cell, set head to it */ head 194 sound/core/seq/seq_prioq.c f->head = cell; head 224 sound/core/seq/seq_prioq.c cell = f->head; head 228 sound/core/seq/seq_prioq.c f->head = cell->next; head 284 sound/core/seq/seq_prioq.c cell = f->head; head 289 sound/core/seq/seq_prioq.c if (cell == f->head) { head 290 sound/core/seq/seq_prioq.c f->head = cell->next; head 394 
sound/core/seq/seq_prioq.c cell = f->head; head 402 sound/core/seq/seq_prioq.c if (cell == f->head) { head 403 sound/core/seq/seq_prioq.c f->head = cell->next; head 15 sound/core/seq/seq_prioq.h struct snd_seq_event_cell *head; /* pointer to head of prioq */ head 748 sound/core/timer.c struct list_head *head) head 753 sound/core/timer.c while (!list_empty(head)) { head 754 sound/core/timer.c ti = list_first_entry(head, struct snd_timer_instance, head 776 sound/core/timer.c struct list_head *head) head 781 sound/core/timer.c while (!list_empty(head)) head 782 sound/core/timer.c list_del_init(head->next); head 108 sound/isa/msnd/msnd_midi.c u16 head, tail, size; head 111 sound/isa/msnd/msnd_midi.c head = readw(mpu->dev->MIDQ + JQS_wHead); head 114 sound/isa/msnd/msnd_midi.c if (head > size || tail > size) head 116 sound/isa/msnd/msnd_midi.c while (head != tail) { head 117 sound/isa/msnd/msnd_midi.c unsigned char val = readw(pwMIDQData + 2 * head); head 121 sound/isa/msnd/msnd_midi.c if (++head > size) head 122 sound/isa/msnd/msnd_midi.c head = 0; head 123 sound/isa/msnd/msnd_midi.c writew(head, mpu->dev->MIDQ + JQS_wHead); head 159 sound/isa/msnd/msnd_pinnacle.c u16 head, tail, size; head 165 sound/isa/msnd/msnd_pinnacle.c head = readw(chip->DSPQ + JQS_wHead); head 168 sound/isa/msnd/msnd_pinnacle.c if (head > size || tail > size) head 170 sound/isa/msnd/msnd_pinnacle.c while (head != tail) { head 171 sound/isa/msnd/msnd_pinnacle.c snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head)); head 172 sound/isa/msnd/msnd_pinnacle.c if (++head > size) head 173 sound/isa/msnd/msnd_pinnacle.c head = 0; head 174 sound/isa/msnd/msnd_pinnacle.c writew(head, chip->DSPQ + JQS_wHead); head 21 sound/pci/ctxfi/ctimap.c struct list_head *pos, *pre, *head; head 24 sound/pci/ctxfi/ctimap.c head = mappers; head 26 sound/pci/ctxfi/ctimap.c if (list_empty(head)) { head 29 sound/pci/ctxfi/ctimap.c list_add(&entry->list, head); head 33 sound/pci/ctxfi/ctimap.c list_for_each(pos, head) { head 41 sound/pci/ctxfi/ctimap.c if (pos != head) { head 43 sound/pci/ctxfi/ctimap.c if (pre == head) head 44 sound/pci/ctxfi/ctimap.c pre = head->prev; head 48 sound/pci/ctxfi/ctimap.c pre = head->prev; head 49 sound/pci/ctxfi/ctimap.c pos = head->next; head 50 sound/pci/ctxfi/ctimap.c list_add_tail(&entry->list, head); head 67 sound/pci/ctxfi/ctimap.c struct list_head *next, *pre, *head; head 70 sound/pci/ctxfi/ctimap.c head = mappers; head 72 sound/pci/ctxfi/ctimap.c if (list_empty(head)) head 75 sound/pci/ctxfi/ctimap.c pre = (entry->list.prev == head) ? head->prev : entry->list.prev; head 76 sound/pci/ctxfi/ctimap.c next = (entry->list.next == head) ? 
head->next : entry->list.next; head 96 sound/pci/ctxfi/ctimap.c void free_input_mapper_list(struct list_head *head) head 101 sound/pci/ctxfi/ctimap.c while (!list_empty(head)) { head 102 sound/pci/ctxfi/ctimap.c pos = head->next; head 1132 sound/pci/echoaudio/echoaudio_dsp.c int head = pipe->sglist_head; head 1135 sound/pci/echoaudio/echoaudio_dsp.c if (head < MAX_SGLIST_ENTRIES - 1) { head 1136 sound/pci/echoaudio/echoaudio_dsp.c list[head].addr = cpu_to_le32(address); head 1137 sound/pci/echoaudio/echoaudio_dsp.c list[head].size = cpu_to_le32(length); head 55 sound/soc/codecs/hdac_hdmi.c struct list_head head; head 75 sound/soc/codecs/hdac_hdmi.c struct list_head head; head 84 sound/soc/codecs/hdac_hdmi.c struct list_head head; head 98 sound/soc/codecs/hdac_hdmi.c struct list_head head; head 155 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pcm, &hdmi->pcm_list, head) { head 293 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pcm, &hdmi->pcm_list, head) { head 535 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pcm, &hdmi->pcm_list, head) { head 540 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(port, &pcm->port_list, head) { head 572 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(cvt, &hdmi->cvt_list, head) { head 725 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pcm, &hdmi->pcm_list, head) { head 729 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(p, &pcm->port_list, head) { head 920 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pcm, &hdmi->pcm_list, head) { head 924 sound/soc/codecs/hdac_hdmi.c list_for_each_entry_safe(p, p_next, &pcm->port_list, head) { head 928 sound/soc/codecs/hdac_hdmi.c list_del(&p->head); head 937 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pcm, &hdmi->pcm_list, head) { head 939 sound/soc/codecs/hdac_hdmi.c list_add_tail(&port->head, &pcm->port_list); head 1010 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(cvt, &hdmi->cvt_list, head) { head 1095 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(cvt, &hdmi->cvt_list, head) { head 1107 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pin, &hdmi->pin_list, head) { head 1125 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pin, &hdmi->pin_list, head) { head 1151 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pin, &hdmi->pin_list, head) { head 1186 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(cvt, &hdmi->cvt_list, head) { head 1219 sound/soc/codecs/hdac_hdmi.c list_add_tail(&cvt->head, &hdmi->cvt_list); head 1370 sound/soc/codecs/hdac_hdmi.c list_add_tail(&pin->head, &hdmi->pin_list); head 1453 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(cvt, &hdmi->cvt_list, head) { head 1628 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pin, &hdmi->pin_list, head) { head 1689 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pin, &hdmi->pin_list, head) { head 1738 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pin, &hdmi->pin_list, head) { head 1780 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pin, &hdmi->pin_list, head) { head 1824 sound/soc/codecs/hdac_hdmi.c list_add_tail(&pcm->head, &hdmi->pcm_list); head 1836 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pin, &hdmi->pin_list, head) { head 1988 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(port, &pcm->port_list, head) head 2020 sound/soc/codecs/hdac_hdmi.c port = list_first_entry(&pcm->port_list, struct hdac_hdmi_port, head); head 2117 sound/soc/codecs/hdac_hdmi.c list_for_each_entry(pin, &hdmi->pin_list, head) head 28 sound/soc/codecs/sigmadsp.c struct list_head head; head 39 sound/soc/codecs/sigmadsp.c struct list_head head; 
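Note: the seq_oss_readq, seq_fifo and msnd_midi entries above all share the same fixed-size circular-queue idiom: a consumer index head chases a producer index tail and wraps back to 0 at the end of the buffer (q->head = (q->head + 1) % q->maxlen, or if (++head > size) head = 0). A minimal standalone sketch of that idiom follows; names and sizes are hypothetical, not code from any indexed file.

#include <stdio.h>

/* Hypothetical fixed-size queue mirroring the head/tail wraparound
 * idiom in the msnd_midi and seq_oss_readq entries above. */
#define QSIZE 8

struct byte_queue {
	unsigned char buf[QSIZE];
	int head;	/* next slot to read */
	int tail;	/* next slot to write */
};

static int queue_put(struct byte_queue *q, unsigned char c)
{
	int next = (q->tail + 1) % QSIZE;

	if (next == q->head)	/* full: one slot stays empty */
		return -1;
	q->buf[q->tail] = c;
	q->tail = next;
	return 0;
}

static int queue_get(struct byte_queue *q)
{
	int c;

	if (q->head == q->tail)	/* empty when head catches tail */
		return -1;
	c = q->buf[q->head];
	q->head = (q->head + 1) % QSIZE;	/* advance and wrap */
	return c;
}

int main(void)
{
	struct byte_queue q = { .head = 0, .tail = 0 };
	int c;

	for (c = 'a'; c <= 'e'; c++)
		queue_put(&q, c);
	while ((c = queue_get(&q)) >= 0)
		putchar(c);
	putchar('\n');	/* prints: abcde */
	return 0;
}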
head 238 sound/soc/codecs/sigmadsp.c list_add_tail(&ctrl->head, &sigmadsp->ctrl_list); head 269 sound/soc/codecs/sigmadsp.c list_add_tail(&data->head, &sigmadsp->data_list); head 411 sound/soc/codecs/sigmadsp.c list_add_tail(&data->head, &sigmadsp->data_list); head 458 sound/soc/codecs/sigmadsp.c list_for_each_entry_safe(ctrl, _ctrl, &sigmadsp->ctrl_list, head) { head 463 sound/soc/codecs/sigmadsp.c list_for_each_entry_safe(data, _data, &sigmadsp->data_list, head) head 715 sound/soc/codecs/sigmadsp.c list_for_each_entry(ctrl, &sigmadsp->ctrl_list, head) { head 750 sound/soc/codecs/sigmadsp.c list_for_each_entry(data, &sigmadsp->data_list, head) { head 760 sound/soc/codecs/sigmadsp.c list_for_each_entry(ctrl, &sigmadsp->ctrl_list, head) head 784 sound/soc/codecs/sigmadsp.c list_for_each_entry(ctrl, &sigmadsp->ctrl_list, head) head 34 sound/soc/intel/boards/bxt_da7219_max98357a.c struct list_head head; head 237 sound/soc/intel/boards/bxt_da7219_max98357a.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 618 sound/soc/intel/boards/bxt_da7219_max98357a.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 27 sound/soc/intel/boards/bxt_rt298.c struct list_head head; head 197 sound/soc/intel/boards/bxt_rt298.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 530 sound/soc/intel/boards/bxt_rt298.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 36 sound/soc/intel/boards/glk_rt5682_max98357a.c struct list_head head; head 219 sound/soc/intel/boards/glk_rt5682_max98357a.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 548 sound/soc/intel/boards/glk_rt5682_max98357a.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 34 sound/soc/intel/boards/kbl_da7219_max98357a.c struct list_head head; head 216 sound/soc/intel/boards/kbl_da7219_max98357a.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 548 sound/soc/intel/boards/kbl_da7219_max98357a.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 42 sound/soc/intel/boards/kbl_da7219_max98927.c struct list_head head; head 391 sound/soc/intel/boards/kbl_da7219_max98927.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 977 sound/soc/intel/boards/kbl_da7219_max98927.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 36 sound/soc/intel/boards/kbl_rt5660.c struct list_head head; head 223 sound/soc/intel/boards/kbl_rt5660.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 470 sound/soc/intel/boards/kbl_rt5660.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 38 sound/soc/intel/boards/kbl_rt5663_max98927.c struct list_head head; head 318 sound/soc/intel/boards/kbl_rt5663_max98927.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 888 sound/soc/intel/boards/kbl_rt5663_max98927.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 44 sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c struct list_head head; head 203 sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 602 sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 36 sound/soc/intel/boards/skl_hda_dsp_common.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 142 sound/soc/intel/boards/skl_hda_dsp_common.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 21 sound/soc/intel/boards/skl_hda_dsp_common.h struct list_head head; head 30 sound/soc/intel/boards/skl_nau88l25_max98357a.c struct list_head head; head 195 sound/soc/intel/boards/skl_nau88l25_max98357a.c list_add_tail(&pcm->head, 
&ctx->hdmi_pcm_list); head 213 sound/soc/intel/boards/skl_nau88l25_max98357a.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 231 sound/soc/intel/boards/skl_nau88l25_max98357a.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 607 sound/soc/intel/boards/skl_nau88l25_max98357a.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 34 sound/soc/intel/boards/skl_nau88l25_ssm4567.c struct list_head head; head 214 sound/soc/intel/boards/skl_nau88l25_ssm4567.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 232 sound/soc/intel/boards/skl_nau88l25_ssm4567.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 251 sound/soc/intel/boards/skl_nau88l25_ssm4567.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 648 sound/soc/intel/boards/skl_nau88l25_ssm4567.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 27 sound/soc/intel/boards/skl_rt286.c struct list_head head; head 156 sound/soc/intel/boards/skl_rt286.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 489 sound/soc/intel/boards/skl_rt286.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 51 sound/soc/intel/boards/sof_rt5682.c struct list_head head; head 132 sound/soc/intel/boards/sof_rt5682.c list_add_tail(&pcm->head, &ctx->hdmi_pcm_list); head 281 sound/soc/intel/boards/sof_rt5682.c list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) { head 127 sound/usb/mixer.c ((cval)->head.mixer->ignore_ctl_error ? 0 : (err)) head 298 sound/usb/mixer.c struct snd_usb_audio *chip = cval->head.mixer->chip; head 309 sound/usb/mixer.c idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8); head 334 sound/usb/mixer.c struct snd_usb_audio *chip = cval->head.mixer->chip; head 357 sound/usb/mixer.c idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8); head 401 sound/usb/mixer.c return (cval->head.mixer->protocol == UAC_VERSION_1) ? 
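Note: the recurring struct list_head head; members in the hdac_hdmi, sigmadsp and Intel board entries above are embedded list links: list_add_tail() queues the containing object on a list such as hdmi_pcm_list, and list_for_each_entry() recovers the container from the link with container_of(). A minimal standalone sketch of that pattern follows, with stand-in helpers in place of <linux/list.h>; the payload type is hypothetical, not code from any indexed file.

#include <stdio.h>
#include <stddef.h>

/* Minimal stand-ins for <linux/list.h>; only what the sketch needs. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Hypothetical payload; 'head' is the embedded link, like the
 * 'struct list_head head;' fields in the entries above. */
struct pcm_entry {
	int id;
	struct list_head head;
};

int main(void)
{
	struct list_head pcm_list = LIST_HEAD_INIT(pcm_list);
	struct pcm_entry a = { .id = 1 }, b = { .id = 2 };
	struct list_head *pos;

	list_add_tail(&a.head, &pcm_list);
	list_add_tail(&b.head, &pcm_list);

	/* list_for_each_entry(), expanded by hand via container_of() */
	for (pos = pcm_list.next; pos != &pcm_list; pos = pos->next) {
		struct pcm_entry *e = container_of(pos, struct pcm_entry, head);
		printf("pcm id %d\n", e->id);
	}
	return 0;
}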
head 432 sound/usb/mixer.c if (!cval->head.mixer->ignore_ctl_error) head 433 sound/usb/mixer.c usb_audio_dbg(cval->head.mixer->chip, head 450 sound/usb/mixer.c struct snd_usb_audio *chip = cval->head.mixer->chip; head 457 sound/usb/mixer.c if (cval->head.mixer->protocol == UAC_VERSION_1) { head 482 sound/usb/mixer.c idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8); head 518 sound/usb/mixer.c usb_audio_dbg(cval->head.mixer->chip, head 1068 sound/usb/mixer.c struct snd_usb_audio *chip = cval->head.mixer->chip; head 1212 sound/usb/mixer.c usb_audio_err(cval->head.mixer->chip, head 1214 sound/usb/mixer.c cval->head.id, snd_usb_ctrl_intf(cval->head.mixer->chip), head 1215 sound/usb/mixer.c cval->control, cval->head.id); head 1321 sound/usb/mixer.c snd_ctl_notify(cval->head.mixer->chip->card, head 1424 sound/usb/mixer.c struct snd_usb_audio *chip = cval->head.mixer->chip; head 1433 sound/usb/mixer.c idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8); head 1434 sound/usb/mixer.c if (cval->head.mixer->protocol == UAC_VERSION_2) { head 1583 sound/usb/mixer.c snd_usb_mixer_elem_init_std(&cval->head, mixer, unitid); head 1709 sound/usb/mixer.c cval->head.id, kctl->id.name, cval->channels, head 1714 sound/usb/mixer.c cval->head.id, kctl->id.name, cval->channels, head 1716 sound/usb/mixer.c snd_usb_mixer_add_control(&cval->head, kctl); head 1775 sound/usb/mixer.c snd_usb_mixer_elem_init_std(&cval->head, mixer, term->id); head 1808 sound/usb/mixer.c snd_usb_mixer_add_control(&cval->head, kctl); head 1835 sound/usb/mixer.c snd_usb_mixer_elem_init_std(&cval->head, state->mixer, hdr->bClockID); head 1862 sound/usb/mixer.c return snd_usb_mixer_add_control(&cval->head, kctl); head 2050 sound/usb/mixer.c snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid); head 2082 sound/usb/mixer.c cval->head.id, kctl->id.name, cval->channels, cval->min, cval->max); head 2083 sound/usb/mixer.c snd_usb_mixer_add_control(&cval->head, kctl); head 2394 sound/usb/mixer.c snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid); head 2480 sound/usb/mixer.c cval->head.id, kctl->id.name, cval->channels, head 2483 sound/usb/mixer.c err = snd_usb_mixer_add_control(&cval->head, kctl); head 2632 sound/usb/mixer.c snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid); head 2726 sound/usb/mixer.c cval->head.id, kctl->id.name, desc->bNrInPins); head 2727 sound/usb/mixer.c return snd_usb_mixer_add_control(&cval->head, kctl); head 3232 sound/usb/mixer.c "channels=%i, type=\"%s\"\n", cval->head.id, head 3311 sound/usb/mixer.c &info->head.kctl->id); head 77 sound/usb/mixer.h container_of(list, struct usb_mixer_elem_info, head) head 80 sound/usb/mixer.h struct usb_mixer_elem_list head; head 69 sound/usb/mixer_quirks.c snd_usb_mixer_elem_init_std(&cval->head, mixer, unitid); head 103 sound/usb/mixer_quirks.c return snd_usb_mixer_add_control(&cval->head, kctl); head 450 sound/usb/mixer_scarlett.c struct snd_usb_audio *chip = elem->head.mixer->chip; head 453 sound/usb/mixer_scarlett.c int idx = snd_usb_ctrl_intf(chip) | (elem->head.id << 8); head 543 sound/usb/mixer_scarlett.c elem->head.mixer = mixer; head 544 sound/usb/mixer_scarlett.c elem->head.resume = resume; head 547 sound/usb/mixer_scarlett.c elem->head.id = index; head 564 sound/usb/mixer_scarlett.c err = snd_usb_mixer_add_control(&elem->head, kctl); head 952 sound/usb/mixer_scarlett_gen2.c elem->head.mixer = mixer; head 954 sound/usb/mixer_scarlett_gen2.c elem->head.id = index; head 966 sound/usb/mixer_scarlett_gen2.c err = 
snd_usb_mixer_add_control(&elem->head, kctl); head 1028 sound/usb/mixer_scarlett_gen2.c struct usb_mixer_interface *mixer = elem->head.mixer; head 1045 sound/usb/mixer_scarlett_gen2.c struct usb_mixer_interface *mixer = elem->head.mixer; head 1063 sound/usb/mixer_scarlett_gen2.c struct usb_mixer_interface *mixer = elem->head.mixer; head 1130 sound/usb/mixer_scarlett_gen2.c struct scarlett2_mixer_data *private = elem->head.mixer->private_data; head 1141 sound/usb/mixer_scarlett_gen2.c struct usb_mixer_interface *mixer = elem->head.mixer; head 1214 sound/usb/mixer_scarlett_gen2.c struct scarlett2_mixer_data *private = elem->head.mixer->private_data; head 1225 sound/usb/mixer_scarlett_gen2.c struct usb_mixer_interface *mixer = elem->head.mixer; head 1264 sound/usb/mixer_scarlett_gen2.c struct scarlett2_mixer_data *private = elem->head.mixer->private_data; head 1275 sound/usb/mixer_scarlett_gen2.c struct usb_mixer_interface *mixer = elem->head.mixer; head 1314 sound/usb/mixer_scarlett_gen2.c struct usb_mixer_interface *mixer = elem->head.mixer; head 1331 sound/usb/mixer_scarlett_gen2.c struct usb_mixer_interface *mixer = elem->head.mixer; head 1483 sound/usb/mixer_scarlett_gen2.c struct scarlett2_mixer_data *private = elem->head.mixer->private_data; head 1493 sound/usb/mixer_scarlett_gen2.c struct usb_mixer_interface *mixer = elem->head.mixer; head 1569 sound/usb/mixer_scarlett_gen2.c struct scarlett2_mixer_data *private = elem->head.mixer->private_data; head 1601 sound/usb/mixer_scarlett_gen2.c struct scarlett2_mixer_data *private = elem->head.mixer->private_data; head 1611 sound/usb/mixer_scarlett_gen2.c struct usb_mixer_interface *mixer = elem->head.mixer; head 1695 sound/usb/mixer_scarlett_gen2.c err = scarlett2_usb_get_meter_levels(elem->head.mixer, meter_levels); head 196 sound/usb/mixer_us16x08.c struct snd_usb_audio *chip = elem->head.mixer->chip; head 264 sound/usb/mixer_us16x08.c struct snd_usb_audio *chip = elem->head.mixer->chip; head 281 sound/usb/mixer_us16x08.c buf[6] = elem->head.id; head 301 sound/usb/mixer_us16x08.c struct snd_usb_audio *chip = elem->head.mixer->chip; head 308 sound/usb/mixer_us16x08.c switch (elem->head.id) { head 322 sound/usb/mixer_us16x08.c buf[6] = elem->head.id; head 343 sound/usb/mixer_us16x08.c switch (elem->head.id) { head 374 sound/usb/mixer_us16x08.c struct snd_usb_audio *chip = elem->head.mixer->chip; head 391 sound/usb/mixer_us16x08.c buf[6] = elem->head.id; head 423 sound/usb/mixer_us16x08.c int val_idx = COMP_STORE_IDX(elem->head.id); head 434 sound/usb/mixer_us16x08.c struct snd_usb_audio *chip = elem->head.mixer->chip; head 449 sound/usb/mixer_us16x08.c val_idx = elem->head.id - SND_US16X08_ID_COMP_BASE; head 493 sound/usb/mixer_us16x08.c val = store->val[EQ_STORE_BAND_IDX(elem->head.id)] head 494 sound/usb/mixer_us16x08.c [EQ_STORE_PARAM_IDX(elem->head.id)][index]; head 504 sound/usb/mixer_us16x08.c struct snd_usb_audio *chip = elem->head.mixer->chip; head 550 sound/usb/mixer_us16x08.c int b_idx = EQ_STORE_BAND_IDX(elem->head.id) - 1; head 551 sound/usb/mixer_us16x08.c int p_idx = EQ_STORE_PARAM_IDX(elem->head.id); head 564 sound/usb/mixer_us16x08.c struct snd_usb_audio *chip = elem->head.mixer->chip; head 569 sound/usb/mixer_us16x08.c int b_idx = EQ_STORE_BAND_IDX(elem->head.id) - 1; head 570 sound/usb/mixer_us16x08.c int p_idx = EQ_STORE_PARAM_IDX(elem->head.id); head 688 sound/usb/mixer_us16x08.c struct snd_usb_audio *chip = elem->head.mixer->chip; head 1059 sound/usb/mixer_us16x08.c elem->head.mixer = mixer; head 1060 
sound/usb/mixer_us16x08.c elem->head.resume = NULL; head 1063 sound/usb/mixer_us16x08.c elem->head.id = index; head 1081 sound/usb/mixer_us16x08.c err = snd_usb_mixer_add_control(&elem->head, kctl); head 57 sound/usb/usx2y/usx2yhwdeppcm.c int head = usX2Y->hwdep_pcm_shm->captured_iso_head + 1; head 58 sound/usb/usx2y/usx2yhwdeppcm.c if (head >= ARRAY_SIZE(usX2Y->hwdep_pcm_shm->captured_iso)) head 59 sound/usb/usx2y/usx2yhwdeppcm.c head = 0; head 60 sound/usb/usx2y/usx2yhwdeppcm.c usX2Y->hwdep_pcm_shm->capture_iso_start = head; head 61 sound/usb/usx2y/usx2yhwdeppcm.c snd_printdd("cap start %i\n", head); head 145 sound/usb/usx2y/usx2yhwdeppcm.c int head = shm->captured_iso_head + 1; head 146 sound/usb/usx2y/usx2yhwdeppcm.c if (head >= ARRAY_SIZE(shm->captured_iso)) head 147 sound/usb/usx2y/usx2yhwdeppcm.c head = 0; head 148 sound/usb/usx2y/usx2yhwdeppcm.c shm->captured_iso[head].frame = urb->start_frame + pack; head 149 sound/usb/usx2y/usx2yhwdeppcm.c shm->captured_iso[head].offset = desc->offset; head 150 sound/usb/usx2y/usx2yhwdeppcm.c shm->captured_iso[head].length = desc->actual_length; head 151 sound/usb/usx2y/usx2yhwdeppcm.c shm->captured_iso_head = head; head 30 tools/bpf/bpftool/cfg.c struct bpf_insn *head; head 94 tools/bpf/bpftool/cfg.c if (bb->head == insn) head 96 tools/bpf/bpftool/cfg.c else if (bb->head > insn) head 106 tools/bpf/bpftool/cfg.c new_bb->head = insn; head 208 tools/bpf/bpftool/cfg.c bb->tail = bb_next(bb)->head - 1; head 249 tools/bpf/bpftool/cfg.c if (bb->head == insn) head 405 tools/bpf/bpftool/cfg.c start_idx = bb->head - func->start; head 406 tools/bpf/bpftool/cfg.c dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx); head 61 tools/include/linux/list.h static inline void list_add(struct list_head *new, struct list_head *head) head 63 tools/include/linux/list.h __list_add(new, head, head->next); head 75 tools/include/linux/list.h static inline void list_add_tail(struct list_head *new, struct list_head *head) head 77 tools/include/linux/list.h __list_add(new, head->prev, head); head 154 tools/include/linux/list.h static inline void list_move(struct list_head *list, struct list_head *head) head 157 tools/include/linux/list.h list_add(list, head); head 166 tools/include/linux/list.h struct list_head *head) head 169 tools/include/linux/list.h list_add_tail(list, head); head 178 tools/include/linux/list.h const struct list_head *head) head 180 tools/include/linux/list.h return list->next == head; head 187 tools/include/linux/list.h static inline int list_empty(const struct list_head *head) head 189 tools/include/linux/list.h return head->next == head; head 205 tools/include/linux/list.h static inline int list_empty_careful(const struct list_head *head) head 207 tools/include/linux/list.h struct list_head *next = head->next; head 208 tools/include/linux/list.h return (next == head) && (next == head->prev); head 215 tools/include/linux/list.h static inline void list_rotate_left(struct list_head *head) head 219 tools/include/linux/list.h if (!list_empty(head)) { head 220 tools/include/linux/list.h first = head->next; head 221 tools/include/linux/list.h list_move_tail(first, head); head 229 tools/include/linux/list.h static inline int list_is_singular(const struct list_head *head) head 231 tools/include/linux/list.h return !list_empty(head) && (head->next == head->prev); head 235 tools/include/linux/list.h struct list_head *head, struct list_head *entry) head 238 tools/include/linux/list.h list->next = head->next; head 242 tools/include/linux/list.h head->next = 
new_first; head 243 tools/include/linux/list.h new_first->prev = head; head 261 tools/include/linux/list.h struct list_head *head, struct list_head *entry) head 263 tools/include/linux/list.h if (list_empty(head)) head 265 tools/include/linux/list.h if (list_is_singular(head) && head 266 tools/include/linux/list.h (head->next != entry && head != entry)) head 268 tools/include/linux/list.h if (entry == head) head 271 tools/include/linux/list.h __list_cut_position(list, head, entry); head 294 tools/include/linux/list.h struct list_head *head) head 297 tools/include/linux/list.h __list_splice(list, head, head->next); head 306 tools/include/linux/list.h struct list_head *head) head 309 tools/include/linux/list.h __list_splice(list, head->prev, head); head 320 tools/include/linux/list.h struct list_head *head) head 323 tools/include/linux/list.h __list_splice(list, head, head->next); head 337 tools/include/linux/list.h struct list_head *head) head 340 tools/include/linux/list.h __list_splice(list, head->prev, head); head 408 tools/include/linux/list.h #define list_for_each(pos, head) \ head 409 tools/include/linux/list.h for (pos = (head)->next; pos != (head); pos = pos->next) head 416 tools/include/linux/list.h #define list_for_each_prev(pos, head) \ head 417 tools/include/linux/list.h for (pos = (head)->prev; pos != (head); pos = pos->prev) head 425 tools/include/linux/list.h #define list_for_each_safe(pos, n, head) \ head 426 tools/include/linux/list.h for (pos = (head)->next, n = pos->next; pos != (head); \ head 435 tools/include/linux/list.h #define list_for_each_prev_safe(pos, n, head) \ head 436 tools/include/linux/list.h for (pos = (head)->prev, n = pos->prev; \ head 437 tools/include/linux/list.h pos != (head); \ head 446 tools/include/linux/list.h #define list_for_each_entry(pos, head, member) \ head 447 tools/include/linux/list.h for (pos = list_first_entry(head, typeof(*pos), member); \ head 448 tools/include/linux/list.h &pos->member != (head); \ head 457 tools/include/linux/list.h #define list_for_each_entry_reverse(pos, head, member) \ head 458 tools/include/linux/list.h for (pos = list_last_entry(head, typeof(*pos), member); \ head 459 tools/include/linux/list.h &pos->member != (head); \ head 470 tools/include/linux/list.h #define list_prepare_entry(pos, head, member) \ head 471 tools/include/linux/list.h ((pos) ? 
: list_entry(head, typeof(*pos), member)) head 482 tools/include/linux/list.h #define list_for_each_entry_continue(pos, head, member) \ head 484 tools/include/linux/list.h &pos->member != (head); \ head 496 tools/include/linux/list.h #define list_for_each_entry_continue_reverse(pos, head, member) \ head 498 tools/include/linux/list.h &pos->member != (head); \ head 509 tools/include/linux/list.h #define list_for_each_entry_from(pos, head, member) \ head 510 tools/include/linux/list.h for (; &pos->member != (head); \ head 520 tools/include/linux/list.h #define list_for_each_entry_safe(pos, n, head, member) \ head 521 tools/include/linux/list.h for (pos = list_first_entry(head, typeof(*pos), member), \ head 523 tools/include/linux/list.h &pos->member != (head); \ head 536 tools/include/linux/list.h #define list_for_each_entry_safe_continue(pos, n, head, member) \ head 539 tools/include/linux/list.h &pos->member != (head); \ head 552 tools/include/linux/list.h #define list_for_each_entry_safe_from(pos, n, head, member) \ head 554 tools/include/linux/list.h &pos->member != (head); \ head 567 tools/include/linux/list.h #define list_for_each_entry_safe_reverse(pos, n, head, member) \ head 568 tools/include/linux/list.h for (pos = list_last_entry(head, typeof(*pos), member), \ head 570 tools/include/linux/list.h &pos->member != (head); \ head 696 tools/include/linux/list.h #define hlist_for_each(pos, head) \ head 697 tools/include/linux/list.h for (pos = (head)->first; pos ; pos = pos->next) head 699 tools/include/linux/list.h #define hlist_for_each_safe(pos, n, head) \ head 700 tools/include/linux/list.h for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ head 714 tools/include/linux/list.h #define hlist_for_each_entry(pos, head, member) \ head 715 tools/include/linux/list.h for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ head 745 tools/include/linux/list.h #define hlist_for_each_entry_safe(pos, n, head, member) \ head 746 tools/include/linux/list.h for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\ head 769 tools/include/linux/list.h #define list_for_each_from(pos, head) \ head 770 tools/include/linux/list.h for (; pos != (head); pos = pos->next) head 61 tools/include/linux/ring_buffer.h u64 head = READ_ONCE(base->data_head); head 64 tools/include/linux/ring_buffer.h return head; head 38 tools/io_uring/io_uring-bench.c unsigned *head; head 47 tools/io_uring/io_uring-bench.c unsigned *head; head 203 tools/io_uring/io_uring-bench.c if (next_tail == *ring->head) head 248 tools/io_uring/io_uring-bench.c unsigned head, reaped = 0; head 250 tools/io_uring/io_uring-bench.c head = *ring->head; head 255 tools/io_uring/io_uring-bench.c if (head == *ring->tail) head 257 tools/io_uring/io_uring-bench.c cqe = &ring->cqes[head & cq_ring_mask]; head 269 tools/io_uring/io_uring-bench.c head++; head 273 tools/io_uring/io_uring-bench.c *ring->head = head; head 436 tools/io_uring/io_uring-bench.c sring->head = ptr + p.sq_off.head; head 453 tools/io_uring/io_uring-bench.c cring->head = ptr + p.cq_off.head; head 16 tools/io_uring/queue.c unsigned head; head 20 tools/io_uring/queue.c head = *cq->khead; head 30 tools/io_uring/queue.c if (head != *cq->ktail) { head 31 tools/io_uring/queue.c *cqe_ptr = &cq->cqes[head & mask]; head 22 tools/io_uring/setup.c sq->khead = ptr + p->sq_off.head; head 49 tools/io_uring/setup.c cq->khead = ptr + p->cq_off.head; head 103 tools/lib/bpf/nlattr.c int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, head 111 
tools/lib/bpf/nlattr.c libbpf_nla_for_each_attr(nla, head, len, rem) { head 60 tools/lib/bpf/nlattr.h #define libbpf_nla_for_each_attr(pos, head, len, rem) \ head 61 tools/lib/bpf/nlattr.h for (pos = head, rem = len; \ head 98 tools/lib/bpf/nlattr.h int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, head 717 tools/perf/arch/arm/util/cs-etm.c size_t buffer_size, u64 head) head 733 tools/perf/arch/arm/util/cs-etm.c if (head >= buffer_size) head 747 tools/perf/arch/arm/util/cs-etm.c if (head > watermark) head 748 tools/perf/arch/arm/util/cs-etm.c watermark = head; head 770 tools/perf/arch/arm/util/cs-etm.c u64 *head, u64 *old) head 794 tools/perf/arch/arm/util/cs-etm.c if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) { head 800 tools/perf/arch/arm/util/cs-etm.c __func__, idx, (size_t)*old, (size_t)*head, mm->len); head 810 tools/perf/arch/arm/util/cs-etm.c if (*head >= mm->len) { head 811 tools/perf/arch/arm/util/cs-etm.c *old = *head - mm->len; head 813 tools/perf/arch/arm/util/cs-etm.c *head += mm->len; head 814 tools/perf/arch/arm/util/cs-etm.c *old = *head - mm->len; head 361 tools/perf/arch/x86/util/intel-bts.c u64 *head, u64 *old) head 369 tools/perf/arch/x86/util/intel-bts.c __func__, idx, (size_t)*old, (size_t)*head); head 390 tools/perf/arch/x86/util/intel-bts.c *old = *head; head 391 tools/perf/arch/x86/util/intel-bts.c *head += mm->len; head 397 tools/perf/arch/x86/util/intel-bts.c if (*old > *head) head 398 tools/perf/arch/x86/util/intel-bts.c *head += mm->len; head 402 tools/perf/arch/x86/util/intel-bts.c __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head); head 964 tools/perf/arch/x86/util/intel-pt.c void *data, size_t head) head 969 tools/perf/arch/x86/util/intel-pt.c if (head > ref_offset || head < ref_end - buf_size) head 971 tools/perf/arch/x86/util/intel-pt.c } else if (head > ref_offset && head < ref_end) { head 980 tools/perf/arch/x86/util/intel-pt.c void *data, size_t head) head 982 tools/perf/arch/x86/util/intel-pt.c if (head >= ref_size) { head 983 tools/perf/arch/x86/util/intel-pt.c memcpy(ref_buf, data + head - ref_size, ref_size); head 985 tools/perf/arch/x86/util/intel-pt.c memcpy(ref_buf, data, head); head 986 tools/perf/arch/x86/util/intel-pt.c ref_size -= head; head 987 tools/perf/arch/x86/util/intel-pt.c memcpy(ref_buf + head, data + buf_size - ref_size, ref_size); head 993 tools/perf/arch/x86/util/intel-pt.c u64 head) head 1000 tools/perf/arch/x86/util/intel-pt.c data, head); head 1003 tools/perf/arch/x86/util/intel-pt.c data, head); head 1027 tools/perf/arch/x86/util/intel-pt.c u64 *head, u64 *old) head 1035 tools/perf/arch/x86/util/intel-pt.c __func__, idx, (size_t)*old, (size_t)*head); head 1053 tools/perf/arch/x86/util/intel-pt.c wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head); head 1069 tools/perf/arch/x86/util/intel-pt.c *old = *head; head 1070 tools/perf/arch/x86/util/intel-pt.c *head += mm->len; head 1076 tools/perf/arch/x86/util/intel-pt.c if (*old > *head) head 1077 tools/perf/arch/x86/util/intel-pt.c *head += mm->len; head 1081 tools/perf/arch/x86/util/intel-pt.c __func__, wrapped ? 
"" : "not ", (size_t)*old, (size_t)*head); head 495 tools/perf/builtin-diff.c list_for_each_entry(pair, &he->pairs.head, pairs.node) head 425 tools/perf/builtin-ftrace.c struct list_head *head = opt->value; head 433 tools/perf/builtin-ftrace.c list_add_tail(&entry->list, head); head 438 tools/perf/builtin-ftrace.c static void delete_filter_func(struct list_head *head) head 442 tools/perf/builtin-ftrace.c list_for_each_entry_safe(pos, tmp, head, list) { head 159 tools/perf/builtin-kvm.c struct list_head *head; head 165 tools/perf/builtin-kvm.c head = &kvm_events_cache[i]; head 166 tools/perf/builtin-kvm.c list_for_each_entry(event, head, hash_entry) { head 229 tools/perf/builtin-kvm.c struct list_head *head; head 233 tools/perf/builtin-kvm.c head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)]; head 234 tools/perf/builtin-kvm.c list_for_each_entry(event, head, hash_entry) { head 243 tools/perf/builtin-kvm.c list_add(&event->hash_entry, head); head 105 tools/perf/ui/browser.c struct list_head *head = browser->entries; head 113 tools/perf/ui/browser.c pos = ui_browser__list_head_filter_entries(browser, head->next); head 119 tools/perf/ui/browser.c pos = ui_browser__list_head_filter_prev_entries(browser, head->prev); head 501 tools/perf/ui/browser.c struct list_head *head = browser->entries; head 505 tools/perf/ui/browser.c browser->top = ui_browser__list_head_filter_entries(browser, head->next); head 509 tools/perf/ui/browser.c list_for_each_from(pos, head) { head 53 tools/perf/ui/hist.c list_for_each_entry(pair, &he->pairs.head, pairs.node) { head 178 tools/perf/ui/hist.c list_for_each_entry(pair, &a->pairs.head, pairs.node) { head 183 tools/perf/ui/hist.c list_for_each_entry(pair, &b->pairs.head, pairs.node) { head 1256 tools/perf/util/annotate.c static void annotation_line__add(struct annotation_line *al, struct list_head *head) head 1258 tools/perf/util/annotate.c list_add_tail(&al->node, head); head 1262 tools/perf/util/annotate.c annotation_line__next(struct annotation_line *pos, struct list_head *head) head 1264 tools/perf/util/annotate.c list_for_each_entry_continue(pos, head, node) head 2511 tools/perf/util/annotate.c size_t disasm__fprintf(struct list_head *head, FILE *fp) head 2516 tools/perf/util/annotate.c list_for_each_entry(pos, head, al.node) head 200 tools/perf/util/annotate.h annotation_line__next(struct annotation_line *pos, struct list_head *head); head 223 tools/perf/util/annotate.h size_t disasm__fprintf(struct list_head *head, FILE *fp); head 163 tools/perf/util/auxtrace.c INIT_LIST_HEAD(&queue_array[i].head); head 200 tools/perf/util/auxtrace.c list_splice_tail(&queues->queue_array[i].head, head 201 tools/perf/util/auxtrace.c &queue_array[i].head); head 263 tools/perf/util/auxtrace.c list_add_tail(&buffer->list, &queue->head); head 409 tools/perf/util/auxtrace.c while (!list_empty(&queues->queue_array[i].head)) { head 412 tools/perf/util/auxtrace.c buffer = list_entry(queues->queue_array[i].head.next, head 554 tools/perf/util/auxtrace.c unsigned char *data, u64 *head, u64 *old) head 557 tools/perf/util/auxtrace.c return itr->find_snapshot(itr, idx, mm, data, head, old); head 607 tools/perf/util/auxtrace.c static int auxtrace_index__alloc(struct list_head *head) head 618 tools/perf/util/auxtrace.c list_add_tail(&auxtrace_index->list, head); head 623 tools/perf/util/auxtrace.c void auxtrace_index__free(struct list_head *head) head 627 tools/perf/util/auxtrace.c list_for_each_entry_safe(auxtrace_index, n, head, list) { head 633 tools/perf/util/auxtrace.c static 
struct auxtrace_index *auxtrace_index__last(struct list_head *head) head 638 tools/perf/util/auxtrace.c if (list_empty(head)) { head 639 tools/perf/util/auxtrace.c err = auxtrace_index__alloc(head); head 644 tools/perf/util/auxtrace.c auxtrace_index = list_entry(head->prev, struct auxtrace_index, list); head 647 tools/perf/util/auxtrace.c err = auxtrace_index__alloc(head); head 650 tools/perf/util/auxtrace.c auxtrace_index = list_entry(head->prev, struct auxtrace_index, head 657 tools/perf/util/auxtrace.c int auxtrace_index__auxtrace_event(struct list_head *head, head 663 tools/perf/util/auxtrace.c auxtrace_index = auxtrace_index__last(head); head 690 tools/perf/util/auxtrace.c int auxtrace_index__write(int fd, struct list_head *head) head 696 tools/perf/util/auxtrace.c list_for_each_entry(auxtrace_index, head, list) head 702 tools/perf/util/auxtrace.c list_for_each_entry(auxtrace_index, head, list) { head 711 tools/perf/util/auxtrace.c static int auxtrace_index__process_entry(int fd, struct list_head *head, head 721 tools/perf/util/auxtrace.c auxtrace_index = auxtrace_index__last(head); head 743 tools/perf/util/auxtrace.c struct list_head *head = &session->auxtrace_index; head 758 tools/perf/util/auxtrace.c err = auxtrace_index__process_entry(fd, head, needs_swap); head 802 tools/perf/util/auxtrace.c if (list_is_last(&buffer->list, &queue->head)) head 807 tools/perf/util/auxtrace.c if (list_empty(&queue->head)) head 809 tools/perf/util/auxtrace.c return list_entry(queue->head.next, struct auxtrace_buffer, head 1237 tools/perf/util/auxtrace.c u64 head, old = mm->prev, offset, ref; head 1244 tools/perf/util/auxtrace.c head = auxtrace_mmap__read_snapshot_head(mm); head 1246 tools/perf/util/auxtrace.c &head, &old)) head 1249 tools/perf/util/auxtrace.c head = auxtrace_mmap__read_head(mm); head 1252 tools/perf/util/auxtrace.c if (old == head) head 1256 tools/perf/util/auxtrace.c mm->idx, old, head, head - old); head 1259 tools/perf/util/auxtrace.c head_off = head & mm->mask; head 1262 tools/perf/util/auxtrace.c head_off = head % mm->len; head 1276 tools/perf/util/auxtrace.c if (head > old || size <= head || mm->mask) { head 1277 tools/perf/util/auxtrace.c offset = head - size; head 1286 tools/perf/util/auxtrace.c offset = head - size - rem; head 1326 tools/perf/util/auxtrace.c mm->prev = head; head 1329 tools/perf/util/auxtrace.c auxtrace_mmap__write_tail(mm, head); head 1506 tools/perf/util/auxtrace.c list_add_tail(&filt->list, &filts->head); head 1519 tools/perf/util/auxtrace.c INIT_LIST_HEAD(&filts->head); head 1527 tools/perf/util/auxtrace.c list_for_each_entry_safe(filt, n, &filts->head, list) { head 2124 tools/perf/util/auxtrace.c list_for_each_entry(filt, &filts.head, list) { head 213 tools/perf/util/auxtrace.h struct list_head head; head 332 tools/perf/util/auxtrace.h u64 *head, u64 *old); head 379 tools/perf/util/auxtrace.h struct list_head head; head 396 tools/perf/util/auxtrace.h u64 head = READ_ONCE(pc->aux_head); head 400 tools/perf/util/auxtrace.h return head; head 407 tools/perf/util/auxtrace.h u64 head = READ_ONCE(pc->aux_head); head 409 tools/perf/util/auxtrace.h u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0); head 414 tools/perf/util/auxtrace.h return head; head 514 tools/perf/util/auxtrace.h unsigned char *data, u64 *head, u64 *old); head 517 tools/perf/util/auxtrace.h int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event, head 519 tools/perf/util/auxtrace.h int auxtrace_index__write(int fd, struct list_head *head); head 522 
tools/perf/util/auxtrace.h void auxtrace_index__free(struct list_head *head); head 683 tools/perf/util/auxtrace.h struct list_head *head __maybe_unused) head 698 tools/perf/util/auxtrace.h void auxtrace_index__free(struct list_head *head __maybe_unused) head 120 tools/perf/util/block-range.c struct block_range *head = malloc(sizeof(struct block_range)); head 121 tools/perf/util/block-range.c if (!head) head 124 tools/perf/util/block-range.c *head = (struct block_range){ head 131 tools/perf/util/block-range.c rb_link_left_of_node(&head->node, &next->node); head 132 tools/perf/util/block-range.c rb_insert_color(&head->node, &block_ranges.root); head 135 tools/perf/util/block-range.c iter.start = head; head 167 tools/perf/util/block-range.c struct block_range *head = malloc(sizeof(struct block_range)); head 168 tools/perf/util/block-range.c if (!head) head 171 tools/perf/util/block-range.c *head = (struct block_range){ head 185 tools/perf/util/block-range.c rb_link_left_of_node(&head->node, &entry->node); head 186 tools/perf/util/block-range.c rb_insert_color(&head->node, &block_ranges.root); head 290 tools/perf/util/build-id.c #define dsos__for_each_with_build_id(pos, head) \ head 291 tools/perf/util/build-id.c list_for_each_entry(pos, head, node) \ head 332 tools/perf/util/build-id.c dsos__for_each_with_build_id(pos, &machine->dsos.head) { head 382 tools/perf/util/build-id.c static int __dsos__hit_all(struct list_head *head) head 386 tools/perf/util/build-id.c list_for_each_entry(pos, head, node) head 394 tools/perf/util/build-id.c return __dsos__hit_all(&machine->dsos.head); head 829 tools/perf/util/build-id.c static int __dsos__cache_build_ids(struct list_head *head, head 835 tools/perf/util/build-id.c dsos__for_each_with_build_id(pos, head) head 844 tools/perf/util/build-id.c return __dsos__cache_build_ids(&machine->dsos.head, machine); head 870 tools/perf/util/build-id.c return __dsos__read_build_ids(&machine->dsos.head, with_hits); head 1523 tools/perf/util/callchain.c LIST_HEAD(head); head 1533 tools/perf/util/callchain.c list_add_tail(&new->list, &head); head 1538 tools/perf/util/callchain.c list_for_each_entry_safe_reverse(chain, new, &head, list) head 1551 tools/perf/util/callchain.c list_for_each_entry_safe(chain, new, &head, list) { head 101 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c packet_queue->head = (packet_queue->head + 1) & head 104 tools/perf/util/cs-etm-decoder/cs-etm-decoder.c *packet = packet_queue->packet_buffer[packet_queue->head]; head 196 tools/perf/util/cs-etm.c queue->head = 0; head 748 tools/perf/util/cs-etm.c if (list_empty(&queue->head) || etmq) head 153 tools/perf/util/cs-etm.h u32 head; head 12 tools/perf/util/dsos.c bool __dsos__read_build_ids(struct list_head *head, bool with_hits) head 18 tools/perf/util/dsos.c list_for_each_entry(pos, head, node) { head 92 tools/perf/util/dsos.c list_add_tail(&dso->node, &dsos->head); head 129 tools/perf/util/dsos.c list_for_each_entry(pos, &dsos->head, node) head 207 tools/perf/util/dsos.c size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp, head 213 tools/perf/util/dsos.c list_for_each_entry(pos, head, node) { head 222 tools/perf/util/dsos.c size_t __dsos__fprintf(struct list_head *head, FILE *fp) head 227 tools/perf/util/dsos.c list_for_each_entry(pos, head, node) { head 18 tools/perf/util/dsos.h struct list_head head; head 38 tools/perf/util/dsos.h bool __dsos__read_build_ids(struct list_head *head, bool with_hits); head 40 tools/perf/util/dsos.h size_t __dsos__fprintf_buildid(struct list_head *head, 
FILE *fp, head 42 tools/perf/util/dsos.h size_t __dsos__fprintf(struct list_head *head, FILE *fp); head 242 tools/perf/util/evlist.c LIST_HEAD(head); head 249 tools/perf/util/evlist.c list_add_tail(&evsel->core.node, &head); head 252 tools/perf/util/evlist.c perf_evlist__splice_list_tail(evlist, &head); head 257 tools/perf/util/evlist.c __evlist__for_each_entry_safe(&head, n, evsel) head 444 tools/perf/util/evlist.c struct hlist_head *head; head 449 tools/perf/util/evlist.c head = &evlist->core.heads[hash]; head 451 tools/perf/util/evlist.c hlist_for_each_entry(sid, head, node) head 515 tools/perf/util/evlist.c struct hlist_head *head; head 535 tools/perf/util/evlist.c head = &evlist->core.heads[hash]; head 537 tools/perf/util/evlist.c hlist_for_each_entry(sid, head, node) { head 2485 tools/perf/util/hist.c list_for_each_entry(leader, &pos->pairs.head, pairs.node) { head 172 tools/perf/util/intel-bts.c if (list_empty(&queue->head)) head 254 tools/perf/util/intel-bts.c if (b->list.prev == &queue->head) head 652 tools/perf/util/intel-pt.c list_for_each_entry(filt, &pt->filts.head, list) { head 1044 tools/perf/util/intel-pt.c if (list_empty(&queue->head)) head 47 tools/perf/util/machine.c INIT_LIST_HEAD(&dsos->head); head 172 tools/perf/util/machine.c list_for_each_entry_safe(pos, n, &dsos->head, node) { head 806 tools/perf/util/machine.c size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp); head 810 tools/perf/util/machine.c ret += __dsos__fprintf(&pos->dsos.head, fp); head 819 tools/perf/util/machine.c return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm); head 1503 tools/perf/util/machine.c list_for_each_entry(dso, &machine->dsos.head, node) { head 1576 tools/perf/util/machine.c list_for_each_entry(dso, &machine->dsos.head, node) { head 73 tools/perf/util/metricgroup.c INIT_LIST_HEAD(&me->head); head 195 tools/perf/util/metricgroup.c list_add(&expr->nd, &me->head); head 16 tools/perf/util/metricgroup.h struct list_head head; /* list of metric_expr */ head 446 tools/perf/util/mmap.c u64 head = perf_mmap__read_head(md); head 451 tools/perf/util/mmap.c md->core.start = md->core.overwrite ? head : old; head 452 tools/perf/util/mmap.c md->core.end = md->core.overwrite ? 
old : head; head 462 tools/perf/util/mmap.c md->core.prev = head; head 492 tools/perf/util/mmap.c u64 head = perf_mmap__read_head(md); head 524 tools/perf/util/mmap.c md->core.prev = head; head 225 tools/perf/util/ordered-events.c struct list_head *head = &oe->events; head 238 tools/perf/util/ordered-events.c list_for_each_entry_safe(iter, tmp, head, list) { head 255 tools/perf/util/ordered-events.c if (list_empty(head)) head 258 tools/perf/util/ordered-events.c oe->last = list_entry(head->prev, struct ordered_event, list); head 294 tools/perf/util/ordered-events.c struct list_head *head = &oe->events; head 296 tools/perf/util/ordered-events.c first = list_entry(head->next, struct ordered_event, list); head 300 tools/perf/util/ordered-events.c if (WARN_ONCE(!last || list_empty(head), "empty queue")) head 402 tools/perf/util/parse-events.c struct list_head *head, head 1156 tools/perf/util/parse-events.c struct list_head *head, head 1162 tools/perf/util/parse-events.c list_for_each_entry(term, head, list) head 1399 tools/perf/util/parse-events.c struct list_head *head; head 1416 tools/perf/util/parse-events.c head = malloc(sizeof(struct list_head)); head 1417 tools/perf/util/parse-events.c if (!head) head 1419 tools/perf/util/parse-events.c INIT_LIST_HEAD(head); head 1423 tools/perf/util/parse-events.c list_add_tail(&term->list, head); head 1426 tools/perf/util/parse-events.c pmu->name, head, head 1433 tools/perf/util/parse-events.c parse_events_terms__delete(head); head 75 tools/perf/util/parse-events.y %type <head> event_config head 76 tools/perf/util/parse-events.y %type <head> opt_event_config head 77 tools/perf/util/parse-events.y %type <head> opt_pmu_config head 79 tools/perf/util/parse-events.y %type <head> event_pmu head 80 tools/perf/util/parse-events.y %type <head> event_legacy_symbol head 81 tools/perf/util/parse-events.y %type <head> event_legacy_cache head 82 tools/perf/util/parse-events.y %type <head> event_legacy_mem head 83 tools/perf/util/parse-events.y %type <head> event_legacy_tracepoint head 85 tools/perf/util/parse-events.y %type <head> event_legacy_numeric head 86 tools/perf/util/parse-events.y %type <head> event_legacy_raw head 87 tools/perf/util/parse-events.y %type <head> event_bpf_file head 88 tools/perf/util/parse-events.y %type <head> event_def head 89 tools/perf/util/parse-events.y %type <head> event_mod head 90 tools/perf/util/parse-events.y %type <head> event_name head 91 tools/perf/util/parse-events.y %type <head> event head 92 tools/perf/util/parse-events.y %type <head> events head 93 tools/perf/util/parse-events.y %type <head> group_def head 94 tools/perf/util/parse-events.y %type <head> group head 95 tools/perf/util/parse-events.y %type <head> groups head 104 tools/perf/util/parse-events.y struct list_head *head; head 534 tools/perf/util/parse-events.y struct list_head *head = $1; head 537 tools/perf/util/parse-events.y ABORT_ON(!head); head 538 tools/perf/util/parse-events.y list_add_tail(&term->list, head); head 544 tools/perf/util/parse-events.y struct list_head *head = malloc(sizeof(*head)); head 547 tools/perf/util/parse-events.y ABORT_ON(!head); head 548 tools/perf/util/parse-events.y INIT_LIST_HEAD(head); head 549 tools/perf/util/parse-events.y list_add_tail(&term->list, head); head 550 tools/perf/util/parse-events.y $$ = head; head 44 tools/perf/util/pmu.c int perf_pmu__format_parse(char *dir, struct list_head *head) head 70 tools/perf/util/pmu.c ret = perf_pmu_parse(head, name); head 427 tools/perf/util/pmu.c static int pmu_aliases_parse(char *dir, 
struct list_head *head) head 458 tools/perf/util/pmu.c if (perf_pmu__new_alias(head, dir, name, file) < 0) head 471 tools/perf/util/pmu.c static int pmu_aliases(const char *name, struct list_head *head) head 486 tools/perf/util/pmu.c if (pmu_aliases_parse(path, head)) head 751 tools/perf/util/pmu.c static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu) head 785 tools/perf/util/pmu.c __perf_pmu__new_alias(head, NULL, (char *)pe->name, head 83 tools/perf/util/pmu.h int perf_pmu__format_parse(char *dir, struct list_head *head); head 804 tools/perf/util/s390-cpumsf.c if (list_empty(&queue->head)) head 49 tools/perf/util/session.c decomp_last_rem = decomp_last->size - decomp_last->head; head 63 tools/perf/util/session.c decomp->head = 0; head 66 tools/perf/util/session.c memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem); head 1861 tools/perf/util/session.c u64 head; head 1867 tools/perf/util/session.c head = 0; head 1920 tools/perf/util/session.c if ((skip = perf_session__process_event(session, event, head)) < 0) { head 1922 tools/perf/util/session.c head, event->header.size, event->header.type); head 1927 tools/perf/util/session.c head += size; head 1930 tools/perf/util/session.c head += skip; head 1957 tools/perf/util/session.c prefetch_event(char *buf, u64 head, size_t mmap_size, head 1966 tools/perf/util/session.c if (head + sizeof(event->header) > mmap_size) head 1969 tools/perf/util/session.c event = (union perf_event *)(buf + head); head 1973 tools/perf/util/session.c if (head + event->header.size <= mmap_size) head 1981 tools/perf/util/session.c " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size); head 1987 tools/perf/util/session.c fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap) head 1989 tools/perf/util/session.c return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL)); head 1993 tools/perf/util/session.c fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap) head 1995 tools/perf/util/session.c return prefetch_event(buf, head, mmap_size, needs_swap, NULL); head 2007 tools/perf/util/session.c while (decomp->head < decomp->size && !session_done()) { head 2008 tools/perf/util/session.c union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data, head 2019 tools/perf/util/session.c decomp->file_pos + decomp->head, event->header.size, event->header.type); head 2026 tools/perf/util/session.c decomp->head += size; head 2062 tools/perf/util/session.c u64 head, page_offset, file_offset, file_pos, size; head 2071 tools/perf/util/session.c head = rd->data_offset - page_offset; head 2102 tools/perf/util/session.c file_pos = file_offset + head; head 2109 tools/perf/util/session.c event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap); head 2119 tools/perf/util/session.c page_offset = page_size * (head / page_size); head 2121 tools/perf/util/session.c head -= page_offset; head 2132 tools/perf/util/session.c file_offset + head, event->header.size, head 2141 tools/perf/util/session.c head += size; head 50 tools/perf/util/session.h u64 head; head 92 tools/perf/util/sort.h struct list_head head; head 178 tools/perf/util/sort.h list_add_tail(&pair->pairs.node, &he->pairs.head); head 1048 tools/perf/util/stat-shadow.c list_for_each_entry (mexp, &me->head, nd) { head 2231 tools/perf/util/symbol-elf.c int get_sdt_note_list(struct list_head *head, const char *target) head 2245 tools/perf/util/symbol-elf.c ret = 
construct_sdt_notes_list(elf, head); head 263 tools/perf/util/symbol.h int get_sdt_note_list(struct list_head *head, const char *target); head 42 tools/power/acpi/tools/acpidbg/acpidbg.c (CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE)) head 44 tools/power/acpi/tools/acpidbg/acpidbg.c (CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE)) head 46 tools/power/acpi/tools/acpidbg/acpidbg.c (CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE)) head 48 tools/power/acpi/tools/acpidbg/acpidbg.c (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE)) head 77 tools/power/acpi/tools/acpidbg/acpidbg.c .head = 0, head 82 tools/power/acpi/tools/acpidbg/acpidbg.c .head = 0, head 128 tools/power/acpi/tools/acpidbg/acpidbg.c p = &crc->buf[crc->head]; head 134 tools/power/acpi/tools/acpidbg/acpidbg.c crc->head = (crc->head + len) & (ACPI_AML_BUF_SIZE - 1); head 144 tools/power/acpi/tools/acpidbg/acpidbg.c p = &crc->buf[crc->head]; head 155 tools/power/acpi/tools/acpidbg/acpidbg.c crc->head = (crc->head + len) & (ACPI_AML_BUF_SIZE - 1); head 165 tools/power/acpi/tools/acpidbg/acpidbg.c p = &crc->buf[crc->head]; head 171 tools/power/acpi/tools/acpidbg/acpidbg.c crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1); head 187 tools/power/acpi/tools/acpidbg/acpidbg.c crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1); head 198 tools/power/acpi/tools/acpidbg/acpidbg.c crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1); head 211 tools/power/acpi/tools/acpidbg/acpidbg.c crc->head = (crc->head + 1) & (ACPI_AML_BUF_SIZE - 1); head 10 tools/testing/radix-tree/linux/radix-tree.h static inline void trace_call_rcu(struct rcu_head *head, head 11 tools/testing/radix-tree/linux/radix-tree.h void (*func)(struct rcu_head *head)) head 14 tools/testing/radix-tree/linux/radix-tree.h printf("Delaying free of %p to slab\n", (char *)head - head 16 tools/testing/radix-tree/linux/radix-tree.h call_rcu(head, func); head 72 tools/testing/radix-tree/test.c static void item_free_rcu(struct rcu_head *head) head 74 tools/testing/radix-tree/test.c struct item *item = container_of(head, struct item, rcu_head); head 141 tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h void (*func)(struct callback_head *head); head 145 tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h typedef void (*rcu_callback_t)(struct rcu_head *head); head 146 tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); head 9 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c void wakeme_after_rcu(struct rcu_head *head) head 40 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h struct rcu_head head; head 44 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h void wakeme_after_rcu(struct rcu_head *head); head 40 tools/testing/selftests/rseq/basic_percpu_ops_test.c struct percpu_list_node *head; head 147 tools/testing/selftests/rseq/basic_percpu_ops_test.c expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head); head 149 tools/testing/selftests/rseq/basic_percpu_ops_test.c targetptr = (intptr_t *)&list->c[cpu].head; head 169 tools/testing/selftests/rseq/basic_percpu_ops_test.c struct percpu_list_node *head; head 175 tools/testing/selftests/rseq/basic_percpu_ops_test.c targetptr = (intptr_t *)&list->c[cpu].head; head 178 tools/testing/selftests/rseq/basic_percpu_ops_test.c load = (intptr_t *)&head; head 184 tools/testing/selftests/rseq/basic_percpu_ops_test.c return 
head; head 200 tools/testing/selftests/rseq/basic_percpu_ops_test.c node = list->c[cpu].head; head 203 tools/testing/selftests/rseq/basic_percpu_ops_test.c list->c[cpu].head = node->next; head 260 tools/testing/selftests/rseq/basic_percpu_ops_test.c node->next = list.c[i].head; head 261 tools/testing/selftests/rseq/basic_percpu_ops_test.c list.c[i].head = node; head 285 tools/testing/selftests/rseq/param_test.c struct percpu_list_node *head; head 525 tools/testing/selftests/rseq/param_test.c expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head); head 527 tools/testing/selftests/rseq/param_test.c targetptr = (intptr_t *)&list->c[cpu].head; head 550 tools/testing/selftests/rseq/param_test.c struct percpu_list_node *head; head 556 tools/testing/selftests/rseq/param_test.c targetptr = (intptr_t *)&list->c[cpu].head; head 559 tools/testing/selftests/rseq/param_test.c load = (intptr_t *)&head; head 563 tools/testing/selftests/rseq/param_test.c node = head; head 583 tools/testing/selftests/rseq/param_test.c node = list->c[cpu].head; head 586 tools/testing/selftests/rseq/param_test.c list->c[cpu].head = node->next; head 642 tools/testing/selftests/rseq/param_test.c node->next = list.c[i].head; head 643 tools/testing/selftests/rseq/param_test.c list.c[i].head = node; head 729 tools/testing/selftests/rseq/param_test.c struct percpu_buffer_node *head; head 741 tools/testing/selftests/rseq/param_test.c head = NULL; head 744 tools/testing/selftests/rseq/param_test.c head = RSEQ_READ_ONCE(buffer->c[cpu].array[offset - 1]); head 749 tools/testing/selftests/rseq/param_test.c (intptr_t)head, newval, cpu); head 756 tools/testing/selftests/rseq/param_test.c return head; head 766 tools/testing/selftests/rseq/param_test.c struct percpu_buffer_node *head; head 772 tools/testing/selftests/rseq/param_test.c head = buffer->c[cpu].array[offset - 1]; head 774 tools/testing/selftests/rseq/param_test.c return head; head 45 tools/testing/selftests/timers/clocksource-switch.c char *head, *tmp; head 56 tools/testing/selftests/timers/clocksource-switch.c head = buf; head 58 tools/testing/selftests/timers/clocksource-switch.c while (head - buf < size) { head 60 tools/testing/selftests/timers/clocksource-switch.c for (tmp = head; *tmp != ' '; tmp++) { head 67 tools/testing/selftests/timers/clocksource-switch.c strcpy(list[i], head); head 68 tools/testing/selftests/timers/clocksource-switch.c head = tmp + 1; head 114 tools/testing/vsock/vsock_diag_test.c static void print_vsock_stats(FILE *fp, struct list_head *head) head 118 tools/testing/vsock/vsock_diag_test.c list_for_each_entry(st, head, list) head 122 tools/testing/vsock/vsock_diag_test.c static struct vsock_stat *find_vsock_stat(struct list_head *head, int fd) head 132 tools/testing/vsock/vsock_diag_test.c list_for_each_entry(st, head, list) head 140 tools/testing/vsock/vsock_diag_test.c static void check_no_sockets(struct list_head *head) head 142 tools/testing/vsock/vsock_diag_test.c if (!list_empty(head)) { head 144 tools/testing/vsock/vsock_diag_test.c print_vsock_stats(stderr, head); head 149 tools/testing/vsock/vsock_diag_test.c static void check_num_sockets(struct list_head *head, int expected) head 154 tools/testing/vsock/vsock_diag_test.c list_for_each(node, head) head 160 tools/testing/vsock/vsock_diag_test.c print_vsock_stats(stderr, head); head 58 tools/usb/usbip/libsrc/list.h static inline void list_add(struct list_head *new, struct list_head *head) head 60 tools/usb/usbip/libsrc/list.h __list_add(new, head, head->next); head 111 
head 40 tools/testing/selftests/rseq/basic_percpu_ops_test.c struct percpu_list_node *head;
head 147 tools/testing/selftests/rseq/basic_percpu_ops_test.c expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
head 149 tools/testing/selftests/rseq/basic_percpu_ops_test.c targetptr = (intptr_t *)&list->c[cpu].head;
head 169 tools/testing/selftests/rseq/basic_percpu_ops_test.c struct percpu_list_node *head;
head 175 tools/testing/selftests/rseq/basic_percpu_ops_test.c targetptr = (intptr_t *)&list->c[cpu].head;
head 178 tools/testing/selftests/rseq/basic_percpu_ops_test.c load = (intptr_t *)&head;
head 184 tools/testing/selftests/rseq/basic_percpu_ops_test.c return head;
head 200 tools/testing/selftests/rseq/basic_percpu_ops_test.c node = list->c[cpu].head;
head 203 tools/testing/selftests/rseq/basic_percpu_ops_test.c list->c[cpu].head = node->next;
head 260 tools/testing/selftests/rseq/basic_percpu_ops_test.c node->next = list.c[i].head;
head 261 tools/testing/selftests/rseq/basic_percpu_ops_test.c list.c[i].head = node;
head 285 tools/testing/selftests/rseq/param_test.c struct percpu_list_node *head;
head 525 tools/testing/selftests/rseq/param_test.c expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
head 527 tools/testing/selftests/rseq/param_test.c targetptr = (intptr_t *)&list->c[cpu].head;
head 550 tools/testing/selftests/rseq/param_test.c struct percpu_list_node *head;
head 556 tools/testing/selftests/rseq/param_test.c targetptr = (intptr_t *)&list->c[cpu].head;
head 559 tools/testing/selftests/rseq/param_test.c load = (intptr_t *)&head;
head 563 tools/testing/selftests/rseq/param_test.c node = head;
head 583 tools/testing/selftests/rseq/param_test.c node = list->c[cpu].head;
head 586 tools/testing/selftests/rseq/param_test.c list->c[cpu].head = node->next;
head 642 tools/testing/selftests/rseq/param_test.c node->next = list.c[i].head;
head 643 tools/testing/selftests/rseq/param_test.c list.c[i].head = node;
head 729 tools/testing/selftests/rseq/param_test.c struct percpu_buffer_node *head;
head 741 tools/testing/selftests/rseq/param_test.c head = NULL;
head 744 tools/testing/selftests/rseq/param_test.c head = RSEQ_READ_ONCE(buffer->c[cpu].array[offset - 1]);
head 749 tools/testing/selftests/rseq/param_test.c (intptr_t)head, newval, cpu);
head 756 tools/testing/selftests/rseq/param_test.c return head;
head 766 tools/testing/selftests/rseq/param_test.c struct percpu_buffer_node *head;
head 772 tools/testing/selftests/rseq/param_test.c head = buffer->c[cpu].array[offset - 1];
head 774 tools/testing/selftests/rseq/param_test.c return head;
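
The rseq selftest hits above implement a per-CPU linked-list push: read the CPU's current head, link the new node in front of it, then publish the node as the new head only if the head has not changed in the meantime. The real tests do the final store inside an rseq critical section (rseq_cmpeqv_storev()), which aborts on preemption or migration; the sketch below substitutes a GCC atomic compare-and-swap as a portable stand-in, and the type names and CPU count are illustrative:

#include <stdbool.h>
#include <stdint.h>

struct percpu_list_node {
	struct percpu_list_node *next;
	intptr_t data;
};

struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));        /* avoid false sharing between CPUs */

struct percpu_list {
	struct percpu_list_entry c[64];  /* illustrative CPU count */
};

static bool percpu_list_push(struct percpu_list *list,
			     struct percpu_list_node *node, int cpu)
{
	struct percpu_list_node *expect =
		__atomic_load_n(&list->c[cpu].head, __ATOMIC_RELAXED);

	/* link the new node in front of the head we observed */
	node->next = expect;
	/* publish node as the new head iff nobody raced with us */
	return __atomic_compare_exchange_n(&list->c[cpu].head, &expect, node,
					   false, __ATOMIC_RELEASE,
					   __ATOMIC_RELAXED);
}

A caller retries on failure, just as the originals retry on rseq abort; because each CPU owns its own head, contention is confined to migrations and preemption.
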
head 45 tools/testing/selftests/timers/clocksource-switch.c char *head, *tmp;
head 56 tools/testing/selftests/timers/clocksource-switch.c head = buf;
head 58 tools/testing/selftests/timers/clocksource-switch.c while (head - buf < size) {
head 60 tools/testing/selftests/timers/clocksource-switch.c for (tmp = head; *tmp != ' '; tmp++) {
head 67 tools/testing/selftests/timers/clocksource-switch.c strcpy(list[i], head);
head 68 tools/testing/selftests/timers/clocksource-switch.c head = tmp + 1;
head 114 tools/testing/vsock/vsock_diag_test.c static void print_vsock_stats(FILE *fp, struct list_head *head)
head 118 tools/testing/vsock/vsock_diag_test.c list_for_each_entry(st, head, list)
head 122 tools/testing/vsock/vsock_diag_test.c static struct vsock_stat *find_vsock_stat(struct list_head *head, int fd)
head 132 tools/testing/vsock/vsock_diag_test.c list_for_each_entry(st, head, list)
head 140 tools/testing/vsock/vsock_diag_test.c static void check_no_sockets(struct list_head *head)
head 142 tools/testing/vsock/vsock_diag_test.c if (!list_empty(head)) {
head 144 tools/testing/vsock/vsock_diag_test.c print_vsock_stats(stderr, head);
head 149 tools/testing/vsock/vsock_diag_test.c static void check_num_sockets(struct list_head *head, int expected)
head 154 tools/testing/vsock/vsock_diag_test.c list_for_each(node, head)
head 160 tools/testing/vsock/vsock_diag_test.c print_vsock_stats(stderr, head);
head 58 tools/usb/usbip/libsrc/list.h static inline void list_add(struct list_head *new, struct list_head *head)
head 60 tools/usb/usbip/libsrc/list.h __list_add(new, head, head->next);
head 111 tools/usb/usbip/libsrc/list.h #define list_for_each(pos, head) \
head 112 tools/usb/usbip/libsrc/list.h for (pos = (head)->next; pos != (head); pos = pos->next)
head 120 tools/usb/usbip/libsrc/list.h #define list_for_each_safe(pos, n, head) \
head 121 tools/usb/usbip/libsrc/list.h for (pos = (head)->next, n = pos->next; pos != (head); \
head 114 tools/virtio/ringtest/ring.c unsigned head, index;
head 120 tools/virtio/ringtest/ring.c head = (ring_size - 1) & (guest.avail_idx++);
head 125 tools/virtio/ringtest/ring.c ring[head].addr = (unsigned long)(void*)buf;
head 126 tools/virtio/ringtest/ring.c ring[head].len = len;
head 133 tools/virtio/ringtest/ring.c index = ring[head].index;
head 138 tools/virtio/ringtest/ring.c ring[head].flags = DESC_HW;
head 145 tools/virtio/ringtest/ring.c unsigned head = (ring_size - 1) & guest.last_used_idx;
head 149 tools/virtio/ringtest/ring.c if (ring[head].flags & DESC_HW)
head 153 tools/virtio/ringtest/ring.c *lenp = ring[head].len;
head 154 tools/virtio/ringtest/ring.c index = ring[head].index & (ring_size - 1);
head 166 tools/virtio/ringtest/ring.c unsigned head = (ring_size - 1) & guest.last_used_idx;
head 168 tools/virtio/ringtest/ring.c return (ring[head].flags & DESC_HW);
head 221 tools/virtio/ringtest/ring.c unsigned head = (ring_size - 1) & host.used_idx;
head 223 tools/virtio/ringtest/ring.c return !(ring[head].flags & DESC_HW);
head 228 tools/virtio/ringtest/ring.c unsigned head = (ring_size - 1) & host.used_idx;
head 230 tools/virtio/ringtest/ring.c if (!(ring[head].flags & DESC_HW))
head 241 tools/virtio/ringtest/ring.c ring[head].len--;
head 249 tools/virtio/ringtest/ring.c ring[head].flags = 0;
head 101 tools/virtio/ringtest/virtio_ring_0_9.c unsigned head;
head 111 tools/virtio/ringtest/virtio_ring_0_9.c head = (ring_size - 1) & (guest.avail_idx++);
head 113 tools/virtio/ringtest/virtio_ring_0_9.c head = guest.free_head;
head 118 tools/virtio/ringtest/virtio_ring_0_9.c desc[head].flags = VRING_DESC_F_NEXT;
head 119 tools/virtio/ringtest/virtio_ring_0_9.c desc[head].addr = (unsigned long)(void *)buf;
head 120 tools/virtio/ringtest/virtio_ring_0_9.c desc[head].len = len;
head 125 tools/virtio/ringtest/virtio_ring_0_9.c desc[head].flags &= ~VRING_DESC_F_NEXT;
head 127 tools/virtio/ringtest/virtio_ring_0_9.c guest.free_head = desc[head].next;
head 130 tools/virtio/ringtest/virtio_ring_0_9.c data[head].data = datap;
head 137 tools/virtio/ringtest/virtio_ring_0_9.c (head | (avail & ~(ring_size - 1))) ^ 0x8000;
head 143 tools/virtio/ringtest/virtio_ring_0_9.c ring.avail->ring[avail] = head;
head 154 tools/virtio/ringtest/virtio_ring_0_9.c unsigned head;
head 159 tools/virtio/ringtest/virtio_ring_0_9.c head = (ring_size - 1) & guest.last_used_idx;
head 160 tools/virtio/ringtest/virtio_ring_0_9.c index = ring.used->ring[head].id;
head 172 tools/virtio/ringtest/virtio_ring_0_9.c head = (ring_size - 1) & guest.last_used_idx;
head 173 tools/virtio/ringtest/virtio_ring_0_9.c index = head;
head 175 tools/virtio/ringtest/virtio_ring_0_9.c head = (ring_size - 1) & guest.last_used_idx;
head 176 tools/virtio/ringtest/virtio_ring_0_9.c index = ring.used->ring[head].id;
head 183 tools/virtio/ringtest/virtio_ring_0_9.c *lenp = ring.used->ring[head].len;
head 201 tools/virtio/ringtest/virtio_ring_0_9.c unsigned short head = last_used_idx & (ring_size - 1);
head 202 tools/virtio/ringtest/virtio_ring_0_9.c unsigned index = ring.used->ring[head].id;
head 260 tools/virtio/ringtest/virtio_ring_0_9.c unsigned head = host.used_idx;
head 262 tools/virtio/ringtest/virtio_ring_0_9.c unsigned index = ring.avail->ring[head & (ring_size - 1)];
head 264 tools/virtio/ringtest/virtio_ring_0_9.c return ((index ^ head ^ 0x8000) & ~(ring_size - 1));
head 266 tools/virtio/ringtest/virtio_ring_0_9.c return head == ring.avail->idx;
head 274 tools/virtio/ringtest/virtio_ring_0_9.c unsigned head;
head 277 tools/virtio/ringtest/virtio_ring_0_9.c head = ring.avail->ring[used_idx & (ring_size - 1)];
head 278 tools/virtio/ringtest/virtio_ring_0_9.c if ((used_idx ^ head ^ 0x8000) & ~(ring_size - 1))
head 284 tools/virtio/ringtest/virtio_ring_0_9.c desc = &ring.desc[head & (ring_size - 1)];
head 294 tools/virtio/ringtest/virtio_ring_0_9.c head = used_idx;
head 296 tools/virtio/ringtest/virtio_ring_0_9.c head = ring.avail->ring[used_idx];
head 298 tools/virtio/ringtest/virtio_ring_0_9.c desc = &ring.desc[head];
head 308 tools/virtio/ringtest/virtio_ring_0_9.c ring.used->ring[used_idx].id = head;
head 111 tools/virtio/vringh_test.c static inline int vringh_get_head(struct vringh *vrh, u16 *head)
head 128 tools/virtio/vringh_test.c err = get_user(*head, &vrh->vring.avail->ring[i]);
head 198 tools/virtio/vringh_test.c u16 head, written;
head 202 tools/virtio/vringh_test.c err = vringh_get_head(&vrh, &head);
head 227 tools/virtio/vringh_test.c getrange, &head);
head 273 tools/virtio/vringh_test.c err = vringh_complete_user(&vrh, head, written);
head 447 tools/virtio/vringh_test.c u16 head;
head 495 tools/virtio/vringh_test.c err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
head 519 tools/virtio/vringh_test.c err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
head 554 tools/virtio/vringh_test.c err = vringh_complete_user(&vrh, head, err);
head 588 tools/virtio/vringh_test.c err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
head 616 tools/virtio/vringh_test.c used[0].id = head;
head 641 tools/virtio/vringh_test.c err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
head 644 tools/virtio/vringh_test.c used[i].id = head;
head 725 tools/virtio/vringh_test.c err = vringh_getdesc_user(&vrh, &riov, &wiov, getrange, &head);
head 729 tools/virtio/vringh_test.c if (head != 0)
head 730 tools/virtio/vringh_test.c errx(1, "vringh_getdesc_user: head %i not 0", head);
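
Across the ringtest hits, head is an index rather than a pointer: a free-running counter masked by ring_size - 1 selects a descriptor slot, and a flag hands ownership back and forth between guest and host. A compressed sketch of the DESC_HW handoff visible in the tools/virtio/ringtest/ring.c hits, with GCC atomics standing in for the explicit memory barriers the original uses, and the struct layout simplified:

#define RING_SIZE 256            /* power of two */
#define DESC_HW   0x0080         /* set: descriptor owned by the device */

struct desc {
	unsigned short flags;
	unsigned short index;
	unsigned len;
	unsigned long long addr;
};

static struct desc ring[RING_SIZE];
static unsigned avail_idx;       /* free-running; wraps naturally */

/* guest side: fill the next descriptor, then hand it to the device */
static void guest_post(void *buf, unsigned len)
{
	unsigned head = (RING_SIZE - 1) & avail_idx++;   /* mask, not modulo */

	ring[head].addr = (unsigned long long)(unsigned long)buf;
	ring[head].len = len;
	/* flags written last: setting DESC_HW transfers ownership */
	__atomic_store_n(&ring[head].flags, DESC_HW, __ATOMIC_RELEASE);
}

/* device side: a descriptor is ready once DESC_HW is observed set */
static int host_poll(unsigned used_idx)
{
	unsigned head = (RING_SIZE - 1) & used_idx;

	return __atomic_load_n(&ring[head].flags, __ATOMIC_ACQUIRE) & DESC_HW;
}

The virtio_ring_0_9.c hits (lines 137, 264, 278) suggest the variant without an ownership flag: the head index is published through the avail ring with the counter's high bits XORed with 0x8000 folded in, so a consumer can tell a freshly written entry from a stale one left over from the previous trip around the ring.
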