ent 229 arch/arc/include/asm/arcregs.h unsigned int pad2:19, fam:1, pad:2, ent:2, ver:8;
ent 231 arch/arc/include/asm/arcregs.h unsigned int ver:8, ent:2, pad:2, fam:1, pad2:19;
ent 207 arch/arc/kernel/setup.c if (bpu.ent) {
ent 208 arch/arc/kernel/setup.c cpu->bpu.num_cache = 256 << (bpu.ent - 1);
ent 209 arch/arc/kernel/setup.c cpu->bpu.num_pred = 256 << (bpu.ent - 1);
ent 106 arch/ia64/kernel/perfmon_default_smpl.c pfm_default_smpl_entry_t *ent;
ent 131 arch/ia64/kernel/perfmon_default_smpl.c ent = (pfm_default_smpl_entry_t *)cur;
ent 135 arch/ia64/kernel/perfmon_default_smpl.c entry_size = sizeof(*ent) + (npmds << 3);
ent 138 arch/ia64/kernel/perfmon_default_smpl.c e = (unsigned long *)(ent+1);
ent 160 arch/ia64/kernel/perfmon_default_smpl.c ent->pid = current->pid;
ent 161 arch/ia64/kernel/perfmon_default_smpl.c ent->ovfl_pmd = ovfl_pmd;
ent 162 arch/ia64/kernel/perfmon_default_smpl.c ent->last_reset_val = arg->pmd_last_reset; //pmd[0].reg_last_reset_val;
ent 167 arch/ia64/kernel/perfmon_default_smpl.c ent->ip = regs->cr_iip | ((regs->cr_ipsr >> 41) & 0x3);
ent 169 arch/ia64/kernel/perfmon_default_smpl.c ent->tstamp = stamp;
ent 170 arch/ia64/kernel/perfmon_default_smpl.c ent->cpu = smp_processor_id();
ent 171 arch/ia64/kernel/perfmon_default_smpl.c ent->set = arg->active_set;
ent 172 arch/ia64/kernel/perfmon_default_smpl.c ent->tgid = current->tgid;
ent 30 arch/mips/include/asm/asm.h .ent symbol, 0; \
ent 42 arch/mips/include/asm/asm.h .ent symbol, 0; \
ent 134 arch/mips/sibyte/common/bus_watcher.c struct proc_dir_entry *ent;
ent 136 arch/mips/sibyte/common/bus_watcher.c ent = proc_create_single_data("bus_watcher", S_IWUSR | S_IRUGO, NULL,
ent 138 arch/mips/sibyte/common/bus_watcher.c if (!ent) {
ent 385 arch/parisc/kernel/pci-dma.c struct proc_dir_entry* ent;
ent 386 arch/parisc/kernel/pci-dma.c ent = proc_create_single("pcxl_dma", 0, proc_gsc_root,
ent 388 arch/parisc/kernel/pci-dma.c if (!ent)
ent 42 arch/powerpc/platforms/powernv/memtrace.c struct memtrace_entry *ent = filp->private_data;
ent 44 arch/powerpc/platforms/powernv/memtrace.c return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
ent 177 arch/powerpc/platforms/powernv/memtrace.c struct memtrace_entry *ent = &memtrace_array[i];
ent 179 arch/powerpc/platforms/powernv/memtrace.c ent->mem = ioremap(ent->start, ent->size);
ent 181 arch/powerpc/platforms/powernv/memtrace.c if (!ent->mem) {
ent 183 arch/powerpc/platforms/powernv/memtrace.c ent->start);
ent 188 arch/powerpc/platforms/powernv/memtrace.c snprintf(ent->name, 16, "%08x", ent->nid);
ent 189 arch/powerpc/platforms/powernv/memtrace.c dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
ent 192 arch/powerpc/platforms/powernv/memtrace.c ent->nid);
ent 196 arch/powerpc/platforms/powernv/memtrace.c ent->dir = dir;
ent 197 arch/powerpc/platforms/powernv/memtrace.c debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
ent 198 arch/powerpc/platforms/powernv/memtrace.c debugfs_create_x64("start", 0400, dir, &ent->start);
ent 199 arch/powerpc/platforms/powernv/memtrace.c debugfs_create_x64("size", 0400, dir, &ent->size);
ent 217 arch/powerpc/platforms/powernv/memtrace.c struct memtrace_entry *ent;
ent 220 arch/powerpc/platforms/powernv/memtrace.c ent = &memtrace_array[i];
ent 223 arch/powerpc/platforms/powernv/memtrace.c if (ent->nid == NUMA_NO_NODE)
ent 227 arch/powerpc/platforms/powernv/memtrace.c if (ent->mem) {
ent 228 arch/powerpc/platforms/powernv/memtrace.c iounmap(ent->mem);
ent 229 arch/powerpc/platforms/powernv/memtrace.c ent->mem = 0;
ent 232 arch/powerpc/platforms/powernv/memtrace.c if (add_memory(ent->nid, ent->start, ent->size)) {
ent 234 arch/powerpc/platforms/powernv/memtrace.c ent->nid);
ent 245 arch/powerpc/platforms/powernv/memtrace.c walk_memory_blocks(ent->start, ent->size, NULL,
ent 254 arch/powerpc/platforms/powernv/memtrace.c debugfs_remove_recursive(ent->dir);
ent 255 arch/powerpc/platforms/powernv/memtrace.c pr_info("Added trace memory back to node %d\n", ent->nid);
ent 256 arch/powerpc/platforms/powernv/memtrace.c ent->size = ent->start = ent->nid = NUMA_NO_NODE;
ent 90 arch/powerpc/platforms/powernv/opal-xscom.c struct scom_debug_entry *ent = filp->private_data;
ent 103 arch/powerpc/platforms/powernv/opal-xscom.c rc = opal_scom_read(ent->chip, reg_base, reg, &val);
ent 121 arch/powerpc/platforms/powernv/opal-xscom.c struct scom_debug_entry *ent = filp->private_data;
ent 136 arch/powerpc/platforms/powernv/opal-xscom.c rc = opal_scom_write(ent->chip, reg_base, reg, val);
ent 158 arch/powerpc/platforms/powernv/opal-xscom.c struct scom_debug_entry *ent;
ent 161 arch/powerpc/platforms/powernv/opal-xscom.c ent = kzalloc(sizeof(*ent), GFP_KERNEL);
ent 162 arch/powerpc/platforms/powernv/opal-xscom.c if (!ent)
ent 165 arch/powerpc/platforms/powernv/opal-xscom.c ent->chip = chip;
ent 166 arch/powerpc/platforms/powernv/opal-xscom.c snprintf(ent->name, 16, "%08x", chip);
ent 167 arch/powerpc/platforms/powernv/opal-xscom.c ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn);
ent 168 arch/powerpc/platforms/powernv/opal-xscom.c ent->path.size = strlen((char *)ent->path.data);
ent 170 arch/powerpc/platforms/powernv/opal-xscom.c dir = debugfs_create_dir(ent->name, root);
ent 172 arch/powerpc/platforms/powernv/opal-xscom.c kfree(ent->path.data);
ent 173 arch/powerpc/platforms/powernv/opal-xscom.c kfree(ent);
ent 177 arch/powerpc/platforms/powernv/opal-xscom.c debugfs_create_blob("devspec", 0400, dir, &ent->path);
ent 178 arch/powerpc/platforms/powernv/opal-xscom.c debugfs_create_file("access", 0600, dir, ent, &scom_debug_fops);
ent 103 arch/powerpc/platforms/powernv/vas.c struct list_head *ent;
ent 111 arch/powerpc/platforms/powernv/vas.c list_for_each(ent, &vas_instances) {
ent 112 arch/powerpc/platforms/powernv/vas.c vinst = list_entry(ent, struct vas_instance, node);
ent 402 arch/powerpc/platforms/pseries/reconfig.c struct proc_dir_entry *ent;
ent 404 arch/powerpc/platforms/pseries/reconfig.c ent = proc_create("powerpc/ofdt", 0200, NULL, &ofdt_fops);
ent 405 arch/powerpc/platforms/pseries/reconfig.c if (ent)
ent 406 arch/powerpc/platforms/pseries/reconfig.c proc_set_size(ent, 0);
ent 166 arch/powerpc/platforms/pseries/scanlog.c struct proc_dir_entry *ent;
ent 178 arch/powerpc/platforms/pseries/scanlog.c ent = proc_create("powerpc/rtas/scan-log-dump", 0400, NULL,
ent 180 arch/powerpc/platforms/pseries/scanlog.c if (!ent)
ent 71 arch/sparc/include/asm/mmu_64.h void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
ent 72 arch/sparc/include/asm/mmu_64.h void tsb_flush(unsigned long ent, unsigned long tag);
ent 349 arch/sparc/kernel/chmc.c const struct linux_prom64_registers *ent;
ent 353 arch/sparc/kernel/chmc.c ent = &mem_regs[i];
ent 354 arch/sparc/kernel/chmc.c this_base = ent->phys_addr;
ent 355 arch/sparc/kernel/chmc.c this_end = this_base + ent->reg_size;
ent 490 arch/sparc/kernel/ds.c struct dr_cpu_resp_entry *ent;
ent 495 arch/sparc/kernel/ds.c ent = (struct dr_cpu_resp_entry *) (tag + 1);
ent 506 arch/sparc/kernel/ds.c ent[i].cpu = cpu;
ent 507 arch/sparc/kernel/ds.c ent[i].result = DR_CPU_RES_OK;
ent 508 arch/sparc/kernel/ds.c ent[i].stat = default_stat;
ent 517 arch/sparc/kernel/ds.c struct dr_cpu_resp_entry *ent;
ent 522 arch/sparc/kernel/ds.c ent = (struct dr_cpu_resp_entry *) (tag + 1);
ent 525 arch/sparc/kernel/ds.c if (ent[i].cpu != cpu)
ent 527 arch/sparc/kernel/ds.c ent[i].result = res;
ent 528 arch/sparc/kernel/ds.c ent[i].stat = stat;
ent 1966 arch/sparc/kernel/traps_64.c static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
ent 1969 arch/sparc/kernel/traps_64.c u64 *raw_ptr = (u64 *) ent;
ent 1983 arch/sparc/kernel/traps_64.c pfx, ent->err_handle, ent->err_stick);
ent 1985 arch/sparc/kernel/traps_64.c printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type));
ent 1987 arch/sparc/kernel/traps_64.c attrs = ent->err_attrs;
ent 1998 arch/sparc/kernel/traps_64.c printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr);
ent 2000 arch/sparc/kernel/traps_64.c if (ent->err_raddr == ~(u64)0)
ent 2005 arch/sparc/kernel/traps_64.c printk("%s: size [0x%x]\n", pfx, ent->err_size);
ent 2011 arch/sparc/kernel/traps_64.c printk("%s: cpu[%u]\n", pfx, ent->err_cpu);
ent 2014 arch/sparc/kernel/traps_64.c printk("%s: asi [0x%02x]\n", pfx, ent->err_asi);
ent 2019 arch/sparc/kernel/traps_64.c (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0)
ent 2021 arch/sparc/kernel/traps_64.c pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID);
ent 2036 arch/sparc/kernel/traps_64.c void do_mcd_err(struct pt_regs *regs, struct sun4v_error_entry ent)
ent 2062 arch/sparc/kernel/traps_64.c ent.err_raddr, entry->fixup);
ent 2073 arch/sparc/kernel/traps_64.c force_sig_fault(SIGSEGV, SEGV_ADIDERR, (void __user *)ent.err_raddr,
ent 2083 arch/sparc/kernel/traps_64.c struct sun4v_error_entry *ent, local_copy;
ent 2092 arch/sparc/kernel/traps_64.c ent = __va(paddr);
ent 2094 arch/sparc/kernel/traps_64.c memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
ent 2097 arch/sparc/kernel/traps_64.c ent->err_handle = 0;
ent 2156 arch/sparc/kernel/traps_64.c struct sun4v_error_entry *ent) {
ent 2158 arch/sparc/kernel/traps_64.c unsigned int attrs = ent->err_attrs;
ent 2161 arch/sparc/kernel/traps_64.c unsigned long addr = ent->err_raddr;
ent 2167 arch/sparc/kernel/traps_64.c unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
ent 2201 arch/sparc/kernel/traps_64.c struct sun4v_error_entry *ent, local_copy;
ent 2210 arch/sparc/kernel/traps_64.c ent = __va(paddr);
ent 2212 arch/sparc/kernel/traps_64.c memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
ent 2215 arch/sparc/kernel/traps_64.c ent->err_handle = 0;
ent 269 arch/sparc/mm/init_64.c static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
ent 271 arch/sparc/mm/init_64.c unsigned long tsb_addr = (unsigned long) ent;
ent 1876 arch/sparc/mm/init_64.c struct tsb *ent = &swapper_tsb[i];
ent 1878 arch/sparc/mm/init_64.c ent->tag = (1UL << TSB_TAG_INVALID_BIT);
ent 1882 arch/sparc/mm/init_64.c struct tsb *ent = &swapper_4m_tsb[i];
ent 1884 arch/sparc/mm/init_64.c ent->tag = (1UL << TSB_TAG_INVALID_BIT);
ent 38 arch/sparc/mm/tsb.c struct tsb *ent = &swapper_tsb[idx];
ent 41 arch/sparc/mm/tsb.c match |= (ent->tag << 22);
ent 43 arch/sparc/mm/tsb.c ent->tag = (1UL << TSB_TAG_INVALID_BIT);
ent 62 arch/sparc/mm/tsb.c struct tsb *ent = &swapper_tsb[hash];
ent 64 arch/sparc/mm/tsb.c if (tag_compare(ent->tag, v))
ent 65 arch/sparc/mm/tsb.c ent->tag = (1UL << TSB_TAG_INVALID_BIT);
ent 73 arch/sparc/mm/tsb.c unsigned long tag, ent, hash;
ent 77 arch/sparc/mm/tsb.c ent = tsb + (hash * sizeof(struct tsb));
ent 80 arch/sparc/mm/tsb.c tsb_flush(ent, tag);
ent 763 arch/um/drivers/mconsole_kern.c struct proc_dir_entry *ent;
ent 768 arch/um/drivers/mconsole_kern.c ent = proc_create("mconsole", 0200, NULL, &mconsole_proc_fops);
ent 769 arch/um/drivers/mconsole_kern.c if (ent == NULL) {
ent 212 arch/um/drivers/ubd_kern.c struct proc_dir_entry *dir, *ent;
ent 220 arch/um/drivers/ubd_kern.c ent = proc_create_single("media", S_IRUGO, dir,
ent 222 arch/um/drivers/ubd_kern.c if(!ent) return;
ent 69 arch/um/kernel/exitcode.c struct proc_dir_entry *ent;
ent 71 arch/um/kernel/exitcode.c ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_fops);
ent 72 arch/um/kernel/exitcode.c if (ent == NULL) {
ent 362 arch/um/kernel/process.c struct proc_dir_entry *ent;
ent 366 arch/um/kernel/process.c ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);
ent 368 arch/um/kernel/process.c if (ent == NULL)
ent 84 arch/um/os-Linux/umid.c struct dirent *ent;
ent 97 arch/um/os-Linux/umid.c while ((ent = readdir(directory)) != NULL) {
ent 98 arch/um/os-Linux/umid.c if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
ent 100 arch/um/os-Linux/umid.c len = strlen(dir) + sizeof("/") + strlen(ent->d_name) + 1;
ent 106 arch/um/os-Linux/umid.c sprintf(file, "%s/%s", dir, ent->d_name);
ent 895 arch/x86/kvm/cpuid.c const struct kvm_cpuid_param *ent = &param[i];
ent 897 arch/x86/kvm/cpuid.c if (ent->qualifier && !ent->qualifier(ent))
ent 900 arch/x86/kvm/cpuid.c r = do_cpuid_func(&cpuid_entries[nent], ent->func,
ent 907 arch/x86/kvm/cpuid.c for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
ent 1815 arch/x86/kvm/hyperv.c struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
ent 1818 arch/x86/kvm/hyperv.c switch (ent->function) {
ent 1822 arch/x86/kvm/hyperv.c ent->eax = HYPERV_CPUID_NESTED_FEATURES;
ent 1823 arch/x86/kvm/hyperv.c ent->ebx = signature[0];
ent 1824 arch/x86/kvm/hyperv.c ent->ecx = signature[1];
ent 1825 arch/x86/kvm/hyperv.c ent->edx = signature[2];
ent 1830 arch/x86/kvm/hyperv.c ent->eax = signature[0];
ent 1838 arch/x86/kvm/hyperv.c ent->eax = 0x00003839;
ent 1839 arch/x86/kvm/hyperv.c ent->ebx = 0x000A0000;
ent 1843 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
ent 1844 arch/x86/kvm/hyperv.c ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
ent 1845 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
ent 1846 arch/x86/kvm/hyperv.c ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
ent 1847 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
ent 1848 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
ent 1849 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
ent 1850 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
ent 1851 arch/x86/kvm/hyperv.c ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
ent 1852 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
ent 1853 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
ent 1855 arch/x86/kvm/hyperv.c ent->ebx |= HV_X64_POST_MESSAGES;
ent 1856 arch/x86/kvm/hyperv.c ent->ebx |= HV_X64_SIGNAL_EVENTS;
ent 1858 arch/x86/kvm/hyperv.c ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
ent 1859 arch/x86/kvm/hyperv.c ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
ent 1866 arch/x86/kvm/hyperv.c ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
ent 1871 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
ent 1872 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
ent 1873 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
ent 1874 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
ent 1875 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
ent 1877 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
ent 1879 arch/x86/kvm/hyperv.c ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
ent 1884 arch/x86/kvm/hyperv.c ent->ebx = 0x00000FFF;
ent 1890 arch/x86/kvm/hyperv.c ent->eax = KVM_MAX_VCPUS;
ent 1895 arch/x86/kvm/hyperv.c ent->ebx = 64;
ent 1900 arch/x86/kvm/hyperv.c ent->eax = evmcs_ver;
ent 409 arch/x86/kvm/ioapic.c union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
ent 411 arch/x86/kvm/ioapic.c if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
ent 414 arch/x86/kvm/ioapic.c if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
ent 435 arch/x86/kvm/ioapic.c union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
ent 437 arch/x86/kvm/ioapic.c if (ent->fields.vector != vector)
ent 456 arch/x86/kvm/ioapic.c ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
ent 457 arch/x86/kvm/ioapic.c ent->fields.remote_irr = 0;
ent 458 arch/x86/kvm/ioapic.c if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
ent 471 arch/x86/kvm/ioapic.c trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
ent 2281 arch/x86/kvm/mmu.c u64 ent = sp->spt[i];
ent 2283 arch/x86/kvm/mmu.c if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
ent 2288 arch/x86/kvm/mmu.c child = page_header(ent & PT64_BASE_ADDR_MASK);
ent 40 arch/x86/kvm/mmu_audit.c u64 *ent = sp->spt;
ent 42 arch/x86/kvm/mmu_audit.c fn(vcpu, ent + i, level);
ent 44 arch/x86/kvm/mmu_audit.c if (is_shadow_present_pte(ent[i]) &&
ent 45 arch/x86/kvm/mmu_audit.c !is_last_spte(ent[i], level)) {
ent 48 arch/x86/kvm/mmu_audit.c child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
ent 58 arch/x86/platform/scx200/scx200_32.c static int scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 62 drivers/ata/acard-ahci.c static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 355 drivers/ata/acard-ahci.c static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 357 drivers/ata/acard-ahci.c unsigned int board_id = ent->driver_data;
ent 81 drivers/ata/ahci.c static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 1626 drivers/ata/ahci.c static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 1628 drivers/ata/ahci.c unsigned int board_id = ent->driver_data;
ent 1641 drivers/ata/ata_piix.c static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 1655 drivers/ata/ata_piix.c if (!in_module_init && ent->driver_data >= ich5_sata)
ent 1659 drivers/ata/ata_piix.c piix_port_info[ent->driver_data].flags |=
ent 1666 drivers/ata/ata_piix.c port_info[0] = piix_port_info[ent->driver_data];
ent 1667 drivers/ata/ata_piix.c port_info[1] = piix_port_info[ent->driver_data];
ent 1700 drivers/ata/ata_piix.c piix_map_db_table[ent->driver_data]);
ent 1709 drivers/ata/ata_piix.c piix_init_pcs(host, piix_map_db_table[ent->driver_data]);
ent 919 drivers/ata/libata-core.c const struct ata_xfer_ent *ent;
ent 921 drivers/ata/libata-core.c for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
ent 922 drivers/ata/libata-core.c if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
ent 923 drivers/ata/libata-core.c return ent->base + highbit - ent->shift;
ent 941 drivers/ata/libata-core.c const struct ata_xfer_ent *ent;
ent 943 drivers/ata/libata-core.c for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
ent 944 drivers/ata/libata-core.c if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
ent 945 drivers/ata/libata-core.c return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
ent 946 drivers/ata/libata-core.c & ~((1 << ent->shift) - 1);
ent 964 drivers/ata/libata-core.c const struct ata_xfer_ent *ent;
ent 966 drivers/ata/libata-core.c for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
ent 967 drivers/ata/libata-core.c if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
ent 968 drivers/ata/libata-core.c return ent->shift;
ent 3363 drivers/ata/libata-core.c const struct ata_xfer_ent *ent;
ent 3366 drivers/ata/libata-core.c for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
ent 3367 drivers/ata/libata-core.c if (ent->shift == xfer_shift)
ent 3368 drivers/ata/libata-core.c base_mode = ent->base;
ent 325 drivers/ata/libata-eh.c int ent = ata_lookup_timeout_table(cmd);
ent 328 drivers/ata/libata-eh.c if (ent < 0)
ent 331 drivers/ata/libata-eh.c idx = ehc->cmd_timeout_idx[dev->devno][ent];
ent 332 drivers/ata/libata-eh.c return ata_eh_cmd_timeout_table[ent].timeouts[idx];
ent 350 drivers/ata/libata-eh.c int ent = ata_lookup_timeout_table(cmd);
ent 353 drivers/ata/libata-eh.c if (ent < 0)
ent 356 drivers/ata/libata-eh.c idx = ehc->cmd_timeout_idx[dev->devno][ent];
ent 357 drivers/ata/libata-eh.c if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
ent 358 drivers/ata/libata-eh.c ehc->cmd_timeout_idx[dev->devno][ent]++;
ent 364 drivers/ata/libata-eh.c struct ata_ering_entry *ent;
ent 371 drivers/ata/libata-eh.c ent = &ering->ring[ering->cursor];
ent 372 drivers/ata/libata-eh.c ent->eflags = eflags;
ent 373 drivers/ata/libata-eh.c ent->err_mask = err_mask;
ent 374 drivers/ata/libata-eh.c ent->timestamp = get_jiffies_64();
ent 379 drivers/ata/libata-eh.c struct ata_ering_entry *ent = &ering->ring[ering->cursor];
ent 381 drivers/ata/libata-eh.c if (ent->err_mask)
ent 382 drivers/ata/libata-eh.c return ent;
ent 391 drivers/ata/libata-eh.c struct ata_ering_entry *ent;
ent 395 drivers/ata/libata-eh.c ent = &ering->ring[idx];
ent 396 drivers/ata/libata-eh.c if (!ent->err_mask)
ent 398 drivers/ata/libata-eh.c rc = map_fn(ent, arg);
ent 407 drivers/ata/libata-eh.c static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
ent 409 drivers/ata/libata-eh.c ent->eflags |= ATA_EFLAG_OLD_ER;
ent 1857 drivers/ata/libata-eh.c static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
ent 1862 drivers/ata/libata-eh.c if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
ent 1865 drivers/ata/libata-eh.c cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
ent 3257 drivers/ata/libata-eh.c struct ata_ering_entry *ent;
ent 3259 drivers/ata/libata-eh.c ent = ata_ering_top(&dev->ering);
ent 3260 drivers/ata/libata-eh.c if (ent)
ent 3261 drivers/ata/libata-eh.c ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
ent 3596 drivers/ata/libata-eh.c static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
ent 3602 drivers/ata/libata-eh.c if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
ent 3603 drivers/ata/libata-eh.c (ent->timestamp < now - min(now, interval)))
ent 497 drivers/ata/libata-transport.c static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg)
ent 503 drivers/ata/libata-transport.c seconds = div_u64_rem(ent->timestamp, HZ, &rem);
ent 507 drivers/ata/libata-transport.c arg->written += get_ata_err_names(ent->err_mask,
ent 151 drivers/ata/pata_buddha.c const struct zorro_device_id *ent)
ent 159 drivers/ata/pata_buddha.c unsigned int type = ent->driver_data;
ent 264 drivers/ata/pata_efar.c static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ent 259 drivers/ata/pata_it8213.c static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ent 58 drivers/ata/pata_netcell.c static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ent 351 drivers/ata/pata_ns87415.c static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ent 236 drivers/ata/pata_oldpiix.c static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ent 59 drivers/ata/pata_pdc2027x.c static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 697 drivers/ata/pata_pdc2027x.c const struct pci_device_id *ent)
ent 701 drivers/ata/pata_pdc2027x.c unsigned int board_idx = (unsigned int) ent->driver_data;
ent 214 drivers/ata/pata_radisys.c static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ent 310 drivers/ata/pata_rdc.c static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 86 drivers/ata/pata_rz1000.c static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ent 39 drivers/ata/pata_sch.c const struct pci_device_id *ent);
ent 158 drivers/ata/pata_sch.c static int sch_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 723 drivers/ata/pata_sis.c static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ent 116 drivers/ata/pdc_adma.c const struct pci_device_id *ent);
ent 576 drivers/ata/pdc_adma.c const struct pci_device_id *ent)
ent 578 drivers/ata/pdc_adma.c unsigned int board_idx = (unsigned int) ent->driver_data;
ent 817 drivers/ata/sata_inic162x.c static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 4299 drivers/ata/sata_mv.c const struct pci_device_id *ent);
ent 4367 drivers/ata/sata_mv.c const struct pci_device_id *ent)
ent 4369 drivers/ata/sata_mv.c unsigned int board_idx = (unsigned int)ent->driver_data;
ent 281 drivers/ata/sata_nv.c static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 2314 drivers/ata/sata_nv.c static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 2323 drivers/ata/sata_nv.c unsigned long type = ent->driver_data;
ent 139 drivers/ata/sata_promise.c static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 1168 drivers/ata/sata_promise.c const struct pci_device_id *ent)
ent 1170 drivers/ata/sata_promise.c const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
ent 100 drivers/ata/sata_qstor.c static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 550 drivers/ata/sata_qstor.c const struct pci_device_id *ent)
ent 552 drivers/ata/sata_qstor.c unsigned int board_idx = (unsigned int) ent->driver_data;
ent 98 drivers/ata/sata_sil.c static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 720 drivers/ata/sata_sil.c static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 722 drivers/ata/sata_sil.c int board_id = ent->driver_data;
ent 345 drivers/ata/sata_sil24.c static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 1258 drivers/ata/sata_sil24.c static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 1261 drivers/ata/sata_sil24.c struct ata_port_info pi = sil24_port_info[ent->driver_data];
ent 49 drivers/ata/sata_sis.c static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 181 drivers/ata/sata_sis.c static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 214 drivers/ata/sata_sis.c switch (ent->device) {
ent 409 drivers/ata/sata_svw.c static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 412 drivers/ata/sata_svw.c { &k2_port_info[ent->driver_data], NULL };
ent 200 drivers/ata/sata_sx4.c static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 1429 drivers/ata/sata_sx4.c const struct pci_device_id *ent)
ent 1432 drivers/ata/sata_sx4.c { &pdc_port_info[ent->driver_data], NULL };
ent 43 drivers/ata/sata_uli.c static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 129 drivers/ata/sata_uli.c static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 132 drivers/ata/sata_uli.c unsigned int board_idx = (unsigned int) ent->driver_data;
ent 69 drivers/ata/sata_via.c static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 676 drivers/ata/sata_via.c static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 681 drivers/ata/sata_via.c int board_id = (int) ent->driver_data;
ent 321 drivers/ata/sata_vsc.c const struct pci_device_id *ent)
ent 2235 drivers/atm/eni.c const struct pci_device_id *ent)
ent 2268 drivers/atm/eni.c eni_dev->asic = ent->driver_data;
ent 1891 drivers/atm/firestream.c const struct pci_device_id *ent)
ent 1910 drivers/atm/firestream.c fs_dev->flags = ent->driver_data;
ent 3177 drivers/atm/iphase.c static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 177 drivers/atm/nicstar.c const struct pci_device_id *ent)
ent 1594 drivers/atm/zatm.c const struct pci_device_id *ent)
ent 1624 drivers/atm/zatm.c zatm_dev->copper = (int)ent->driver_data;
ent 159 drivers/bcma/scan.c u32 ent = readl(*eromptr);
ent 161 drivers/bcma/scan.c return ent;
ent 171 drivers/bcma/scan.c u32 ent = bcma_erom_get_ent(bus, eromptr);
ent 172 drivers/bcma/scan.c if (!(ent & SCAN_ER_VALID))
ent 174 drivers/bcma/scan.c if ((ent & SCAN_ER_TAG) != SCAN_ER_TAG_CI)
ent 176 drivers/bcma/scan.c return ent;
ent 181 drivers/bcma/scan.c u32 ent = bcma_erom_get_ent(bus, eromptr);
ent 183 drivers/bcma/scan.c return (ent == (SCAN_ER_TAG_END | SCAN_ER_VALID));
ent 188 drivers/bcma/scan.c u32 ent = bcma_erom_get_ent(bus, eromptr);
ent 190 drivers/bcma/scan.c return (((ent & SCAN_ER_VALID)) &&
ent 191 drivers/bcma/scan.c ((ent & SCAN_ER_TAGX) == SCAN_ER_TAG_ADDR) &&
ent 192 drivers/bcma/scan.c ((ent & SCAN_ADDR_TYPE) == SCAN_ADDR_TYPE_BRIDGE));
ent 197 drivers/bcma/scan.c u32 ent;
ent 199 drivers/bcma/scan.c ent = bcma_erom_get_ent(bus, eromptr);
ent 200 drivers/bcma/scan.c if ((ent & SCAN_ER_VALID) &&
ent 201 drivers/bcma/scan.c ((ent & SCAN_ER_TAG) == SCAN_ER_TAG_CI))
ent 203 drivers/bcma/scan.c if (ent == (SCAN_ER_TAG_END | SCAN_ER_VALID))
ent 211 drivers/bcma/scan.c u32 ent = bcma_erom_get_ent(bus, eromptr);
ent 212 drivers/bcma/scan.c if (!(ent & SCAN_ER_VALID))
ent 214 drivers/bcma/scan.c if ((ent & SCAN_ER_TAG) != SCAN_ER_TAG_MP)
ent 216 drivers/bcma/scan.c return ent;
ent 225 drivers/bcma/scan.c u32 ent = bcma_erom_get_ent(bus, eromptr);
ent 226 drivers/bcma/scan.c if ((!(ent & SCAN_ER_VALID)) ||
ent 227 drivers/bcma/scan.c ((ent & SCAN_ER_TAGX) != SCAN_ER_TAG_ADDR) ||
ent 228 drivers/bcma/scan.c ((ent & SCAN_ADDR_TYPE) != type) ||
ent 229 drivers/bcma/scan.c (((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) {
ent 234 drivers/bcma/scan.c addrl = ent & SCAN_ADDR_ADDR;
ent 235 drivers/bcma/scan.c if (ent & SCAN_ADDR_AG32)
ent 240 drivers/bcma/scan.c if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) {
ent 247 drivers/bcma/scan.c ((ent & SCAN_ADDR_SZ) >> SCAN_ADDR_SZ_SHIFT);
ent 1636 drivers/block/drbd/drbd_req.c unsigned long now, unsigned long ent,
ent 1641 drivers/block/drbd/drbd_req.c if (!time_after(now, net_req->pre_send_jif + ent))
ent 1644 drivers/block/drbd/drbd_req.c if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
ent 1680 drivers/block/drbd/drbd_req.c if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
ent 1713 drivers/block/drbd/drbd_req.c unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
ent 1731 drivers/block/drbd/drbd_req.c ent = timeout * HZ/10 * ko_count;
ent 1732 drivers/block/drbd/drbd_req.c et = min_not_zero(dt, ent);
ent 1775 drivers/block/drbd/drbd_req.c if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
ent 1787 drivers/block/drbd/drbd_req.c ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
ent 1788 drivers/block/drbd/drbd_req.c ? req_peer->pre_send_jif + ent : now + et;
ent 1791 drivers/block/drbd/drbd_req.c nt = time_before(ent, dt) ? ent : dt;
ent 3997 drivers/block/mtip32xx/mtip32xx.c const struct pci_device_id *ent)
ent 3164 drivers/block/skd_main.c static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 402 drivers/block/sx8.c static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
ent 1406 drivers/block/sx8.c static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ent 302 drivers/char/agp/ali-agp.c static int agp_ali_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 409 drivers/char/agp/amd-k7-agp.c const struct pci_device_id *ent)
ent 419 drivers/char/agp/amd-k7-agp.c j = ent - agp_amdk7_pci_table;
ent 514 drivers/char/agp/amd64-agp.c const struct pci_device_id *ent)
ent 494 drivers/char/agp/ati-agp.c static int agp_ati_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 344 drivers/char/agp/efficeon-agp.c const struct pci_device_id *ent)
ent 591 drivers/char/agp/i460-agp.c const struct pci_device_id *ent)
ent 725 drivers/char/agp/intel-agp.c const struct pci_device_id *ent)
ent 337 drivers/char/agp/nvidia-agp.c const struct pci_device_id *ent)
ent 182 drivers/char/agp/sis-agp.c static int agp_sis_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 450 drivers/char/agp/sworks-agp.c const struct pci_device_id *ent)
ent 602 drivers/char/agp/uninorth-agp.c const struct pci_device_id *ent)
ent 441 drivers/char/agp/via-agp.c static int agp_via_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 452 drivers/char/agp/via-agp.c j = ent - agp_via_pci_table;
ent 131 drivers/char/hw_random/amd-rng.c const struct pci_device_id *ent;
ent 136 drivers/char/hw_random/amd-rng.c ent = pci_match_id(pci_tbl, pdev);
ent 137 drivers/char/hw_random/amd-rng.c if (ent)
ent 90 drivers/char/hw_random/geode-rng.c const struct pci_device_id *ent;
ent 95 drivers/char/hw_random/geode-rng.c ent = pci_match_id(pci_tbl, pdev);
ent 96 drivers/char/hw_random/geode-rng.c if (ent)
ent 1122 drivers/char/ipmi/ipmi_msghandler.c struct seq_table *ent = &intf->seq_table[seq];
ent 1123 drivers/char/ipmi/ipmi_msghandler.c ent->timeout = ent->orig_timeout;
ent 1152 drivers/char/ipmi/ipmi_msghandler.c struct seq_table *ent = &intf->seq_table[seq];
ent 1154 drivers/char/ipmi/ipmi_msghandler.c ent->inuse = 0;
ent 1156 drivers/char/ipmi/ipmi_msghandler.c msg = ent->recv_msg;
ent 3546 drivers/char/ipmi/ipmi_msghandler.c struct seq_table *ent;
ent 3576 drivers/char/ipmi/ipmi_msghandler.c ent = &intf->seq_table[i];
ent 3577 drivers/char/ipmi/ipmi_msghandler.c if (!ent->inuse)
ent 3579 drivers/char/ipmi/ipmi_msghandler.c deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
ent 4596 drivers/char/ipmi/ipmi_msghandler.c static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
ent 4607 drivers/char/ipmi/ipmi_msghandler.c if (!ent->inuse)
ent 4610 drivers/char/ipmi/ipmi_msghandler.c if (timeout_period < ent->timeout) {
ent 4611 drivers/char/ipmi/ipmi_msghandler.c ent->timeout -= timeout_period;
ent 4616 drivers/char/ipmi/ipmi_msghandler.c if (ent->retries_left == 0) {
ent 4618 drivers/char/ipmi/ipmi_msghandler.c ent->inuse = 0;
ent 4620 drivers/char/ipmi/ipmi_msghandler.c msg = ent->recv_msg;
ent 4622 drivers/char/ipmi/ipmi_msghandler.c if (ent->broadcast)
ent 4624 drivers/char/ipmi/ipmi_msghandler.c else if (is_lan_addr(&ent->recv_msg->addr))
ent 4638 drivers/char/ipmi/ipmi_msghandler.c ent->timeout = MAX_MSG_TIMEOUT;
ent 4639 drivers/char/ipmi/ipmi_msghandler.c ent->retries_left--;
ent 4640 drivers/char/ipmi/ipmi_msghandler.c smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
ent 4641 drivers/char/ipmi/ipmi_msghandler.c ent->seqid);
ent 4643 drivers/char/ipmi/ipmi_msghandler.c if (is_lan_addr(&ent->recv_msg->addr))
ent 4662 drivers/char/ipmi/ipmi_msghandler.c if (is_lan_addr(&ent->recv_msg->addr))
ent 70 drivers/char/ipmi/ipmi_si_pci.c const struct pci_device_id *ent)
ent 846 drivers/char/rtc.c struct proc_dir_entry *ent;
ent 958 drivers/char/rtc.c ent = proc_create_single("driver/rtc", 0, NULL, rtc_proc_show);
ent 959 drivers/char/rtc.c if (!ent)
ent 128 drivers/char/xillybus/xillybus_pcie.c const struct pci_device_id *ent)
ent 541 drivers/crypto/cavium/cpt/cptpf_main.c static int cpt_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 664 drivers/crypto/cavium/cpt/cptvf_main.c static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 17 drivers/crypto/cavium/cpt/cptvf_reqmanager.c struct pending_entry *ent = NULL;
ent 19 drivers/crypto/cavium/cpt/cptvf_reqmanager.c ent = &q->head[q->rear];
ent 20 drivers/crypto/cavium/cpt/cptvf_reqmanager.c if (unlikely(ent->busy)) {
ent 21 drivers/crypto/cavium/cpt/cptvf_reqmanager.c ent = NULL;
ent 30 drivers/crypto/cavium/cpt/cptvf_reqmanager.c return ent;
ent 230 drivers/crypto/cavium/cpt/cptvf_reqmanager.c u8 *ent;
ent 243 drivers/crypto/cavium/cpt/cptvf_reqmanager.c ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
ent 244 drivers/crypto/cavium/cpt/cptvf_reqmanager.c memcpy(ent, (void *)cmd, qinfo->cmd_size);
ent 289 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c u8 *ent;
ent 295 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c ent = cmdq->base + (idx * cmdq->instr_size);
ent 296 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c memcpy(ent, &sr->instr, cmdq->instr_size);
ent 238 drivers/crypto/cavium/zip/zip_main.c static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 1675 drivers/crypto/inside-secure/safexcel.c const struct pci_device_id *ent)
ent 1684 drivers/crypto/inside-secure/safexcel.c ent->vendor, ent->device, ent->subvendor,
ent 1685 drivers/crypto/inside-secure/safexcel.c ent->subdevice, ent->driver_data);
ent 1692 drivers/crypto/inside-secure/safexcel.c priv->version = (enum safexcel_eip_version)ent->driver_data;
ent 510 drivers/crypto/n2_core.c struct cwq_initial_entry *ent)
ent 512 drivers/crypto/n2_core.c unsigned long hv_ret = spu_queue_submit(qp, ent);
ent 526 drivers/crypto/n2_core.c struct cwq_initial_entry *ent;
ent 562 drivers/crypto/n2_core.c ent = qp->q + qp->tail;
ent 564 drivers/crypto/n2_core.c ent->control = control_word_base(nbytes, auth_key_len, 0,
ent 569 drivers/crypto/n2_core.c ent->src_addr = __pa(walk.data);
ent 570 drivers/crypto/n2_core.c ent->auth_key_addr = auth_key;
ent 571 drivers/crypto/n2_core.c ent->auth_iv_addr = __pa(hash_loc);
ent 572 drivers/crypto/n2_core.c ent->final_auth_state_addr = 0UL;
ent 573 drivers/crypto/n2_core.c ent->enc_key_addr = 0UL;
ent 574 drivers/crypto/n2_core.c ent->enc_iv_addr = 0UL;
ent 575 drivers/crypto/n2_core.c ent->dest_addr = __pa(hash_loc);
ent 579 drivers/crypto/n2_core.c ent = spu_queue_next(qp, ent);
ent 581 drivers/crypto/n2_core.c ent->control = (nbytes - 1);
ent 582 drivers/crypto/n2_core.c ent->src_addr = __pa(walk.data);
ent 583 drivers/crypto/n2_core.c ent->auth_key_addr = 0UL;
ent 584 drivers/crypto/n2_core.c ent->auth_iv_addr = 0UL;
ent 585 drivers/crypto/n2_core.c ent->final_auth_state_addr = 0UL;
ent 586 drivers/crypto/n2_core.c ent->enc_key_addr = 0UL;
ent 587 drivers/crypto/n2_core.c ent->enc_iv_addr = 0UL;
ent 588 drivers/crypto/n2_core.c ent->dest_addr = 0UL;
ent 592 drivers/crypto/n2_core.c ent->control |= CONTROL_END_OF_BLOCK;
ent 594 drivers/crypto/n2_core.c if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
ent 837 drivers/crypto/n2_core.c struct cwq_initial_entry *ent;
ent 841 drivers/crypto/n2_core.c ent = spu_queue_alloc(qp, cp->arr_len);
ent 842 drivers/crypto/n2_core.c if (!ent) {
ent 850 drivers/crypto/n2_core.c ent->control = control_word_base(cp->arr[0].src_len,
ent 855 drivers/crypto/n2_core.c ent->src_addr = cp->arr[0].src_paddr;
ent 856 drivers/crypto/n2_core.c ent->auth_key_addr = 0UL;
ent 857 drivers/crypto/n2_core.c ent->auth_iv_addr = 0UL;
ent 858 drivers/crypto/n2_core.c ent->final_auth_state_addr = 0UL;
ent 859 drivers/crypto/n2_core.c ent->enc_key_addr = __pa(&ctx->key);
ent 860 drivers/crypto/n2_core.c ent->enc_iv_addr = cp->iv_paddr;
ent 861 drivers/crypto/n2_core.c ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
ent 864 drivers/crypto/n2_core.c ent = spu_queue_next(qp, ent);
ent 866 drivers/crypto/n2_core.c ent->control = cp->arr[i].src_len - 1;
ent 867 drivers/crypto/n2_core.c ent->src_addr = cp->arr[i].src_paddr;
ent 868 drivers/crypto/n2_core.c ent->auth_key_addr = 0UL;
ent 869 drivers/crypto/n2_core.c ent->auth_iv_addr = 0UL;
ent 870 drivers/crypto/n2_core.c ent->final_auth_state_addr = 0UL;
ent 871 drivers/crypto/n2_core.c ent->enc_key_addr = 0UL;
ent 872 drivers/crypto/n2_core.c ent->enc_iv_addr = 0UL;
ent 873 drivers/crypto/n2_core.c ent->dest_addr = 0UL;
ent 875 drivers/crypto/n2_core.c ent->control |= CONTROL_END_OF_BLOCK;
ent 877 drivers/crypto/n2_core.c return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
ent 74 drivers/crypto/qat/qat_c3xxx/adf_drv.c static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
ent 119 drivers/crypto/qat/qat_c3xxx/adf_drv.c static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 129 drivers/crypto/qat/qat_c3xxx/adf_drv.c switch (ent->device) {
ent 133 drivers/crypto/qat/qat_c3xxx/adf_drv.c dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
ent 74 drivers/crypto/qat/qat_c3xxxvf/adf_drv.c static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
ent 120 drivers/crypto/qat/qat_c3xxxvf/adf_drv.c static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 131 drivers/crypto/qat/qat_c3xxxvf/adf_drv.c switch (ent->device) {
ent 135 drivers/crypto/qat/qat_c3xxxvf/adf_drv.c dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
ent 74 drivers/crypto/qat/qat_c62x/adf_drv.c static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
ent 119 drivers/crypto/qat/qat_c62x/adf_drv.c static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 129 drivers/crypto/qat/qat_c62x/adf_drv.c switch (ent->device) {
ent 133 drivers/crypto/qat/qat_c62x/adf_drv.c dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
ent 74 drivers/crypto/qat/qat_c62xvf/adf_drv.c static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
ent 120 drivers/crypto/qat/qat_c62xvf/adf_drv.c static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 131 drivers/crypto/qat/qat_c62xvf/adf_drv.c switch (ent->device) {
ent 135 drivers/crypto/qat/qat_c62xvf/adf_drv.c dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
ent 74 drivers/crypto/qat/qat_dh895xcc/adf_drv.c static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
ent 119 drivers/crypto/qat/qat_dh895xcc/adf_drv.c static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 129 drivers/crypto/qat/qat_dh895xcc/adf_drv.c switch (ent->device) {
ent 133 drivers/crypto/qat/qat_dh895xcc/adf_drv.c dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
ent 74 drivers/crypto/qat/qat_dh895xccvf/adf_drv.c static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
ent 120 drivers/crypto/qat/qat_dh895xccvf/adf_drv.c static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 131 drivers/crypto/qat/qat_dh895xccvf/adf_drv.c switch (ent->device) {
ent 135 drivers/crypto/qat/qat_dh895xccvf/adf_drv.c dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
ent 303 drivers/edac/amd76x_edac.c const struct pci_device_id *ent)
ent 308 drivers/edac/amd76x_edac.c return amd76x_probe1(pdev, ent->driver_data);
ent 1386 drivers/edac/e752x_edac.c static int e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 1394 drivers/edac/e752x_edac.c return e752x_probe1(pdev, ent->driver_data);
ent 529 drivers/edac/e7xxx_edac.c static int e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 535 drivers/edac/e7xxx_edac.c -EIO : e7xxx_probe1(pdev, ent->driver_data);
ent 455 drivers/edac/i3000_edac.c static int i3000_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 464 drivers/edac/i3000_edac.c rc = i3000_probe1(pdev, ent->driver_data);
ent 433 drivers/edac/i3200_edac.c static int i3200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 442 drivers/edac/i3200_edac.c rc = i3200_probe1(pdev, ent->driver_data);
ent 352 drivers/edac/i82443bxgx_edac.c const struct pci_device_id *ent)
ent 359 drivers/edac/i82443bxgx_edac.c rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
ent 256 drivers/edac/i82860_edac.c const struct pci_device_id *ent)
ent 266 drivers/edac/i82860_edac.c rc = i82860_probe1(pdev, ent->driver_data);
ent 473 drivers/edac/i82875p_edac.c const struct pci_device_id *ent)
ent 483 drivers/edac/i82875p_edac.c rc = i82875p_probe1(pdev, ent->driver_data);
ent 591 drivers/edac/i82975x_edac.c const struct pci_device_id *ent)
ent 600 drivers/edac/i82975x_edac.c rc = i82975x_probe1(pdev, ent->driver_data);
ent 542 drivers/edac/ie31200_edac.c const struct pci_device_id *ent)
ent 549 drivers/edac/ie31200_edac.c return ie31200_probe1(pdev, ent->driver_data);
ent 180 drivers/edac/pasemi_edac.c const struct pci_device_id *ent)
ent 361 drivers/edac/r82600_edac.c const struct pci_device_id *ent)
ent 366 drivers/edac/r82600_edac.c return r82600_probe1(pdev, ent->driver_data);
ent 472 drivers/edac/thunderx_edac.c struct dentry *ent;
ent 481 drivers/edac/thunderx_edac.c ent = edac_debugfs_create_file(attrs[i]->name, attrs[i]->mode,
ent 484 drivers/edac/thunderx_edac.c if (!ent)
ent 414 drivers/edac/x38_edac.c static int x38_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 423 drivers/edac/x38_edac.c rc = x38_probe1(pdev, ent->driver_data);
ent 3552 drivers/firewire/ohci.c const struct pci_device_id *ent)
ent 180 drivers/gpio/gpio-amd8111.c const struct pci_device_id *ent;
ent 193 drivers/gpio/gpio-amd8111.c ent = pci_match_id(pci_tbl, pdev);
ent 194 drivers/gpio/gpio-amd8111.c if (ent)
ent 828 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c struct dentry *ent, *root = minor->debugfs_root;
ent 832 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c ent = debugfs_create_file(debugfs_regs_names[i],
ent 835 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c if (!i && !IS_ERR_OR_NULL(ent))
ent 836 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c i_size_write(ent->d_inode, adev->rmmio_size);
ent 837 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c adev->debugfs_regs[i] = ent;
ent 1036 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c const struct pci_device_id *ent)
ent 1039 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c unsigned long flags = ent->driver_data;
ent 1109 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c ret = drm_dev_register(dev, ent->driver_data);
ent 931 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c con->ent = debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
ent 941 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (!obj || obj->ent)
ent 950 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c obj->ent = debugfs_create_file(obj->fs_data.debugfs_name,
ent 960 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c if (!obj || !obj->ent)
ent 963 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c debugfs_remove(obj->ent);
ent 964 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c obj->ent = NULL;
ent 977 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c debugfs_remove(con->ent);
ent 980 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c con->ent = NULL;
ent 321 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct dentry *ent;
ent 395 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h struct dentry *ent;
ent 493 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c struct dentry *ent, *root = minor->debugfs_root;
ent 498 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c ent = debugfs_create_file(name,
ent 501 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c if (!ent)
ent 504 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c i_size_write(ent->d_inode, ring->ring_size + 12);
ent 505 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c ring->ent = ent;
ent 513 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c debugfs_remove(ring->ent);
ent 227 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h struct dentry *ent;
ent 2437 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c struct dentry *ent, *root = minor->debugfs_root;
ent 2440 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c ent = debugfs_create_file(
ent 2445 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (IS_ERR(ent))
ent 2446 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c return PTR_ERR(ent);
ent 2448 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
ent 2450 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c i_size_write(ent->d_inode, adev->gmc.gart_size);
ent 2451 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c adev->mman.debugfs_entries[count] = ent;
ent 88 drivers/gpu/drm/ast/ast_drv.c static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 92 drivers/gpu/drm/ast/ast_drv.c return drm_get_pci_dev(pdev, ent, &driver);
ent 105 drivers/gpu/drm/bochs/bochs_drv.c const struct pci_device_id *ent)
ent 529 drivers/gpu/drm/cirrus/cirrus.c const struct pci_device_id *ent)
ent 222 drivers/gpu/drm/drm_pci.c int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
ent 248 drivers/gpu/drm/drm_pci.c ret = drm_dev_register(dev, ent->driver_data);
ent 38 drivers/gpu/drm/gma500/psb_drv.c static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 427 drivers/gpu/drm/gma500/psb_drv.c static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 429 drivers/gpu/drm/gma500/psb_drv.c return drm_get_pci_dev(pdev, ent, &driver);
ent 329 drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c const struct pci_device_id *ent)
ent 1507 drivers/gpu/drm/i915/i915_drv.c i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 1510 drivers/gpu/drm/i915/i915_drv.c (struct intel_device_info *)ent->driver_data;
ent 1562 drivers/gpu/drm/i915/i915_drv.c int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 1565 drivers/gpu/drm/i915/i915_drv.c (struct intel_device_info *)ent->driver_data;
ent 1569 drivers/gpu/drm/i915/i915_drv.c dev_priv = i915_driver_create(pdev, ent);
ent 2222 drivers/gpu/drm/i915/i915_drv.h int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
ent 930 drivers/gpu/drm/i915/i915_pci.c static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 933 drivers/gpu/drm/i915/i915_pci.c (struct intel_device_info *) ent->driver_data;
ent 961 drivers/gpu/drm/i915/i915_pci.c err = i915_driver_probe(pdev, ent);
ent 48 drivers/gpu/drm/mgag200/mgag200_drv.c static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 52 drivers/gpu/drm/mgag200/mgag200_drv.c return drm_get_pci_dev(pdev, ent, &driver);
ent 27 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h u32 nvbios_M0205Se(struct nvkm_bios *, int ent, int idx, u8 *ver, u8 *hdr);
ent 28 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h u32 nvbios_M0205Sp(struct nvkm_bios *, int ent, int idx, u8 *ver, u8 *hdr,
ent 25 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h u32 nvbios_M0209Se(struct nvkm_bios *, int ent, int idx, u8 *ver, u8 *hdr);
ent 26 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h u32 nvbios_M0209Sp(struct nvkm_bios *, int ent, int idx, u8 *ver, u8 *hdr,
ent 59 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h u16 dcb_table(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
ent 47 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h u16 dcb_gpio_entry(struct nvkm_bios *, int idx, int ent, u8 *ver, u8 *len);
ent 48 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h u16 dcb_gpio_parse(struct nvkm_bios *, int idx, int ent, u8 *ver, u8 *len,
ent 1311 drivers/gpu/drm/nouveau/nouveau_bios.c u8 *ent = dcb + 8 + (idx * 8);
ent 1312 drivers/gpu/drm/nouveau/nouveau_bios.c if (i2c && ent < i2c)
ent 1313 drivers/gpu/drm/nouveau/nouveau_bios.c return ent;
ent 1317 drivers/gpu/drm/nouveau/nouveau_bios.c u8 *ent = dcb + 4 + (idx * 10);
ent 1318 drivers/gpu/drm/nouveau/nouveau_bios.c if (i2c && ent < i2c)
ent 1319 drivers/gpu/drm/nouveau/nouveau_bios.c return ent;
ent 61 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c struct gf100_gr_init *ent = &init[i];
ent 64 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c ent->addr = av->addr;
ent 65 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c ent->data = av->data;
ent 66 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c ent->count = 1;
ent 67 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c ent->pitch = 1;
ent 110 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c struct gf100_gr_init *ent = &init[i];
ent 113 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c ent->addr = av->addr;
ent 114 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c ent->data = av->data;
ent 115 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c ent->count = 1;
ent 116 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c ent->pitch = 1;
ent 108 drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c nvbios_M0205Se(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
ent 112 drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c u32 data = nvbios_M0205Ee(bios, ent, ver, hdr, &cnt, &len);
ent 122 drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c nvbios_M0205Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
ent 125 drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0205.c u32 data = nvbios_M0205Se(bios, ent, idx, ver, hdr);
ent 95 drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c nvbios_M0209Se(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr)
ent 99 drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c u32 data = nvbios_M0209Ee(bios, ent, ver, hdr, &cnt, &len);
ent 109 drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c nvbios_M0209Sp(struct nvkm_bios *bios, int ent, int idx, u8 *ver, u8 *hdr,
ent 114 drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c u32 data = nvbios_M0209Ep(bios, ent, ver, hdr, &cnt, &len, &M0209E);
ent 116 drivers/gpu/drm/nouveau/nvkm/subdev/bios/M0209.c u32 i, data = nvbios_M0209Se(bios, ent, idx, ver, hdr);
ent 61 drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c dcb_gpio_entry(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len)
ent 71 drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c if (gpio && ent < cnt)
ent 72 drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c return gpio + hdr + (ent * *len);
ent 78 drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c dcb_gpio_parse(struct nvkm_bios *bios, int idx, int ent, u8 *ver, u8 *len,
ent 81 drivers/gpu/drm/nouveau/nvkm/subdev/bios/gpio.c u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
ent 75 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
ent 76 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c if (ent) {
ent 78 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c u32 ent_value = nvbios_rd32(bios, ent);
ent 88 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->type = nvbios_rd08(bios, ent + 0x03);
ent 90 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->type = nvbios_rd08(bios, ent + 0x03) & 0x07;
ent 102 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->drive = nvbios_rd08(bios, ent + 0);
ent 103 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->sense = nvbios_rd08(bios, ent + 1);
ent 106 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->drive = nvbios_rd08(bios, ent + 1);
ent 109 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->drive = nvbios_rd08(bios, ent + 0) & 0x0f;
ent 110 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c if (nvbios_rd08(bios, ent + 1) & 0x01)
ent 111 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->share = nvbios_rd08(bios, ent + 1) >> 1;
ent 114 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->auxch = nvbios_rd08(bios, ent + 0) & 0x0f;
ent 115 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c if (nvbios_rd08(bios, ent + 1) & 0x01)
ent 119 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->drive = (nvbios_rd16(bios, ent + 0) & 0x01f) >> 0;
ent 122 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->auxch = (nvbios_rd16(bios, ent + 0) & 0x3e0) >> 5;
ent 141 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c ent = 0x0048;
ent 143 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c ent = 0x0036 + bios->bmp_offset;
ent 146 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->drive = nvbios_rd08(bios, ent + 4);
ent 148 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->sense = nvbios_rd08(bios, ent + 5);
ent 152 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->drive = nvbios_rd08(bios, ent + 6);
ent 154 drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c info->sense = nvbios_rd08(bios, ent + 7);
ent 33 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c int ent = -1;
ent 35 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gf119.c while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
ent 33 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c int ent = -1;
ent 35 drivers/gpu/drm/nouveau/nvkm/subdev/gpio/nv50.c while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
ent 72 drivers/gpu/drm/qxl/qxl_drv.c qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 114 drivers/gpu/drm/qxl/qxl_drv.c ret = drm_dev_register(&qdev->ddev, ent->driver_data);
ent 326 drivers/gpu/drm/radeon/radeon_drv.c const struct pci_device_id *ent)
ent 332 drivers/gpu/drm/radeon/radeon_drv.c if (!ent)
ent 335 drivers/gpu/drm/radeon/radeon_drv.c flags = ent->driver_data;
ent 394 drivers/gpu/drm/radeon/radeon_drv.c ret = drm_dev_register(dev, ent->driver_data);
ent 39 drivers/gpu/drm/vboxvideo/vbox_drv.c static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 1428 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 1430 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c return drm_get_pci_dev(pdev, ent, &driver);
ent 118 drivers/hid/intel-ish-hid/ipc/pci-ish.c static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 118 drivers/i2c/busses/i2c-icy.c const struct zorro_device_id *ent)
ent 102 drivers/i2c/busses/i2c-pxa-pci.c const struct pci_device_id *ent)
ent 150 drivers/i2c/busses/i2c-thunderx-pcidrv.c const struct pci_device_id *ent)
ent 1066 drivers/ide/ide-cd.c struct atapi_toc_entry ent;
ent 1173 drivers/ide/ide-cd.c toc->ent[i].track = bcd2bin(toc->ent[i].track);
ent 1174 drivers/ide/ide-cd.c msf_from_bcd(&toc->ent[i].addr.msf);
ent 1176 drivers/ide/ide-cd.c toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute,
ent 1177 drivers/ide/ide-cd.c toc->ent[i].addr.msf.second,
ent 1178 drivers/ide/ide-cd.c toc->ent[i].addr.msf.frame);
ent 1188 drivers/ide/ide-cd.c toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba);
ent 1202 drivers/ide/ide-cd.c msf_from_bcd(&ms_tmp.ent.addr.msf);
ent 1203 drivers/ide/ide-cd.c toc->last_session_lba = msf_to_lba(ms_tmp.ent.addr.msf.minute,
ent 1204 drivers/ide/ide-cd.c ms_tmp.ent.addr.msf.second,
ent 1205 drivers/ide/ide-cd.c ms_tmp.ent.addr.msf.frame);
ent 71 drivers/ide/ide-cd.h struct atapi_toc_entry ent[MAX_TRACKS+1];
ent 316 drivers/ide/ide-cd_ioctl.c struct atapi_toc_entry **ent)
ent 335 drivers/ide/ide-cd_ioctl.c *ent = &toc->ent[ntracks];
ent 339 drivers/ide/ide-cd_ioctl.c *ent = &toc->ent[track - toc->hdr.first_track];
ent 476 drivers/ide/ide-proc.c struct proc_dir_entry *ent;
ent 481 drivers/ide/ide-proc.c ent = proc_create_single_data(p->name, p->mode, dir, p->show, data);
ent 482 drivers/ide/ide-proc.c if (!ent) return;
ent 535 drivers/ide/ide-proc.c struct proc_dir_entry *ent;
ent 553 drivers/ide/ide-proc.c ent = proc_symlink(drive->name, proc_ide_root, name);
ent 554 drivers/ide/ide-proc.c if (!ent) return;
ent 226 drivers/infiniband/hw/bnxt_re/main.c static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
ent 234 drivers/infiniband/hw/bnxt_re/main.c if (!ent) {
ent 248 drivers/infiniband/hw/bnxt_re/main.c rdev->msix_entries[indx].vector = ent[indx].vector;
ent 502 drivers/infiniband/hw/efa/efa_main.c static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 1670 drivers/infiniband/hw/hfi1/hfi.h static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
ent 1673 drivers/infiniband/hw/hfi1/hfi.h u16 ment = ent & PKEY_LOW_15_MASK;
ent 1682 drivers/infiniband/hw/hfi1/hfi.h return !!(ent & PKEY_MEMBER_MASK);
ent 1637 drivers/infiniband/hw/hfi1/init.c static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ent 1647 drivers/infiniband/hw/hfi1/init.c if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
ent 1648 drivers/infiniband/hw/hfi1/init.c ent->device == PCI_DEVICE_ID_INTEL1)) {
ent 1650 drivers/infiniband/hw/hfi1/init.c ent->device);
ent 500 drivers/infiniband/hw/hfi1/trace.c u8 hfi1_trace_get_tid_ctrl(u32 ent)
ent 502 drivers/infiniband/hw/hfi1/trace.c return EXP_TID_GET(ent, CTRL);
ent 505 drivers/infiniband/hw/hfi1/trace.c u16 hfi1_trace_get_tid_len(u32 ent)
ent 507 drivers/infiniband/hw/hfi1/trace.c return EXP_TID_GET(ent, LEN);
ent 510 drivers/infiniband/hw/hfi1/trace.c u16 hfi1_trace_get_tid_idx(u32 ent)
ent 512 drivers/infiniband/hw/hfi1/trace.c return EXP_TID_GET(ent, IDX);
ent 24 drivers/infiniband/hw/hfi1/trace_tid.h u8 hfi1_trace_get_tid_ctrl(u32 ent);
ent 25 drivers/infiniband/hw/hfi1/trace_tid.h u16 hfi1_trace_get_tid_len(u32 ent);
ent 26 drivers/infiniband/hw/hfi1/trace_tid.h u16 hfi1_trace_get_tid_idx(u32 ent);
ent 683 drivers/infiniband/hw/hfi1/trace_tid.h TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
ent 684 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(qp, index, ent),
ent 697 drivers/infiniband/hw/hfi1/trace_tid.h __entry->ctrl = hfi1_trace_get_tid_ctrl(ent);
ent 698 drivers/infiniband/hw/hfi1/trace_tid.h __entry->idx = hfi1_trace_get_tid_idx(ent);
ent 699 drivers/infiniband/hw/hfi1/trace_tid.h __entry->len = hfi1_trace_get_tid_len(ent);
ent 720 drivers/infiniband/hw/hfi1/trace_tid.h TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
ent 721 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(qp, index, ent)
ent 726 drivers/infiniband/hw/hfi1/trace_tid.h TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
ent 727 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(qp, index, ent)
ent 1125 drivers/infiniband/hw/hfi1/verbs.c static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
ent 1128 drivers/infiniband/hw/hfi1/verbs.c u16 mentry = ent & PKEY_LOW_15_MASK;
ent 1137 drivers/infiniband/hw/hfi1/verbs.c return !!(ent & PKEY_MEMBER_MASK);
ent 170 drivers/infiniband/hw/mlx4/cm.c struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
ent 172 drivers/infiniband/hw/mlx4/cm.c struct mlx4_ib_dev *dev = ent->dev;
ent 177 drivers/infiniband/hw/mlx4/cm.c if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
ent 179 drivers/infiniband/hw/mlx4/cm.c found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
ent 180 drivers/infiniband/hw/mlx4/cm.c if (found_ent && found_ent == ent)
ent 184 drivers/infiniband/hw/mlx4/cm.c list_del(&ent->list);
ent 186 drivers/infiniband/hw/mlx4/cm.c kfree(ent);
ent 193 drivers/infiniband/hw/mlx4/cm.c struct id_map_entry *ent;
ent 197 drivers/infiniband/hw/mlx4/cm.c ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
ent 198 drivers/infiniband/hw/mlx4/cm.c if (ent) {
ent 202 drivers/infiniband/hw/mlx4/cm.c rb_replace_node(&ent->node, &new->node, sl_id_map);
ent 209 drivers/infiniband/hw/mlx4/cm.c ent = rb_entry(parent, struct id_map_entry, node);
ent 211 drivers/infiniband/hw/mlx4/cm.c if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
ent 225 drivers/infiniband/hw/mlx4/cm.c struct id_map_entry *ent;
ent 228 drivers/infiniband/hw/mlx4/cm.c ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
ent 229 drivers/infiniband/hw/mlx4/cm.c if (!ent)
ent 232 drivers/infiniband/hw/mlx4/cm.c ent->sl_cm_id = sl_cm_id;
ent 233 drivers/infiniband/hw/mlx4/cm.c ent->slave_id = slave_id;
ent 234 drivers/infiniband/hw/mlx4/cm.c ent->scheduled_delete = 0;
ent 235 drivers/infiniband/hw/mlx4/cm.c ent->dev = to_mdev(ibdev);
ent 236 drivers/infiniband/hw/mlx4/cm.c INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
ent 238 drivers/infiniband/hw/mlx4/cm.c ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
ent 242 drivers/infiniband/hw/mlx4/cm.c sl_id_map_add(ibdev, ent);
ent 243 drivers/infiniband/hw/mlx4/cm.c list_add_tail(&ent->list, &sriov->cm_list);
ent 245 drivers/infiniband/hw/mlx4/cm.c return ent;
ent 249 drivers/infiniband/hw/mlx4/cm.c kfree(ent);
ent 257 drivers/infiniband/hw/mlx4/cm.c struct id_map_entry *ent;
ent 262 drivers/infiniband/hw/mlx4/cm.c ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
ent 263 drivers/infiniband/hw/mlx4/cm.c if (ent)
ent 264 drivers/infiniband/hw/mlx4/cm.c *pv_cm_id = (int) ent->pv_cm_id;
ent 266 drivers/infiniband/hw/mlx4/cm.c ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
ent 269 drivers/infiniband/hw/mlx4/cm.c return ent;
ent 409 drivers/infiniband/hw/mlx4/cm.c struct id_map_entry *ent =
ent 413 drivers/infiniband/hw/mlx4/cm.c rb_erase(&ent->node, sl_id_map);
ent 414 drivers/infiniband/hw/mlx4/cm.c xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
ent 421 drivers/infiniband/hw/mlx4/cm.c struct id_map_entry *ent =
ent 424 drivers/infiniband/hw/mlx4/cm.c if (ent->slave_id == slave)
ent 425 drivers/infiniband/hw/mlx4/cm.c list_move_tail(&ent->list, &lh);
ent 693 drivers/infiniband/hw/mlx5/mlx5_ib.h struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
ent 1257 drivers/infiniband/hw/mlx5/mlx5_ib.h void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
ent 1274 drivers/infiniband/hw/mlx5/mlx5_ib.h static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
ent 75 drivers/infiniband/hw/mlx5/mr.c if (order < cache->ent[0].order)
ent 78 drivers/infiniband/hw/mlx5/mr.c return order - cache->ent[0].order;
ent 94 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c];
ent 100 drivers/infiniband/hw/mlx5/mr.c spin_lock_irqsave(&ent->lock, flags);
ent 101 drivers/infiniband/hw/mlx5/mr.c ent->pending--;
ent 102 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irqrestore(&ent->lock, flags);
ent 119 drivers/infiniband/hw/mlx5/mr.c spin_lock_irqsave(&ent->lock, flags);
ent 120 drivers/infiniband/hw/mlx5/mr.c list_add_tail(&mr->list, &ent->head);
ent 121 drivers/infiniband/hw/mlx5/mr.c ent->cur++;
ent 122 drivers/infiniband/hw/mlx5/mr.c ent->size++;
ent 123 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irqrestore(&ent->lock, flags);
ent 132 drivers/infiniband/hw/mlx5/mr.c if (!completion_done(&ent->compl))
ent 133 drivers/infiniband/hw/mlx5/mr.c complete(&ent->compl);
ent 139 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c];
ent 153 drivers/infiniband/hw/mlx5/mr.c if (ent->pending >= MAX_PENDING_REG_MR) {
ent 163 drivers/infiniband/hw/mlx5/mr.c mr->order = ent->order;
ent 169 drivers/infiniband/hw/mlx5/mr.c MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
ent 171 drivers/infiniband/hw/mlx5/mr.c (ent->access_mode >> 2) & 0x7);
ent 174 drivers/infiniband/hw/mlx5/mr.c MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
ent 175 drivers/infiniband/hw/mlx5/mr.c MLX5_SET(mkc, mkc, log_page_size, ent->page);
ent 177 drivers/infiniband/hw/mlx5/mr.c spin_lock_irq(&ent->lock);
ent 178 drivers/infiniband/hw/mlx5/mr.c ent->pending++;
ent 179 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irq(&ent->lock);
ent 185 drivers/infiniband/hw/mlx5/mr.c spin_lock_irq(&ent->lock);
ent 186 drivers/infiniband/hw/mlx5/mr.c ent->pending--;
ent 187 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irq(&ent->lock);
ent 201 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c];
ent 208 drivers/infiniband/hw/mlx5/mr.c spin_lock_irq(&ent->lock);
ent 209 drivers/infiniband/hw/mlx5/mr.c if (list_empty(&ent->head)) {
ent 210 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irq(&ent->lock);
ent 213 drivers/infiniband/hw/mlx5/mr.c mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
ent 215 drivers/infiniband/hw/mlx5/mr.c ent->cur--;
ent 216 drivers/infiniband/hw/mlx5/mr.c ent->size--;
ent 217 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irq(&ent->lock);
ent 233 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = filp->private_data;
ent 234 drivers/infiniband/hw/mlx5/mr.c struct mlx5_ib_dev *dev = ent->dev;
ent 244 drivers/infiniband/hw/mlx5/mr.c c = order2idx(dev, ent->order);
ent 249 drivers/infiniband/hw/mlx5/mr.c if (var < ent->limit)
ent 252 drivers/infiniband/hw/mlx5/mr.c if (var > ent->size) {
ent 254 drivers/infiniband/hw/mlx5/mr.c err = add_keys(dev, c, var - ent->size);
ent 260 drivers/infiniband/hw/mlx5/mr.c } else if (var < ent->size) {
ent 261 drivers/infiniband/hw/mlx5/mr.c remove_keys(dev, c, ent->size - var);
ent 270 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = filp->private_data;
ent 274 drivers/infiniband/hw/mlx5/mr.c err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
ent 291 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = filp->private_data;
ent 292 drivers/infiniband/hw/mlx5/mr.c struct mlx5_ib_dev *dev = ent->dev;
ent 302 drivers/infiniband/hw/mlx5/mr.c c = order2idx(dev, ent->order);
ent 307 drivers/infiniband/hw/mlx5/mr.c if (var > ent->size)
ent 310 drivers/infiniband/hw/mlx5/mr.c ent->limit = var;
ent 312 drivers/infiniband/hw/mlx5/mr.c if (ent->cur < ent->limit) {
ent 313 drivers/infiniband/hw/mlx5/mr.c
err = add_keys(dev, c, 2 * ent->limit - ent->cur); ent 324 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = filp->private_data; ent 328 drivers/infiniband/hw/mlx5/mr.c err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); ent 347 drivers/infiniband/hw/mlx5/mr.c if (cache->ent[i].cur < cache->ent[i].limit) ent 354 drivers/infiniband/hw/mlx5/mr.c static void __cache_work_func(struct mlx5_cache_ent *ent) ent 356 drivers/infiniband/hw/mlx5/mr.c struct mlx5_ib_dev *dev = ent->dev; ent 358 drivers/infiniband/hw/mlx5/mr.c int i = order2idx(dev, ent->order); ent 364 drivers/infiniband/hw/mlx5/mr.c ent = &dev->cache.ent[i]; ent 365 drivers/infiniband/hw/mlx5/mr.c if (ent->cur < 2 * ent->limit && !dev->fill_delay) { ent 367 drivers/infiniband/hw/mlx5/mr.c if (ent->cur < 2 * ent->limit) { ent 371 drivers/infiniband/hw/mlx5/mr.c queue_delayed_work(cache->wq, &ent->dwork, ent 376 drivers/infiniband/hw/mlx5/mr.c queue_delayed_work(cache->wq, &ent->dwork, ent 379 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); ent 382 drivers/infiniband/hw/mlx5/mr.c } else if (ent->cur > 2 * ent->limit) { ent 398 drivers/infiniband/hw/mlx5/mr.c if (ent->cur > ent->limit) ent 399 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); ent 401 drivers/infiniband/hw/mlx5/mr.c queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); ent 408 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent; ent 410 drivers/infiniband/hw/mlx5/mr.c ent = container_of(work, struct mlx5_cache_ent, dwork.work); ent 411 drivers/infiniband/hw/mlx5/mr.c __cache_work_func(ent); ent 416 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent; ent 418 drivers/infiniband/hw/mlx5/mr.c ent = container_of(work, struct mlx5_cache_ent, work); ent 419 drivers/infiniband/hw/mlx5/mr.c __cache_work_func(ent); ent 425 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent; ent 434 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[entry]; ent 436 drivers/infiniband/hw/mlx5/mr.c spin_lock_irq(&ent->lock); ent 437 drivers/infiniband/hw/mlx5/mr.c if (list_empty(&ent->head)) { ent 438 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irq(&ent->lock); ent 444 drivers/infiniband/hw/mlx5/mr.c wait_for_completion(&ent->compl); ent 446 drivers/infiniband/hw/mlx5/mr.c mr = list_first_entry(&ent->head, struct mlx5_ib_mr, ent 449 drivers/infiniband/hw/mlx5/mr.c ent->cur--; ent 450 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irq(&ent->lock); ent 451 drivers/infiniband/hw/mlx5/mr.c if (ent->cur < ent->limit) ent 452 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); ent 462 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent; ent 475 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[i]; ent 477 drivers/infiniband/hw/mlx5/mr.c mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i); ent 479 drivers/infiniband/hw/mlx5/mr.c spin_lock_irq(&ent->lock); ent 480 drivers/infiniband/hw/mlx5/mr.c if (!list_empty(&ent->head)) { ent 481 drivers/infiniband/hw/mlx5/mr.c mr = list_first_entry(&ent->head, struct mlx5_ib_mr, ent 484 drivers/infiniband/hw/mlx5/mr.c ent->cur--; ent 485 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irq(&ent->lock); ent 486 drivers/infiniband/hw/mlx5/mr.c if (ent->cur < ent->limit) ent 487 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); ent 490 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irq(&ent->lock); ent 492 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); ent 496 drivers/infiniband/hw/mlx5/mr.c cache->ent[c].miss++; ent 504 
drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent; ent 517 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[c]; ent 518 drivers/infiniband/hw/mlx5/mr.c if (ent->cur < ent->limit) ent 519 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); ent 523 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[c]; ent 524 drivers/infiniband/hw/mlx5/mr.c spin_lock_irq(&ent->lock); ent 525 drivers/infiniband/hw/mlx5/mr.c list_add_tail(&mr->list, &ent->head); ent 526 drivers/infiniband/hw/mlx5/mr.c ent->cur++; ent 527 drivers/infiniband/hw/mlx5/mr.c if (ent->cur > 2 * ent->limit) ent 529 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irq(&ent->lock); ent 532 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); ent 538 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c]; ent 543 drivers/infiniband/hw/mlx5/mr.c cancel_delayed_work(&ent->dwork); ent 545 drivers/infiniband/hw/mlx5/mr.c spin_lock_irq(&ent->lock); ent 546 drivers/infiniband/hw/mlx5/mr.c if (list_empty(&ent->head)) { ent 547 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irq(&ent->lock); ent 550 drivers/infiniband/hw/mlx5/mr.c mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); ent 552 drivers/infiniband/hw/mlx5/mr.c ent->cur--; ent 553 drivers/infiniband/hw/mlx5/mr.c ent->size--; ent 554 drivers/infiniband/hw/mlx5/mr.c spin_unlock_irq(&ent->lock); ent 580 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent; ent 590 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[i]; ent 591 drivers/infiniband/hw/mlx5/mr.c sprintf(ent->name, "%d", ent->order); ent 592 drivers/infiniband/hw/mlx5/mr.c dir = debugfs_create_dir(ent->name, cache->root); ent 593 drivers/infiniband/hw/mlx5/mr.c debugfs_create_file("size", 0600, dir, ent, &size_fops); ent 594 drivers/infiniband/hw/mlx5/mr.c debugfs_create_file("limit", 0600, dir, ent, &limit_fops); ent 595 drivers/infiniband/hw/mlx5/mr.c debugfs_create_u32("cur", 0400, dir, &ent->cur); ent 596 drivers/infiniband/hw/mlx5/mr.c debugfs_create_u32("miss", 0600, dir, &ent->miss); ent 610 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent; ent 623 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[i]; ent 624 drivers/infiniband/hw/mlx5/mr.c INIT_LIST_HEAD(&ent->head); ent 625 drivers/infiniband/hw/mlx5/mr.c spin_lock_init(&ent->lock); ent 626 drivers/infiniband/hw/mlx5/mr.c ent->order = i + 2; ent 627 drivers/infiniband/hw/mlx5/mr.c ent->dev = dev; ent 628 drivers/infiniband/hw/mlx5/mr.c ent->limit = 0; ent 630 drivers/infiniband/hw/mlx5/mr.c init_completion(&ent->compl); ent 631 drivers/infiniband/hw/mlx5/mr.c INIT_WORK(&ent->work, cache_work_func); ent 632 drivers/infiniband/hw/mlx5/mr.c INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); ent 635 drivers/infiniband/hw/mlx5/mr.c mlx5_odp_init_mr_cache_entry(ent); ent 639 drivers/infiniband/hw/mlx5/mr.c if (ent->order > mr_cache_max_order(dev)) ent 642 drivers/infiniband/hw/mlx5/mr.c ent->page = PAGE_SHIFT; ent 643 drivers/infiniband/hw/mlx5/mr.c ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / ent 645 drivers/infiniband/hw/mlx5/mr.c ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; ent 649 drivers/infiniband/hw/mlx5/mr.c ent->limit = dev->mdev->profile->mr_cache[i].limit; ent 651 drivers/infiniband/hw/mlx5/mr.c ent->limit = 0; ent 652 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); ent 1556 drivers/infiniband/hw/mlx5/odp.c void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) ent 1558 drivers/infiniband/hw/mlx5/odp.c if (!(ent->dev->odp_caps.general_caps & 
IB_ODP_SUPPORT_IMPLICIT)) ent 1561 drivers/infiniband/hw/mlx5/odp.c switch (ent->order - 2) { ent 1563 drivers/infiniband/hw/mlx5/odp.c ent->page = PAGE_SHIFT; ent 1564 drivers/infiniband/hw/mlx5/odp.c ent->xlt = MLX5_IMR_MTT_ENTRIES * ent 1567 drivers/infiniband/hw/mlx5/odp.c ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; ent 1568 drivers/infiniband/hw/mlx5/odp.c ent->limit = 0; ent 1572 drivers/infiniband/hw/mlx5/odp.c ent->page = MLX5_KSM_PAGE_SHIFT; ent 1573 drivers/infiniband/hw/mlx5/odp.c ent->xlt = mlx5_imr_ksm_entries * ent 1576 drivers/infiniband/hw/mlx5/odp.c ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM; ent 1577 drivers/infiniband/hw/mlx5/odp.c ent->limit = 0; ent 3433 drivers/infiniband/hw/qib/qib_iba6120.c const struct pci_device_id *ent) ent 3500 drivers/infiniband/hw/qib/qib_iba6120.c ret = qib_pcie_ddinit(dd, pdev, ent); ent 4478 drivers/infiniband/hw/qib/qib_iba7220.c const struct pci_device_id *ent) ent 4546 drivers/infiniband/hw/qib/qib_iba7220.c ret = qib_pcie_ddinit(dd, pdev, ent); ent 7190 drivers/infiniband/hw/qib/qib_iba7322.c const struct pci_device_id *ent) ent 7262 drivers/infiniband/hw/qib/qib_iba7322.c ret = qib_pcie_ddinit(dd, pdev, ent); ent 1394 drivers/infiniband/hw/qib/qib_init.c static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1399 drivers/infiniband/hw/qib/qib_init.c ret = qib_pcie_init(pdev, ent); ent 1407 drivers/infiniband/hw/qib/qib_init.c switch (ent->device) { ent 1410 drivers/infiniband/hw/qib/qib_init.c dd = qib_init_iba6120_funcs(pdev, ent); ent 1414 drivers/infiniband/hw/qib/qib_init.c ent->device); ent 1420 drivers/infiniband/hw/qib/qib_init.c dd = qib_init_iba7220_funcs(pdev, ent); ent 1424 drivers/infiniband/hw/qib/qib_init.c dd = qib_init_iba7322_funcs(pdev, ent); ent 1430 drivers/infiniband/hw/qib/qib_init.c ent->device); ent 64 drivers/infiniband/hw/qib/qib_pcie.c int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent) ent 137 drivers/infiniband/hw/qib/qib_pcie.c const struct pci_device_id *ent) ent 161 drivers/infiniband/hw/qib/qib_pcie.c dd->deviceid = ent->device; /* save for later use */ ent 162 drivers/infiniband/hw/qib/qib_pcie.c dd->vendorid = ent->vendor; ent 43 drivers/input/gameport/emu10k1-gp.c static int emu_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 824 drivers/iommu/arm-smmu-v3.c static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent) ent 829 drivers/iommu/arm-smmu-v3.c queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords); ent 836 drivers/iommu/arm-smmu-v3.c static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) ent 839 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode); ent 841 drivers/iommu/arm-smmu-v3.c switch (ent->opcode) { ent 846 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_PREFETCH_0_SID, ent->prefetch.sid); ent 847 drivers/iommu/arm-smmu-v3.c cmd[1] |= FIELD_PREP(CMDQ_PREFETCH_1_SIZE, ent->prefetch.size); ent 848 drivers/iommu/arm-smmu-v3.c cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK; ent 851 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, ent->cfgi.sid); ent 852 drivers/iommu/arm-smmu-v3.c cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, ent->cfgi.leaf); ent 859 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); ent 860 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); ent 861 drivers/iommu/arm-smmu-v3.c cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); ent 862 drivers/iommu/arm-smmu-v3.c 
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; ent 865 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); ent 866 drivers/iommu/arm-smmu-v3.c cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); ent 867 drivers/iommu/arm-smmu-v3.c cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK; ent 870 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); ent 873 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); ent 876 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid); ent 877 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_ATC_0_GLOBAL, ent->atc.global); ent 878 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SSID, ent->atc.ssid); ent 879 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_ATC_0_SID, ent->atc.sid); ent 880 drivers/iommu/arm-smmu-v3.c cmd[1] |= FIELD_PREP(CMDQ_ATC_1_SIZE, ent->atc.size); ent 881 drivers/iommu/arm-smmu-v3.c cmd[1] |= ent->atc.addr & CMDQ_ATC_1_ADDR_MASK; ent 884 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_0_SSV, ent->substream_valid); ent 885 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SSID, ent->pri.ssid); ent 886 drivers/iommu/arm-smmu-v3.c cmd[0] |= FIELD_PREP(CMDQ_PRI_0_SID, ent->pri.sid); ent 887 drivers/iommu/arm-smmu-v3.c cmd[1] |= FIELD_PREP(CMDQ_PRI_1_GRPID, ent->pri.grpid); ent 888 drivers/iommu/arm-smmu-v3.c switch (ent->pri.resp) { ent 896 drivers/iommu/arm-smmu-v3.c cmd[1] |= FIELD_PREP(CMDQ_PRI_1_RESP, ent->pri.resp); ent 899 drivers/iommu/arm-smmu-v3.c if (ent->sync.msiaddr) { ent 901 drivers/iommu/arm-smmu-v3.c cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK; ent 919 drivers/iommu/arm-smmu-v3.c struct arm_smmu_cmdq_ent ent = { ent 929 drivers/iommu/arm-smmu-v3.c ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) * ent 933 drivers/iommu/arm-smmu-v3.c arm_smmu_cmdq_build_cmd(cmd, &ent); ent 1428 drivers/iommu/arm-smmu-v3.c struct arm_smmu_cmdq_ent *ent) ent 1432 drivers/iommu/arm-smmu-v3.c if (arm_smmu_cmdq_build_cmd(cmd, ent)) { ent 1434 drivers/iommu/arm-smmu-v3.c ent->opcode); ent 97 drivers/iommu/exynos-iommu.c #define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT) ent 391 drivers/iommu/exynos-iommu.c sysmmu_pte_t *ent; ent 396 drivers/iommu/exynos-iommu.c ent = section_entry(phys_to_virt(data->pgtable), fault_addr); ent 397 drivers/iommu/exynos-iommu.c dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent); ent 398 drivers/iommu/exynos-iommu.c if (lv1ent_page(ent)) { ent 399 drivers/iommu/exynos-iommu.c ent = page_entry(ent, fault_addr); ent 400 drivers/iommu/exynos-iommu.c dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); ent 724 drivers/iommu/exynos-iommu.c static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val) ent 726 drivers/iommu/exynos-iommu.c dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent), ent 728 drivers/iommu/exynos-iommu.c *ent = cpu_to_le32(val); ent 729 drivers/iommu/exynos-iommu.c dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent), ent 1136 drivers/iommu/exynos-iommu.c sysmmu_pte_t *ent; ent 1144 drivers/iommu/exynos-iommu.c ent = section_entry(domain->pgtable, iova); ent 1146 drivers/iommu/exynos-iommu.c if (lv1ent_section(ent)) { ent 1153 drivers/iommu/exynos-iommu.c update_pte(ent, ZERO_LV2LINK); ent 1158 drivers/iommu/exynos-iommu.c if (unlikely(lv1ent_fault(ent))) { ent 1166 drivers/iommu/exynos-iommu.c ent = page_entry(ent, iova); ent 1168 drivers/iommu/exynos-iommu.c if 
(unlikely(lv2ent_fault(ent))) { ent 1173 drivers/iommu/exynos-iommu.c if (lv2ent_small(ent)) { ent 1174 drivers/iommu/exynos-iommu.c update_pte(ent, 0); ent 1186 drivers/iommu/exynos-iommu.c dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), ent 1187 drivers/iommu/exynos-iommu.c sizeof(*ent) * SPAGES_PER_LPAGE, ent 1189 drivers/iommu/exynos-iommu.c memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE); ent 1190 drivers/iommu/exynos-iommu.c dma_sync_single_for_device(dma_dev, virt_to_phys(ent), ent 1191 drivers/iommu/exynos-iommu.c sizeof(*ent) * SPAGES_PER_LPAGE, ent 1087 drivers/isdn/hardware/mISDN/avmfritz.c fritzpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1109 drivers/isdn/hardware/mISDN/avmfritz.c (char *) ent->driver_data, pci_name(pdev)); ent 4342 drivers/isdn/hardware/mISDN/hfcmulti.c const struct pci_device_id *ent) ent 4344 drivers/isdn/hardware/mISDN/hfcmulti.c struct hm_map *m = (struct hm_map *)ent->driver_data; ent 4354 drivers/isdn/hardware/mISDN/hfcmulti.c if (ent->vendor == PCI_VENDOR_ID_DIGIUM && ent 4355 drivers/isdn/hardware/mISDN/hfcmulti.c ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) { ent 4988 drivers/isdn/hardware/mISDN/hfcmulti.c const struct pci_device_id *ent) ent 5106 drivers/isdn/hardware/mISDN/hfcmulti.c if (pdev && ent) ent 5108 drivers/isdn/hardware/mISDN/hfcmulti.c ret_err = setup_pci(hc, pdev, ent); ent 5428 drivers/isdn/hardware/mISDN/hfcmulti.c hfcmulti_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 5430 drivers/isdn/hardware/mISDN/hfcmulti.c struct hm_map *m = (struct hm_map *)ent->driver_data; ent 5433 drivers/isdn/hardware/mISDN/hfcmulti.c if (m == NULL && ent->vendor == PCI_VENDOR_ID_CCD && ( ent 5434 drivers/isdn/hardware/mISDN/hfcmulti.c ent->device == PCI_DEVICE_ID_CCD_HFC4S || ent 5435 drivers/isdn/hardware/mISDN/hfcmulti.c ent->device == PCI_DEVICE_ID_CCD_HFC8S || ent 5436 drivers/isdn/hardware/mISDN/hfcmulti.c ent->device == PCI_DEVICE_ID_CCD_HFCE1)) { ent 5446 drivers/isdn/hardware/mISDN/hfcmulti.c ret = hfcmulti_init(m, pdev, ent); ent 2210 drivers/isdn/hardware/mISDN/hfcpci.c hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2214 drivers/isdn/hardware/mISDN/hfcpci.c struct _hfc_map *m = (struct _hfc_map *)ent->driver_data; ent 1066 drivers/isdn/hardware/mISDN/mISDNinfineon.c inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1082 drivers/isdn/hardware/mISDN/mISDNinfineon.c card->ci = get_card_info(ent->driver_data); ent 1100 drivers/isdn/hardware/mISDN/mISDNinfineon.c } else if (ent->driver_data == INF_SCT_1) { ent 1049 drivers/isdn/hardware/mISDN/netjet.c nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 442 drivers/isdn/hardware/mISDN/speedfax.c sfaxpci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 459 drivers/isdn/hardware/mISDN/speedfax.c (char *)ent->driver_data, pci_name(pdev)); ent 1338 drivers/isdn/hardware/mISDN/w6692.c w6692_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1342 drivers/isdn/hardware/mISDN/w6692.c struct w6692map *m = (struct w6692map *)ent->driver_data; ent 680 drivers/macintosh/macio_asic.c static int macio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 685 drivers/macintosh/macio_asic.c if (ent->vendor != PCI_VENDOR_ID_APPLE) ent 956 drivers/md/raid5.c struct r5pending_data *ent; ent 959 drivers/md/raid5.c ent = list_first_entry(&conf->free_list, struct r5pending_data, ent 961 drivers/md/raid5.c list_move_tail(&ent->sibling, &conf->pending_list); ent 962 
drivers/md/raid5.c ent->sector = sector; ent 963 drivers/md/raid5.c bio_list_init(&ent->bios); ent 964 drivers/md/raid5.c bio_list_merge(&ent->bios, bios); ent 341 drivers/media/common/saa7146/saa7146_core.c static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent) ent 343 drivers/media/common/saa7146/saa7146_core.c struct saa7146_pci_extension_data *pci_ext = (struct saa7146_pci_extension_data *)ent->driver_data; ent 101 drivers/media/mc/mc-device.c struct media_entity *ent; ent 103 drivers/media/mc/mc-device.c ent = find_entity(mdev, entd->id); ent 104 drivers/media/mc/mc-device.c if (ent == NULL) ent 109 drivers/media/mc/mc-device.c entd->id = media_entity_id(ent); ent 110 drivers/media/mc/mc-device.c if (ent->name) ent 111 drivers/media/mc/mc-device.c strscpy(entd->name, ent->name, sizeof(entd->name)); ent 112 drivers/media/mc/mc-device.c entd->type = ent->function; ent 114 drivers/media/mc/mc-device.c entd->flags = ent->flags; ent 116 drivers/media/mc/mc-device.c entd->pads = ent->num_pads; ent 117 drivers/media/mc/mc-device.c entd->links = ent->num_links - ent->num_backlinks; ent 130 drivers/media/mc/mc-device.c if (ent->function < MEDIA_ENT_F_OLD_BASE || ent 131 drivers/media/mc/mc-device.c ent->function > MEDIA_ENT_F_TUNER) { ent 132 drivers/media/mc/mc-device.c if (is_media_entity_v4l2_subdev(ent)) ent 134 drivers/media/mc/mc-device.c else if (ent->function != MEDIA_ENT_F_IO_V4L) ent 138 drivers/media/mc/mc-device.c memcpy(&entd->raw, &ent->info, sizeof(ent->info)); ent 341 drivers/media/pci/b2c2/flexcop-pci.c const struct pci_device_id *ent) ent 970 drivers/media/pci/dm1105/dm1105.c const struct pci_device_id *ent) ent 1567 drivers/media/pci/meye/meye.c static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) ent 577 drivers/media/pci/pluto2/pluto2.c static int pluto2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1334 drivers/media/pci/pt1/pt1.c static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 696 drivers/media/pci/pt3/pt3.c static int pt3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 874 drivers/media/pci/saa7134/saa7134-core.c struct media_entity *ent = &dev->input_ent[i]; ent 884 drivers/media/pci/saa7134/saa7134-core.c ent->name = saa7134_input_name[in->type]; ent 885 drivers/media/pci/saa7134/saa7134-core.c ent->flags = MEDIA_ENT_FL_CONNECTOR; ent 895 drivers/media/pci/saa7134/saa7134-core.c ent->function = MEDIA_ENT_F_CONN_COMPOSITE; ent 900 drivers/media/pci/saa7134/saa7134-core.c ent->function = MEDIA_ENT_F_CONN_SVIDEO; ent 910 drivers/media/pci/saa7134/saa7134-core.c ent->function = MEDIA_ENT_F_CONN_RF; ent 914 drivers/media/pci/saa7134/saa7134-core.c ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]); ent 918 drivers/media/pci/saa7134/saa7134-core.c ret = media_device_register_entity(dev->media_dev, ent); ent 926 drivers/media/pci/saa7134/saa7134-core.c struct media_entity *ent = &dev->input_ent[i]; ent 928 drivers/media/pci/saa7134/saa7134-core.c ent->name = saa7134_input_name[in->type]; ent 929 drivers/media/pci/saa7134/saa7134-core.c ent->flags = MEDIA_ENT_FL_CONNECTOR; ent 931 drivers/media/pci/saa7134/saa7134-core.c ent->function = MEDIA_ENT_F_CONN_RF; ent 933 drivers/media/pci/saa7134/saa7134-core.c ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]); ent 937 drivers/media/pci/saa7134/saa7134-core.c ret = media_device_register_entity(dev->media_dev, ent); ent 961 drivers/media/pci/sta2x11/sta2x11_vip.c const struct pci_device_id *ent) ent 
351 drivers/media/platform/vimc/vimc-capture.c media_entity_cleanup(ved->ent); ent 460 drivers/media/platform/vimc/vimc-capture.c vcap->ved.ent = &vcap->vdev.entity; ent 221 drivers/media/platform/vimc/vimc-common.c int vimc_pipeline_s_stream(struct media_entity *ent, int enable) ent 228 drivers/media/platform/vimc/vimc-common.c for (i = 0; i < ent->num_pads; i++) { ent 229 drivers/media/platform/vimc/vimc-common.c if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE) ent 233 drivers/media/platform/vimc/vimc-common.c pad = media_entity_remote_pad(&ent->pads[i]); ent 384 drivers/media/platform/vimc/vimc-common.c ved->ent = &sd->entity; ent 426 drivers/media/platform/vimc/vimc-common.c media_entity_cleanup(ved->ent); ent 106 drivers/media/platform/vimc/vimc-common.h struct media_entity *ent; ent 146 drivers/media/platform/vimc/vimc-common.h int vimc_pipeline_s_stream(struct media_entity *ent, int enable); ent 156 drivers/media/platform/vimc/vimc-core.c ret = media_create_pad_link(ved_src->ent, link->src_pad, ent 157 drivers/media/platform/vimc/vimc-core.c ved_sink->ent, link->sink_pad, ent 26 drivers/media/platform/vimc/vimc-streamer.c static struct media_entity *vimc_get_source_entity(struct media_entity *ent) ent 31 drivers/media/platform/vimc/vimc-streamer.c for (i = 0; i < ent->num_pads; i++) { ent 32 drivers/media/platform/vimc/vimc-streamer.c if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE) ent 34 drivers/media/platform/vimc/vimc-streamer.c pad = media_entity_remote_pad(&ent->pads[i]); ent 59 drivers/media/platform/vimc/vimc-streamer.c if (!is_media_entity_v4l2_subdev(ved->ent)) ent 62 drivers/media/platform/vimc/vimc-streamer.c sd = media_entity_to_v4l2_subdev(ved->ent); ent 96 drivers/media/platform/vimc/vimc-streamer.c if (is_media_entity_v4l2_subdev(ved->ent)) { ent 97 drivers/media/platform/vimc/vimc-streamer.c sd = media_entity_to_v4l2_subdev(ved->ent); ent 101 drivers/media/platform/vimc/vimc-streamer.c ved->ent->name); ent 107 drivers/media/platform/vimc/vimc-streamer.c entity = vimc_get_source_entity(ved->ent); ent 557 drivers/media/platform/vsp1/vsp1_entity.c #define VSP1_ENTITY_ROUTE(ent) \ ent 558 drivers/media/platform/vsp1/vsp1_entity.c { VSP1_ENTITY_##ent, 0, VI6_DPR_##ent##_ROUTE, \ ent 559 drivers/media/platform/vsp1/vsp1_entity.c { VI6_DPR_NODE_##ent }, VI6_DPR_NODE_##ent } ent 76 drivers/media/platform/xilinx/xilinx-vipp.c struct xvip_graph_entity *ent; ent 128 drivers/media/platform/xilinx/xilinx-vipp.c ent = xvip_graph_find_entity(xdev, link.remote_node); ent 129 drivers/media/platform/xilinx/xilinx-vipp.c if (ent == NULL) { ent 137 drivers/media/platform/xilinx/xilinx-vipp.c remote = ent->entity; ent 193 drivers/media/platform/xilinx/xilinx-vipp.c struct xvip_graph_entity *ent; ent 230 drivers/media/platform/xilinx/xilinx-vipp.c ent = xvip_graph_find_entity(xdev, link.remote_node); ent 231 drivers/media/platform/xilinx/xilinx-vipp.c if (ent == NULL) { ent 239 drivers/media/platform/xilinx/xilinx-vipp.c if (link.remote_port >= ent->entity->num_pads) { ent 251 drivers/media/platform/xilinx/xilinx-vipp.c sink = ent->entity; ent 254 drivers/media/platform/xilinx/xilinx-vipp.c source = ent->entity; ent 119 drivers/media/radio/radio-maxiradio.c const struct pci_device_id *ent) ent 463 drivers/media/tuners/si2157.c dev->ent.name = KBUILD_MODNAME; ent 464 drivers/media/tuners/si2157.c dev->ent.function = MEDIA_ENT_F_TUNER; ent 473 drivers/media/tuners/si2157.c ret = media_entity_pads_init(&dev->ent, SI2157_NUM_PADS, ent 479 drivers/media/tuners/si2157.c ret = 
media_device_register_entity(cfg->mdev, &dev->ent); ent 481 drivers/media/tuners/si2157.c media_entity_cleanup(&dev->ent); ent 513 drivers/media/tuners/si2157.c media_device_unregister_entity(&dev->ent); ent 35 drivers/media/tuners/si2157_priv.h struct media_entity ent; ent 1865 drivers/media/usb/au0828/au0828-video.c struct media_entity *ent = &dev->input_ent[i]; ent 1870 drivers/media/usb/au0828/au0828-video.c ent->name = inames[AUVI_INPUT(i).type]; ent 1871 drivers/media/usb/au0828/au0828-video.c ent->flags = MEDIA_ENT_FL_CONNECTOR; ent 1876 drivers/media/usb/au0828/au0828-video.c ent->function = MEDIA_ENT_F_CONN_COMPOSITE; ent 1879 drivers/media/usb/au0828/au0828-video.c ent->function = MEDIA_ENT_F_CONN_SVIDEO; ent 1885 drivers/media/usb/au0828/au0828-video.c ent->function = MEDIA_ENT_F_CONN_RF; ent 1889 drivers/media/usb/au0828/au0828-video.c ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]); ent 1893 drivers/media/usb/au0828/au0828-video.c ret = media_device_register_entity(dev->media_dev, ent); ent 1102 drivers/media/usb/cx231xx/cx231xx-video.c struct media_entity *ent = &dev->input_ent[i]; ent 1107 drivers/media/usb/cx231xx/cx231xx-video.c ent->name = iname[INPUT(i)->type]; ent 1108 drivers/media/usb/cx231xx/cx231xx-video.c ent->flags = MEDIA_ENT_FL_CONNECTOR; ent 1113 drivers/media/usb/cx231xx/cx231xx-video.c ent->function = MEDIA_ENT_F_CONN_COMPOSITE; ent 1116 drivers/media/usb/cx231xx/cx231xx-video.c ent->function = MEDIA_ENT_F_CONN_SVIDEO; ent 1126 drivers/media/usb/cx231xx/cx231xx-video.c ent->function = MEDIA_ENT_F_CONN_RF; ent 1130 drivers/media/usb/cx231xx/cx231xx-video.c ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]); ent 1134 drivers/media/usb/cx231xx/cx231xx-video.c ret = media_device_register_entity(dev->media_dev, ent); ent 999 drivers/media/usb/em28xx/em28xx-video.c struct media_entity *ent = &dev->input_ent[i]; ent 1004 drivers/media/usb/em28xx/em28xx-video.c ent->name = iname[INPUT(i)->type]; ent 1005 drivers/media/usb/em28xx/em28xx-video.c ent->flags = MEDIA_ENT_FL_CONNECTOR; ent 1010 drivers/media/usb/em28xx/em28xx-video.c ent->function = MEDIA_ENT_F_CONN_COMPOSITE; ent 1013 drivers/media/usb/em28xx/em28xx-video.c ent->function = MEDIA_ENT_F_CONN_SVIDEO; ent 1017 drivers/media/usb/em28xx/em28xx-video.c ent->function = MEDIA_ENT_F_CONN_RF; ent 1021 drivers/media/usb/em28xx/em28xx-video.c ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]); ent 1026 drivers/media/usb/em28xx/em28xx-video.c ret = media_device_register_entity(dev->media_dev, ent); ent 61 drivers/mfd/rdc321x-southbridge.c const struct pci_device_id *ent) ent 241 drivers/misc/cardreader/alcor_pci.c const struct pci_device_id *ent) ent 247 drivers/misc/cardreader/alcor_pci.c cfg = (void *)ent->driver_data; ent 201 drivers/misc/cb710/core.c const struct pci_device_id *ent) ent 943 drivers/misc/habanalabs/debugfs.c struct dentry *ent; ent 1033 drivers/misc/habanalabs/debugfs.c ent = debugfs_create_file(hl_debugfs_list[i].name, ent 1038 drivers/misc/habanalabs/debugfs.c entry->dent = ent; ent 768 drivers/misc/hpilo.c const struct pci_device_id *ent) ent 155 drivers/misc/mei/pci-me.c static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 163 drivers/misc/mei/pci-me.c cfg = mei_me_get_cfg(ent->driver_data); ent 53 drivers/misc/mei/pci-txe.c static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 159 drivers/misc/mic/host/mic_main.c const struct pci_device_id *ent) ent 637 drivers/misc/pci_endpoint_test.c const struct pci_device_id 
*ent) ent 665 drivers/misc/pci_endpoint_test.c data = (struct pci_endpoint_test_data *)ent->driver_data; ent 792 drivers/misc/pti.c const struct pci_device_id *ent) ent 1283 drivers/misc/vmw_vmci/vmci_queue_pair.c void *client_data, struct qp_broker_entry **ent) ent 1402 drivers/misc/vmw_vmci/vmci_queue_pair.c if (ent != NULL) ent 1403 drivers/misc/vmw_vmci/vmci_queue_pair.c *ent = entry; ent 1512 drivers/misc/vmw_vmci/vmci_queue_pair.c struct qp_broker_entry **ent) ent 1677 drivers/misc/vmw_vmci/vmci_queue_pair.c if (ent != NULL) ent 1678 drivers/misc/vmw_vmci/vmci_queue_pair.c *ent = entry; ent 1697 drivers/misc/vmw_vmci/vmci_queue_pair.c struct qp_broker_entry **ent, ent 1739 drivers/misc/vmw_vmci/vmci_queue_pair.c context, wakeup_cb, client_data, ent); ent 1745 drivers/misc/vmw_vmci/vmci_queue_pair.c context, wakeup_cb, client_data, ent); ent 2086 drivers/mmc/host/sdhci-pci-core.c const struct pci_device_id *ent) ent 2095 drivers/mmc/host/sdhci-pci-core.c BUG_ON(ent == NULL); ent 2129 drivers/mmc/host/sdhci-pci-core.c chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; ent 604 drivers/mmc/host/toshsd.c static int toshsd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 105 drivers/mtd/maps/amd76xrom.c const struct pci_device_id *ent) ent 117 drivers/mtd/maps/ck804xrom.c const struct pci_device_id *ent) ent 129 drivers/mtd/maps/ck804xrom.c switch (ent->driver_data) { ent 149 drivers/mtd/maps/esb2rom.c const struct pci_device_id *ent) ent 91 drivers/mtd/maps/ichxrom.c const struct pci_device_id *ent) ent 136 drivers/mtd/maps/scb2_flash.c const struct pci_device_id *ent) ent 667 drivers/mtd/nand/raw/cafe_nand.c const struct pci_device_id *ent) ent 115 drivers/net/can/c_can/c_can_pci.c const struct pci_device_id *ent) ent 117 drivers/net/can/c_can/c_can_pci.c struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data; ent 716 drivers/net/can/peak_canfd/peak_pciefd_main.c const struct pci_device_id *ent) ent 211 drivers/net/can/sja1000/ems_pci.c const struct pci_device_id *ent) ent 95 drivers/net/can/sja1000/f81601.c const struct pci_device_id *ent) ent 280 drivers/net/can/sja1000/kvaser_pci.c const struct pci_device_id *ent) ent 546 drivers/net/can/sja1000/peak_pci.c static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 624 drivers/net/can/sja1000/plx_pci.c const struct pci_device_id *ent) ent 634 drivers/net/can/sja1000/plx_pci.c ci = (struct plx_pci_card_info *)ent->driver_data; ent 1454 drivers/net/dsa/b53/b53_common.c u16 vid, struct b53_arl_entry *ent, u8 *idx, ent 1476 drivers/net/dsa/b53/b53_common.c b53_arl_to_entry(ent, mac_vid, fwd_entry); ent 1502 drivers/net/dsa/b53/b53_common.c struct b53_arl_entry ent; ent 1520 drivers/net/dsa/b53/b53_common.c ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid); ent 1542 drivers/net/dsa/b53/b53_common.c memset(&ent, 0, sizeof(ent)); ent 1543 drivers/net/dsa/b53/b53_common.c ent.port = port; ent 1544 drivers/net/dsa/b53/b53_common.c ent.vid = vid; ent 1545 drivers/net/dsa/b53/b53_common.c ent.is_static = true; ent 1546 drivers/net/dsa/b53/b53_common.c memcpy(ent.mac, addr, ETH_ALEN); ent 1547 drivers/net/dsa/b53/b53_common.c b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); ent 1601 drivers/net/dsa/b53/b53_common.c struct b53_arl_entry *ent) ent 1610 drivers/net/dsa/b53/b53_common.c b53_arl_to_entry(ent, mac_vid, fwd_entry); ent 1613 drivers/net/dsa/b53/b53_common.c static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, ent 1616 drivers/net/dsa/b53/b53_common.c if 
(!ent->is_valid) ent 1619 drivers/net/dsa/b53/b53_common.c if (port != ent->port) ent 1622 drivers/net/dsa/b53/b53_common.c return cb(ent->mac, ent->vid, ent->is_static, data); ent 261 drivers/net/dsa/b53/b53_priv.h static inline void b53_arl_to_entry(struct b53_arl_entry *ent, ent 264 drivers/net/dsa/b53/b53_priv.h memset(ent, 0, sizeof(*ent)); ent 265 drivers/net/dsa/b53/b53_priv.h ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK; ent 266 drivers/net/dsa/b53/b53_priv.h ent->is_valid = !!(fwd_entry & ARLTBL_VALID); ent 267 drivers/net/dsa/b53/b53_priv.h ent->is_age = !!(fwd_entry & ARLTBL_AGE); ent 268 drivers/net/dsa/b53/b53_priv.h ent->is_static = !!(fwd_entry & ARLTBL_STATIC); ent 269 drivers/net/dsa/b53/b53_priv.h u64_to_ether_addr(mac_vid, ent->mac); ent 270 drivers/net/dsa/b53/b53_priv.h ent->vid = mac_vid >> ARLTBL_VID_S; ent 274 drivers/net/dsa/b53/b53_priv.h const struct b53_arl_entry *ent) ent 276 drivers/net/dsa/b53/b53_priv.h *mac_vid = ether_addr_to_u64(ent->mac); ent 277 drivers/net/dsa/b53/b53_priv.h *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S; ent 278 drivers/net/dsa/b53/b53_priv.h *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK; ent 279 drivers/net/dsa/b53/b53_priv.h if (ent->is_valid) ent 281 drivers/net/dsa/b53/b53_priv.h if (ent->is_static) ent 283 drivers/net/dsa/b53/b53_priv.h if (ent->is_age) ent 996 drivers/net/ethernet/3com/3c59x.c const struct pci_device_id *ent) ent 1015 drivers/net/ethernet/3com/3c59x.c vci = &vortex_info_tbl[ent->driver_data]; ent 1031 drivers/net/ethernet/3com/3c59x.c ent->driver_data, unit); ent 2265 drivers/net/ethernet/3com/typhoon.c typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2269 drivers/net/ethernet/3com/typhoon.c int card_id = (int) ent->driver_data; ent 57 drivers/net/ethernet/8390/hydra.c const struct zorro_device_id *ent); ent 84 drivers/net/ethernet/8390/hydra.c const struct zorro_device_id *ent) ent 299 drivers/net/ethernet/8390/mac8390.c struct nubus_dirent ent; ent 319 drivers/net/ethernet/8390/mac8390.c if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) { ent 324 drivers/net/ethernet/8390/mac8390.c nubus_get_rsrc_mem(dev->dev_addr, &ent, 6); ent 329 drivers/net/ethernet/8390/mac8390.c &ent) == -1) { ent 334 drivers/net/ethernet/8390/mac8390.c nubus_get_rsrc_mem(&offset, &ent, 4); ent 340 drivers/net/ethernet/8390/mac8390.c &ent) == -1) { ent 345 drivers/net/ethernet/8390/mac8390.c nubus_get_rsrc_mem(&offset, &ent, 4); ent 218 drivers/net/ethernet/8390/ne2k-pci.c const struct pci_device_id *ent) ent 224 drivers/net/ethernet/8390/ne2k-pci.c int irq, reg0, chip_idx = ent->driver_data; ent 245 drivers/net/ethernet/8390/xsurf100.c const struct zorro_device_id *ent) ent 401 drivers/net/ethernet/8390/zorro8390.c const struct zorro_device_id *ent) ent 646 drivers/net/ethernet/adaptec/starfire.c const struct pci_device_id *ent) ent 650 drivers/net/ethernet/adaptec/starfire.c int i, irq, chip_idx = ent->driver_data; ent 3906 drivers/net/ethernet/agere/et131x.c const struct pci_device_id *ent) ent 1741 drivers/net/ethernet/alacritech/slicoss.c static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 3432 drivers/net/ethernet/amazon/ena/ena_netdev.c static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 650 drivers/net/ethernet/amd/a2065.c const struct zorro_device_id *ent); ent 680 drivers/net/ethernet/amd/a2065.c const struct zorro_device_id *ent) ent 1752 drivers/net/ethernet/amd/amd8111e.c const struct pci_device_id *ent) 
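
The probe signatures indexed in this stretch (3c59x, typhoon, tulip, dmfe, e1000/e1000e, atl1, bnx2, tg3 and others) all follow the same convention: the PCI core hands the matching table entry to the driver as `const struct pci_device_id *ent`, and the driver reads `ent->driver_data` (and occasionally `ent->device` / `ent->vendor`) to pick its per-board configuration. The sketch below is only a minimal illustration of that convention; every `my_`-prefixed name and the vendor/device IDs are hypothetical and are not taken from any of the drivers indexed here.

```c
/*
 * Minimal sketch of the probe pattern seen in the entries above:
 * the PCI core passes the matching table entry as "ent", and the
 * driver uses ent->driver_data as an index into its per-board table.
 * All "my_" names and the IDs below are hypothetical.
 */
#include <linux/module.h>
#include <linux/pci.h>

struct my_board_info {
	const char *name;
	unsigned int flags;
};

static const struct my_board_info my_boards[] = {
	{ .name = "board-a", .flags = 0 },
	{ .name = "board-b", .flags = 1 },
};

static const struct pci_device_id my_pci_tbl[] = {
	{ PCI_DEVICE(0x1234, 0x0001), .driver_data = 0 },	/* hypothetical IDs */
	{ PCI_DEVICE(0x1234, 0x0002), .driver_data = 1 },
	{ }
};
MODULE_DEVICE_TABLE(pci, my_pci_tbl);

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* ent->driver_data selects the board description for this ID. */
	const struct my_board_info *bi = &my_boards[ent->driver_data];
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	dev_info(&pdev->dev, "probed %s (device 0x%04x)\n", bi->name, ent->device);
	return 0;
}

static void my_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver my_driver = {
	.name		= "my_pci_example",
	.id_table	= my_pci_tbl,
	.probe		= my_probe,
	.remove		= my_remove,
};
module_pci_driver(my_driver);
MODULE_LICENSE("GPL");
```

Several drivers in the listing use the same hook slightly differently and store a pointer rather than an index in `driver_data`, casting it back in probe, e.g. `struct hm_map *m = (struct hm_map *)ent->driver_data;` in hfcmulti or the `(void *)ent->driver_data` cast in c_can_pci.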
ent 713 drivers/net/ethernet/amd/ariadne.c const struct zorro_device_id *ent) ent 50 drivers/net/ethernet/amd/hplance.c static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent); ent 84 drivers/net/ethernet/amd/hplance.c static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent) ent 1538 drivers/net/ethernet/amd/pcnet32.c pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1690 drivers/net/ethernet/atheros/alx/main.c static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1764 drivers/net/ethernet/atheros/alx/main.c if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG) ent 2531 drivers/net/ethernet/atheros/atl1c/atl1c_main.c static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2283 drivers/net/ethernet/atheros/atl1e/atl1e_main.c static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 136 drivers/net/ethernet/atheros/atl1e/atl1e_param.c struct atl1e_opt_list *ent; ent 139 drivers/net/ethernet/atheros/atl1e/atl1e_param.c ent = &opt->arg.l.p[i]; ent 140 drivers/net/ethernet/atheros/atl1e/atl1e_param.c if (*value == ent->i) { ent 141 drivers/net/ethernet/atheros/atl1e/atl1e_param.c if (ent->str[0] != '\0') ent 143 drivers/net/ethernet/atheros/atl1e/atl1e_param.c "%s\n", ent->str); ent 157 drivers/net/ethernet/atheros/atlx/atl1.c struct atl1_opt_list *ent; ent 160 drivers/net/ethernet/atheros/atlx/atl1.c ent = &opt->arg.l.p[i]; ent 161 drivers/net/ethernet/atheros/atlx/atl1.c if (*value == ent->i) { ent 162 drivers/net/ethernet/atheros/atlx/atl1.c if (ent->str[0] != '\0') ent 164 drivers/net/ethernet/atheros/atlx/atl1.c ent->str); ent 2906 drivers/net/ethernet/atheros/atlx/atl1.c static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1319 drivers/net/ethernet/atheros/atlx/atl2.c static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2902 drivers/net/ethernet/atheros/atlx/atl2.c struct atl2_opt_list *ent; ent 2928 drivers/net/ethernet/atheros/atlx/atl2.c ent = &opt->arg.l.p[i]; ent 2929 drivers/net/ethernet/atheros/atlx/atl2.c if (*value == ent->i) { ent 2930 drivers/net/ethernet/atheros/atlx/atl2.c if (ent->str[0] != '\0') ent 2931 drivers/net/ethernet/atheros/atlx/atl2.c printk(KERN_INFO "%s\n", ent->str); ent 2345 drivers/net/ethernet/broadcom/b44.c const struct ssb_device_id *ent) ent 8563 drivers/net/ethernet/broadcom/bnx2.c bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 8625 drivers/net/ethernet/broadcom/bnx2.c "node addr %pM\n", board_info[ent->driver_data].name, ent 682 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent) ent 688 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c if (ent->pri_bitmap & (1 << pri)) ent 694 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent) ent 696 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) == ent 706 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c struct dcbx_app_priority_entry *ent = ent 709 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c if (ent->appBitfield & DCBX_APP_ENTRY_VALID) { ent 710 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c u8 up = bnx2x_dcbx_dcbnl_app_up(ent); ent 715 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent); ent 716 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c app.protocol = 
ent->app_id; ent 2517 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c struct dcbx_app_priority_entry *ent = ent 2520 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c if (ent->appBitfield & DCBX_APP_ENTRY_VALID) { ent 2521 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent); ent 2522 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent); ent 2523 drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c table[j++].protocol = ent->app_id; ent 13949 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c const struct pci_device_id *ent) ent 13978 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c max_cos_est = set_max_cos_est(ent->driver_data); ent 13981 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c is_vf = set_is_vf(ent->driver_data); ent 14022 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data); ent 14122 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c board_info[ent->driver_data].name, ent 11717 drivers/net/ethernet/broadcom/bnxt/bnxt.c static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 11746 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (bnxt_vf_pciid(ent->driver_data)) ent 11891 drivers/net/ethernet/broadcom/bnxt/bnxt.c board_info[ent->driver_data].name, ent 97 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent) ent 105 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c ent[i].vector = bp->irq_tbl[idx + i].vector; ent 106 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c ent[i].ring_idx = idx + i; ent 107 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c ent[i].db_offset = (idx + i) * 0x80; ent 112 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c struct bnxt_msix_entry *ent, int num_msix) ent 170 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c bnxt_fill_msix_vecs(bp, ent); ent 377 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c struct bnxt_msix_entry *ent = NULL; ent 387 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c ent = kcalloc(ulp->msix_requested, sizeof(*ent), ent 389 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c if (!ent) ent 391 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c bnxt_fill_msix_vecs(bp, ent); ent 393 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c ops->ulp_irq_restart(ulp->handle, ent); ent 394 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c kfree(ent); ent 16181 drivers/net/ethernet/broadcom/tg3.c const struct pci_device_id *ent) ent 16190 drivers/net/ethernet/broadcom/tg3.c if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) { ent 16192 drivers/net/ethernet/broadcom/tg3.c if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100) ent 16202 drivers/net/ethernet/broadcom/tg3.c static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) ent 16901 drivers/net/ethernet/broadcom/tg3.c if (tg3_10_100_only_device(tp, ent)) ent 17620 drivers/net/ethernet/broadcom/tg3.c const struct pci_device_id *ent) ent 17746 drivers/net/ethernet/broadcom/tg3.c err = tg3_get_invariants(tp, ent); ent 218 drivers/net/ethernet/cavium/common/cavium_ptp.c const struct pci_device_id *ent) ent 158 drivers/net/ethernet/cavium/liquidio/lio_main.c const struct pci_device_id *ent); ent 859 drivers/net/ethernet/cavium/liquidio/lio_main.c const struct pci_device_id *ent __attribute__((unused))) ent 69 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); ent 384 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c const 
struct pci_device_id *ent __attribute__((unused))) ent 1291 drivers/net/ethernet/cavium/thunder/nic_main.c static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2115 drivers/net/ethernet/cavium/thunder/nicvf_main.c static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1589 drivers/net/ethernet/cavium/thunder/thunder_bgx.c static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 154 drivers/net/ethernet/cavium/thunder/thunder_xcv.c static int xcv_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 979 drivers/net/ethernet/chelsio/cxgb/cxgb2.c static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1025 drivers/net/ethernet/chelsio/cxgb/cxgb2.c bi = t1_get_board_info(ent->driver_data); ent 3205 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 3254 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c ai = t3_get_adapter_info(ent->driver_data); ent 5651 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 5733 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (func != ent->driver_data) { ent 2926 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c const struct pci_device_id *ent) ent 2686 drivers/net/ethernet/cisco/enic/enic_main.c static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1969 drivers/net/ethernet/dec/tulip/de2104x.c static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1996 drivers/net/ethernet/dec/tulip/de2104x.c de->de21040 = ent->driver_data == 0 ? 1 : 0; ent 2195 drivers/net/ethernet/dec/tulip/de4x5.c const struct pci_device_id *ent) ent 358 drivers/net/ethernet/dec/tulip/dmfe.c static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 375 drivers/net/ethernet/dec/tulip/dmfe.c if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) || ent 376 drivers/net/ethernet/dec/tulip/dmfe.c ent->driver_data == PCI_DM9102_ID) { ent 453 drivers/net/ethernet/dec/tulip/dmfe.c db->chip_id = ent->driver_data; ent 494 drivers/net/ethernet/dec/tulip/dmfe.c ent->driver_data >> 16, ent 1298 drivers/net/ethernet/dec/tulip/tulip_core.c static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1312 drivers/net/ethernet/dec/tulip/tulip_core.c int chip_idx = ent->driver_data; ent 276 drivers/net/ethernet/dec/tulip/uli526x.c const struct pci_device_id *ent) ent 342 drivers/net/ethernet/dec/tulip/uli526x.c switch (ent->driver_data) { ent 403 drivers/net/ethernet/dec/tulip/uli526x.c ent->driver_data >> 16, pci_name(pdev), ent 360 drivers/net/ethernet/dec/tulip/winbond-840.c static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) ent 365 drivers/net/ethernet/dec/tulip/winbond-840.c int chip_idx = ent->driver_data; ent 108 drivers/net/ethernet/dlink/dl2k.c rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) ent 113 drivers/net/ethernet/dlink/dl2k.c int chip_idx = ent->driver_data; ent 500 drivers/net/ethernet/dlink/sundance.c const struct pci_device_id *ent) ent 505 drivers/net/ethernet/dlink/sundance.c int chip_idx = ent->driver_data; ent 480 drivers/net/ethernet/fealnx.c const struct pci_device_id *ent) ent 488 drivers/net/ethernet/fealnx.c unsigned int chip_id = ent->driver_data; ent 12 drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c const struct pci_device_id *ent) ent 813 
drivers/net/ethernet/freescale/enetc/enetc_pf.c const struct pci_device_id *ent) ent 30 drivers/net/ethernet/freescale/enetc/enetc_ptp.c const struct pci_device_id *ent) ent 156 drivers/net/ethernet/freescale/enetc/enetc_vf.c const struct pci_device_id *ent) ent 1070 drivers/net/ethernet/google/gve/gve_main.c static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1896 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1908 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ae_dev->flag = ent->driver_data; ent 2870 drivers/net/ethernet/hp/hp100.c const struct pci_device_id *ent) ent 174 drivers/net/ethernet/ibm/ehea/ehea.h u64 ent[EHEA_MAP_ENTRIES]; ent 590 drivers/net/ethernet/ibm/ehea/ehea_qmr.c if (!ehea_dir->ent[idx]) ent 593 drivers/net/ethernet/ibm/ehea/ehea_qmr.c ehea_dir->ent[idx] = vaddr; ent 645 drivers/net/ethernet/ibm/ehea/ehea_qmr.c ehea_bmap->top[top]->dir[dir]->ent[idx] = flag; ent 778 drivers/net/ethernet/ibm/ehea/ehea_qmr.c if (!ehea_bmap->top[top]->dir[dir]->ent[idx]) ent 782 drivers/net/ethernet/ibm/ehea/ehea_qmr.c return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset; ent 832 drivers/net/ethernet/ibm/ehea/ehea_qmr.c if (!ehea_bmap->top[top]->dir[dir]->ent[idx]) ent 2818 drivers/net/ethernet/intel/e100.c static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2877 drivers/net/ethernet/intel/e100.c if (ent->driver_data) ent 88 drivers/net/ethernet/intel/e1000/e1000_main.c static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); ent 920 drivers/net/ethernet/intel/e1000/e1000_main.c static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 211 drivers/net/ethernet/intel/e1000/e1000_param.c const struct e1000_opt_list *ent; ent 214 drivers/net/ethernet/intel/e1000/e1000_param.c ent = &opt->arg.l.p[i]; ent 215 drivers/net/ethernet/intel/e1000/e1000_param.c if (*value == ent->i) { ent 216 drivers/net/ethernet/intel/e1000/e1000_param.c if (ent->str[0] != '\0') ent 217 drivers/net/ethernet/intel/e1000/e1000_param.c e_dev_info("%s\n", ent->str); ent 7056 drivers/net/ethernet/intel/e1000e/netdev.c static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 7061 drivers/net/ethernet/intel/e1000e/netdev.c const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; ent 194 drivers/net/ethernet/intel/e1000e/param.c struct e1000_opt_list *ent; ent 197 drivers/net/ethernet/intel/e1000e/param.c ent = &opt->arg.l.p[i]; ent 198 drivers/net/ethernet/intel/e1000e/param.c if (*value == ent->i) { ent 199 drivers/net/ethernet/intel/e1000e/param.c if (ent->str[0] != '\0') ent 201 drivers/net/ethernet/intel/e1000e/param.c ent->str); ent 1968 drivers/net/ethernet/intel/fm10k/fm10k_pci.c const struct pci_device_id *ent) ent 1970 drivers/net/ethernet/intel/fm10k/fm10k_pci.c const struct fm10k_info *fi = fm10k_info_tbl[ent->driver_data]; ent 2098 drivers/net/ethernet/intel/fm10k/fm10k_pci.c static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2138 drivers/net/ethernet/intel/fm10k/fm10k_pci.c netdev = fm10k_alloc_netdev(fm10k_info_tbl[ent->driver_data]); ent 2159 drivers/net/ethernet/intel/fm10k/fm10k_pci.c err = fm10k_sw_init(interface, ent); ent 14733 drivers/net/ethernet/intel/i40e/i40e_main.c static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 3651 drivers/net/ethernet/intel/iavf/iavf_main.c static int iavf_probe(struct pci_dev *pdev, const 
struct pci_device_id *ent) ent 2792 drivers/net/ethernet/intel/ice/ice_main.c ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ent 2998 drivers/net/ethernet/intel/igb/igb_main.c static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 3006 drivers/net/ethernet/intel/igb/igb_main.c const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; ent 2708 drivers/net/ethernet/intel/igbvf/netdev.c static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2713 drivers/net/ethernet/intel/igbvf/netdev.c const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; ent 4120 drivers/net/ethernet/intel/igc/igc_main.c const struct pci_device_id *ent) ent 4125 drivers/net/ethernet/intel/igc/igc_main.c const struct igc_info *ei = igc_info_tbl[ent->driver_data]; ent 50 drivers/net/ethernet/intel/ixgb/ixgb_main.c static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent); ent 363 drivers/net/ethernet/intel/ixgb/ixgb_main.c ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 204 drivers/net/ethernet/intel/ixgb/ixgb_param.c const struct ixgb_opt_list *ent; ent 207 drivers/net/ethernet/intel/ixgb/ixgb_param.c ent = &opt->arg.l.p[i]; ent 208 drivers/net/ethernet/intel/ixgb/ixgb_param.c if (*value == ent->i) { ent 209 drivers/net/ethernet/intel/ixgb/ixgb_param.c if (ent->str[0] != '\0') ent 210 drivers/net/ethernet/intel/ixgb/ixgb_param.c pr_info("%s\n", ent->str); ent 10731 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 10736 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; ent 4536 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 4541 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; ent 2928 drivers/net/ethernet/jme.c const struct pci_device_id *ent) ent 3877 drivers/net/ethernet/marvell/skge.c static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 4658 drivers/net/ethernet/marvell/sky2.c struct dentry *ent; ent 4660 drivers/net/ethernet/marvell/sky2.c ent = debugfs_create_dir("sky2", NULL); ent 4661 drivers/net/ethernet/marvell/sky2.c if (!ent || IS_ERR(ent)) ent 4664 drivers/net/ethernet/marvell/sky2.c sky2_debug = ent; ent 4944 drivers/net/ethernet/marvell/sky2.c static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 80 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_work_ent *ent; ent 82 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent = kzalloc(sizeof(*ent), alloc_flags); ent 83 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (!ent) ent 86 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->in = in; ent 87 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->out = out; ent 88 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->uout = uout; ent 89 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->uout_size = uout_size; ent 90 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->callback = cbk; ent 91 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->context = context; ent 92 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->cmd = cmd; ent 93 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->page_queue = page_queue; ent 95 drivers/net/ethernet/mellanox/mlx5/core/cmd.c return ent; ent 196 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) ent 198 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay)); ent 200 drivers/net/ethernet/mellanox/mlx5/core/cmd.c calc_chain_sig(ent->in); ent 201 drivers/net/ethernet/mellanox/mlx5/core/cmd.c calc_chain_sig(ent->out); ent 205 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static void poll_timeout(struct mlx5_cmd_work_ent *ent) ent 211 drivers/net/ethernet/mellanox/mlx5/core/cmd.c own = READ_ONCE(ent->lay->status_own); ent 213 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ret = 0; ent 219 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ret = -ETIMEDOUT; ent 222 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static void free_cmd(struct mlx5_cmd_work_ent *ent) ent 224 drivers/net/ethernet/mellanox/mlx5/core/cmd.c kfree(ent); ent 227 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static int verify_signature(struct mlx5_cmd_work_ent *ent) ent 229 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_mailbox *next = ent->out->next; ent 230 drivers/net/ethernet/mellanox/mlx5/core/cmd.c int n = mlx5_calc_cmd_blocks(ent->out); ent 235 drivers/net/ethernet/mellanox/mlx5/core/cmd.c sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay)); ent 770 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_work_ent *ent, int input) ent 772 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; ent 773 drivers/net/ethernet/mellanox/mlx5/core/cmd.c u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode); ent 795 drivers/net/ethernet/mellanox/mlx5/core/cmd.c dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); ent 796 drivers/net/ethernet/mellanox/mlx5/core/cmd.c offset += sizeof(ent->lay->in); ent 798 drivers/net/ethernet/mellanox/mlx5/core/cmd.c dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); ent 799 drivers/net/ethernet/mellanox/mlx5/core/cmd.c offset += sizeof(ent->lay->out); ent 802 drivers/net/ethernet/mellanox/mlx5/core/cmd.c dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); ent 803 drivers/net/ethernet/mellanox/mlx5/core/cmd.c offset += sizeof(*ent->lay); ent 834 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_work_ent *ent = container_of(dwork, ent 837 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, ent 840 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ret = -ETIMEDOUT; ent 842 drivers/net/ethernet/mellanox/mlx5/core/cmd.c mlx5_command_str(msg_to_opcode(ent->in)), ent 843 drivers/net/ethernet/mellanox/mlx5/core/cmd.c msg_to_opcode(ent->in)); ent 844 drivers/net/ethernet/mellanox/mlx5/core/cmd.c mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); ent 853 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); ent 854 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd *cmd = ent->cmd; ent 860 drivers/net/ethernet/mellanox/mlx5/core/cmd.c bool poll_cmd = ent->polling; ent 864 drivers/net/ethernet/mellanox/mlx5/core/cmd.c complete(&ent->handling); ent 865 drivers/net/ethernet/mellanox/mlx5/core/cmd.c sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; ent 867 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (!ent->page_queue) { ent 871 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (ent->callback) { ent 872 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->callback(-EAGAIN, ent->context); ent 873 drivers/net/ethernet/mellanox/mlx5/core/cmd.c mlx5_free_cmd_msg(dev, ent->out); ent 874 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_msg(dev, ent->in); ent 875 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_cmd(ent); ent 877 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ret = -EAGAIN; ent 878 drivers/net/ethernet/mellanox/mlx5/core/cmd.c complete(&ent->done); ent 883 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->idx = alloc_ret; ent 885 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->idx = cmd->max_reg_cmds; ent 887 drivers/net/ethernet/mellanox/mlx5/core/cmd.c clear_bit(ent->idx, &cmd->bitmask); ent 891 drivers/net/ethernet/mellanox/mlx5/core/cmd.c cmd->ent_arr[ent->idx] = ent; ent 892 drivers/net/ethernet/mellanox/mlx5/core/cmd.c lay = get_inst(cmd, ent->idx); ent 893 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->lay = lay; ent 895 drivers/net/ethernet/mellanox/mlx5/core/cmd.c memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); ent 896 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->op = be32_to_cpu(lay->in[0]) >> 16; ent 897 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (ent->in->next) ent 898 drivers/net/ethernet/mellanox/mlx5/core/cmd.c lay->in_ptr = cpu_to_be64(ent->in->next->dma); ent 899 drivers/net/ethernet/mellanox/mlx5/core/cmd.c lay->inlen = cpu_to_be32(ent->in->len); ent 900 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (ent->out->next) ent 901 drivers/net/ethernet/mellanox/mlx5/core/cmd.c lay->out_ptr = cpu_to_be64(ent->out->next->dma); ent 902 drivers/net/ethernet/mellanox/mlx5/core/cmd.c lay->outlen = cpu_to_be32(ent->out->len); ent 904 drivers/net/ethernet/mellanox/mlx5/core/cmd.c lay->token = ent->token; ent 906 drivers/net/ethernet/mellanox/mlx5/core/cmd.c set_signature(ent, !cmd->checksum_disabled); ent 907 drivers/net/ethernet/mellanox/mlx5/core/cmd.c dump_command(dev, ent, 1); ent 908 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ts1 = ktime_get_ns(); ent 911 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (ent->callback) ent 912 drivers/net/ethernet/mellanox/mlx5/core/cmd.c schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); ent 913 drivers/net/ethernet/mellanox/mlx5/core/cmd.c set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); ent 921 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status); ent 922 drivers/net/ethernet/mellanox/mlx5/core/cmd.c MLX5_SET(mbox_out, ent->out, status, status); ent 923 drivers/net/ethernet/mellanox/mlx5/core/cmd.c MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); ent 925 drivers/net/ethernet/mellanox/mlx5/core/cmd.c mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); ent 927 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_ent(cmd, ent->idx); ent 928 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (ent->callback) ent 929 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_cmd(ent); ent 934 drivers/net/ethernet/mellanox/mlx5/core/cmd.c mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); ent 936 drivers/net/ethernet/mellanox/mlx5/core/cmd.c iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); ent 939 drivers/net/ethernet/mellanox/mlx5/core/cmd.c poll_timeout(ent); ent 942 
drivers/net/ethernet/mellanox/mlx5/core/cmd.c mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT)); ent 976 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) ent 982 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (!wait_for_completion_timeout(&ent->handling, timeout) && ent 983 drivers/net/ethernet/mellanox/mlx5/core/cmd.c cancel_work_sync(&ent->work)) { ent 984 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ret = -ECANCELED; ent 987 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (cmd->mode == CMD_MODE_POLLING || ent->polling) { ent 988 drivers/net/ethernet/mellanox/mlx5/core/cmd.c wait_for_completion(&ent->done); ent 989 drivers/net/ethernet/mellanox/mlx5/core/cmd.c } else if (!wait_for_completion_timeout(&ent->done, timeout)) { ent 990 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ret = -ETIMEDOUT; ent 991 drivers/net/ethernet/mellanox/mlx5/core/cmd.c mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); ent 995 drivers/net/ethernet/mellanox/mlx5/core/cmd.c err = ent->ret; ent 999 drivers/net/ethernet/mellanox/mlx5/core/cmd.c mlx5_command_str(msg_to_opcode(ent->in)), ent 1000 drivers/net/ethernet/mellanox/mlx5/core/cmd.c msg_to_opcode(ent->in)); ent 1003 drivers/net/ethernet/mellanox/mlx5/core/cmd.c mlx5_command_str(msg_to_opcode(ent->in)), ent 1004 drivers/net/ethernet/mellanox/mlx5/core/cmd.c msg_to_opcode(ent->in)); ent 1007 drivers/net/ethernet/mellanox/mlx5/core/cmd.c err, deliv_status_to_str(ent->status), ent->status); ent 1023 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_work_ent *ent; ent 1032 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context, ent 1034 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (IS_ERR(ent)) ent 1035 drivers/net/ethernet/mellanox/mlx5/core/cmd.c return PTR_ERR(ent); ent 1037 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->token = token; ent 1038 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->polling = force_polling; ent 1040 drivers/net/ethernet/mellanox/mlx5/core/cmd.c init_completion(&ent->handling); ent 1042 drivers/net/ethernet/mellanox/mlx5/core/cmd.c init_completion(&ent->done); ent 1044 drivers/net/ethernet/mellanox/mlx5/core/cmd.c INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler); ent 1045 drivers/net/ethernet/mellanox/mlx5/core/cmd.c INIT_WORK(&ent->work, cmd_work_handler); ent 1047 drivers/net/ethernet/mellanox/mlx5/core/cmd.c cmd_work_handler(&ent->work); ent 1048 drivers/net/ethernet/mellanox/mlx5/core/cmd.c } else if (!queue_work(cmd->wq, &ent->work)) { ent 1057 drivers/net/ethernet/mellanox/mlx5/core/cmd.c err = wait_func(dev, ent); ent 1063 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ds = ent->ts2 - ent->ts1; ent 1075 drivers/net/ethernet/mellanox/mlx5/core/cmd.c *status = ent->status; ent 1078 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_cmd(ent); ent 1468 drivers/net/ethernet/mellanox/mlx5/core/cmd.c struct mlx5_cmd_work_ent *ent; ent 1484 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent = cmd->ent_arr[i]; ent 1488 drivers/net/ethernet/mellanox/mlx5/core/cmd.c &ent->state)) { ent 1492 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->idx); ent 1493 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_ent(cmd, ent->idx); ent 1494 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_cmd(ent); ent 1499 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (ent->callback) ent 1500 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 
cancel_delayed_work(&ent->cb_timeout_work); ent 1501 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (ent->page_queue) ent 1505 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ts2 = ktime_get_ns(); ent 1506 drivers/net/ethernet/mellanox/mlx5/core/cmd.c memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); ent 1507 drivers/net/ethernet/mellanox/mlx5/core/cmd.c dump_command(dev, ent, 0); ent 1508 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (!ent->ret) { ent 1510 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ret = verify_signature(ent); ent 1512 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ret = 0; ent 1514 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->status = MLX5_DRIVER_STATUS_ABORTED; ent 1516 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->status = ent->lay->status_own >> 1; ent 1519 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->ret, deliv_status_to_str(ent->status), ent->status); ent 1524 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_ent(cmd, ent->idx); ent 1526 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (ent->callback) { ent 1527 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ds = ent->ts2 - ent->ts1; ent 1528 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (ent->op < ARRAY_SIZE(cmd->stats)) { ent 1529 drivers/net/ethernet/mellanox/mlx5/core/cmd.c stats = &cmd->stats[ent->op]; ent 1536 drivers/net/ethernet/mellanox/mlx5/core/cmd.c callback = ent->callback; ent 1537 drivers/net/ethernet/mellanox/mlx5/core/cmd.c context = ent->context; ent 1538 drivers/net/ethernet/mellanox/mlx5/core/cmd.c err = ent->ret; ent 1540 drivers/net/ethernet/mellanox/mlx5/core/cmd.c err = mlx5_copy_from_msg(ent->uout, ent 1541 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->out, ent 1542 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->uout_size); ent 1545 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->in->first.data, ent 1546 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ent->uout); ent 1549 drivers/net/ethernet/mellanox/mlx5/core/cmd.c mlx5_free_cmd_msg(dev, ent->out); ent 1550 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_msg(dev, ent->in); ent 1552 drivers/net/ethernet/mellanox/mlx5/core/cmd.c err = err ? 
err : ent->status; ent 1554 drivers/net/ethernet/mellanox/mlx5/core/cmd.c free_cmd(ent); ent 1557 drivers/net/ethernet/mellanox/mlx5/core/cmd.c complete(&ent->done); ent 3765 drivers/net/ethernet/myricom/myri10ge/myri10ge.c static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 802 drivers/net/ethernet/natsemi/natsemi.c static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent) ent 806 drivers/net/ethernet/natsemi/natsemi.c int i, option, irq, chip_idx = ent->driver_data; ent 244 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c const struct pch_gbe_opt_list *ent; ent 247 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c ent = &opt->arg.l.p[i]; ent 248 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c if (*value == ent->i) { ent 249 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c if (ent->str[0] != '\0') ent 251 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c ent->str); ent 579 drivers/net/ethernet/packetengines/hamachi.c const struct pci_device_id *ent) ent 583 drivers/net/ethernet/packetengines/hamachi.c int chip_id = ent->driver_data; ent 370 drivers/net/ethernet/packetengines/yellowfin.c const struct pci_device_id *ent) ent 375 drivers/net/ethernet/packetengines/yellowfin.c int chip_idx = ent->driver_data; ent 1672 drivers/net/ethernet/pasemi/pasemi_mac.c pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 107 drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 46 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c const struct pci_device_id *ent); ent 1445 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 760 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c const struct pci_device_id *ent) ent 55 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); ent 1121 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c const struct pci_device_id *ent) ent 2462 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2496 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c switch (ent->device) { ent 2560 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c qlcnic_check_vf(adapter, ent); ent 2592 drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c qlcnic_83xx_check_vf(adapter, ent); ent 1023 drivers/net/ethernet/rdc/r6040.c static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1884 drivers/net/ethernet/realtek/8139cp.c static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) ent 944 drivers/net/ethernet/realtek/8139too.c const struct pci_device_id *ent) ent 953 drivers/net/ethernet/realtek/8139too.c assert (ent != NULL); ent 1024 drivers/net/ethernet/realtek/8139too.c tp->drv_flags = board_info[ent->driver_data].hw_flags; ent 1046 drivers/net/ethernet/realtek/8139too.c board_info[ent->driver_data].name, ent 7069 drivers/net/ethernet/realtek/r8169_main.c static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 7086 drivers/net/ethernet/realtek/r8169_main.c tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 
0 : 1; ent 1168 drivers/net/ethernet/sgi/ioc3-eth.c static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1846 drivers/net/ethernet/sis/sis190.c const struct pci_device_id *ent) ent 1897 drivers/net/ethernet/sis/sis190.c sis_chip_info[ent->driver_data].name, ent 319 drivers/net/ethernet/smsc/epic100.c static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 323 drivers/net/ethernet/smsc/epic100.c int chip_idx = (int) ent->driver_data; ent 60 drivers/net/ethernet/socionext/sni_ave.c #define AVE_PKTF(ent) (AVE_PKTF_BASE + (ent) * 0x40) ent 61 drivers/net/ethernet/socionext/sni_ave.c #define AVE_PFMBYTE(ent) (AVE_PFMBYTE_BASE + (ent) * 8) ent 62 drivers/net/ethernet/socionext/sni_ave.c #define AVE_PFMBIT(ent) (AVE_PFMBIT_BASE + (ent) * 4) ent 63 drivers/net/ethernet/socionext/sni_ave.c #define AVE_PFSEL(ent) (AVE_PFSEL_BASE + (ent) * 4) ent 3897 drivers/net/ethernet/sun/cassini.c int ent = i & (size - 1); ent 3902 drivers/net/ethernet/sun/cassini.c daddr = le64_to_cpu(txd[ent].buffer); ent 3904 drivers/net/ethernet/sun/cassini.c le64_to_cpu(txd[ent].control)); ent 3914 drivers/net/ethernet/sun/cassini.c ent = i & (size - 1); ent 3915 drivers/net/ethernet/sun/cassini.c if (cp->tx_tiny_use[ring][ent].used) ent 4903 drivers/net/ethernet/sun/cassini.c static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 3145 drivers/net/ethernet/sun/niu.c struct fcram_hash_ipv4 ent; ent 3149 drivers/net/ethernet/sun/niu.c memset(&ent, 0, sizeof(ent)); ent 3150 drivers/net/ethernet/sun/niu.c ent.header = HASH_HEADER_EXT; ent 3152 drivers/net/ethernet/sun/niu.c for (i = 0; i < FCRAM_SIZE; i += sizeof(ent)) { ent 3153 drivers/net/ethernet/sun/niu.c int err = hash_write(np, 0, i, 1, (u64 *) &ent); ent 9712 drivers/net/ethernet/sun/niu.c const struct pci_device_id *ent) ent 1609 drivers/net/ethernet/sun/sungem.c int ent = i & (TX_RING_SIZE - 1); ent 1611 drivers/net/ethernet/sun/sungem.c txd = &gb->txd[ent]; ent 2843 drivers/net/ethernet/sun/sungem.c static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2985 drivers/net/ethernet/sun/sunhme.c const struct pci_device_id *ent) ent 1897 drivers/net/ethernet/tehuti/tehuti.c bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 163 drivers/net/ethernet/ti/tlan.c int irq, int rev, const struct pci_device_id *ent); ent 167 drivers/net/ethernet/ti/tlan.c const struct pci_device_id *ent); ent 438 drivers/net/ethernet/ti/tlan.c const struct pci_device_id *ent) ent 440 drivers/net/ethernet/ti/tlan.c return tlan_probe1(pdev, -1, -1, 0, ent); ent 464 drivers/net/ethernet/ti/tlan.c const struct pci_device_id *ent) ent 502 drivers/net/ethernet/ti/tlan.c priv->adapter = &board_info[ent->driver_data]; ent 2427 drivers/net/ethernet/toshiba/spider_net.c spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 763 drivers/net/ethernet/toshiba/tc35815.c const struct pci_device_id *ent) ent 815 drivers/net/ethernet/toshiba/tc35815.c lp->chiptype = ent->driver_data; ent 835 drivers/net/ethernet/toshiba/tc35815.c chip_info[ent->driver_data].name, ent 1035 drivers/net/ethernet/via/via-rhine.c const struct pci_device_id *ent) ent 2904 drivers/net/ethernet/via/via-velocity.c const struct pci_device_id *ent) ent 2907 drivers/net/ethernet/via/via-velocity.c &chip_info_table[ent->driver_data]; ent 3758 drivers/net/fddi/defxx.c const struct pci_device_id *ent) ent 200 drivers/net/fddi/skfp/skfddi.c const struct pci_device_id *ent) ent 86 
drivers/net/hippi/rrunner.c static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 23 drivers/net/phy/mdio-thunder.c const struct pci_device_id *ent) ent 572 drivers/net/ppp/bsd_comp.c int ent; ent 596 drivers/net/ppp/bsd_comp.c #define OUTPUT(ent) \ ent 599 drivers/net/ppp/bsd_comp.c accm |= ((ent) << bitno); \ ent 615 drivers/net/ppp/bsd_comp.c ent = PPP_PROTOCOL(rptr); ent 616 drivers/net/ppp/bsd_comp.c if (ent < 0x21 || ent > 0xf9) ent 657 drivers/net/ppp/bsd_comp.c fcode = BSD_KEY (ent, c); ent 658 drivers/net/ppp/bsd_comp.c hval = BSD_HASH (ent, c, hshift); ent 669 drivers/net/ppp/bsd_comp.c ent = dictp->codem1 + 1; ent 691 drivers/net/ppp/bsd_comp.c ent = dictp->codem1 + 1; /* finally found (prefix,suffix) */ ent 695 drivers/net/ppp/bsd_comp.c OUTPUT(ent); /* output the prefix */ ent 732 drivers/net/ppp/bsd_comp.c unsigned short *len2 = lens_ptr (db, ent); ent 736 drivers/net/ppp/bsd_comp.c ent = c; ent 739 drivers/net/ppp/bsd_comp.c OUTPUT(ent); /* output the last code */ ent 2401 drivers/net/wan/farsync.c fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2473 drivers/net/wan/farsync.c card->type = ent->driver_data; ent 2474 drivers/net/wan/farsync.c card->family = ((ent->driver_data == FST_TYPE_T2P) || ent 2475 drivers/net/wan/farsync.c (ent->driver_data == FST_TYPE_T4P)) ent 2477 drivers/net/wan/farsync.c if ((ent->driver_data == FST_TYPE_T1U) || ent 2478 drivers/net/wan/farsync.c (ent->driver_data == FST_TYPE_TE1)) ent 2481 drivers/net/wan/farsync.c card->nports = ((ent->driver_data == FST_TYPE_T2P) || ent 2482 drivers/net/wan/farsync.c (ent->driver_data == FST_TYPE_T2U)) ? 2 : 4; ent 807 drivers/net/wan/lmc/lmc_main.c static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 296 drivers/net/wan/pc300too.c const struct pci_device_id *ent) ent 274 drivers/net/wan/pci200syn.c const struct pci_device_id *ent) ent 557 drivers/net/wan/wanxl.c const struct pci_device_id *ent) ent 6152 drivers/net/wireless/intel/ipw2x00/ipw2100.c const struct pci_device_id *ent) ent 11601 drivers/net/wireless/intel/ipw2x00/ipw2200.c const struct pci_device_id *ent) ent 3580 drivers/net/wireless/intel/iwlegacy/3945-mac.c il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 3585 drivers/net/wireless/intel/iwlegacy/3945-mac.c struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data); ent 6479 drivers/net/wireless/intel/iwlegacy/4965-mac.c il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 6484 drivers/net/wireless/intel/iwlegacy/4965-mac.c struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data); ent 1077 drivers/net/wireless/intel/iwlwifi/mvm/fw.c union acpi_object *ent; ent 1079 drivers/net/wireless/intel/iwlwifi/mvm/fw.c ent = &wifi_pkg->package.elements[idx++]; ent 1080 drivers/net/wireless/intel/iwlwifi/mvm/fw.c if (ent->type != ACPI_TYPE_INTEGER || ent 1081 drivers/net/wireless/intel/iwlwifi/mvm/fw.c (j == 0 && ent->integer.value > ACPI_PPAG_MAX_LB) || ent 1082 drivers/net/wireless/intel/iwlwifi/mvm/fw.c (j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) || ent 1083 drivers/net/wireless/intel/iwlwifi/mvm/fw.c (j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) || ent 1084 drivers/net/wireless/intel/iwlwifi/mvm/fw.c (j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) { ent 1089 drivers/net/wireless/intel/iwlwifi/mvm/fw.c mvm->ppag_table.gain[i][j] = ent->integer.value; ent 993 drivers/net/wireless/intel/iwlwifi/pcie/drv.c static int iwl_pci_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent) ent 995 drivers/net/wireless/intel/iwlwifi/pcie/drv.c const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); ent 1001 drivers/net/wireless/intel/iwlwifi/pcie/drv.c iwl_trans = iwl_trans_pcie_alloc(pdev, ent, &cfg->trans); ent 642 drivers/net/wireless/intel/iwlwifi/pcie/internal.h const struct pci_device_id *ent, ent 3461 drivers/net/wireless/intel/iwlwifi/pcie/trans.c const struct pci_device_id *ent, ent 142 drivers/net/wireless/intersil/orinoco/orinoco_nortel.c const struct pci_device_id *ent) ent 114 drivers/net/wireless/intersil/orinoco/orinoco_pci.c const struct pci_device_id *ent) ent 181 drivers/net/wireless/intersil/orinoco/orinoco_plx.c const struct pci_device_id *ent) ent 92 drivers/net/wireless/intersil/orinoco/orinoco_tmd.c const struct pci_device_id *ent) ent 229 drivers/net/wireless/marvell/mwifiex/pcie.c const struct pci_device_id *ent) ent 245 drivers/net/wireless/marvell/mwifiex/pcie.c if (ent->driver_data) { ent 246 drivers/net/wireless/marvell/mwifiex/pcie.c struct mwifiex_pcie_device *data = (void *)ent->driver_data; ent 242 drivers/nubus/nubus.c struct nubus_dirent ent; ent 250 drivers/nubus/nubus.c if (nubus_readdir(dir, &ent) == -1) ent 252 drivers/nubus/nubus.c if (nubus_get_subdir(&ent, dir) == -1) ent 258 drivers/nubus/nubus.c int nubus_get_subdir(const struct nubus_dirent *ent, ent 261 drivers/nubus/nubus.c dir->ptr = dir->base = nubus_dirptr(ent); ent 263 drivers/nubus/nubus.c dir->mask = ent->mask; ent 268 drivers/nubus/nubus.c int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent) ent 276 drivers/nubus/nubus.c ent->base = nd->ptr; ent 289 drivers/nubus/nubus.c ent->type = resid >> 24; ent 291 drivers/nubus/nubus.c ent->data = resid & 0xffffff; ent 292 drivers/nubus/nubus.c ent->mask = nd->mask; ent 324 drivers/nubus/nubus.c struct nubus_dirent *ent) ent 326 drivers/nubus/nubus.c while (nubus_readdir(dir, ent) != -1) { ent 327 drivers/nubus/nubus.c if (ent->type == rsrc_type) ent 343 drivers/nubus/nubus.c struct nubus_dirent ent; ent 348 drivers/nubus/nubus.c while (nubus_readdir(&dir, &ent) != -1) { ent 351 drivers/nubus/nubus.c nubus_get_rsrc_mem(&size, &ent, 4); ent 352 drivers/nubus/nubus.c pr_debug(" block (0x%x), size %d\n", ent.type, size); ent 353 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(dir.procdir, &ent, size); ent 363 drivers/nubus/nubus.c struct nubus_dirent ent; ent 368 drivers/nubus/nubus.c while (nubus_readdir(&dir, &ent) != -1) { ent 369 drivers/nubus/nubus.c switch (ent.type) { ent 375 drivers/nubus/nubus.c nubus_get_rsrc_mem(&size, &ent, 4); ent 376 drivers/nubus/nubus.c pr_debug(" block (0x%x), size %d\n", ent.type, ent 378 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(dir.procdir, &ent, size); ent 383 drivers/nubus/nubus.c ent.type, ent.data); ent 384 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0); ent 392 drivers/nubus/nubus.c const struct nubus_dirent *ent) ent 394 drivers/nubus/nubus.c switch (ent->type) { ent 396 drivers/nubus/nubus.c pr_debug(" gamma directory offset: 0x%06x\n", ent->data); ent 397 drivers/nubus/nubus.c nubus_get_block_rsrc_dir(fres->board, procdir, ent); ent 401 drivers/nubus/nubus.c ent->type, ent->data); ent 402 drivers/nubus/nubus.c nubus_get_display_vidmode(fres->board, procdir, ent); ent 406 drivers/nubus/nubus.c ent->type, ent->data); ent 407 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(procdir, ent, 0); ent 414 drivers/nubus/nubus.c const struct nubus_dirent *ent) ent 416 drivers/nubus/nubus.c switch (ent->type) { ent 421 drivers/nubus/nubus.c 
nubus_get_rsrc_mem(addr, ent, 6); ent 423 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(procdir, ent, 6); ent 428 drivers/nubus/nubus.c ent->type, ent->data); ent 429 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(procdir, ent, 0); ent 436 drivers/nubus/nubus.c const struct nubus_dirent *ent) ent 438 drivers/nubus/nubus.c switch (ent->type) { ent 443 drivers/nubus/nubus.c nubus_get_rsrc_mem(&meminfo, ent, 8); ent 446 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(procdir, ent, 8); ent 453 drivers/nubus/nubus.c nubus_get_rsrc_mem(&rominfo, ent, 8); ent 456 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(procdir, ent, 8); ent 461 drivers/nubus/nubus.c ent->type, ent->data); ent 462 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(procdir, ent, 0); ent 469 drivers/nubus/nubus.c const struct nubus_dirent *ent) ent 473 drivers/nubus/nubus.c nubus_get_display_resource(fres, procdir, ent); ent 476 drivers/nubus/nubus.c nubus_get_network_resource(fres, procdir, ent); ent 479 drivers/nubus/nubus.c nubus_get_cpu_resource(fres, procdir, ent); ent 483 drivers/nubus/nubus.c ent->type, ent->data); ent 484 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(procdir, ent, 0); ent 494 drivers/nubus/nubus.c struct nubus_dirent ent; ent 509 drivers/nubus/nubus.c while (nubus_readdir(&dir, &ent) != -1) { ent 510 drivers/nubus/nubus.c switch (ent.type) { ent 515 drivers/nubus/nubus.c nubus_get_rsrc_mem(nbtdata, &ent, 8); ent 522 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8); ent 530 drivers/nubus/nubus.c len = nubus_get_rsrc_str(name, &ent, sizeof(name)); ent 532 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1); ent 540 drivers/nubus/nubus.c ent.data); ent 541 drivers/nubus/nubus.c nubus_get_block_rsrc_dir(board, dir.procdir, &ent); ent 551 drivers/nubus/nubus.c nubus_get_rsrc_mem(&base_offset, &ent, 4); ent 553 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4); ent 561 drivers/nubus/nubus.c nubus_get_rsrc_mem(&length, &ent, 4); ent 563 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4); ent 567 drivers/nubus/nubus.c pr_debug(" flags: 0x%06x\n", ent.data); ent 568 drivers/nubus/nubus.c nubus_proc_add_rsrc(dir.procdir, &ent); ent 571 drivers/nubus/nubus.c pr_debug(" hwdevid: 0x%06x\n", ent.data); ent 572 drivers/nubus/nubus.c nubus_proc_add_rsrc(dir.procdir, &ent); ent 577 drivers/nubus/nubus.c nubus_get_private_resource(fres, dir.procdir, &ent); ent 587 drivers/nubus/nubus.c const struct nubus_dirent *ent) ent 593 drivers/nubus/nubus.c nubus_get_rsrc_mem(&icon, ent, 128); ent 599 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(procdir, ent, 128); ent 609 drivers/nubus/nubus.c struct nubus_dirent ent; ent 617 drivers/nubus/nubus.c while (nubus_readdir(&dir, &ent) != -1) { ent 622 drivers/nubus/nubus.c len = nubus_get_rsrc_str(name, &ent, sizeof(name)); ent 623 drivers/nubus/nubus.c if (ent.type < 1 || ent.type > 5) ent 624 drivers/nubus/nubus.c ent.type = 5; ent 625 drivers/nubus/nubus.c pr_debug(" %s: %s\n", vendor_fields[ent.type - 1], name); ent 626 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1); ent 635 drivers/nubus/nubus.c struct nubus_dirent ent; ent 641 drivers/nubus/nubus.c while (nubus_readdir(&dir, &ent) != -1) { ent 642 drivers/nubus/nubus.c switch (ent.type) { ent 649 drivers/nubus/nubus.c nubus_get_rsrc_mem(nbtdata, &ent, 8); ent 656 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8); ent 663 drivers/nubus/nubus.c len = nubus_get_rsrc_str(board->name, &ent, ent 666 drivers/nubus/nubus.c 
nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1); ent 670 drivers/nubus/nubus.c nubus_get_icon(board, dir.procdir, &ent); ent 673 drivers/nubus/nubus.c pr_debug(" board id: 0x%x\n", ent.data); ent 674 drivers/nubus/nubus.c nubus_proc_add_rsrc(dir.procdir, &ent); ent 677 drivers/nubus/nubus.c pr_debug(" primary init offset: 0x%06x\n", ent.data); ent 678 drivers/nubus/nubus.c nubus_proc_add_rsrc(dir.procdir, &ent); ent 681 drivers/nubus/nubus.c nubus_get_vendorinfo(board, dir.procdir, &ent); ent 684 drivers/nubus/nubus.c pr_debug(" flags: 0x%06x\n", ent.data); ent 685 drivers/nubus/nubus.c nubus_proc_add_rsrc(dir.procdir, &ent); ent 688 drivers/nubus/nubus.c pr_debug(" hwdevid: 0x%06x\n", ent.data); ent 689 drivers/nubus/nubus.c nubus_proc_add_rsrc(dir.procdir, &ent); ent 693 drivers/nubus/nubus.c ent.data); ent 694 drivers/nubus/nubus.c nubus_proc_add_rsrc(dir.procdir, &ent); ent 699 drivers/nubus/nubus.c ent.data); ent 700 drivers/nubus/nubus.c nubus_get_block_rsrc_dir(board, dir.procdir, &ent); ent 705 drivers/nubus/nubus.c ent.data); ent 706 drivers/nubus/nubus.c nubus_proc_add_rsrc(dir.procdir, &ent); ent 710 drivers/nubus/nubus.c ent.type, ent.data); ent 711 drivers/nubus/nubus.c nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0); ent 723 drivers/nubus/nubus.c struct nubus_dirent ent; ent 792 drivers/nubus/nubus.c if (nubus_readdir(&dir, &ent) == -1) { ent 799 drivers/nubus/nubus.c if (ent.type < 1 || ent.type > 127) ent 804 drivers/nubus/nubus.c nubus_get_board_resource(board, slot, &ent); ent 806 drivers/nubus/nubus.c while (nubus_readdir(&dir, &ent) != -1) { ent 809 drivers/nubus/nubus.c fres = nubus_get_functional_resource(board, slot, &ent); ent 69 drivers/nubus/proc.c const struct nubus_dirent *ent, ent 77 drivers/nubus/proc.c snprintf(name, sizeof(name), "%x", ent->type); ent 121 drivers/nubus/proc.c struct nubus_dirent ent; ent 126 drivers/nubus/proc.c ent.mask = lanes; ent 127 drivers/nubus/proc.c ent.base = pde_data->res_ptr; ent 128 drivers/nubus/proc.c ent.data = 0; ent 129 drivers/nubus/proc.c nubus_seq_write_rsrc_mem(m, &ent, pde_data->res_size); ent 141 drivers/nubus/proc.c const struct nubus_dirent *ent, ent 150 drivers/nubus/proc.c snprintf(name, sizeof(name), "%x", ent->type); ent 152 drivers/nubus/proc.c pde_data = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size); ent 160 drivers/nubus/proc.c const struct nubus_dirent *ent) ent 163 drivers/nubus/proc.c unsigned char *data = (unsigned char *)ent->data; ent 168 drivers/nubus/proc.c snprintf(name, sizeof(name), "%x", ent->type); ent 273 drivers/nvdimm/btt.c return le32_to_cpu(log->ent[log_idx].seq); ent 297 drivers/nvdimm/btt.c log->ent[idx0].seq = cpu_to_le32(1); ent 328 drivers/nvdimm/btt.c struct log_entry *ent, int old_flag) ent 342 drivers/nvdimm/btt.c old_ent, lane, log.ent[arena->log_index[0]].seq, ent 343 drivers/nvdimm/btt.c log.ent[arena->log_index[1]].seq); ent 350 drivers/nvdimm/btt.c if (ent != NULL) ent 351 drivers/nvdimm/btt.c memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE); ent 362 drivers/nvdimm/btt.c u32 sub, struct log_entry *ent, unsigned long flags) ent 367 drivers/nvdimm/btt.c void *src = ent; ent 383 drivers/nvdimm/btt.c struct log_entry *ent) ent 387 drivers/nvdimm/btt.c ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC); ent 395 drivers/nvdimm/btt.c if (ent_e_flag(le32_to_cpu(ent->old_map))) ent 397 drivers/nvdimm/btt.c arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map)); ent 454 drivers/nvdimm/btt.c struct log_entry ent; ent 486 drivers/nvdimm/btt.c 
ent.lba = cpu_to_le32(i); ent 487 drivers/nvdimm/btt.c ent.old_map = cpu_to_le32(arena->external_nlba + i); ent 488 drivers/nvdimm/btt.c ent.new_map = cpu_to_le32(arena->external_nlba + i); ent 489 drivers/nvdimm/btt.c ent.seq = cpu_to_le32(LOG_SEQ_INIT); ent 490 drivers/nvdimm/btt.c ret = __btt_log_write(arena, i, 0, &ent, 0); ent 604 drivers/nvdimm/btt.c static bool ent_is_padding(struct log_entry *ent) ent 606 drivers/nvdimm/btt.c return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0) ent 607 drivers/nvdimm/btt.c && (ent->seq == 0); ent 638 drivers/nvdimm/btt.c if (ent_is_padding(&log.ent[j])) { ent 666 drivers/nvdimm/btt.c if (ent_is_padding(&log.ent[j])) ent 678 drivers/nvdimm/btt.c if (!ent_is_padding(&log.ent[j])) ent 35 drivers/nvdimm/btt.h #define ent_lba(ent) (ent & MAP_LBA_MASK) ent 36 drivers/nvdimm/btt.h #define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK)) ent 37 drivers/nvdimm/btt.h #define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK)) ent 38 drivers/nvdimm/btt.h #define set_e_flag(ent) (ent |= MAP_ERR_MASK) ent 40 drivers/nvdimm/btt.h #define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent)) ent 93 drivers/nvdimm/btt.h struct log_entry ent[4]; ent 245 drivers/parisc/led.c struct proc_dir_entry *ent; ent 254 drivers/parisc/led.c ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root, ent 256 drivers/parisc/led.c if (!ent) return -1; ent 261 drivers/parisc/led.c ent = proc_create_data("lcd", S_IRUGO|S_IWUSR, proc_pdc_root, ent 263 drivers/parisc/led.c if (!ent) return -1; ent 188 drivers/pci/hotplug/cpcihp_zt5550.c static int zt5550_hc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 753 drivers/pci/hotplug/cpqphp_core.c static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 255 drivers/pci/hotplug/shpchp_core.c static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 365 drivers/pci/pci.c u16 ent; ent 373 drivers/pci/pci.c pci_bus_read_config_word(bus, devfn, pos, &ent); ent 375 drivers/pci/pci.c id = ent & 0xff; ent 380 drivers/pci/pci.c pos = (ent >> 8); ent 460 drivers/pcmcia/vrc4173_cardu.c const struct pci_device_id *ent) ent 156 drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c const struct pci_device_id *ent) ent 79 drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 482 drivers/platform/x86/pmc_atom.c static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent) ent 485 drivers/platform/x86/pmc_atom.c const struct pmc_data *data = (struct pmc_data *)ent->driver_data; ent 537 drivers/platform/x86/pmc_atom.c const struct pci_device_id *ent; ent 548 drivers/platform/x86/pmc_atom.c ent = pci_match_id(pmc_pci_ids, pdev); ent 549 drivers/platform/x86/pmc_atom.c if (ent) ent 550 drivers/platform/x86/pmc_atom.c return pmc_setup_dev(pdev, ent); ent 167 drivers/scsi/a2091.c static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent) ent 11735 drivers/scsi/advansys.c const struct pci_device_id *ent) ent 157 drivers/scsi/aic7xxx/aic79xx_osm_pci.c ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 203 drivers/scsi/aic7xxx/aic7xxx_osm_pci.c ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 180 drivers/scsi/aic94xx/aic94xx_sds.c struct asd_ocm_dir_ent *ent; ent 188 drivers/scsi/aic94xx/aic94xx_sds.c ent = &dir->entry[i]; ent 189 drivers/scsi/aic94xx/aic94xx_sds.c *offs = (u32) 
THREE_TO_NUM(ent->offs); ent 190 drivers/scsi/aic94xx/aic94xx_sds.c *size = (u32) THREE_TO_NUM(ent->size); ent 175 drivers/scsi/am53c974.c struct esp_cmd_entry *ent = esp->active_cmd; ent 177 drivers/scsi/am53c974.c ent->flags |= ESP_CMD_FLAG_RESIDUAL; ent 134 drivers/scsi/arm/queue.c static struct scsi_cmnd *__queue_remove(Queue_t *queue, struct list_head *ent) ent 141 drivers/scsi/arm/queue.c list_del(ent); ent 142 drivers/scsi/arm/queue.c q = list_entry(ent, QE_t, list); ent 146 drivers/scsi/arm/queue.c list_add(ent, &queue->free); ent 1497 drivers/scsi/atp870u.c static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1503 drivers/scsi/atp870u.c if (ent->device == PCI_DEVICE_ID_ARTOP_AEC7610 && pdev->revision < 2) { ent 401 drivers/scsi/esp_scsi.c static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent, ent 406 drivers/scsi/esp_scsi.c if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent 407 drivers/scsi/esp_scsi.c return ent->sense_dma + ent 408 drivers/scsi/esp_scsi.c (ent->sense_ptr - cmd->sense_buffer); ent 416 drivers/scsi/esp_scsi.c static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent, ent 421 drivers/scsi/esp_scsi.c if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent 423 drivers/scsi/esp_scsi.c (ent->sense_ptr - cmd->sense_buffer); ent 428 drivers/scsi/esp_scsi.c static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent, ent 433 drivers/scsi/esp_scsi.c if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent 434 drivers/scsi/esp_scsi.c ent->sense_ptr += len; ent 462 drivers/scsi/esp_scsi.c static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent) ent 464 drivers/scsi/esp_scsi.c struct scsi_cmnd *cmd = ent->cmd; ent 467 drivers/scsi/esp_scsi.c if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent 468 drivers/scsi/esp_scsi.c ent->saved_sense_ptr = ent->sense_ptr; ent 471 drivers/scsi/esp_scsi.c ent->saved_cur_residue = spriv->cur_residue; ent 472 drivers/scsi/esp_scsi.c ent->saved_prv_sg = spriv->prv_sg; ent 473 drivers/scsi/esp_scsi.c ent->saved_cur_sg = spriv->cur_sg; ent 474 drivers/scsi/esp_scsi.c ent->saved_tot_residue = spriv->tot_residue; ent 477 drivers/scsi/esp_scsi.c static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent) ent 479 drivers/scsi/esp_scsi.c struct scsi_cmnd *cmd = ent->cmd; ent 482 drivers/scsi/esp_scsi.c if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent 483 drivers/scsi/esp_scsi.c ent->sense_ptr = ent->saved_sense_ptr; ent 486 drivers/scsi/esp_scsi.c spriv->cur_residue = ent->saved_cur_residue; ent 487 drivers/scsi/esp_scsi.c spriv->prv_sg = ent->saved_prv_sg; ent 488 drivers/scsi/esp_scsi.c spriv->cur_sg = ent->saved_cur_sg; ent 489 drivers/scsi/esp_scsi.c spriv->tot_residue = ent->saved_tot_residue; ent 571 drivers/scsi/esp_scsi.c static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, ent 574 drivers/scsi/esp_scsi.c if (!ent->orig_tag[0]) { ent 598 drivers/scsi/esp_scsi.c lp->non_tagged_cmd = ent; ent 606 drivers/scsi/esp_scsi.c BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]); ent 608 drivers/scsi/esp_scsi.c lp->tagged_cmds[ent->orig_tag[1]] = ent; ent 614 drivers/scsi/esp_scsi.c static void esp_free_lun_tag(struct esp_cmd_entry *ent, ent 617 drivers/scsi/esp_scsi.c if (ent->orig_tag[0]) { ent 618 drivers/scsi/esp_scsi.c BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent); ent 619 drivers/scsi/esp_scsi.c lp->tagged_cmds[ent->orig_tag[1]] = NULL; ent 622 drivers/scsi/esp_scsi.c BUG_ON(lp->non_tagged_cmd != ent); ent 627 drivers/scsi/esp_scsi.c static void esp_map_sense(struct esp *esp, struct esp_cmd_entry 
*ent) ent 629 drivers/scsi/esp_scsi.c ent->sense_ptr = ent->cmd->sense_buffer; ent 631 drivers/scsi/esp_scsi.c ent->sense_dma = (uintptr_t)ent->sense_ptr; ent 635 drivers/scsi/esp_scsi.c ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr, ent 639 drivers/scsi/esp_scsi.c static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent) ent 642 drivers/scsi/esp_scsi.c dma_unmap_single(esp->dev, ent->sense_dma, ent 644 drivers/scsi/esp_scsi.c ent->sense_ptr = NULL; ent 654 drivers/scsi/esp_scsi.c static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent) ent 656 drivers/scsi/esp_scsi.c struct scsi_cmnd *cmd = ent->cmd; ent 665 drivers/scsi/esp_scsi.c if (!ent->sense_ptr) { ent 668 drivers/scsi/esp_scsi.c esp_map_sense(esp, ent); ent 670 drivers/scsi/esp_scsi.c ent->saved_sense_ptr = ent->sense_ptr; ent 672 drivers/scsi/esp_scsi.c esp->active_cmd = ent; ent 703 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent; ent 705 drivers/scsi/esp_scsi.c list_for_each_entry(ent, &esp->queued_cmds, list) { ent 706 drivers/scsi/esp_scsi.c struct scsi_cmnd *cmd = ent->cmd; ent 710 drivers/scsi/esp_scsi.c if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent 711 drivers/scsi/esp_scsi.c ent->tag[0] = 0; ent 712 drivers/scsi/esp_scsi.c ent->tag[1] = 0; ent 713 drivers/scsi/esp_scsi.c return ent; ent 716 drivers/scsi/esp_scsi.c if (!spi_populate_tag_msg(&ent->tag[0], cmd)) { ent 717 drivers/scsi/esp_scsi.c ent->tag[0] = 0; ent 718 drivers/scsi/esp_scsi.c ent->tag[1] = 0; ent 720 drivers/scsi/esp_scsi.c ent->orig_tag[0] = ent->tag[0]; ent 721 drivers/scsi/esp_scsi.c ent->orig_tag[1] = ent->tag[1]; ent 723 drivers/scsi/esp_scsi.c if (esp_alloc_lun_tag(ent, lp) < 0) ent 726 drivers/scsi/esp_scsi.c return ent; ent 737 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent; ent 747 drivers/scsi/esp_scsi.c ent = find_and_prep_issuable_command(esp); ent 748 drivers/scsi/esp_scsi.c if (!ent) ent 751 drivers/scsi/esp_scsi.c if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent 752 drivers/scsi/esp_scsi.c esp_autosense(esp, ent); ent 756 drivers/scsi/esp_scsi.c cmd = ent->cmd; ent 762 drivers/scsi/esp_scsi.c list_move(&ent->list, &esp->active_cmds); ent 764 drivers/scsi/esp_scsi.c esp->active_cmd = ent; ent 767 drivers/scsi/esp_scsi.c esp_save_pointers(esp, ent); ent 819 drivers/scsi/esp_scsi.c if (ent->tag[0] && esp->rev == ESP100) { ent 830 drivers/scsi/esp_scsi.c if (ent->tag[0]) { ent 834 drivers/scsi/esp_scsi.c esp->msg_out[0] = ent->tag[0]; ent 835 drivers/scsi/esp_scsi.c esp->msg_out[1] = ent->tag[1]; ent 843 drivers/scsi/esp_scsi.c if (ent->tag[0]) { ent 844 drivers/scsi/esp_scsi.c *p++ = ent->tag[0]; ent 845 drivers/scsi/esp_scsi.c *p++ = ent->tag[1]; ent 890 drivers/scsi/esp_scsi.c static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent) ent 892 drivers/scsi/esp_scsi.c list_add(&ent->list, &esp->esp_cmd_pool); ent 895 drivers/scsi/esp_scsi.c static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent, ent 904 drivers/scsi/esp_scsi.c esp_free_lun_tag(ent, dev->hostdata); ent 907 drivers/scsi/esp_scsi.c if (ent->eh_done) { ent 908 drivers/scsi/esp_scsi.c complete(ent->eh_done); ent 909 drivers/scsi/esp_scsi.c ent->eh_done = NULL; ent 912 drivers/scsi/esp_scsi.c if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { ent 913 drivers/scsi/esp_scsi.c esp_unmap_sense(esp, ent); ent 924 drivers/scsi/esp_scsi.c ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE; ent 938 drivers/scsi/esp_scsi.c list_del(&ent->list); ent 939 drivers/scsi/esp_scsi.c esp_put_ent(esp, ent); ent 950 drivers/scsi/esp_scsi.c static void 
esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) ent 952 drivers/scsi/esp_scsi.c struct scsi_device *dev = ent->cmd->device; ent 963 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent; ent 965 drivers/scsi/esp_scsi.c ent = esp_get_ent(esp); ent 966 drivers/scsi/esp_scsi.c if (!ent) ent 969 drivers/scsi/esp_scsi.c ent->cmd = cmd; ent 976 drivers/scsi/esp_scsi.c list_add_tail(&ent->list, &esp->queued_cmds); ent 1054 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent; ent 1131 drivers/scsi/esp_scsi.c ent = lp->tagged_cmds[esp->command_block[1]]; ent 1132 drivers/scsi/esp_scsi.c if (!ent) { ent 1139 drivers/scsi/esp_scsi.c return ent; ent 1144 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent; ent 1208 drivers/scsi/esp_scsi.c ent = lp->non_tagged_cmd; ent 1209 drivers/scsi/esp_scsi.c if (!ent) { ent 1210 drivers/scsi/esp_scsi.c ent = esp_reconnect_with_tag(esp, lp); ent 1211 drivers/scsi/esp_scsi.c if (!ent) ent 1215 drivers/scsi/esp_scsi.c esp->active_cmd = ent; ent 1218 drivers/scsi/esp_scsi.c esp_restore_pointers(esp, ent); ent 1229 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent; ent 1236 drivers/scsi/esp_scsi.c ent = esp->active_cmd; ent 1237 drivers/scsi/esp_scsi.c cmd = ent->cmd; ent 1244 drivers/scsi/esp_scsi.c esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16)); ent 1257 drivers/scsi/esp_scsi.c if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { ent 1259 drivers/scsi/esp_scsi.c esp_free_lun_tag(ent, cmd->device->hostdata); ent 1264 drivers/scsi/esp_scsi.c esp_unmap_sense(esp, ent); ent 1270 drivers/scsi/esp_scsi.c list_move(&ent->list, &esp->queued_cmds); ent 1289 drivers/scsi/esp_scsi.c esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16)); ent 1321 drivers/scsi/esp_scsi.c static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, ent 1352 drivers/scsi/esp_scsi.c if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) { ent 1357 drivers/scsi/esp_scsi.c if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) ent 1358 drivers/scsi/esp_scsi.c ent->sense_ptr[bytes_sent] = bval; ent 1371 drivers/scsi/esp_scsi.c ent->flags &= ~ESP_CMD_FLAG_RESIDUAL; ent 1373 drivers/scsi/esp_scsi.c if (!(ent->flags & ESP_CMD_FLAG_WRITE)) ent 1405 drivers/scsi/esp_scsi.c if (!(ent->flags & ESP_CMD_FLAG_WRITE)) ent 1456 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent = esp->active_cmd; ent 1457 drivers/scsi/esp_scsi.c struct scsi_cmnd *cmd = ent->cmd; ent 1596 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent = esp->active_cmd; ent 1597 drivers/scsi/esp_scsi.c struct scsi_cmnd *cmd = ent->cmd; ent 1644 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent; ent 1652 drivers/scsi/esp_scsi.c ent = esp->active_cmd; ent 1653 drivers/scsi/esp_scsi.c spriv = ESP_CMD_PRIV(ent->cmd); ent 1674 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent = esp->active_cmd; ent 1676 drivers/scsi/esp_scsi.c ent->message = msg0; ent 1744 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent = esp->active_cmd; ent 1745 drivers/scsi/esp_scsi.c struct scsi_cmnd *cmd = ent->cmd; ent 1746 drivers/scsi/esp_scsi.c dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd); ent 1747 drivers/scsi/esp_scsi.c unsigned int dma_len = esp_cur_dma_len(ent, cmd); ent 1753 drivers/scsi/esp_scsi.c ent->flags |= ESP_CMD_FLAG_WRITE; ent 1755 drivers/scsi/esp_scsi.c ent->flags &= ~ESP_CMD_FLAG_WRITE; ent 1770 drivers/scsi/esp_scsi.c (unsigned long long)esp_cur_dma_addr(ent, cmd), ent 1771 drivers/scsi/esp_scsi.c esp_cur_dma_len(ent, cmd)); ent 1785 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent = esp->active_cmd; ent 1786 drivers/scsi/esp_scsi.c struct 
scsi_cmnd *cmd = ent->cmd; ent 1796 drivers/scsi/esp_scsi.c if (ent->flags & ESP_CMD_FLAG_WRITE) { ent 1813 drivers/scsi/esp_scsi.c bytes_sent = esp_data_bytes_sent(esp, ent, cmd); ent 1816 drivers/scsi/esp_scsi.c ent->flags, bytes_sent); ent 1824 drivers/scsi/esp_scsi.c esp_advance_dma(esp, ent, cmd, bytes_sent); ent 1830 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent = esp->active_cmd; ent 1833 drivers/scsi/esp_scsi.c ent->status = esp_read8(ESP_FDATA); ent 1834 drivers/scsi/esp_scsi.c ent->message = esp_read8(ESP_FDATA); ent 1837 drivers/scsi/esp_scsi.c ent->status = esp_read8(ESP_FDATA); ent 1838 drivers/scsi/esp_scsi.c ent->message = 0xff; ent 1843 drivers/scsi/esp_scsi.c if (ent->message != COMMAND_COMPLETE) { ent 1846 drivers/scsi/esp_scsi.c ent->message); ent 1856 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent = esp->active_cmd; ent 1857 drivers/scsi/esp_scsi.c struct scsi_cmnd *cmd = ent->cmd; ent 1859 drivers/scsi/esp_scsi.c if (ent->message == COMMAND_COMPLETE || ent 1860 drivers/scsi/esp_scsi.c ent->message == DISCONNECT) ent 1863 drivers/scsi/esp_scsi.c if (ent->message == COMMAND_COMPLETE) { ent 1865 drivers/scsi/esp_scsi.c ent->status, ent->message); ent 1866 drivers/scsi/esp_scsi.c if (ent->status == SAM_STAT_TASK_SET_FULL) ent 1867 drivers/scsi/esp_scsi.c esp_event_queue_full(esp, ent); ent 1869 drivers/scsi/esp_scsi.c if (ent->status == SAM_STAT_CHECK_CONDITION && ent 1870 drivers/scsi/esp_scsi.c !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { ent 1871 drivers/scsi/esp_scsi.c ent->flags |= ESP_CMD_FLAG_AUTOSENSE; ent 1872 drivers/scsi/esp_scsi.c esp_autosense(esp, ent); ent 1874 drivers/scsi/esp_scsi.c esp_cmd_is_done(esp, ent, cmd, ent 1875 drivers/scsi/esp_scsi.c compose_result(ent->status, ent 1876 drivers/scsi/esp_scsi.c ent->message, ent 1879 drivers/scsi/esp_scsi.c } else if (ent->message == DISCONNECT) { ent 1882 drivers/scsi/esp_scsi.c ent->tag[0], ent->tag[1]); ent 1889 drivers/scsi/esp_scsi.c ent->message); ent 2036 drivers/scsi/esp_scsi.c static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent) ent 2038 drivers/scsi/esp_scsi.c struct scsi_cmnd *cmd = ent->cmd; ent 2041 drivers/scsi/esp_scsi.c esp_free_lun_tag(ent, cmd->device->hostdata); ent 2044 drivers/scsi/esp_scsi.c if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) ent 2045 drivers/scsi/esp_scsi.c esp_unmap_sense(esp, ent); ent 2048 drivers/scsi/esp_scsi.c list_del(&ent->list); ent 2049 drivers/scsi/esp_scsi.c esp_put_ent(esp, ent); ent 2062 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent, *tmp; ent 2065 drivers/scsi/esp_scsi.c list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) { ent 2066 drivers/scsi/esp_scsi.c struct scsi_cmnd *cmd = ent->cmd; ent 2068 drivers/scsi/esp_scsi.c list_del(&ent->list); ent 2071 drivers/scsi/esp_scsi.c esp_put_ent(esp, ent); ent 2074 drivers/scsi/esp_scsi.c list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) { ent 2075 drivers/scsi/esp_scsi.c if (ent == esp->active_cmd) ent 2077 drivers/scsi/esp_scsi.c esp_reset_cleanup_one(esp, ent); ent 2500 drivers/scsi/esp_scsi.c struct esp_cmd_entry *ent, *tmp; ent 2510 drivers/scsi/esp_scsi.c ent = esp->active_cmd; ent 2511 drivers/scsi/esp_scsi.c if (ent) ent 2514 drivers/scsi/esp_scsi.c ent->cmd, ent->cmd->cmnd[0]); ent 2515 drivers/scsi/esp_scsi.c list_for_each_entry(ent, &esp->queued_cmds, list) { ent 2517 drivers/scsi/esp_scsi.c ent->cmd, ent->cmd->cmnd[0]); ent 2519 drivers/scsi/esp_scsi.c list_for_each_entry(ent, &esp->active_cmds, list) { ent 2521 drivers/scsi/esp_scsi.c ent->cmd, ent->cmd->cmnd[0]); 
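
The drivers/scsi/esp_scsi.c entries above all touch one lifecycle: an esp_cmd_entry is taken from esp_cmd_pool, placed on queued_cmds, moved to active_cmds when issued, and returned to the pool on completion. The fragment below is a hypothetical, minimal sketch of that bookkeeping pattern using the kernel's <linux/list.h> helpers; the names (demo_cmd_entry, demo_host, demo_*) are invented for illustration and are not taken from esp_scsi.c or any other file in this index.

/*
 * Hypothetical sketch only -- not code from esp_scsi.c or any driver in
 * this index.  It mirrors the recurring "ent" bookkeeping pattern shown
 * above: take an entry from a free pool, queue it, move it to the active
 * list when issued, and recycle it on completion.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_cmd_entry {			/* stands in for esp_cmd_entry */
	struct list_head list;
	void *cmd;			/* opaque per-command payload */
};

struct demo_host {			/* stands in for the adapter state */
	struct list_head free_pool;	/* recycled entries */
	struct list_head queued_cmds;	/* waiting to be issued */
	struct list_head active_cmds;	/* issued, awaiting completion */
};

/* Reuse a pooled entry when possible, otherwise allocate a fresh one. */
static struct demo_cmd_entry *demo_get_ent(struct demo_host *host)
{
	struct demo_cmd_entry *ent;

	if (!list_empty(&host->free_pool)) {
		ent = list_first_entry(&host->free_pool,
				       struct demo_cmd_entry, list);
		list_del(&ent->list);
		return ent;
	}
	return kzalloc(sizeof(*ent), GFP_ATOMIC);
}

/* Drop a finished entry back onto the free pool for reuse. */
static void demo_put_ent(struct demo_host *host, struct demo_cmd_entry *ent)
{
	list_add(&ent->list, &host->free_pool);
}

/* Queue a new command: wrap it in an entry and append to queued_cmds. */
static int demo_queue_cmd(struct demo_host *host, void *cmd)
{
	struct demo_cmd_entry *ent = demo_get_ent(host);

	if (!ent)
		return -ENOMEM;
	ent->cmd = cmd;
	list_add_tail(&ent->list, &host->queued_cmds);
	return 0;
}

/* Issue the oldest queued command: queued_cmds -> active_cmds. */
static void demo_issue_next(struct demo_host *host)
{
	struct demo_cmd_entry *ent;

	if (list_empty(&host->queued_cmds))
		return;
	ent = list_first_entry(&host->queued_cmds,
			       struct demo_cmd_entry, list);
	list_move(&ent->list, &host->active_cmds);
}

/* Complete an in-flight command: unlink from active_cmds and recycle. */
static void demo_complete(struct demo_host *host, struct demo_cmd_entry *ent)
{
	list_del(&ent->list);
	demo_put_ent(host, ent);
}

In the real driver the same list movement is interleaved with tag allocation (esp_alloc_lun_tag/esp_free_lun_tag) and autosense handling (ESP_CMD_FLAG_AUTOSENSE), as the surrounding entries show; the sketch keeps only the free-pool/queued/active transitions.
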
ent 2528 drivers/scsi/esp_scsi.c ent = NULL; ent 2531 drivers/scsi/esp_scsi.c ent = tmp; ent 2536 drivers/scsi/esp_scsi.c if (ent) { ent 2540 drivers/scsi/esp_scsi.c list_del(&ent->list); ent 2545 drivers/scsi/esp_scsi.c esp_put_ent(esp, ent); ent 2552 drivers/scsi/esp_scsi.c ent = esp->active_cmd; ent 2553 drivers/scsi/esp_scsi.c if (ent && ent->cmd == cmd) { ent 2566 drivers/scsi/esp_scsi.c ent->eh_done = &eh_done; ent 2593 drivers/scsi/esp_scsi.c ent->eh_done = NULL; ent 559 drivers/scsi/fnic/fnic_main.c static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 419 drivers/scsi/gdth.c const struct pci_device_id *ent); ent 452 drivers/scsi/gdth.c const struct pci_device_id *ent) ent 271 drivers/scsi/gvp11.c static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent) ent 282 drivers/scsi/gvp11.c default_dma_xfer_mask = ent->driver_data; ent 8602 drivers/scsi/hpsa.c static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 384 drivers/scsi/ips.c static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent); ent 6777 drivers/scsi/ips.c ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent) ent 257 drivers/scsi/isci/host.c static void sci_controller_task_completion(struct isci_host *ihost, u32 ent) ent 259 drivers/scsi/isci/host.c u32 index = SCU_GET_COMPLETION_INDEX(ent); ent 269 drivers/scsi/isci/host.c sci_io_request_tc_completion(ireq, ent); ent 272 drivers/scsi/isci/host.c static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent) ent 278 drivers/scsi/isci/host.c index = SCU_GET_COMPLETION_INDEX(ent); ent 280 drivers/scsi/isci/host.c switch (scu_get_command_request_type(ent)) { ent 285 drivers/scsi/isci/host.c __func__, ent, ireq); ent 295 drivers/scsi/isci/host.c __func__, ent, idev); ent 302 drivers/scsi/isci/host.c __func__, ent); ent 307 drivers/scsi/isci/host.c static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent) ent 318 drivers/scsi/isci/host.c frame_index = SCU_GET_FRAME_INDEX(ent); ent 323 drivers/scsi/isci/host.c if (SCU_GET_FRAME_ERROR(ent)) { ent 333 drivers/scsi/isci/host.c index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); ent 338 drivers/scsi/isci/host.c index = SCU_GET_COMPLETION_INDEX(ent); ent 345 drivers/scsi/isci/host.c index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); ent 368 drivers/scsi/isci/host.c static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) ent 375 drivers/scsi/isci/host.c index = SCU_GET_COMPLETION_INDEX(ent); ent 377 drivers/scsi/isci/host.c switch (scu_get_event_type(ent)) { ent 385 drivers/scsi/isci/host.c ent); ent 399 drivers/scsi/isci/host.c ent); ent 404 drivers/scsi/isci/host.c sci_io_request_event_handler(ireq, ent); ent 408 drivers/scsi/isci/host.c switch (scu_get_event_specifier(ent)) { ent 413 drivers/scsi/isci/host.c sci_io_request_event_handler(ireq, ent); ent 421 drivers/scsi/isci/host.c ent); ent 428 drivers/scsi/isci/host.c sci_remote_device_event_handler(idev, ent); ent 436 drivers/scsi/isci/host.c ent); ent 451 drivers/scsi/isci/host.c index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); ent 453 drivers/scsi/isci/host.c sci_phy_event_handler(iphy, ent); ent 463 drivers/scsi/isci/host.c sci_remote_device_event_handler(idev, ent); ent 471 drivers/scsi/isci/host.c ent, ent 480 drivers/scsi/isci/host.c ent); ent 488 drivers/scsi/isci/host.c u32 ent; ent 512 drivers/scsi/isci/host.c ent = ihost->completion_queue[get_index]; ent 522 drivers/scsi/isci/host.c ent); ent 524 
drivers/scsi/isci/host.c switch (SCU_GET_COMPLETION_TYPE(ent)) { ent 526 drivers/scsi/isci/host.c sci_controller_task_completion(ihost, ent); ent 530 drivers/scsi/isci/host.c sci_controller_sdma_completion(ihost, ent); ent 534 drivers/scsi/isci/host.c sci_controller_unsolicited_frame(ihost, ent); ent 538 drivers/scsi/isci/host.c sci_controller_event_completion(ihost, ent); ent 546 drivers/scsi/isci/host.c sci_controller_event_completion(ihost, ent); ent 554 drivers/scsi/isci/host.c ent); ent 347 drivers/scsi/mvsas/mv_init.c const struct pci_device_id *ent, ent 354 drivers/scsi/mvsas/mv_init.c (1L << mvs_chips[ent->driver_data].slot_width) * ent 361 drivers/scsi/mvsas/mv_init.c mvi->chip_id = ent->driver_data; ent 492 drivers/scsi/mvsas/mv_init.c static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) ent 523 drivers/scsi/mvsas/mv_init.c chip = &mvs_chips[ent->driver_data]; ent 542 drivers/scsi/mvsas/mv_init.c mvi = mvs_pci_alloc(pdev, ent, shost, nhost); ent 244 drivers/scsi/pm8001/pm8001_init.c const struct pci_device_id *ent) ent 282 drivers/scsi/pm8001/pm8001_init.c if ((ent->driver_data) != chip_8001) { ent 307 drivers/scsi/pm8001/pm8001_init.c if (ent->driver_data != chip_8001) { ent 449 drivers/scsi/pm8001/pm8001_init.c const struct pci_device_id *ent, ent 463 drivers/scsi/pm8001/pm8001_init.c pm8001_ha->chip_id = ent->driver_data; ent 489 drivers/scsi/pm8001/pm8001_init.c if (!pm8001_alloc(pm8001_ha, ent)) ent 959 drivers/scsi/pm8001/pm8001_init.c const struct pci_device_id *ent) ent 994 drivers/scsi/pm8001/pm8001_init.c chip = &pm8001_chips[ent->driver_data]; ent 1009 drivers/scsi/pm8001/pm8001_init.c pm8001_ha = pm8001_pci_alloc(pdev, ent, shost); ent 123 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf) ent 126 drivers/scsi/qla2xxx/qla_tmpl.c ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY; ent 130 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_next_entry(struct qla27xx_fwdt_entry *ent) ent 132 drivers/scsi/qla2xxx/qla_tmpl.c return (void *)ent + le32_to_cpu(ent->hdr.size); ent 137 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 141 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 143 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 148 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 152 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 160 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 162 drivers/scsi/qla2xxx/qla_tmpl.c ulong addr = le32_to_cpu(ent->t256.base_addr); ent 163 drivers/scsi/qla2xxx/qla_tmpl.c uint offset = ent->t256.pci_offset; ent 164 drivers/scsi/qla2xxx/qla_tmpl.c ulong count = le16_to_cpu(ent->t256.reg_count); ent 165 drivers/scsi/qla2xxx/qla_tmpl.c uint width = ent->t256.reg_width; ent 171 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 176 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 178 drivers/scsi/qla2xxx/qla_tmpl.c ulong addr = le32_to_cpu(ent->t257.base_addr); ent 179 drivers/scsi/qla2xxx/qla_tmpl.c uint offset = ent->t257.pci_offset; ent 180 drivers/scsi/qla2xxx/qla_tmpl.c ulong data = le32_to_cpu(ent->t257.write_data); ent 187 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 192 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 194 drivers/scsi/qla2xxx/qla_tmpl.c uint banksel = 
ent->t258.banksel_offset; ent 195 drivers/scsi/qla2xxx/qla_tmpl.c ulong bank = le32_to_cpu(ent->t258.bank); ent 196 drivers/scsi/qla2xxx/qla_tmpl.c ulong addr = le32_to_cpu(ent->t258.base_addr); ent 197 drivers/scsi/qla2xxx/qla_tmpl.c uint offset = ent->t258.pci_offset; ent 198 drivers/scsi/qla2xxx/qla_tmpl.c uint count = le16_to_cpu(ent->t258.reg_count); ent 199 drivers/scsi/qla2xxx/qla_tmpl.c uint width = ent->t258.reg_width; ent 206 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 211 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 213 drivers/scsi/qla2xxx/qla_tmpl.c ulong addr = le32_to_cpu(ent->t259.base_addr); ent 214 drivers/scsi/qla2xxx/qla_tmpl.c uint banksel = ent->t259.banksel_offset; ent 215 drivers/scsi/qla2xxx/qla_tmpl.c ulong bank = le32_to_cpu(ent->t259.bank); ent 216 drivers/scsi/qla2xxx/qla_tmpl.c uint offset = ent->t259.pci_offset; ent 217 drivers/scsi/qla2xxx/qla_tmpl.c ulong data = le32_to_cpu(ent->t259.write_data); ent 225 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 230 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 232 drivers/scsi/qla2xxx/qla_tmpl.c uint offset = ent->t260.pci_offset; ent 239 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 244 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 246 drivers/scsi/qla2xxx/qla_tmpl.c uint offset = ent->t261.pci_offset; ent 247 drivers/scsi/qla2xxx/qla_tmpl.c ulong data = le32_to_cpu(ent->t261.write_data); ent 253 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 258 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 260 drivers/scsi/qla2xxx/qla_tmpl.c uint area = ent->t262.ram_area; ent 261 drivers/scsi/qla2xxx/qla_tmpl.c ulong start = le32_to_cpu(ent->t262.start_addr); ent 262 drivers/scsi/qla2xxx/qla_tmpl.c ulong end = le32_to_cpu(ent->t262.end_addr); ent 266 drivers/scsi/qla2xxx/qla_tmpl.c "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len); ent 273 drivers/scsi/qla2xxx/qla_tmpl.c ent->t262.end_addr = cpu_to_le32(end); ent 278 drivers/scsi/qla2xxx/qla_tmpl.c ent->t262.start_addr = cpu_to_le32(start); ent 279 drivers/scsi/qla2xxx/qla_tmpl.c ent->t262.end_addr = cpu_to_le32(end); ent 285 drivers/scsi/qla2xxx/qla_tmpl.c ent->t262.start_addr = cpu_to_le32(start); ent 286 drivers/scsi/qla2xxx/qla_tmpl.c ent->t262.end_addr = cpu_to_le32(end); ent 290 drivers/scsi/qla2xxx/qla_tmpl.c ent->t262.start_addr = cpu_to_le32(start); ent 291 drivers/scsi/qla2xxx/qla_tmpl.c ent->t262.end_addr = cpu_to_le32(end); ent 296 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 304 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 315 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 320 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 322 drivers/scsi/qla2xxx/qla_tmpl.c uint type = ent->t263.queue_type; ent 358 drivers/scsi/qla2xxx/qla_tmpl.c ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) { ent 372 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 377 drivers/scsi/qla2xxx/qla_tmpl.c ent->t263.num_queues = count; ent 379 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 382 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 387 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 393 drivers/scsi/qla2xxx/qla_tmpl.c 
ent->t264.fce_trace_size = FCE_SIZE; ent 394 drivers/scsi/qla2xxx/qla_tmpl.c ent->t264.write_pointer = vha->hw->fce_wr; ent 395 drivers/scsi/qla2xxx/qla_tmpl.c ent->t264.base_pointer = vha->hw->fce_dma; ent 396 drivers/scsi/qla2xxx/qla_tmpl.c ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0]; ent 397 drivers/scsi/qla2xxx/qla_tmpl.c ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2]; ent 398 drivers/scsi/qla2xxx/qla_tmpl.c ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3]; ent 399 drivers/scsi/qla2xxx/qla_tmpl.c ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4]; ent 400 drivers/scsi/qla2xxx/qla_tmpl.c ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5]; ent 401 drivers/scsi/qla2xxx/qla_tmpl.c ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6]; ent 407 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 410 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 415 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 422 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 427 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 434 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 439 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 441 drivers/scsi/qla2xxx/qla_tmpl.c uint offset = ent->t267.pci_offset; ent 442 drivers/scsi/qla2xxx/qla_tmpl.c ulong data = le32_to_cpu(ent->t267.data); ent 448 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 453 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 456 drivers/scsi/qla2xxx/qla_tmpl.c "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len); ent 457 drivers/scsi/qla2xxx/qla_tmpl.c switch (ent->t268.buf_type) { ent 461 drivers/scsi/qla2xxx/qla_tmpl.c ent->t268.buf_size = EFT_SIZE; ent 462 drivers/scsi/qla2xxx/qla_tmpl.c ent->t268.start_addr = vha->hw->eft_dma; ent 468 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 474 drivers/scsi/qla2xxx/qla_tmpl.c ent->t268.buf_size = vha->hw->exchoffld_size; ent 475 drivers/scsi/qla2xxx/qla_tmpl.c ent->t268.start_addr = ent 483 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 489 drivers/scsi/qla2xxx/qla_tmpl.c ent->t268.buf_size = vha->hw->exlogin_size; ent 490 drivers/scsi/qla2xxx/qla_tmpl.c ent->t268.start_addr = ent 498 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 509 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 513 drivers/scsi/qla2xxx/qla_tmpl.c "%s: unknown buffer %x\n", __func__, ent->t268.buf_type); ent 514 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 518 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 523 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 533 drivers/scsi/qla2xxx/qla_tmpl.c ent->t269.scratch_size = 5 * sizeof(uint32_t); ent 535 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 540 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 542 drivers/scsi/qla2xxx/qla_tmpl.c ulong addr = le32_to_cpu(ent->t270.addr); ent 543 drivers/scsi/qla2xxx/qla_tmpl.c ulong dwords = le32_to_cpu(ent->t270.count); ent 555 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 560 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 562 drivers/scsi/qla2xxx/qla_tmpl.c ulong addr = le32_to_cpu(ent->t271.addr); ent 563 
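The qla_tmpl.c run is one long walk over a firmware-dump template: each entry begins with a header recording its type and total size, qla27xx_next_entry() advances by that size, and entries the code cannot capture are flagged and skipped. A standalone sketch of the same size-prefixed walk, with invented entry types and sizes rather than the driver's real structures:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical entry header: type tag plus total size in bytes. */
struct ent_hdr {
	uint32_t type;
	uint32_t size;	/* covers the header and any trailing payload */
};

static const struct ent_hdr *next_entry(const struct ent_hdr *ent)
{
	/* Advance by the size recorded in the header. */
	return (const struct ent_hdr *)((const char *)ent + ent->size);
}

int main(void)
{
	/* Three fixed-size entries packed back to back. */
	static const struct ent_hdr tmpl[3] = {
		{ .type = 1, .size = sizeof(struct ent_hdr) },
		{ .type = 2, .size = sizeof(struct ent_hdr) },
		{ .type = 3, .size = sizeof(struct ent_hdr) },
	};
	const struct ent_hdr *ent = &tmpl[0];
	unsigned int count = 3;

	while (count--) {
		printf("entry type %u, %u bytes\n",
		       (unsigned)ent->type, (unsigned)ent->size);
		ent = next_entry(ent);
	}
	return 0;
}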
drivers/scsi/qla2xxx/qla_tmpl.c ulong data = le32_to_cpu(ent->t271.data); ent 571 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 576 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 578 drivers/scsi/qla2xxx/qla_tmpl.c ulong dwords = le32_to_cpu(ent->t272.count); ent 579 drivers/scsi/qla2xxx/qla_tmpl.c ulong start = le32_to_cpu(ent->t272.addr); ent 591 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 596 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 598 drivers/scsi/qla2xxx/qla_tmpl.c ulong dwords = le32_to_cpu(ent->t273.count); ent 599 drivers/scsi/qla2xxx/qla_tmpl.c ulong addr = le32_to_cpu(ent->t273.addr); ent 614 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 619 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 621 drivers/scsi/qla2xxx/qla_tmpl.c ulong type = ent->t274.queue_type; ent 652 drivers/scsi/qla2xxx/qla_tmpl.c ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) { ent 666 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 671 drivers/scsi/qla2xxx/qla_tmpl.c ent->t274.num_queues = count; ent 673 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 676 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 681 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 683 drivers/scsi/qla2xxx/qla_tmpl.c ulong offset = offsetof(typeof(*ent), t275.buffer); ent 684 drivers/scsi/qla2xxx/qla_tmpl.c ulong length = le32_to_cpu(ent->t275.length); ent 685 drivers/scsi/qla2xxx/qla_tmpl.c ulong size = le32_to_cpu(ent->hdr.size); ent 686 drivers/scsi/qla2xxx/qla_tmpl.c void *buffer = ent->t275.buffer; ent 693 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 700 drivers/scsi/qla2xxx/qla_tmpl.c ent->t275.length = cpu_to_le32(length); ent 705 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 710 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 716 drivers/scsi/qla2xxx/qla_tmpl.c ulong cond1 = le32_to_cpu(ent->t276.cond1); ent 717 drivers/scsi/qla2xxx/qla_tmpl.c ulong cond2 = le32_to_cpu(ent->t276.cond2); ent 725 drivers/scsi/qla2xxx/qla_tmpl.c ent = qla27xx_next_entry(ent); ent 726 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 730 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 735 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 737 drivers/scsi/qla2xxx/qla_tmpl.c ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr); ent 738 drivers/scsi/qla2xxx/qla_tmpl.c ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data); ent 739 drivers/scsi/qla2xxx/qla_tmpl.c ulong data_addr = le32_to_cpu(ent->t277.data_addr); ent 747 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 752 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) ent 754 drivers/scsi/qla2xxx/qla_tmpl.c ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr); ent 755 drivers/scsi/qla2xxx/qla_tmpl.c ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data); ent 756 drivers/scsi/qla2xxx/qla_tmpl.c ulong data_addr = le32_to_cpu(ent->t278.data_addr); ent 757 drivers/scsi/qla2xxx/qla_tmpl.c ulong wr_data = le32_to_cpu(ent->t278.wr_data); ent 764 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 769 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent, void *buf, ulong 
*len) ent 771 drivers/scsi/qla2xxx/qla_tmpl.c ulong type = le32_to_cpu(ent->hdr.type); ent 775 drivers/scsi/qla2xxx/qla_tmpl.c qla27xx_skip_entry(ent, buf); ent 777 drivers/scsi/qla2xxx/qla_tmpl.c return qla27xx_next_entry(ent); ent 829 drivers/scsi/qla2xxx/qla_tmpl.c struct qla27xx_fwdt_entry *ent = (void *)tmp + ent 836 drivers/scsi/qla2xxx/qla_tmpl.c while (ent && tmp->count--) { ent 837 drivers/scsi/qla2xxx/qla_tmpl.c type = le32_to_cpu(ent->hdr.type); ent 838 drivers/scsi/qla2xxx/qla_tmpl.c ent = qla27xx_find_entry(type)(vha, ent, buf, len); ent 839 drivers/scsi/qla2xxx/qla_tmpl.c if (!ent) ent 847 drivers/scsi/qla2xxx/qla_tmpl.c if (ent) ent 8601 drivers/scsi/qla4xxx/ql4_os.c const struct pci_device_id *ent) ent 1039 drivers/scsi/scsi_transport_sas.c struct list_head *ent = port->phy_list.next; ent 1041 drivers/scsi/scsi_transport_sas.c phy = list_entry(ent, typeof(*phy), port_siblings); ent 360 drivers/scsi/snic/snic_main.c snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1662 drivers/scsi/sym53c8xx_2/sym_glue.c static int sym2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 76 drivers/scsi/zorro7xx.c const struct zorro_device_id *ent) ent 84 drivers/scsi/zorro7xx.c zdd = (struct zorro_driver_data *)ent->driver_data; ent 714 drivers/scsi/zorro_esp.c const struct zorro_device_id *ent) ent 725 drivers/scsi/zorro_esp.c zdd = &zorro_esp_boards[ent->driver_data]; ent 756 drivers/scsi/zorro_esp.c if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) { ent 802 drivers/scsi/zorro_esp.c if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) { ent 112 drivers/soc/qcom/cmd-db.c rsc_offset(const struct rsc_hdr *hdr, const struct entry_header *ent) ent 115 drivers/soc/qcom/cmd-db.c u16 loffset = le16_to_cpu(ent->offset); ent 140 drivers/soc/qcom/cmd-db.c const struct entry_header *ent; ent 156 drivers/soc/qcom/cmd-db.c ent = rsc_to_entry_header(rsc_hdr); ent 157 drivers/soc/qcom/cmd-db.c for (j = 0; j < le16_to_cpu(rsc_hdr->cnt); j++, ent++) { ent 158 drivers/soc/qcom/cmd-db.c if (memcmp(ent->id, query, sizeof(ent->id)) == 0) { ent 160 drivers/soc/qcom/cmd-db.c *eh = ent; ent 184 drivers/soc/qcom/cmd-db.c const struct entry_header *ent; ent 186 drivers/soc/qcom/cmd-db.c ret = cmd_db_get_header(id, &ent, NULL); ent 188 drivers/soc/qcom/cmd-db.c return ret < 0 ? 
0 : le32_to_cpu(ent->addr); ent 203 drivers/soc/qcom/cmd-db.c const struct entry_header *ent; ent 206 drivers/soc/qcom/cmd-db.c ret = cmd_db_get_header(id, &ent, &rsc_hdr); ent 211 drivers/soc/qcom/cmd-db.c *len = le16_to_cpu(ent->len); ent 213 drivers/soc/qcom/cmd-db.c return rsc_offset(rsc_hdr, ent); ent 227 drivers/soc/qcom/cmd-db.c const struct entry_header *ent; ent 230 drivers/soc/qcom/cmd-db.c ret = cmd_db_get_header(id, &ent, NULL); ent 234 drivers/soc/qcom/cmd-db.c addr = le32_to_cpu(ent->addr); ent 20 drivers/spi/spi-cavium-thunderx.c const struct pci_device_id *ent) ent 43 drivers/spi/spi-dw-pci.c static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 46 drivers/spi/spi-dw-pci.c struct spi_pci_desc *desc = (struct spi_pci_desc *)ent->driver_data; ent 198 drivers/spi/spi-pxa2xx-pci.c const struct pci_device_id *ent) ent 216 drivers/spi/spi-pxa2xx-pci.c c = &spi_info_configs[ent->driver_data]; ent 806 drivers/ssb/main.c struct ssb_device *ent; ent 810 drivers/ssb/main.c ent = &(bus->devices[i]); ent 811 drivers/ssb/main.c if (ent->id.vendor != dev->id.vendor) ent 813 drivers/ssb/main.c if (ent->id.coreid != dev->id.coreid) ent 816 drivers/ssb/main.c ent->devtypedata = data; ent 243 drivers/staging/android/vsoc.c const struct pci_device_id *ent); ent 746 drivers/staging/android/vsoc.c const struct pci_device_id *ent) ent 303 drivers/staging/isdn/avm/b1pci.c const struct pci_device_id *ent) ent 1239 drivers/staging/isdn/avm/c4.c static int c4_probe(struct pci_dev *dev, const struct pci_device_id *ent) ent 1241 drivers/staging/isdn/avm/c4.c int nr = ent->driver_data; ent 190 drivers/staging/isdn/avm/t1pci.c static int t1pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) ent 60 drivers/staging/isdn/hysdn/hysdn_init.c const struct pci_device_id *ent) ent 86 drivers/staging/isdn/hysdn/hysdn_init.c card->brdtype = ent->driver_data; ent 1076 drivers/staging/sm750fb/sm750.c const struct pci_device_id *ent) ent 214 drivers/staging/unisys/visorhba/visorhba_main.c int ent) ent 216 drivers/staging/unisys/visorhba/visorhba_main.c if (ddata->pending[ent].sent) ent 217 drivers/staging/unisys/visorhba/visorhba_main.c return &ddata->pending[ent].cmdrsp; ent 124 drivers/staging/vt6655/device_main.c static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent); ent 1654 drivers/staging/vt6655/device_main.c vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent) ent 893 drivers/target/iscsi/iscsi_target.c u32 ent = data_offset / PAGE_SIZE; ent 898 drivers/target/iscsi/iscsi_target.c if (ent >= cmd->se_cmd.t_data_nents) { ent 903 drivers/target/iscsi/iscsi_target.c sg = &cmd->se_cmd.t_data_sg[ent]; ent 3641 drivers/tty/cyclades.c const struct pci_device_id *ent) ent 191 drivers/tty/hvc/hvc_iucv.c struct iucv_tty_buffer *ent, *next; ent 193 drivers/tty/hvc/hvc_iucv.c list_for_each_entry_safe(ent, next, list, list) { ent 194 drivers/tty/hvc/hvc_iucv.c list_del(&ent->list); ent 195 drivers/tty/hvc/hvc_iucv.c destroy_tty_buffer(ent); ent 955 drivers/tty/hvc/hvc_iucv.c struct iucv_tty_buffer *ent, *next; ent 959 drivers/tty/hvc/hvc_iucv.c list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list) ent 960 drivers/tty/hvc/hvc_iucv.c if (ent->msg.id == msg->id) { ent 961 drivers/tty/hvc/hvc_iucv.c list_move(&ent->list, &list_remove); ent 1515 drivers/tty/isicom.c const struct pci_device_id *ent) ent 1530 drivers/tty/isicom.c dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device); ent 932 drivers/tty/moxa.c const struct 
pci_device_id *ent) ent 936 drivers/tty/moxa.c int board_type = ent->driver_data; ent 2556 drivers/tty/mxser.c const struct pci_device_id *ent) ent 2578 drivers/tty/mxser.c mxser_cards[ent->driver_data].name, ent 2593 drivers/tty/mxser.c brd->info = &mxser_cards[ent->driver_data]; ent 1318 drivers/tty/nozomi.c const struct pci_device_id *ent) ent 552 drivers/tty/serial/8250/8250_exar.c exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) ent 560 drivers/tty/serial/8250/8250_exar.c board = (struct exar8250_board *)ent->driver_data; ent 39 drivers/tty/serial/8250/8250_hp300.c const struct dio_device_id *ent); ent 158 drivers/tty/serial/8250/8250_hp300.c const struct dio_device_id *ent) ent 4017 drivers/tty/serial/8250/8250_pci.c pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) ent 4033 drivers/tty/serial/8250/8250_pci.c if (ent->driver_data >= ARRAY_SIZE(pci_boards)) { ent 4035 drivers/tty/serial/8250/8250_pci.c ent->driver_data); ent 4039 drivers/tty/serial/8250/8250_pci.c board = &pci_boards[ent->driver_data]; ent 4050 drivers/tty/serial/8250/8250_pci.c if (ent->driver_data == pbn_default) { ent 1476 drivers/tty/serial/icom.c const struct pci_device_id *ent) ent 1509 drivers/tty/serial/icom.c if (ent->driver_data == ADAPTER_V1) { ent 1526 drivers/tty/serial/icom.c icom_adapter->version = ent->driver_data; ent 1527 drivers/tty/serial/icom.c icom_adapter->subsystem_id = ent->subdevice; ent 52 drivers/tty/serial/jsm/jsm_driver.c static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1173 drivers/tty/serial/serial_txx9.c pciserial_txx9_init_one(struct pci_dev *dev, const struct pci_device_id *ent) ent 883 drivers/tty/synclink.c const struct pci_device_id *ent); ent 7977 drivers/tty/synclink.c const struct pci_device_id *ent) ent 106 drivers/tty/synclink_gt.c static int init_one(struct pci_dev *dev,const struct pci_device_id *ent); ent 3664 drivers/tty/synclink_gt.c const struct pci_device_id *ent) ent 479 drivers/tty/synclinkmp.c static int synclinkmp_init_one(struct pci_dev *dev,const struct pci_device_id *ent); ent 5571 drivers/tty/synclinkmp.c const struct pci_device_id *ent) ent 964 drivers/video/console/sticore.c static int sticore_pci_init(struct pci_dev *pd, const struct pci_device_id *ent) ent 539 drivers/video/fbdev/asiliantfb.c const struct pci_device_id *ent) ent 163 drivers/video/fbdev/aty/aty128fb.c const struct pci_device_id *ent); ent 1905 drivers/video/fbdev/aty/aty128fb.c static int aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent) ent 1918 drivers/video/fbdev/aty/aty128fb.c video_card[8] = ent->device >> 8; ent 1919 drivers/video/fbdev/aty/aty128fb.c video_card[9] = ent->device & 0xFF; ent 1922 drivers/video/fbdev/aty/aty128fb.c if (ent->driver_data < ARRAY_SIZE(r128_family)) ent 1923 drivers/video/fbdev/aty/aty128fb.c strlcat(video_card, r128_family[ent->driver_data], ent 1933 drivers/video/fbdev/aty/aty128fb.c par->chip_gen = ent->driver_data; ent 2054 drivers/video/fbdev/aty/aty128fb.c static int aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 2140 drivers/video/fbdev/aty/aty128fb.c if (!aty128_init(pdev, ent)) ent 3494 drivers/video/fbdev/aty/atyfb_base.c const struct pci_device_id *ent) ent 2275 drivers/video/fbdev/aty/radeon_base.c const struct pci_device_id *ent) ent 2305 drivers/video/fbdev/aty/radeon_base.c c1 = ent->device >> 8; ent 2306 drivers/video/fbdev/aty/radeon_base.c c2 = ent->device & 0xff; ent 2309 drivers/video/fbdev/aty/radeon_base.c "ATI 
Radeon %x \"%c%c\"", ent->device & 0xffff, c1, c2); ent 2312 drivers/video/fbdev/aty/radeon_base.c "ATI Radeon %x", ent->device & 0xffff); ent 2314 drivers/video/fbdev/aty/radeon_base.c rinfo->family = ent->driver_data & CHIP_FAMILY_MASK; ent 2316 drivers/video/fbdev/aty/radeon_base.c rinfo->has_CRTC2 = (ent->driver_data & CHIP_HAS_CRTC2) != 0; ent 2317 drivers/video/fbdev/aty/radeon_base.c rinfo->is_mobility = (ent->driver_data & CHIP_IS_MOBILITY) != 0; ent 2318 drivers/video/fbdev/aty/radeon_base.c rinfo->is_IGP = (ent->driver_data & CHIP_IS_IGP) != 0; ent 610 drivers/video/fbdev/carminefb.c static int carminefb_probe(struct pci_dev *dev, const struct pci_device_id *ent) ent 349 drivers/video/fbdev/chipsfb.c static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) ent 2081 drivers/video/fbdev/cirrusfb.c const struct pci_device_id *ent) ent 2101 drivers/video/fbdev/cirrusfb.c cinfo->btype = (enum cirrus_board) ent->driver_data; ent 2198 drivers/video/fbdev/cirrusfb.c const struct zorro_device_id *ent) ent 2211 drivers/video/fbdev/cirrusfb.c zcl = (const struct zorrocl *)ent->driver_data; ent 615 drivers/video/fbdev/gxt4500.c static int gxt4500_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 650 drivers/video/fbdev/gxt4500.c cardtype = ent->driver_data; ent 329 drivers/video/fbdev/hpfb.c static int hpfb_dio_probe(struct dio_dev *d, const struct dio_device_id *ent) ent 910 drivers/video/fbdev/hyperv_fb.c const struct pci_device_id *ent) ent 1000 drivers/video/fbdev/i740fb.c static int i740fb_probe(struct pci_dev *dev, const struct pci_device_id *ent) ent 398 drivers/video/fbdev/imsttfb.c static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent); ent 1466 drivers/video/fbdev/imsttfb.c static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 162 drivers/video/fbdev/intelfb/intelfbdrv.c const struct pci_device_id *ent); ent 470 drivers/video/fbdev/intelfb/intelfbdrv.c const struct pci_device_id *ent) ent 526 drivers/video/fbdev/intelfb/intelfbdrv.c if ((ent->device == PCI_DEVICE_ID_INTEL_915G) || ent 527 drivers/video/fbdev/intelfb/intelfbdrv.c (ent->device == PCI_DEVICE_ID_INTEL_915GM) || ent 528 drivers/video/fbdev/intelfb/intelfbdrv.c (ent->device == PCI_DEVICE_ID_INTEL_945G) || ent 529 drivers/video/fbdev/intelfb/intelfbdrv.c (ent->device == PCI_DEVICE_ID_INTEL_945GM) || ent 530 drivers/video/fbdev/intelfb/intelfbdrv.c (ent->device == PCI_DEVICE_ID_INTEL_945GME) || ent 531 drivers/video/fbdev/intelfb/intelfbdrv.c (ent->device == PCI_DEVICE_ID_INTEL_965G) || ent 532 drivers/video/fbdev/intelfb/intelfbdrv.c (ent->device == PCI_DEVICE_ID_INTEL_965GM)) { ent 87 drivers/video/fbdev/kyro/fbdev.c static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent); ent 662 drivers/video/fbdev/kyro/fbdev.c static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 991 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c const struct pci_device_id *ent) ent 1270 drivers/video/fbdev/nvidia/nvidia.c static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent) ent 1310 drivers/video/fbdev/pm3fb.c static int pm3fb_probe(struct pci_dev *dev, const struct pci_device_id *ent) ent 929 drivers/video/fbdev/pvr2fb.c const struct pci_device_id *ent) ent 1894 drivers/video/fbdev/riva/fbdev.c static int rivafb_probe(struct pci_dev *pd, const struct pci_device_id *ent) ent 735 drivers/video/fbdev/s3c2410fb.c unsigned long ent = fbi->palette_buffer[i]; ent 736 
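A pattern that repeats across the PCI, Zorro and DIO probe hits on this page (mvsas, pm8001, 8250_pci, sisfb, cirrusfb, gxt4500, via-core and many more): the matched device-id table entry arrives as `ent`, and `ent->driver_data` indexes a per-driver board or chip description array, bounds-checked before use in the more careful drivers. A small userspace sketch of that table-driven probe, using stand-in types rather than the kernel's pci_device_id:

#include <stdio.h>

struct board_info {
	const char *name;
	unsigned int num_ports;
};

/* Stand-in for the matched device-id entry handed to a probe routine. */
struct device_id {
	unsigned int vendor, device;
	unsigned long driver_data;	/* index into boards[] */
};

static const struct board_info boards[] = {
	{ "fastcard-2p", 2 },
	{ "fastcard-8p", 8 },
};

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static int probe_one(const struct device_id *ent)
{
	const struct board_info *board;

	/* Validate driver_data before indexing, as 8250_pci.c does. */
	if (ent->driver_data >= ARRAY_SIZE(boards)) {
		fprintf(stderr, "invalid driver_data %lu\n", ent->driver_data);
		return -1;
	}
	board = &boards[ent->driver_data];
	printf("%04x:%04x probed as %s (%u ports)\n",
	       ent->vendor, ent->device, board->name, board->num_ports);
	return 0;
}

int main(void)
{
	const struct device_id id = {
		.vendor = 0x1234, .device = 0x5678, .driver_data = 1,
	};

	return probe_one(&id);
}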
drivers/video/fbdev/s3c2410fb.c if (ent == PALETTE_BUFF_CLEAR) ent 739 drivers/video/fbdev/s3c2410fb.c writel(ent, regs + S3C2410_TFTPAL(i)); ent 746 drivers/video/fbdev/s3c2410fb.c if (readw(regs + S3C2410_TFTPAL(i)) == ent) ent 5841 drivers/video/fbdev/sis/sis_main.c static int sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 5843 drivers/video/fbdev/sis/sis_main.c struct sisfb_chip_info *chipinfo = &sisfb_chip_info[ent->driver_data]; ent 664 drivers/video/fbdev/skeletonfb.c static int xxxfb_probe(struct pci_dev *dev, const struct pci_device_id *ent) ent 1517 drivers/video/fbdev/sm712fb.c const struct pci_device_id *ent) ent 1537 drivers/video/fbdev/sm712fb.c sprintf(smtcfb_fix.id, "sm%Xfb", ent->device); ent 1547 drivers/video/fbdev/sm712fb.c sfb->chip_id = ent->device; ent 120 drivers/video/fbdev/sunxvr2500.c const struct pci_device_id *ent) ent 243 drivers/video/fbdev/sunxvr500.c const struct pci_device_id *ent) ent 107 drivers/video/fbdev/tgafb.c const struct pci_device_id *ent) ent 631 drivers/video/fbdev/via/via-core.c static int via_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 644 drivers/video/fbdev/via/via-core.c global_dev.chip_type = ent->driver_data; ent 116 drivers/w1/masters/matrox_w1.c static int matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 371 drivers/w1/w1.c struct list_head *ent, *n; ent 376 drivers/w1/w1.c list_for_each_safe(ent, n, &md->slist) { ent 377 drivers/w1/w1.c sl = list_entry(ent, struct w1_slave, w1_slave_entry); ent 23 drivers/w1/w1_family.c struct list_head *ent, *n; ent 28 drivers/w1/w1_family.c list_for_each_safe(ent, n, &w1_families) { ent 29 drivers/w1/w1_family.c f = list_entry(ent, struct w1_family, family_entry); ent 56 drivers/w1/w1_family.c struct list_head *ent, *n; ent 60 drivers/w1/w1_family.c list_for_each_safe(ent, n, &w1_families) { ent 61 drivers/w1/w1_family.c f = list_entry(ent, struct w1_family, family_entry); ent 88 drivers/w1/w1_family.c struct list_head *ent, *n; ent 92 drivers/w1/w1_family.c list_for_each_safe(ent, n, &w1_families) { ent 93 drivers/w1/w1_family.c f = list_entry(ent, struct w1_family, family_entry); ent 283 drivers/watchdog/hpwdt.c const struct pci_device_id *ent) ent 307 drivers/watchdog/hpwdt.c ent->vendor, ent->device); ent 290 drivers/watchdog/i6300esb.c const struct pci_device_id *ent) ent 682 drivers/watchdog/pcwd_pci.c const struct pci_device_id *ent) ent 159 drivers/watchdog/via_wdt.c const struct pci_device_id *ent) ent 604 drivers/watchdog/wdt_pci.c const struct pci_device_id *ent) ent 93 drivers/xen/platform-pci.c const struct pci_device_id *ent) ent 1054 drivers/xen/xen-scsiback.c char *ent) ent 1066 drivers/xen/xen-scsiback.c snprintf(state, sizeof(state), "vscsi-devs/%s/state", ent); ent 1072 drivers/xen/xen-scsiback.c snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent); ent 1086 drivers/xen/xen-scsiback.c snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", ent); ent 868 drivers/xen/xenbus/xenbus_xs.c struct list_head *ent; ent 883 drivers/xen/xenbus/xenbus_xs.c ent = watch_events.next; ent 884 drivers/xen/xenbus/xenbus_xs.c if (ent != &watch_events) ent 885 drivers/xen/xenbus/xenbus_xs.c list_del(ent); ent 888 drivers/xen/xenbus/xenbus_xs.c if (ent != &watch_events) { ent 889 drivers/xen/xenbus/xenbus_xs.c event = list_entry(ent, struct xs_watch_event, list); ent 607 fs/binfmt_elf_fdpic.c struct { unsigned long _id, _val; } __user *ent; \ ent 609 fs/binfmt_elf_fdpic.c ent = (void __user *) csp; \ ent 610 fs/binfmt_elf_fdpic.c 
__put_user((id), &ent[nr]._id); \ ent 611 fs/binfmt_elf_fdpic.c __put_user((val), &ent[nr]._val); \ ent 57 fs/ceph/cache.c struct ceph_fscache_entry *ent; ent 61 fs/ceph/cache.c list_for_each_entry(ent, &ceph_fscache_list, list) { ent 62 fs/ceph/cache.c if (memcmp(&ent->fsid, fsid, sizeof(*fsid))) ent 64 fs/ceph/cache.c if (ent->uniq_len != uniq_len) ent 66 fs/ceph/cache.c if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len)) ent 75 fs/ceph/cache.c ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL); ent 76 fs/ceph/cache.c if (!ent) { ent 81 fs/ceph/cache.c memcpy(&ent->fsid, fsid, sizeof(*fsid)); ent 83 fs/ceph/cache.c memcpy(&ent->uniquifier, fscache_uniq, uniq_len); ent 84 fs/ceph/cache.c ent->uniq_len = uniq_len; ent 89 fs/ceph/cache.c &ent->fsid, sizeof(ent->fsid) + uniq_len, ent 94 fs/ceph/cache.c ent->fscache = fsc->fscache; ent 95 fs/ceph/cache.c list_add_tail(&ent->list, &ceph_fscache_list); ent 97 fs/ceph/cache.c kfree(ent); ent 312 fs/ceph/cache.c struct ceph_fscache_entry *ent; ent 316 fs/ceph/cache.c list_for_each_entry(ent, &ceph_fscache_list, list) { ent 317 fs/ceph/cache.c if (ent->fscache == fsc->fscache) { ent 318 fs/ceph/cache.c list_del(&ent->list); ent 319 fs/ceph/cache.c kfree(ent); ent 3469 fs/ext4/namei.c static int ext4_rename_dir_prepare(handle_t *handle, struct ext4_renament *ent) ent 3473 fs/ext4/namei.c ent->dir_bh = ext4_get_first_dir_block(handle, ent->inode, ent 3474 fs/ext4/namei.c &retval, &ent->parent_de, ent 3475 fs/ext4/namei.c &ent->dir_inlined); ent 3476 fs/ext4/namei.c if (!ent->dir_bh) ent 3478 fs/ext4/namei.c if (le32_to_cpu(ent->parent_de->inode) != ent->dir->i_ino) ent 3480 fs/ext4/namei.c BUFFER_TRACE(ent->dir_bh, "get_write_access"); ent 3481 fs/ext4/namei.c return ext4_journal_get_write_access(handle, ent->dir_bh); ent 3484 fs/ext4/namei.c static int ext4_rename_dir_finish(handle_t *handle, struct ext4_renament *ent, ent 3489 fs/ext4/namei.c ent->parent_de->inode = cpu_to_le32(dir_ino); ent 3490 fs/ext4/namei.c BUFFER_TRACE(ent->dir_bh, "call ext4_handle_dirty_metadata"); ent 3491 fs/ext4/namei.c if (!ent->dir_inlined) { ent 3492 fs/ext4/namei.c if (is_dx(ent->inode)) { ent 3494 fs/ext4/namei.c ent->inode, ent 3495 fs/ext4/namei.c ent->dir_bh); ent 3497 fs/ext4/namei.c retval = ext4_handle_dirty_dirblock(handle, ent->inode, ent 3498 fs/ext4/namei.c ent->dir_bh); ent 3501 fs/ext4/namei.c retval = ext4_mark_inode_dirty(handle, ent->inode); ent 3504 fs/ext4/namei.c ext4_std_error(ent->dir->i_sb, retval); ent 3510 fs/ext4/namei.c static int ext4_setent(handle_t *handle, struct ext4_renament *ent, ent 3515 fs/ext4/namei.c BUFFER_TRACE(ent->bh, "get write access"); ent 3516 fs/ext4/namei.c retval = ext4_journal_get_write_access(handle, ent->bh); ent 3519 fs/ext4/namei.c ent->de->inode = cpu_to_le32(ino); ent 3520 fs/ext4/namei.c if (ext4_has_feature_filetype(ent->dir->i_sb)) ent 3521 fs/ext4/namei.c ent->de->file_type = file_type; ent 3522 fs/ext4/namei.c inode_inc_iversion(ent->dir); ent 3523 fs/ext4/namei.c ent->dir->i_ctime = ent->dir->i_mtime = ent 3524 fs/ext4/namei.c current_time(ent->dir); ent 3525 fs/ext4/namei.c ext4_mark_inode_dirty(handle, ent->dir); ent 3526 fs/ext4/namei.c BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata"); ent 3527 fs/ext4/namei.c if (!ent->inlined) { ent 3528 fs/ext4/namei.c retval = ext4_handle_dirty_dirblock(handle, ent->dir, ent->bh); ent 3530 fs/ext4/namei.c ext4_std_error(ent->dir->i_sb, retval); ent 3534 fs/ext4/namei.c brelse(ent->bh); ent 3535 fs/ext4/namei.c ent->bh = NULL; ent 3557 
fs/ext4/namei.c static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent, ent 3567 fs/ext4/namei.c if (le32_to_cpu(ent->de->inode) != ent->inode->i_ino || ent 3568 fs/ext4/namei.c ent->de->name_len != ent->dentry->d_name.len || ent 3569 fs/ext4/namei.c strncmp(ent->de->name, ent->dentry->d_name.name, ent 3570 fs/ext4/namei.c ent->de->name_len) || ent 3572 fs/ext4/namei.c retval = ext4_find_delete_entry(handle, ent->dir, ent 3573 fs/ext4/namei.c &ent->dentry->d_name); ent 3575 fs/ext4/namei.c retval = ext4_delete_entry(handle, ent->dir, ent->de, ent->bh); ent 3577 fs/ext4/namei.c retval = ext4_find_delete_entry(handle, ent->dir, ent 3578 fs/ext4/namei.c &ent->dentry->d_name); ent 3583 fs/ext4/namei.c ext4_warning_inode(ent->dir, ent 3585 fs/ext4/namei.c ent->dir->i_nlink, retval); ent 3589 fs/ext4/namei.c static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent) ent 3591 fs/ext4/namei.c if (ent->dir_nlink_delta) { ent 3592 fs/ext4/namei.c if (ent->dir_nlink_delta == -1) ent 3593 fs/ext4/namei.c ext4_dec_count(handle, ent->dir); ent 3595 fs/ext4/namei.c ext4_inc_count(handle, ent->dir); ent 3596 fs/ext4/namei.c ext4_mark_inode_dirty(handle, ent->dir); ent 3600 fs/ext4/namei.c static struct inode *ext4_whiteout_for_rename(struct ext4_renament *ent, ent 3611 fs/ext4/namei.c credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) + ent 3614 fs/ext4/namei.c wh = ext4_new_inode_start_handle(ent->dir, S_IFCHR | WHITEOUT_MODE, ent 3615 fs/ext4/namei.c &ent->dentry->d_name, 0, NULL, ent 3623 fs/ext4/namei.c ext4_should_retry_alloc(ent->dir->i_sb, &retries)) ent 112 fs/hostfs/hostfs_user.c struct dirent *ent; ent 114 fs/hostfs/hostfs_user.c ent = readdir(dir); ent 115 fs/hostfs/hostfs_user.c if (ent == NULL) ent 117 fs/hostfs/hostfs_user.c *len_out = strlen(ent->d_name); ent 118 fs/hostfs/hostfs_user.c *ino_out = ent->d_ino; ent 119 fs/hostfs/hostfs_user.c *type_out = ent->d_type; ent 120 fs/hostfs/hostfs_user.c *pos_out = ent->d_off; ent 121 fs/hostfs/hostfs_user.c return ent->d_name; ent 773 fs/nfs/dir.c struct nfs_cache_array_entry *ent; ent 775 fs/nfs/dir.c ent = &array->array[i]; ent 776 fs/nfs/dir.c if (!dir_emit(desc->ctx, ent->string.name, ent->string.len, ent 777 fs/nfs/dir.c nfs_compat_user_ino64(ent->ino), ent->d_type)) { ent 79 fs/nfsd/nfs4idmap.c struct ent *new = container_of(cnew, struct ent, h); ent 80 fs/nfsd/nfs4idmap.c struct ent *itm = container_of(citm, struct ent, h); ent 92 fs/nfsd/nfs4idmap.c struct ent *map = container_of(ref, struct ent, h.ref); ent 99 fs/nfsd/nfs4idmap.c struct ent *e = kmalloc(sizeof(*e), GFP_KERNEL); ent 111 fs/nfsd/nfs4idmap.c idtoname_hash(struct ent *ent) ent 115 fs/nfsd/nfs4idmap.c hash = hash_str(ent->authname, ENT_HASHBITS); ent 116 fs/nfsd/nfs4idmap.c hash = hash_long(hash ^ ent->id, ENT_HASHBITS); ent 119 fs/nfsd/nfs4idmap.c if (ent->type == IDMAP_TYPE_GROUP) ent 129 fs/nfsd/nfs4idmap.c struct ent *ent = container_of(ch, struct ent, h); ent 132 fs/nfsd/nfs4idmap.c qword_add(bpp, blen, ent->authname); ent 133 fs/nfsd/nfs4idmap.c snprintf(idstr, sizeof(idstr), "%u", ent->id); ent 134 fs/nfsd/nfs4idmap.c qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? 
"group" : "user"); ent 143 fs/nfsd/nfs4idmap.c struct ent *a = container_of(ca, struct ent, h); ent 144 fs/nfsd/nfs4idmap.c struct ent *b = container_of(cb, struct ent, h); ent 153 fs/nfsd/nfs4idmap.c struct ent *ent; ent 159 fs/nfsd/nfs4idmap.c ent = container_of(h, struct ent, h); ent 160 fs/nfsd/nfs4idmap.c seq_printf(m, "%s %s %u", ent->authname, ent 161 fs/nfsd/nfs4idmap.c ent->type == IDMAP_TYPE_GROUP ? "group" : "user", ent 162 fs/nfsd/nfs4idmap.c ent->id); ent 164 fs/nfsd/nfs4idmap.c seq_printf(m, " %s", ent->name); ent 178 fs/nfsd/nfs4idmap.c static struct ent *idtoname_lookup(struct cache_detail *, struct ent *); ent 179 fs/nfsd/nfs4idmap.c static struct ent *idtoname_update(struct cache_detail *, struct ent *, ent 180 fs/nfsd/nfs4idmap.c struct ent *); ent 200 fs/nfsd/nfs4idmap.c struct ent ent, *res; ent 213 fs/nfsd/nfs4idmap.c memset(&ent, 0, sizeof(ent)); ent 219 fs/nfsd/nfs4idmap.c memcpy(ent.authname, buf1, sizeof(ent.authname)); ent 224 fs/nfsd/nfs4idmap.c ent.type = strcmp(buf1, "user") == 0 ? ent 230 fs/nfsd/nfs4idmap.c ent.id = simple_strtoul(buf1, &bp, 10); ent 235 fs/nfsd/nfs4idmap.c ent.h.expiry_time = get_expiry(&buf); ent 236 fs/nfsd/nfs4idmap.c if (ent.h.expiry_time == 0) ent 240 fs/nfsd/nfs4idmap.c res = idtoname_lookup(cd, &ent); ent 250 fs/nfsd/nfs4idmap.c set_bit(CACHE_NEGATIVE, &ent.h.flags); ent 252 fs/nfsd/nfs4idmap.c memcpy(ent.name, buf1, sizeof(ent.name)); ent 254 fs/nfsd/nfs4idmap.c res = idtoname_update(cd, &ent, res); ent 265 fs/nfsd/nfs4idmap.c static struct ent * ent 266 fs/nfsd/nfs4idmap.c idtoname_lookup(struct cache_detail *cd, struct ent *item) ent 271 fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h); ent 276 fs/nfsd/nfs4idmap.c static struct ent * ent 277 fs/nfsd/nfs4idmap.c idtoname_update(struct cache_detail *cd, struct ent *new, struct ent *old) ent 282 fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h); ent 293 fs/nfsd/nfs4idmap.c nametoid_hash(struct ent *ent) ent 295 fs/nfsd/nfs4idmap.c return hash_str(ent->name, ENT_HASHBITS); ent 302 fs/nfsd/nfs4idmap.c struct ent *ent = container_of(ch, struct ent, h); ent 304 fs/nfsd/nfs4idmap.c qword_add(bpp, blen, ent->authname); ent 305 fs/nfsd/nfs4idmap.c qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? "group" : "user"); ent 306 fs/nfsd/nfs4idmap.c qword_add(bpp, blen, ent->name); ent 314 fs/nfsd/nfs4idmap.c struct ent *a = container_of(ca, struct ent, h); ent 315 fs/nfsd/nfs4idmap.c struct ent *b = container_of(cb, struct ent, h); ent 324 fs/nfsd/nfs4idmap.c struct ent *ent; ent 330 fs/nfsd/nfs4idmap.c ent = container_of(h, struct ent, h); ent 331 fs/nfsd/nfs4idmap.c seq_printf(m, "%s %s %s", ent->authname, ent 332 fs/nfsd/nfs4idmap.c ent->type == IDMAP_TYPE_GROUP ? "group" : "user", ent 333 fs/nfsd/nfs4idmap.c ent->name); ent 335 fs/nfsd/nfs4idmap.c seq_printf(m, " %u", ent->id); ent 340 fs/nfsd/nfs4idmap.c static struct ent *nametoid_lookup(struct cache_detail *, struct ent *); ent 341 fs/nfsd/nfs4idmap.c static struct ent *nametoid_update(struct cache_detail *, struct ent *, ent 342 fs/nfsd/nfs4idmap.c struct ent *); ent 363 fs/nfsd/nfs4idmap.c struct ent ent, *res; ent 375 fs/nfsd/nfs4idmap.c memset(&ent, 0, sizeof(ent)); ent 381 fs/nfsd/nfs4idmap.c memcpy(ent.authname, buf1, sizeof(ent.authname)); ent 386 fs/nfsd/nfs4idmap.c ent.type = strcmp(buf1, "user") == 0 ? 
ent 393 fs/nfsd/nfs4idmap.c memcpy(ent.name, buf1, sizeof(ent.name)); ent 396 fs/nfsd/nfs4idmap.c ent.h.expiry_time = get_expiry(&buf); ent 397 fs/nfsd/nfs4idmap.c if (ent.h.expiry_time == 0) ent 401 fs/nfsd/nfs4idmap.c error = get_int(&buf, &ent.id); ent 405 fs/nfsd/nfs4idmap.c set_bit(CACHE_NEGATIVE, &ent.h.flags); ent 408 fs/nfsd/nfs4idmap.c res = nametoid_lookup(cd, &ent); ent 411 fs/nfsd/nfs4idmap.c res = nametoid_update(cd, &ent, res); ent 423 fs/nfsd/nfs4idmap.c static struct ent * ent 424 fs/nfsd/nfs4idmap.c nametoid_lookup(struct cache_detail *cd, struct ent *item) ent 429 fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h); ent 434 fs/nfsd/nfs4idmap.c static struct ent * ent 435 fs/nfsd/nfs4idmap.c nametoid_update(struct cache_detail *cd, struct ent *new, struct ent *old) ent 440 fs/nfsd/nfs4idmap.c return container_of(ch, struct ent, h); ent 493 fs/nfsd/nfs4idmap.c struct ent *(*lookup_fn)(struct cache_detail *, struct ent *), ent 494 fs/nfsd/nfs4idmap.c struct ent *key, struct cache_detail *detail, struct ent **item) ent 505 fs/nfsd/nfs4idmap.c struct ent *prev_item = *item; ent 527 fs/nfsd/nfs4idmap.c struct ent *item, key = { ent 565 fs/nfsd/nfs4idmap.c struct ent *item, key = { ent 393 fs/nilfs2/recovery.c struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS); ent 395 fs/nilfs2/recovery.c if (unlikely(!ent)) ent 398 fs/nilfs2/recovery.c ent->segnum = segnum; ent 399 fs/nilfs2/recovery.c INIT_LIST_HEAD(&ent->list); ent 400 fs/nilfs2/recovery.c list_add_tail(&ent->list, head); ent 407 fs/nilfs2/recovery.c struct nilfs_segment_entry *ent; ent 409 fs/nilfs2/recovery.c ent = list_first_entry(head, struct nilfs_segment_entry, list); ent 410 fs/nilfs2/recovery.c list_del(&ent->list); ent 411 fs/nilfs2/recovery.c kfree(ent); ent 420 fs/nilfs2/recovery.c struct nilfs_segment_entry *ent, *n; ent 449 fs/nilfs2/recovery.c list_for_each_entry_safe(ent, n, head, list) { ent 450 fs/nilfs2/recovery.c if (ent->segnum != segnum[0]) { ent 451 fs/nilfs2/recovery.c err = nilfs_sufile_scrap(sufile, ent->segnum); ent 455 fs/nilfs2/recovery.c list_del(&ent->list); ent 456 fs/nilfs2/recovery.c kfree(ent); ent 209 fs/ocfs2/filecheck.c ocfs2_filecheck_erase_entries(struct ocfs2_filecheck_sysfs_entry *ent, ent 212 fs/ocfs2/filecheck.c ocfs2_filecheck_adjust_max(struct ocfs2_filecheck_sysfs_entry *ent, ent 220 fs/ocfs2/filecheck.c spin_lock(&ent->fs_fcheck->fc_lock); ent 221 fs/ocfs2/filecheck.c if (len < (ent->fs_fcheck->fc_size - ent->fs_fcheck->fc_done)) { ent 225 fs/ocfs2/filecheck.c len, ent->fs_fcheck->fc_size - ent->fs_fcheck->fc_done); ent 228 fs/ocfs2/filecheck.c if (len < ent->fs_fcheck->fc_size) ent 229 fs/ocfs2/filecheck.c BUG_ON(!ocfs2_filecheck_erase_entries(ent, ent 230 fs/ocfs2/filecheck.c ent->fs_fcheck->fc_size - len)); ent 232 fs/ocfs2/filecheck.c ent->fs_fcheck->fc_max = len; ent 235 fs/ocfs2/filecheck.c spin_unlock(&ent->fs_fcheck->fc_lock); ent 307 fs/ocfs2/filecheck.c struct ocfs2_filecheck_sysfs_entry *ent = container_of(kobj, ent 314 fs/ocfs2/filecheck.c spin_lock(&ent->fs_fcheck->fc_lock); ent 315 fs/ocfs2/filecheck.c total = snprintf(buf, remain, "%u\n", ent->fs_fcheck->fc_max); ent 316 fs/ocfs2/filecheck.c spin_unlock(&ent->fs_fcheck->fc_lock); ent 323 fs/ocfs2/filecheck.c spin_lock(&ent->fs_fcheck->fc_lock); ent 324 fs/ocfs2/filecheck.c list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) { ent 343 fs/ocfs2/filecheck.c spin_unlock(&ent->fs_fcheck->fc_lock); ent 350 fs/ocfs2/filecheck.c ocfs2_filecheck_is_dup_entry(struct ocfs2_filecheck_sysfs_entry 
*ent, ent 355 fs/ocfs2/filecheck.c list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) { ent 366 fs/ocfs2/filecheck.c ocfs2_filecheck_erase_entry(struct ocfs2_filecheck_sysfs_entry *ent) ent 370 fs/ocfs2/filecheck.c list_for_each_entry(p, &ent->fs_fcheck->fc_head, fe_list) { ent 374 fs/ocfs2/filecheck.c ent->fs_fcheck->fc_size--; ent 375 fs/ocfs2/filecheck.c ent->fs_fcheck->fc_done--; ent 384 fs/ocfs2/filecheck.c ocfs2_filecheck_erase_entries(struct ocfs2_filecheck_sysfs_entry *ent, ent 391 fs/ocfs2/filecheck.c if (ocfs2_filecheck_erase_entry(ent)) ent 401 fs/ocfs2/filecheck.c ocfs2_filecheck_done_entry(struct ocfs2_filecheck_sysfs_entry *ent, ent 404 fs/ocfs2/filecheck.c spin_lock(&ent->fs_fcheck->fc_lock); ent 406 fs/ocfs2/filecheck.c ent->fs_fcheck->fc_done++; ent 407 fs/ocfs2/filecheck.c spin_unlock(&ent->fs_fcheck->fc_lock); ent 433 fs/ocfs2/filecheck.c ocfs2_filecheck_handle_entry(struct ocfs2_filecheck_sysfs_entry *ent, ent 436 fs/ocfs2/filecheck.c struct ocfs2_super *osb = container_of(ent, struct ocfs2_super, ent 448 fs/ocfs2/filecheck.c ocfs2_filecheck_done_entry(ent, entry); ent 458 fs/ocfs2/filecheck.c struct ocfs2_filecheck_sysfs_entry *ent = container_of(kobj, ent 468 fs/ocfs2/filecheck.c ret = ocfs2_filecheck_adjust_max(ent, args.fa_len); ent 478 fs/ocfs2/filecheck.c spin_lock(&ent->fs_fcheck->fc_lock); ent 479 fs/ocfs2/filecheck.c if (ocfs2_filecheck_is_dup_entry(ent, args.fa_ino)) { ent 482 fs/ocfs2/filecheck.c } else if ((ent->fs_fcheck->fc_size >= ent->fs_fcheck->fc_max) && ent 483 fs/ocfs2/filecheck.c (ent->fs_fcheck->fc_done == 0)) { ent 487 fs/ocfs2/filecheck.c ent->fs_fcheck->fc_max); ent 491 fs/ocfs2/filecheck.c if ((ent->fs_fcheck->fc_size >= ent->fs_fcheck->fc_max) && ent 492 fs/ocfs2/filecheck.c (ent->fs_fcheck->fc_done > 0)) { ent 497 fs/ocfs2/filecheck.c BUG_ON(!ocfs2_filecheck_erase_entry(ent)); ent 504 fs/ocfs2/filecheck.c list_add_tail(&entry->fe_list, &ent->fs_fcheck->fc_head); ent 505 fs/ocfs2/filecheck.c ent->fs_fcheck->fc_size++; ent 507 fs/ocfs2/filecheck.c spin_unlock(&ent->fs_fcheck->fc_lock); ent 510 fs/ocfs2/filecheck.c ocfs2_filecheck_handle_entry(ent, entry); ent 191 fs/omfs/file.c static sector_t find_block(struct inode *inode, struct omfs_extent_entry *ent, ent 198 fs/omfs/file.c be64_to_cpu(ent->e_blocks)); ent 208 fs/omfs/file.c be64_to_cpu(ent->e_cluster)) + ent 212 fs/omfs/file.c ent++; ent 380 fs/proc/generic.c struct proc_dir_entry *ent = NULL; ent 409 fs/proc/generic.c ent = kmem_cache_zalloc(proc_dir_entry_cache, GFP_KERNEL); ent 410 fs/proc/generic.c if (!ent) ent 414 fs/proc/generic.c ent->name = ent->inline_name; ent 416 fs/proc/generic.c ent->name = kmalloc(qstr.len + 1, GFP_KERNEL); ent 417 fs/proc/generic.c if (!ent->name) { ent 418 fs/proc/generic.c pde_free(ent); ent 423 fs/proc/generic.c memcpy(ent->name, fn, qstr.len + 1); ent 424 fs/proc/generic.c ent->namelen = qstr.len; ent 425 fs/proc/generic.c ent->mode = mode; ent 426 fs/proc/generic.c ent->nlink = nlink; ent 427 fs/proc/generic.c ent->subdir = RB_ROOT; ent 428 fs/proc/generic.c refcount_set(&ent->refcnt, 1); ent 429 fs/proc/generic.c spin_lock_init(&ent->pde_unload_lock); ent 430 fs/proc/generic.c INIT_LIST_HEAD(&ent->pde_openers); ent 431 fs/proc/generic.c proc_set_user(ent, (*parent)->uid, (*parent)->gid); ent 433 fs/proc/generic.c ent->proc_dops = &proc_misc_dentry_ops; ent 436 fs/proc/generic.c return ent; ent 442 fs/proc/generic.c struct proc_dir_entry *ent; ent 444 fs/proc/generic.c ent = __proc_create(&parent, name, ent 447 fs/proc/generic.c if (ent) { ent 
448 fs/proc/generic.c ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL); ent 449 fs/proc/generic.c if (ent->data) { ent 450 fs/proc/generic.c strcpy((char*)ent->data,dest); ent 451 fs/proc/generic.c ent->proc_iops = &proc_link_inode_operations; ent 452 fs/proc/generic.c ent = proc_register(parent, ent); ent 454 fs/proc/generic.c pde_free(ent); ent 455 fs/proc/generic.c ent = NULL; ent 458 fs/proc/generic.c return ent; ent 465 fs/proc/generic.c struct proc_dir_entry *ent; ent 470 fs/proc/generic.c ent = __proc_create(&parent, name, S_IFDIR | mode, 2); ent 471 fs/proc/generic.c if (ent) { ent 472 fs/proc/generic.c ent->data = data; ent 473 fs/proc/generic.c ent->proc_fops = &proc_dir_operations; ent 474 fs/proc/generic.c ent->proc_iops = &proc_dir_inode_operations; ent 476 fs/proc/generic.c ent = proc_register(parent, ent); ent 477 fs/proc/generic.c if (!ent) ent 480 fs/proc/generic.c return ent; ent 501 fs/proc/generic.c struct proc_dir_entry *ent, *parent = NULL; ent 503 fs/proc/generic.c ent = __proc_create(&parent, name, mode, 2); ent 504 fs/proc/generic.c if (ent) { ent 505 fs/proc/generic.c ent->data = NULL; ent 506 fs/proc/generic.c ent->proc_fops = NULL; ent 507 fs/proc/generic.c ent->proc_iops = NULL; ent 509 fs/proc/generic.c ent = proc_register(parent, ent); ent 510 fs/proc/generic.c if (!ent) ent 513 fs/proc/generic.c return ent; ent 128 fs/proc/kcore.c struct kcore_list *ent; ent 130 fs/proc/kcore.c ent = kmalloc(sizeof(*ent), GFP_KERNEL); ent 131 fs/proc/kcore.c if (!ent) ent 133 fs/proc/kcore.c ent->addr = (unsigned long)__va(0); ent 134 fs/proc/kcore.c ent->size = max_low_pfn << PAGE_SHIFT; ent 135 fs/proc/kcore.c ent->type = KCORE_RAM; ent 136 fs/proc/kcore.c list_add(&ent->list, head); ent 145 fs/proc/kcore.c get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) ent 147 fs/proc/kcore.c unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT; ent 148 fs/proc/kcore.c unsigned long nr_pages = ent->size >> PAGE_SHIFT; ent 178 fs/proc/kcore.c get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head) ent 189 fs/proc/kcore.c struct kcore_list *ent; ent 199 fs/proc/kcore.c ent = kmalloc(sizeof(*ent), GFP_KERNEL); ent 200 fs/proc/kcore.c if (!ent) ent 202 fs/proc/kcore.c ent->addr = (unsigned long)page_to_virt(p); ent 203 fs/proc/kcore.c ent->size = nr_pages << PAGE_SHIFT; ent 205 fs/proc/kcore.c if (!virt_addr_valid(ent->addr)) ent 209 fs/proc/kcore.c if (ULONG_MAX - ent->addr < ent->size) ent 210 fs/proc/kcore.c ent->size = ULONG_MAX - ent->addr; ent 217 fs/proc/kcore.c if (VMALLOC_START > ent->addr) { ent 218 fs/proc/kcore.c if (VMALLOC_START - ent->addr < ent->size) ent 219 fs/proc/kcore.c ent->size = VMALLOC_START - ent->addr; ent 222 fs/proc/kcore.c ent->type = KCORE_RAM; ent 223 fs/proc/kcore.c list_add_tail(&ent->list, head); ent 225 fs/proc/kcore.c if (!get_sparsemem_vmemmap_info(ent, head)) { ent 226 fs/proc/kcore.c list_del(&ent->list); ent 232 fs/proc/kcore.c kfree(ent); ent 135 fs/proc/proc_tty.c struct proc_dir_entry *ent; ent 141 fs/proc/proc_tty.c ent = proc_create_single_data(driver->driver_name, 0, proc_tty_driver, ent 143 fs/proc/proc_tty.c driver->proc_entry = ent; ent 151 fs/proc/proc_tty.c struct proc_dir_entry *ent; ent 153 fs/proc/proc_tty.c ent = driver->proc_entry; ent 154 fs/proc/proc_tty.c if (!ent) ent 157 fs/proc/proc_tty.c remove_proc_entry(ent->name, proc_tty_driver); ent 610 fs/xfs/libxfs/xfs_dir2_block.c int ent; /* entry index */ ent 619 fs/xfs/libxfs/xfs_dir2_block.c if ((error = 
xfs_dir2_block_lookup_int(args, &bp, &ent))) ent 631 fs/xfs/libxfs/xfs_dir2_block.c be32_to_cpu(blp[ent].address))); ent 760 fs/xfs/libxfs/xfs_dir2_block.c int ent; /* block leaf entry index */ ent 774 fs/xfs/libxfs/xfs_dir2_block.c if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) { ent 787 fs/xfs/libxfs/xfs_dir2_block.c be32_to_cpu(blp[ent].address))); ent 803 fs/xfs/libxfs/xfs_dir2_block.c blp[ent].address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); ent 804 fs/xfs/libxfs/xfs_dir2_block.c xfs_dir2_block_log_leaf(tp, bp, ent, ent); ent 840 fs/xfs/libxfs/xfs_dir2_block.c int ent; /* leaf entry index */ ent 849 fs/xfs/libxfs/xfs_dir2_block.c if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent))) { ent 861 fs/xfs/libxfs/xfs_dir2_block.c be32_to_cpu(blp[ent].address))); ent 248 fs/xfs/scrub/attr.c struct xfs_attr_leaf_entry *ent, ent 261 fs/xfs/scrub/attr.c if (ent->pad2 != 0) ent 265 fs/xfs/scrub/attr.c if (be32_to_cpu(ent->hashval) < *last_hashval) ent 267 fs/xfs/scrub/attr.c *last_hashval = be32_to_cpu(ent->hashval); ent 269 fs/xfs/scrub/attr.c nameidx = be16_to_cpu(ent->nameidx); ent 277 fs/xfs/scrub/attr.c if (ent->flags & XFS_ATTR_LOCAL) { ent 312 fs/xfs/scrub/attr.c struct xfs_attr_leaf_entry *ent; ent 370 fs/xfs/scrub/attr.c for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) { ent 372 fs/xfs/scrub/attr.c off = (char *)ent - (char *)leaf; ent 381 fs/xfs/scrub/attr.c ent, i, &usedbytes, &last_hashval); ent 405 fs/xfs/scrub/attr.c struct xfs_attr_leaf_entry *ent = rec; ent 427 fs/xfs/scrub/attr.c error = xchk_da_btree_hash(ds, level, &ent->hashval); ent 434 fs/xfs/scrub/attr.c nameidx = be16_to_cpu(ent->nameidx); ent 441 fs/xfs/scrub/attr.c hash = be32_to_cpu(ent->hashval); ent 444 fs/xfs/scrub/attr.c if ((ent->flags & badflags) != 0) ent 446 fs/xfs/scrub/attr.c if (ent->flags & XFS_ATTR_LOCAL) { ent 186 fs/xfs/scrub/dir.c struct xfs_dir2_leaf_entry *ent = rec; ent 202 fs/xfs/scrub/dir.c error = xchk_da_btree_hash(ds, level, &ent->hashval); ent 207 fs/xfs/scrub/dir.c ptr = be32_to_cpu(ent->address); ent 263 fs/xfs/scrub/dir.c hash = be32_to_cpu(ent->hashval); ent 50 include/drm/drm_pci.h const struct pci_device_id *ent, ent 54 include/drm/drm_pci.h const struct pci_device_id *ent, ent 99 include/linux/nubus.h const struct nubus_dirent *ent, ent 102 include/linux/nubus.h const struct nubus_dirent *ent, ent 105 include/linux/nubus.h const struct nubus_dirent *ent); ent 113 include/linux/nubus.h const struct nubus_dirent *ent, ent 117 include/linux/nubus.h const struct nubus_dirent *ent, ent 120 include/linux/nubus.h const struct nubus_dirent *ent) {} ent 147 include/linux/nubus.h struct nubus_dirent *ent); ent 150 include/linux/nubus.h struct nubus_dirent *ent); ent 154 include/linux/nubus.h int nubus_get_subdir(const struct nubus_dirent *ent, ent 9 include/linux/swap_cgroup.h extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, ent 11 include/linux/swap_cgroup.h extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, ent 13 include/linux/swap_cgroup.h extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); ent 20 include/linux/swap_cgroup.h unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, ent 27 include/linux/swap_cgroup.h unsigned short lookup_swap_cgroup_id(swp_entry_t ent) ent 94 include/linux/trace_events.h struct trace_entry *ent; ent 152 include/media/drv-intf/exynos-fimc.h #define fimc_pipeline_call(ent, op, args...) \ ent 153 include/media/drv-intf/exynos-fimc.h ((!(ent) || !(ent)->pipe) ? 
-ENOENT : \ ent 154 include/media/drv-intf/exynos-fimc.h (((ent)->pipe->ops && (ent)->pipe->ops->op) ? \ ent 155 include/media/drv-intf/exynos-fimc.h (ent)->pipe->ops->op(((ent)->pipe), ##args) : -ENOIOCTLCMD)) \ ent 881 include/media/v4l2-subdev.h #define media_entity_to_v4l2_subdev(ent) \ ent 883 include/media/v4l2-subdev.h typeof(ent) __me_sd_ent = (ent); \ ent 420 include/trace/events/xen.h TP_PROTO(gate_desc *dt, int entrynum, const gate_desc *ent), ent 421 include/trace/events/xen.h TP_ARGS(dt, entrynum, ent), ent 113 include/trace/trace_events.h struct trace_entry ent; \ ent 354 include/trace/trace_events.h field = (typeof(field))iter->ent; \ ent 378 include/trace/trace_events.h entry = iter->ent; \ ent 59 init/do_mounts_md.c int ent; ent 70 init/do_mounts_md.c for (ent=0 ; ent< md_setup_ents ; ent++) ent 71 init/do_mounts_md.c if (md_setup_args[ent].minor == minor && ent 72 init/do_mounts_md.c md_setup_args[ent].partitioned == partitioned) { ent 77 init/do_mounts_md.c if (ent >= ARRAY_SIZE(md_setup_args)) { ent 81 init/do_mounts_md.c if (ent >= md_setup_ents) ent 91 init/do_mounts_md.c md_setup_args[ent].level = level; ent 92 init/do_mounts_md.c md_setup_args[ent].chunk = 1 << (factor+12); ent 104 init/do_mounts_md.c md_setup_args[ent].level = LEVEL_NONE; ent 110 init/do_mounts_md.c md_setup_args[ent].device_names = str; ent 111 init/do_mounts_md.c md_setup_args[ent].partitioned = partitioned; ent 112 init/do_mounts_md.c md_setup_args[ent].minor = minor; ent 119 init/do_mounts_md.c int minor, i, ent, partitioned; ent 123 init/do_mounts_md.c for (ent = 0; ent < md_setup_ents ; ent++) { ent 130 init/do_mounts_md.c minor = md_setup_args[ent].minor; ent 131 init/do_mounts_md.c partitioned = md_setup_args[ent].partitioned; ent 132 init/do_mounts_md.c devname = md_setup_args[ent].device_names; ent 172 init/do_mounts_md.c md_setup_args[ent].device_names); ent 188 init/do_mounts_md.c if (md_setup_args[ent].level != LEVEL_NONE) { ent 191 init/do_mounts_md.c ainfo.level = md_setup_args[ent].level; ent 202 init/do_mounts_md.c ainfo.chunk_size = md_setup_args[ent].chunk; ent 1431 kernel/kprobes.c struct kprobe_blacklist_entry *ent; ent 1439 kernel/kprobes.c list_for_each_entry(ent, &kprobe_blacklist, list) { ent 1440 kernel/kprobes.c if (addr >= ent->start_addr && addr < ent->end_addr) ent 2148 kernel/kprobes.c struct kprobe_blacklist_entry *ent; ent 2155 kernel/kprobes.c ent = kmalloc(sizeof(*ent), GFP_KERNEL); ent 2156 kernel/kprobes.c if (!ent) ent 2158 kernel/kprobes.c ent->start_addr = entry; ent 2159 kernel/kprobes.c ent->end_addr = entry + size; ent 2160 kernel/kprobes.c INIT_LIST_HEAD(&ent->list); ent 2161 kernel/kprobes.c list_add_tail(&ent->list, &kprobe_blacklist); ent 2433 kernel/kprobes.c struct kprobe_blacklist_entry *ent = ent 2442 kernel/kprobes.c (void *)ent->start_addr); ent 2444 kernel/kprobes.c seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr, ent 2445 kernel/kprobes.c (void *)ent->end_addr, (void *)ent->start_addr); ent 1213 kernel/trace/blktrace.c const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent) ent 1215 kernel/trace/blktrace.c return (const struct blk_io_trace *)ent; ent 1218 kernel/trace/blktrace.c static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg) ent 1220 kernel/trace/blktrace.c return (void *)(te_blk_io_trace(ent) + 1) + ent 1224 kernel/trace/blktrace.c static inline const void *cgid_start(const struct trace_entry *ent) ent 1226 kernel/trace/blktrace.c return (void *)(te_blk_io_trace(ent) + 1); ent 1229 
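The kprobes hits above build kprobe_blacklist as a linked list of {start_addr, end_addr} ranges and answer lookups with a simple containment test while walking the list; the same embedded-list-plus-container_of shape appears in the w1, ceph and xenbus hits earlier on this page. A minimal userspace sketch of that idiom, with hand-rolled stand-ins for <linux/list.h> rather than the kernel's implementation:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct blacklist_entry {
	unsigned long start_addr, end_addr;	/* half-open range [start, end) */
	struct list_head list;			/* embedded link */
};

static struct list_head blacklist;

static void blacklist_add(unsigned long start, unsigned long size)
{
	struct blacklist_entry *ent = malloc(sizeof(*ent));

	if (!ent)
		return;
	ent->start_addr = start;
	ent->end_addr = start + size;
	list_add_tail(&ent->list, &blacklist);
}

static int blacklisted(unsigned long addr)
{
	struct list_head *pos;

	for (pos = blacklist.next; pos != &blacklist; pos = pos->next) {
		struct blacklist_entry *ent =
			container_of(pos, struct blacklist_entry, list);

		if (addr >= ent->start_addr && addr < ent->end_addr)
			return 1;
	}
	return 0;
}

int main(void)
{
	list_init(&blacklist);
	blacklist_add(0x1000, 0x100);
	blacklist_add(0x4000, 0x40);

	printf("0x1010 blacklisted: %d\n", blacklisted(0x1010));
	printf("0x2000 blacklisted: %d\n", blacklisted(0x2000));
	return 0;
}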
kernel/trace/blktrace.c static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg) ent 1231 kernel/trace/blktrace.c return te_blk_io_trace(ent)->pdu_len - ent 1235 kernel/trace/blktrace.c static inline u32 t_action(const struct trace_entry *ent) ent 1237 kernel/trace/blktrace.c return te_blk_io_trace(ent)->action; ent 1240 kernel/trace/blktrace.c static inline u32 t_bytes(const struct trace_entry *ent) ent 1242 kernel/trace/blktrace.c return te_blk_io_trace(ent)->bytes; ent 1245 kernel/trace/blktrace.c static inline u32 t_sec(const struct trace_entry *ent) ent 1247 kernel/trace/blktrace.c return te_blk_io_trace(ent)->bytes >> 9; ent 1250 kernel/trace/blktrace.c static inline unsigned long long t_sector(const struct trace_entry *ent) ent 1252 kernel/trace/blktrace.c return te_blk_io_trace(ent)->sector; ent 1255 kernel/trace/blktrace.c static inline __u16 t_error(const struct trace_entry *ent) ent 1257 kernel/trace/blktrace.c return te_blk_io_trace(ent)->error; ent 1260 kernel/trace/blktrace.c static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg) ent 1262 kernel/trace/blktrace.c const __u64 *val = pdu_start(ent, has_cg); ent 1266 kernel/trace/blktrace.c static void get_pdu_remap(const struct trace_entry *ent, ent 1269 kernel/trace/blktrace.c const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg); ent 1287 kernel/trace/blktrace.c const struct blk_io_trace *t = te_blk_io_trace(iter->ent); ent 1294 kernel/trace/blktrace.c secs, nsec_rem, iter->ent->pid, act, rwbs); ent 1301 kernel/trace/blktrace.c const struct blk_io_trace *t = te_blk_io_trace(iter->ent); ent 1305 kernel/trace/blktrace.c const union kernfs_node_id *id = cgid_start(iter->ent); ent 1326 kernel/trace/blktrace.c const struct trace_entry *ent, bool has_cg) ent 1332 kernel/trace/blktrace.c pdu_buf = pdu_start(ent, has_cg); ent 1333 kernel/trace/blktrace.c pdu_len = pdu_real_len(ent, has_cg); ent 1364 kernel/trace/blktrace.c static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) ent 1368 kernel/trace/blktrace.c trace_find_cmdline(ent->pid, cmd); ent 1370 kernel/trace/blktrace.c if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { ent 1371 kernel/trace/blktrace.c trace_seq_printf(s, "%u ", t_bytes(ent)); ent 1372 kernel/trace/blktrace.c blk_log_dump_pdu(s, ent, has_cg); ent 1375 kernel/trace/blktrace.c if (t_sec(ent)) ent 1377 kernel/trace/blktrace.c t_sector(ent), t_sec(ent), cmd); ent 1384 kernel/trace/blktrace.c const struct trace_entry *ent, bool has_cg) ent 1386 kernel/trace/blktrace.c if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { ent 1387 kernel/trace/blktrace.c blk_log_dump_pdu(s, ent, has_cg); ent 1388 kernel/trace/blktrace.c trace_seq_printf(s, "[%d]\n", t_error(ent)); ent 1390 kernel/trace/blktrace.c if (t_sec(ent)) ent 1392 kernel/trace/blktrace.c t_sector(ent), ent 1393 kernel/trace/blktrace.c t_sec(ent), t_error(ent)); ent 1396 kernel/trace/blktrace.c t_sector(ent), t_error(ent)); ent 1400 kernel/trace/blktrace.c static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) ent 1404 kernel/trace/blktrace.c get_pdu_remap(ent, &r, has_cg); ent 1406 kernel/trace/blktrace.c t_sector(ent), t_sec(ent), ent 1411 kernel/trace/blktrace.c static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) ent 1415 kernel/trace/blktrace.c trace_find_cmdline(ent->pid, cmd); ent 1420 kernel/trace/blktrace.c static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) ent 1424 
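The blktrace hits take a generic `struct trace_entry *ent`, cast it straight to the specific record type (te_blk_io_trace), and locate the variable-length payload just past the fixed struct with `(t + 1)`. That only works because the generic header is the first member of the specific record. A compact userspace sketch of the same layout trick, with made-up record fields rather than blktrace's:

#include <stdio.h>

/* Generic header, always the first member of every record type. */
struct trace_hdr {
	unsigned short type;
	int pid;
};

/* One specific record type; variable-length payload bytes follow it. */
struct io_record {
	struct trace_hdr hdr;	/* must stay first for the cast to be valid */
	unsigned long long sector;
	unsigned int bytes;
	unsigned short pdu_len;
};

static const struct io_record *to_io_record(const struct trace_hdr *ent)
{
	/* Safe only because trace_hdr is the first member of io_record. */
	return (const struct io_record *)ent;
}

static const void *pdu_start(const struct trace_hdr *ent)
{
	/* Payload sits immediately after the fixed-size struct. */
	return to_io_record(ent) + 1;
}

int main(void)
{
	struct {
		struct io_record rec;
		unsigned char pdu[4];
	} msg = {
		.rec = { .hdr = { .type = 1, .pid = 42 },
			 .sector = 2048, .bytes = 4096, .pdu_len = 4 },
		.pdu = { 0xaa, 0xbb, 0xcc, 0xdd },
	};
	const struct trace_hdr *ent = &msg.rec.hdr;
	const unsigned char *pdu = pdu_start(ent);

	printf("pid %d, sector %llu, %u sectors, pdu[0]=0x%02x\n",
	       ent->pid, to_io_record(ent)->sector,
	       to_io_record(ent)->bytes >> 9, (unsigned)pdu[0]);
	return 0;
}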
kernel/trace/blktrace.c trace_find_cmdline(ent->pid, cmd); ent 1426 kernel/trace/blktrace.c trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg)); ent 1429 kernel/trace/blktrace.c static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent, bool has_cg) ent 1433 kernel/trace/blktrace.c trace_find_cmdline(ent->pid, cmd); ent 1435 kernel/trace/blktrace.c trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), ent 1436 kernel/trace/blktrace.c get_pdu_int(ent, has_cg), cmd); ent 1439 kernel/trace/blktrace.c static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent, ent 1443 kernel/trace/blktrace.c trace_seq_putmem(s, pdu_start(ent, has_cg), ent 1444 kernel/trace/blktrace.c pdu_real_len(ent, has_cg)); ent 1484 kernel/trace/blktrace.c void (*print)(struct trace_seq *s, const struct trace_entry *ent, ent 1515 kernel/trace/blktrace.c t = te_blk_io_trace(iter->ent); ent 1523 kernel/trace/blktrace.c blk_log_msg(s, iter->ent, has_cg); ent 1531 kernel/trace/blktrace.c what2act[what].print(s, iter->ent, has_cg); ent 1546 kernel/trace/blktrace.c struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; ent 762 kernel/trace/trace.c struct trace_entry *ent = ring_buffer_event_data(event); ent 764 kernel/trace/trace.c tracing_generic_entry_update(ent, type, flags, pc); ent 2548 kernel/trace/trace.c iter->ent = fbuffer->entry; ent 3301 kernel/trace/trace.c struct trace_entry *ent, *next = NULL; ent 3316 kernel/trace/trace.c ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); ent 3320 kernel/trace/trace.c return ent; ent 3328 kernel/trace/trace.c ent = peek_next_entry(iter, cpu, &ts, &lost_events); ent 3333 kernel/trace/trace.c if (ent && (!next || ts < next_ts)) { ent 3334 kernel/trace/trace.c next = ent; ent 3366 kernel/trace/trace.c iter->ent = __find_next_entry(iter, &iter->cpu, ent 3369 kernel/trace/trace.c if (iter->ent) ent 3372 kernel/trace/trace.c return iter->ent ? 
iter : NULL; ent 3385 kernel/trace/trace.c void *ent; ent 3396 kernel/trace/trace.c ent = trace_find_next_entry_inc(iter); ent 3398 kernel/trace/trace.c ent = iter; ent 3400 kernel/trace/trace.c while (ent && iter->idx < i) ent 3401 kernel/trace/trace.c ent = trace_find_next_entry_inc(iter); ent 3405 kernel/trace/trace.c return ent; ent 3471 kernel/trace/trace.c iter->ent = NULL; ent 3731 kernel/trace/trace.c entry = iter->ent; ent 3762 kernel/trace/trace.c entry = iter->ent; ent 3788 kernel/trace/trace.c entry = iter->ent; ent 3817 kernel/trace/trace.c entry = iter->ent; ent 3885 kernel/trace/trace.c if (iter->ent->type == TRACE_BPUTS && ent 3890 kernel/trace/trace.c if (iter->ent->type == TRACE_BPRINT && ent 3895 kernel/trace/trace.c if (iter->ent->type == TRACE_PRINT && ent 4013 kernel/trace/trace.c if (iter->ent == NULL) { ent 6080 kernel/trace/trace.c iter->ent->type); ent 6155 kernel/trace/trace.c iter->ent = NULL; ent 6200 kernel/trace/trace.c if (!iter->ent && !trace_find_next_entry_inc(iter)) { ent 73 kernel/trace/trace.h struct trace_entry ent; \ ent 99 kernel/trace/trace.h struct trace_entry ent; ent 105 kernel/trace/trace.h struct trace_entry ent; ent 111 kernel/trace/trace.h struct trace_entry ent; ent 116 kernel/trace/trace.h struct trace_entry ent; ent 392 kernel/trace/trace.h #define trace_assign_type(var, ent) \ ent 394 kernel/trace/trace.h IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ ent 395 kernel/trace/trace.h IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ ent 396 kernel/trace/trace.h IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ ent 397 kernel/trace/trace.h IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ ent 398 kernel/trace/trace.h IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ ent 399 kernel/trace/trace.h IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ ent 400 kernel/trace/trace.h IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \ ent 401 kernel/trace/trace.h IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \ ent 402 kernel/trace/trace.h IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\ ent 403 kernel/trace/trace.h IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ ent 405 kernel/trace/trace.h IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ ent 407 kernel/trace/trace.h IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ ent 408 kernel/trace/trace.h IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ ent 410 kernel/trace/trace.h IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ ent 145 kernel/trace/trace_branch.c trace_assign_type(field, iter->ent); ent 157 kernel/trace/trace_events.c offsetof(typeof(ent), item), \ ent 158 kernel/trace/trace_events.c sizeof(ent.item), \ ent 178 kernel/trace/trace_events.c struct trace_entry ent; ent 642 kernel/trace/trace_events_hist.c struct trace_entry ent; ent 849 kernel/trace/trace_events_hist.c entry = (struct synth_trace_event *)iter->ent; ent 34 kernel/trace/trace_functions_graph.c struct ftrace_graph_ent_entry ent; ent 194 kernel/trace/trace_functions_graph.c struct ftrace_graph_ent ent = { ent 205 kernel/trace/trace_functions_graph.c __trace_graph_entry(tr, &ent, flags, pc); ent 433 kernel/trace/trace_functions_graph.c curr = &data->ent; ent 463 kernel/trace/trace_functions_graph.c data->ent = *curr; ent 469 kernel/trace/trace_functions_graph.c if (next->ent.type == TRACE_GRAPH_RET) ent 472 kernel/trace/trace_functions_graph.c data->ret.ent.type = next->ent.type; ent 476 kernel/trace/trace_functions_graph.c if (next->ent.type != 
TRACE_GRAPH_RET) ent 479 kernel/trace/trace_functions_graph.c if (curr->ent.pid != next->ent.pid || ent 518 kernel/trace/trace_functions_graph.c struct trace_entry *ent = iter->ent; ent 545 kernel/trace/trace_functions_graph.c print_graph_lat_fmt(s, ent); ent 671 kernel/trace/trace_functions_graph.c cpu, iter->ent->pid, flags); ent 723 kernel/trace/trace_functions_graph.c struct trace_entry *ent = iter->ent; ent 728 kernel/trace/trace_functions_graph.c verif_pid(s, ent->pid, cpu, data); ent 732 kernel/trace/trace_functions_graph.c print_graph_irq(iter, addr, type, cpu, ent->pid, flags); ent 751 kernel/trace/trace_functions_graph.c print_graph_proc(s, ent->pid); ent 757 kernel/trace/trace_functions_graph.c print_graph_lat_fmt(s, ent); ent 902 kernel/trace/trace_functions_graph.c struct trace_entry *ent, struct trace_iterator *iter, ent 908 kernel/trace/trace_functions_graph.c pid_t pid = ent->pid; ent 970 kernel/trace/trace_functions_graph.c print_graph_comment(struct trace_seq *s, struct trace_entry *ent, ent 997 kernel/trace/trace_functions_graph.c switch (iter->ent->type) { ent 1014 kernel/trace/trace_functions_graph.c event = ftrace_find_event(ent->type); ent 1043 kernel/trace/trace_functions_graph.c struct trace_entry *entry = iter->ent; ent 1058 kernel/trace/trace_functions_graph.c field = &data->ent; ent 1280 kernel/trace/trace_kprobe.c field = (struct kprobe_trace_entry_head *)iter->ent; ent 1310 kernel/trace/trace_kprobe.c field = (struct kretprobe_trace_entry_head *)iter->ent; ent 169 kernel/trace/trace_mmiotrace.c struct trace_entry *entry = iter->ent; ent 214 kernel/trace/trace_mmiotrace.c struct trace_entry *entry = iter->ent; ent 248 kernel/trace/trace_mmiotrace.c struct trace_entry *entry = iter->ent; ent 264 kernel/trace/trace_mmiotrace.c switch (iter->ent->type) { ent 28 kernel/trace/trace_output.c struct trace_entry *entry = iter->ent; ent 41 kernel/trace/trace_output.c struct trace_entry *entry = iter->ent; ent 54 kernel/trace/trace_output.c struct trace_entry *entry = iter->ent; ent 286 kernel/trace/trace_output.c entry = iter->ent; ent 569 kernel/trace/trace_output.c struct trace_entry *entry = iter->ent; ent 610 kernel/trace/trace_output.c struct trace_entry *entry = iter->ent, ent 813 kernel/trace/trace_output.c trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type); ent 825 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 844 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 859 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 873 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 902 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 937 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 973 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 1008 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 1055 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 1056 kernel/trace/trace_output.c end = (unsigned long *)((long)iter->ent + iter->ent_size); ent 1092 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 1140 kernel/trace/trace_output.c struct trace_entry *entry = iter->ent; ent 1178 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 1205 kernel/trace/trace_output.c struct trace_entry *entry = iter->ent; ent 1226 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 1249 kernel/trace/trace_output.c struct trace_entry *entry = iter->ent; ent 1270 
kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 1295 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 1308 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 1331 kernel/trace/trace_output.c trace_assign_type(field, iter->ent); ent 34 kernel/trace/trace_probe.c int PRINT_TYPE_FUNC_NAME(tname)(struct trace_seq *s, void *data, void *ent)\ ent 54 kernel/trace/trace_probe.c int PRINT_TYPE_FUNC_NAME(symbol)(struct trace_seq *s, void *data, void *ent) ent 62 kernel/trace/trace_probe.c int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, void *data, void *ent) ent 70 kernel/trace/trace_probe.c (const char *)get_loc_data(data, ent)); ent 65 kernel/trace/trace_probe.h static nokprobe_inline void *get_loc_data(u32 *dl, void *ent) ent 67 kernel/trace/trace_probe.h return (u8 *)ent + get_loc_offs(*dl); ent 151 kernel/trace/trace_probe.h int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, void *data, void *ent);\ ent 127 kernel/trace/trace_syscalls.c struct trace_entry *ent = iter->ent; ent 132 kernel/trace/trace_syscalls.c trace = (typeof(trace))ent; ent 139 kernel/trace/trace_syscalls.c if (entry->enter_event->event.type != ent->type) { ent 173 kernel/trace/trace_syscalls.c struct trace_entry *ent = iter->ent; ent 178 kernel/trace/trace_syscalls.c trace = (typeof(trace))ent; ent 187 kernel/trace/trace_syscalls.c if (entry->exit_event->event.type != ent->type) { ent 26 kernel/trace/trace_uprobe.c struct trace_entry ent; ent 1008 kernel/trace/trace_uprobe.c entry = (struct uprobe_trace_entry_head *)iter->ent; ent 25 lib/error-inject.c struct ei_entry *ent; ent 29 lib/error-inject.c list_for_each_entry(ent, &error_injection_list, list) { ent 30 lib/error-inject.c if (addr >= ent->start_addr && addr < ent->end_addr) { ent 41 lib/error-inject.c struct ei_entry *ent; ent 43 lib/error-inject.c list_for_each_entry(ent, &error_injection_list, list) { ent 44 lib/error-inject.c if (addr >= ent->start_addr && addr < ent->end_addr) ent 45 lib/error-inject.c return ent->etype; ent 62 lib/error-inject.c struct ei_entry *ent; ent 76 lib/error-inject.c ent = kmalloc(sizeof(*ent), GFP_KERNEL); ent 77 lib/error-inject.c if (!ent) ent 79 lib/error-inject.c ent->start_addr = entry; ent 80 lib/error-inject.c ent->end_addr = entry + size; ent 81 lib/error-inject.c ent->etype = iter->etype; ent 82 lib/error-inject.c ent->priv = priv; ent 83 lib/error-inject.c INIT_LIST_HEAD(&ent->list); ent 84 lib/error-inject.c list_add_tail(&ent->list, &error_injection_list); ent 112 lib/error-inject.c struct ei_entry *ent, *n; ent 118 lib/error-inject.c list_for_each_entry_safe(ent, n, &error_injection_list, list) { ent 119 lib/error-inject.c if (ent->priv == mod) { ent 120 lib/error-inject.c list_del_init(&ent->list); ent 121 lib/error-inject.c kfree(ent); ent 190 lib/error-inject.c struct ei_entry *ent = list_entry(v, struct ei_entry, list); ent 192 lib/error-inject.c seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr, ent 193 lib/error-inject.c error_type_string(ent->etype)); ent 5362 mm/memcontrol.c swp_entry_t ent; ent 5397 mm/memcontrol.c swp_entry_t ent = pte_to_swp_entry(ptent); ent 5399 mm/memcontrol.c if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent)) ent 5407 mm/memcontrol.c if (is_device_private_entry(ent)) { ent 5408 mm/memcontrol.c page = device_private_entry_to_page(ent); ent 5422 mm/memcontrol.c page = find_get_page(swap_address_space(ent), swp_offset(ent)); ent 5424 mm/memcontrol.c entry->val = ent.val; ent 5599 mm/memcontrol.c swp_entry_t ent = { .val = 0 }; 
ent 5604 mm/memcontrol.c page = mc_handle_swap_pte(vma, ptent, &ent); ent 5606 mm/memcontrol.c page = mc_handle_file_pte(vma, addr, ptent, &ent); ent 5608 mm/memcontrol.c if (!page && !ent.val) ent 5630 mm/memcontrol.c if (ent.val && !ret && (!page || !PageTransCompound(page)) && ent 5631 mm/memcontrol.c mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { ent 5634 mm/memcontrol.c target->ent = ent; ent 5926 mm/memcontrol.c swp_entry_t ent; ent 5959 mm/memcontrol.c ent = target.ent; ent 5960 mm/memcontrol.c if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { ent 6537 mm/memcontrol.c swp_entry_t ent = { .val = page_private(page), }; ent 6538 mm/memcontrol.c unsigned short id = lookup_swap_cgroup_id(ent); ent 76 mm/swap_cgroup.c static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, ent 79 mm/swap_cgroup.c pgoff_t offset = swp_offset(ent); ent 82 mm/swap_cgroup.c ctrl = &swap_cgroup_ctrl[swp_type(ent)]; ent 97 mm/swap_cgroup.c unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, ent 105 mm/swap_cgroup.c sc = lookup_swap_cgroup(ent, &ctrl); ent 126 mm/swap_cgroup.c unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, ent 133 mm/swap_cgroup.c pgoff_t offset = swp_offset(ent); ent 136 mm/swap_cgroup.c sc = lookup_swap_cgroup(ent, &ctrl); ent 162 mm/swap_cgroup.c unsigned short lookup_swap_cgroup_id(swp_entry_t ent) ent 164 mm/swap_cgroup.c return lookup_swap_cgroup(ent, NULL)->id; ent 111 mm/swapfile.c static inline unsigned char swap_count(unsigned char ent) ent 113 mm/swapfile.c return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */ ent 84 net/bridge/br_fdb.c struct net_bridge_fdb_entry *ent ent 86 net/bridge/br_fdb.c kmem_cache_free(br_fdb_cache, ent); ent 71 net/bridge/br_multicast.c struct net_bridge_mdb_entry *ent; ent 76 net/bridge/br_multicast.c ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params); ent 79 net/bridge/br_multicast.c return ent; ent 51 net/can/j1939/bus.c struct j1939_addr_ent *ent; ent 58 net/can/j1939/bus.c ent = &priv->ents[ecu->addr]; ent 60 net/can/j1939/bus.c if (ent->ecu) { ent 67 net/can/j1939/bus.c ent->ecu = ecu; ent 68 net/can/j1939/bus.c ent->nusers += ecu->nusers; ent 75 net/can/j1939/bus.c struct j1939_addr_ent *ent; ent 85 net/can/j1939/bus.c ent = &priv->ents[ecu->addr]; ent 86 net/can/j1939/bus.c ent->ecu = NULL; ent 87 net/can/j1939/bus.c ent->nusers -= ecu->nusers; ent 177 net/netfilter/xt_hashlimit.c static inline bool dst_cmp(const struct dsthash_ent *ent, ent 180 net/netfilter/xt_hashlimit.c return !memcmp(&ent->dst, b, sizeof(ent->dst)); ent 202 net/netfilter/xt_hashlimit.c struct dsthash_ent *ent; ent 206 net/netfilter/xt_hashlimit.c hlist_for_each_entry_rcu(ent, &ht->hash[hash], node) ent 207 net/netfilter/xt_hashlimit.c if (dst_cmp(ent, dst)) { ent 208 net/netfilter/xt_hashlimit.c spin_lock(&ent->lock); ent 209 net/netfilter/xt_hashlimit.c return ent; ent 220 net/netfilter/xt_hashlimit.c struct dsthash_ent *ent; ent 227 net/netfilter/xt_hashlimit.c ent = dsthash_find(ht, dst); ent 228 net/netfilter/xt_hashlimit.c if (ent != NULL) { ent 231 net/netfilter/xt_hashlimit.c return ent; ent 244 net/netfilter/xt_hashlimit.c ent = NULL; ent 246 net/netfilter/xt_hashlimit.c ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC); ent 247 net/netfilter/xt_hashlimit.c if (ent) { ent 248 net/netfilter/xt_hashlimit.c memcpy(&ent->dst, dst, sizeof(ent->dst)); ent 249 net/netfilter/xt_hashlimit.c spin_lock_init(&ent->lock); ent 251 net/netfilter/xt_hashlimit.c spin_lock(&ent->lock); ent 252 
net/netfilter/xt_hashlimit.c hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]); ent 256 net/netfilter/xt_hashlimit.c return ent; ent 261 net/netfilter/xt_hashlimit.c struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu); ent 263 net/netfilter/xt_hashlimit.c kmem_cache_free(hashlimit_cachep, ent); ent 267 net/netfilter/xt_hashlimit.c dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent) ent 269 net/netfilter/xt_hashlimit.c hlist_del_rcu(&ent->node); ent 270 net/netfilter/xt_hashlimit.c call_rcu(&ent->rcu, dsthash_free_rcu); ent 1094 net/netfilter/xt_hashlimit.c static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family, ent 1100 net/netfilter/xt_hashlimit.c (long)(ent->expires - jiffies)/HZ, ent 1101 net/netfilter/xt_hashlimit.c &ent->dst.ip.src, ent 1102 net/netfilter/xt_hashlimit.c ntohs(ent->dst.src_port), ent 1103 net/netfilter/xt_hashlimit.c &ent->dst.ip.dst, ent 1104 net/netfilter/xt_hashlimit.c ntohs(ent->dst.dst_port), ent 1105 net/netfilter/xt_hashlimit.c ent->rateinfo.credit, ent->rateinfo.credit_cap, ent 1106 net/netfilter/xt_hashlimit.c ent->rateinfo.cost); ent 1111 net/netfilter/xt_hashlimit.c (long)(ent->expires - jiffies)/HZ, ent 1112 net/netfilter/xt_hashlimit.c &ent->dst.ip6.src, ent 1113 net/netfilter/xt_hashlimit.c ntohs(ent->dst.src_port), ent 1114 net/netfilter/xt_hashlimit.c &ent->dst.ip6.dst, ent 1115 net/netfilter/xt_hashlimit.c ntohs(ent->dst.dst_port), ent 1116 net/netfilter/xt_hashlimit.c ent->rateinfo.credit, ent->rateinfo.credit_cap, ent 1117 net/netfilter/xt_hashlimit.c ent->rateinfo.cost); ent 1125 net/netfilter/xt_hashlimit.c static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family, ent 1130 net/netfilter/xt_hashlimit.c spin_lock(&ent->lock); ent 1132 net/netfilter/xt_hashlimit.c rateinfo_recalc(ent, jiffies, ht->cfg.mode, 2); ent 1134 net/netfilter/xt_hashlimit.c dl_seq_print(ent, family, s); ent 1136 net/netfilter/xt_hashlimit.c spin_unlock(&ent->lock); ent 1140 net/netfilter/xt_hashlimit.c static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family, ent 1145 net/netfilter/xt_hashlimit.c spin_lock(&ent->lock); ent 1147 net/netfilter/xt_hashlimit.c rateinfo_recalc(ent, jiffies, ht->cfg.mode, 1); ent 1149 net/netfilter/xt_hashlimit.c dl_seq_print(ent, family, s); ent 1151 net/netfilter/xt_hashlimit.c spin_unlock(&ent->lock); ent 1155 net/netfilter/xt_hashlimit.c static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, ent 1160 net/netfilter/xt_hashlimit.c spin_lock(&ent->lock); ent 1162 net/netfilter/xt_hashlimit.c rateinfo_recalc(ent, jiffies, ht->cfg.mode, 3); ent 1164 net/netfilter/xt_hashlimit.c dl_seq_print(ent, family, s); ent 1166 net/netfilter/xt_hashlimit.c spin_unlock(&ent->lock); ent 1174 net/netfilter/xt_hashlimit.c struct dsthash_ent *ent; ent 1177 net/netfilter/xt_hashlimit.c hlist_for_each_entry(ent, &htable->hash[*bucket], node) ent 1178 net/netfilter/xt_hashlimit.c if (dl_seq_real_show_v2(ent, htable->family, s)) ent 1188 net/netfilter/xt_hashlimit.c struct dsthash_ent *ent; ent 1191 net/netfilter/xt_hashlimit.c hlist_for_each_entry(ent, &htable->hash[*bucket], node) ent 1192 net/netfilter/xt_hashlimit.c if (dl_seq_real_show_v1(ent, htable->family, s)) ent 1202 net/netfilter/xt_hashlimit.c struct dsthash_ent *ent; ent 1205 net/netfilter/xt_hashlimit.c hlist_for_each_entry(ent, &htable->hash[*bucket], node) ent 1206 net/netfilter/xt_hashlimit.c if (dl_seq_real_show(ent, htable->family, s)) ent 99 net/sctp/objcnt.c struct proc_dir_entry *ent; ent 101 
net/sctp/objcnt.c ent = proc_create_seq("sctp_dbg_objcnt", 0, ent 103 net/sctp/objcnt.c if (!ent) ent 754 samples/v4l/v4l2-pci-skeleton.c static int skeleton_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ent 99 samples/vfio-mdev/mdpy-fb.c const struct pci_device_id *ent) ent 1789 security/apparmor/apparmorfs.c struct aa_loaddata *ent, *tmp; ent 1793 security/apparmor/apparmorfs.c list_for_each_entry_safe(ent, tmp, &ns->rawdata_list, list) ent 1794 security/apparmor/apparmorfs.c __aa_fs_remove_rawdata(ent); ent 67 security/apparmor/capability.c struct audit_cache *ent; ent 89 security/apparmor/capability.c ent = &get_cpu_var(audit_cache); ent 90 security/apparmor/capability.c if (profile == ent->profile && cap_raised(ent->caps, cap)) { ent 96 security/apparmor/capability.c aa_put_profile(ent->profile); ent 97 security/apparmor/capability.c ent->profile = aa_get_profile(profile); ent 98 security/apparmor/capability.c cap_raise(ent->caps, cap); ent 27 security/apparmor/include/policy_unpack.h void aa_load_ent_free(struct aa_load_ent *ent); ent 709 security/apparmor/policy.c struct aa_load_ent *ent; ent 716 security/apparmor/policy.c list_for_each_entry(ent, lh, list) { ent 717 security/apparmor/policy.c if (ent->new == profile) ent 719 security/apparmor/policy.c if (strncmp(ent->new->base.hname, profile->base.hname, len) == ent 720 security/apparmor/policy.c 0 && ent->new->base.hname[len] == 0) ent 721 security/apparmor/policy.c return ent->new; ent 861 security/apparmor/policy.c struct aa_load_ent *ent, *tmp; ent 880 security/apparmor/policy.c list_for_each_entry(ent, &lh, list) { ent 882 security/apparmor/policy.c if (ent->ns_name && ent 883 security/apparmor/policy.c strcmp(ent->ns_name, ns_name) != 0) { ent 888 security/apparmor/policy.c } else if (ent->ns_name) { ent 894 security/apparmor/policy.c ns_name = ent->ns_name; ent 906 security/apparmor/policy.c ent = NULL; ent 928 security/apparmor/policy.c list_for_each_entry(ent, &lh, list) { ent 931 security/apparmor/policy.c ent->new->rawdata = aa_get_loaddata(udata); ent 932 security/apparmor/policy.c error = __lookup_replace(ns, ent->new->base.hname, ent 934 security/apparmor/policy.c &ent->old, &info); ent 938 security/apparmor/policy.c if (ent->new->rename) { ent 939 security/apparmor/policy.c error = __lookup_replace(ns, ent->new->rename, ent 941 security/apparmor/policy.c &ent->rename, &info); ent 947 security/apparmor/policy.c ent->new->ns = aa_get_ns(ns); ent 949 security/apparmor/policy.c if (ent->old || ent->rename) ent 953 security/apparmor/policy.c policy = __lookup_parent(ns, ent->new->base.hname); ent 956 security/apparmor/policy.c p = __list_lookup_parent(&lh, ent->new); ent 962 security/apparmor/policy.c rcu_assign_pointer(ent->new->parent, aa_get_profile(p)); ent 966 security/apparmor/policy.c rcu_assign_pointer(ent->new->parent, aa_get_profile(p)); ent 975 security/apparmor/policy.c ent = NULL; ent 979 security/apparmor/policy.c list_for_each_entry(ent, &lh, list) { ent 980 security/apparmor/policy.c if (!ent->old) { ent 982 security/apparmor/policy.c if (rcu_access_pointer(ent->new->parent)) { ent 984 security/apparmor/policy.c p = aa_deref_parent(ent->new); ent 987 security/apparmor/policy.c parent = ns_subprofs_dir(ent->new->ns); ent 988 security/apparmor/policy.c error = __aafs_profile_mkdir(ent->new, parent); ent 1000 security/apparmor/policy.c list_for_each_entry_safe(ent, tmp, &lh, list) { ent 1001 security/apparmor/policy.c list_del_init(&ent->list); ent 1002 security/apparmor/policy.c op = 
(!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL; ent 1004 security/apparmor/policy.c if (ent->old && ent->old->rawdata == ent->new->rawdata) { ent 1006 security/apparmor/policy.c audit_policy(label, op, ns_name, ent->new->base.hname, ent 1010 security/apparmor/policy.c aa_put_proxy(ent->new->label.proxy); ent 1011 security/apparmor/policy.c ent->new->label.proxy = NULL; ent 1019 security/apparmor/policy.c audit_policy(label, op, ns_name, ent->new->base.hname, NULL, ent 1022 security/apparmor/policy.c if (ent->old) { ent 1023 security/apparmor/policy.c share_name(ent->old, ent->new); ent 1024 security/apparmor/policy.c __replace_profile(ent->old, ent->new); ent 1028 security/apparmor/policy.c if (rcu_access_pointer(ent->new->parent)) { ent 1031 security/apparmor/policy.c parent = update_to_newest_parent(ent->new); ent 1035 security/apparmor/policy.c __add_profile(lh, ent->new); ent 1038 security/apparmor/policy.c aa_load_ent_free(ent); ent 1055 security/apparmor/policy.c op = (ent && !ent->old) ? OP_PROF_LOAD : OP_PROF_REPL; ent 1057 security/apparmor/policy.c audit_policy(label, op, ns_name, ent ? ent->new->base.hname : NULL, ent 1062 security/apparmor/policy.c if (tmp == ent) { ent 1071 security/apparmor/policy.c list_for_each_entry_safe(ent, tmp, &lh, list) { ent 1072 security/apparmor/policy.c list_del_init(&ent->list); ent 1073 security/apparmor/policy.c aa_load_ent_free(ent); ent 1023 security/apparmor/policy_unpack.c void aa_load_ent_free(struct aa_load_ent *ent) ent 1025 security/apparmor/policy_unpack.c if (ent) { ent 1026 security/apparmor/policy_unpack.c aa_put_profile(ent->rename); ent 1027 security/apparmor/policy_unpack.c aa_put_profile(ent->old); ent 1028 security/apparmor/policy_unpack.c aa_put_profile(ent->new); ent 1029 security/apparmor/policy_unpack.c kfree(ent->ns_name); ent 1030 security/apparmor/policy_unpack.c kzfree(ent); ent 1036 security/apparmor/policy_unpack.c struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL); ent 1037 security/apparmor/policy_unpack.c if (ent) ent 1038 security/apparmor/policy_unpack.c INIT_LIST_HEAD(&ent->list); ent 1039 security/apparmor/policy_unpack.c return ent; ent 1057 security/apparmor/policy_unpack.c struct aa_load_ent *tmp, *ent; ent 1091 security/apparmor/policy_unpack.c ent = aa_load_ent_alloc(); ent 1092 security/apparmor/policy_unpack.c if (!ent) { ent 1097 security/apparmor/policy_unpack.c ent->new = profile; ent 1098 security/apparmor/policy_unpack.c ent->ns_name = ns_name; ent 1099 security/apparmor/policy_unpack.c list_add_tail(&ent->list, lh); ent 1116 security/apparmor/policy_unpack.c list_for_each_entry_safe(ent, tmp, lh, list) { ent 1117 security/apparmor/policy_unpack.c list_del_init(&ent->list); ent 1118 security/apparmor/policy_unpack.c aa_load_ent_free(ent); ent 165 tools/gpio/lsgpio.c const struct dirent *ent; ent 176 tools/gpio/lsgpio.c while (ent = readdir(dp), ent) { ent 177 tools/gpio/lsgpio.c if (check_prefix(ent->d_name, "gpiochip")) { ent 178 tools/gpio/lsgpio.c ret = list_device(ent->d_name); ent 115 tools/hv/hv_vss_daemon.c struct mntent *ent; ent 137 tools/hv/hv_vss_daemon.c while ((ent = getmntent(mounts))) { ent 138 tools/hv/hv_vss_daemon.c if (strncmp(ent->mnt_fsname, match, strlen(match))) ent 140 tools/hv/hv_vss_daemon.c if (stat(ent->mnt_fsname, &sb)) { ent 142 tools/hv/hv_vss_daemon.c ent->mnt_fsname, errno, strerror(errno)); ent 149 tools/hv/hv_vss_daemon.c if (hasmntopt(ent, MNTOPT_RO) != NULL) ent 151 tools/hv/hv_vss_daemon.c if (strcmp(ent->mnt_type, "vfat") == 0) ent 153 
tools/hv/hv_vss_daemon.c if (strcmp(ent->mnt_dir, "/") == 0) { ent 157 tools/hv/hv_vss_daemon.c error |= vss_do_freeze(ent->mnt_dir, cmd); ent 173 tools/hv/hv_vss_daemon.c if (ent) { ent 174 tools/hv/hv_vss_daemon.c strncpy(errdir, ent->mnt_dir, sizeof(errdir)-1); ent 179 tools/hv/hv_vss_daemon.c if (ent) ent 203 tools/iio/iio_generic_buffer.c const struct dirent *ent; ent 220 tools/iio/iio_generic_buffer.c while (ent = readdir(dp), ent) { ent 221 tools/iio/iio_generic_buffer.c if (iioutils_check_suffix(ent->d_name, "_en")) { ent 224 tools/iio/iio_generic_buffer.c ent->d_name); ent 225 tools/iio/iio_generic_buffer.c ret = write_sysfs_int(ent->d_name, scanelemdir, ent 229 tools/iio/iio_generic_buffer.c ent->d_name); ent 96 tools/iio/iio_utils.c const struct dirent *ent; ent 120 tools/iio/iio_utils.c while (ent = readdir(dp), ent) ent 121 tools/iio/iio_utils.c if ((strcmp(builtname, ent->d_name) == 0) || ent 122 tools/iio/iio_utils.c (strcmp(builtname_generic, ent->d_name) == 0)) { ent 124 tools/iio/iio_utils.c "%s/%s", scan_el_dir, ent->d_name); ent 179 tools/iio/iio_utils.c if (strcmp(builtname, ent->d_name) == 0) ent 225 tools/iio/iio_utils.c const struct dirent *ent; ent 245 tools/iio/iio_utils.c while (ent = readdir(dp), ent) ent 246 tools/iio/iio_utils.c if ((strcmp(builtname, ent->d_name) == 0) || ent 247 tools/iio/iio_utils.c (strcmp(builtname_generic, ent->d_name) == 0)) { ent 249 tools/iio/iio_utils.c "%s/%s", device_dir, ent->d_name); ent 319 tools/iio/iio_utils.c const struct dirent *ent; ent 334 tools/iio/iio_utils.c while (ent = readdir(dp), ent) ent 335 tools/iio/iio_utils.c if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"), ent 338 tools/iio/iio_utils.c "%s/%s", scan_el_dir, ent->d_name); ent 379 tools/iio/iio_utils.c while (ent = readdir(dp), ent) { ent 380 tools/iio/iio_utils.c if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"), ent 386 tools/iio/iio_utils.c "%s/%s", scan_el_dir, ent->d_name); ent 425 tools/iio/iio_utils.c current->name = strndup(ent->d_name, ent 426 tools/iio/iio_utils.c strlen(ent->d_name) - ent 565 tools/iio/iio_utils.c const struct dirent *ent; ent 579 tools/iio/iio_utils.c while (ent = readdir(dp), ent) { ent 580 tools/iio/iio_utils.c if (strcmp(ent->d_name, ".") != 0 && ent 581 tools/iio/iio_utils.c strcmp(ent->d_name, "..") != 0 && ent 582 tools/iio/iio_utils.c strlen(ent->d_name) > strlen(type) && ent 583 tools/iio/iio_utils.c strncmp(ent->d_name, type, strlen(type)) == 0) { ent 585 tools/iio/iio_utils.c ret = sscanf(ent->d_name + strlen(type), "%d", &number); ent 600 tools/iio/iio_utils.c if (strncmp(ent->d_name + strlen(type) + numstrlen, ent 43 tools/iio/lsiio.c const struct dirent *ent; ent 49 tools/iio/lsiio.c while (ent = readdir(dp), ent) ent 50 tools/iio/lsiio.c if (check_prefix(ent->d_name, "in_") && ent 51 tools/iio/lsiio.c (check_postfix(ent->d_name, "_raw") || ent 52 tools/iio/lsiio.c check_postfix(ent->d_name, "_input"))) ent 53 tools/iio/lsiio.c printf(" %-10s\n", ent->d_name); ent 103 tools/iio/lsiio.c const struct dirent *ent; ent 113 tools/iio/lsiio.c while (ent = readdir(dp), ent) { ent 114 tools/iio/lsiio.c if (check_prefix(ent->d_name, type_device)) { ent 118 tools/iio/lsiio.c ent->d_name) < 0) { ent 135 tools/iio/lsiio.c while (ent = readdir(dp), ent) { ent 136 tools/iio/lsiio.c if (check_prefix(ent->d_name, type_trigger)) { ent 140 tools/iio/lsiio.c ent->d_name) < 0) { ent 18 tools/lib/subcmd/help.c struct cmdname *ent = malloc(sizeof(*ent) + len + 1); ent 20 tools/lib/subcmd/help.c ent->len = len; ent 21 
tools/lib/subcmd/help.c memcpy(ent->name, name, len); ent 22 tools/lib/subcmd/help.c ent->name[len] = 0; ent 25 tools/lib/subcmd/help.c cmds->names[cmds->cnt++] = ent; ent 474 tools/perf/builtin-inject.c struct event_entry *ent; ent 476 tools/perf/builtin-inject.c list_for_each_entry(ent, &inject->samples, node) { ent 477 tools/perf/builtin-inject.c if (sample->tid == ent->tid) { ent 478 tools/perf/builtin-inject.c list_del_init(&ent->node); ent 479 tools/perf/builtin-inject.c free(ent); ent 494 tools/perf/builtin-inject.c struct event_entry *ent; ent 498 tools/perf/builtin-inject.c ent = malloc(event->header.size + sizeof(struct event_entry)); ent 499 tools/perf/builtin-inject.c if (ent == NULL) { ent 505 tools/perf/builtin-inject.c ent->tid = sample->tid; ent 506 tools/perf/builtin-inject.c memcpy(&ent->event, event, event->header.size); ent 507 tools/perf/builtin-inject.c list_add(&ent->node, &inject->samples); ent 517 tools/perf/builtin-inject.c struct event_entry *ent; ent 523 tools/perf/builtin-inject.c list_for_each_entry(ent, &inject->samples, node) { ent 524 tools/perf/builtin-inject.c if (pid == ent->tid) ent 530 tools/perf/builtin-inject.c event_sw = &ent->event[0]; ent 422 tools/perf/builtin-probe.c struct str_node *ent; ent 446 tools/perf/builtin-probe.c strlist__for_each_entry(ent, klist) ent 447 tools/perf/builtin-probe.c pr_info("Removed event: %s\n", ent->s); ent 456 tools/perf/builtin-probe.c strlist__for_each_entry(ent, ulist) ent 457 tools/perf/builtin-probe.c pr_info("Removed event: %s\n", ent->s); ent 447 tools/perf/tests/builtin-test.c #define for_each_shell_test(dir, base, ent) \ ent 448 tools/perf/tests/builtin-test.c while ((ent = readdir(dir)) != NULL) \ ent 449 tools/perf/tests/builtin-test.c if (!is_directory(base, ent) && ent->d_name[0] != '.') ent 476 tools/perf/tests/builtin-test.c struct dirent *ent; ent 488 tools/perf/tests/builtin-test.c for_each_shell_test(dir, path, ent) { ent 490 tools/perf/tests/builtin-test.c const char *desc = shell_test__description(bf, sizeof(bf), path, ent->d_name); ent 527 tools/perf/tests/builtin-test.c struct dirent *ent; ent 540 tools/perf/tests/builtin-test.c for_each_shell_test(dir, st.dir, ent) { ent 544 tools/perf/tests/builtin-test.c .desc = shell_test__description(desc, sizeof(desc), st.dir, ent->d_name), ent 552 tools/perf/tests/builtin-test.c st.file = ent->d_name; ent 639 tools/perf/tests/builtin-test.c struct dirent *ent; ent 650 tools/perf/tests/builtin-test.c for_each_shell_test(dir, path, ent) { ent 654 tools/perf/tests/builtin-test.c .desc = shell_test__description(bf, sizeof(bf), path, ent->d_name), ent 1872 tools/perf/tests/parse-events.c struct dirent *ent; ent 1891 tools/perf/tests/parse-events.c while (!ret && (ent = readdir(dir))) { ent 1896 tools/perf/tests/parse-events.c if (strchr(ent->d_name, '.')) ent 1899 tools/perf/tests/parse-events.c snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name); ent 1907 tools/perf/tests/parse-events.c snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name); ent 678 tools/perf/util/auxtrace.c struct auxtrace_index_entry ent; ent 682 tools/perf/util/auxtrace.c ent.file_offset = auxtrace_index->entries[i].file_offset; ent 683 tools/perf/util/auxtrace.c ent.sz = auxtrace_index->entries[i].sz; ent 684 tools/perf/util/auxtrace.c if (writen(fd, &ent, sizeof(ent)) != sizeof(ent)) ent 715 tools/perf/util/auxtrace.c struct auxtrace_index_entry ent; ent 718 tools/perf/util/auxtrace.c if (readn(fd, &ent, sizeof(ent)) != sizeof(ent)) ent 728 
tools/perf/util/auxtrace.c bswap_64(ent.file_offset); ent 729 tools/perf/util/auxtrace.c auxtrace_index->entries[nr].sz = bswap_64(ent.sz); ent 731 tools/perf/util/auxtrace.c auxtrace_index->entries[nr].file_offset = ent.file_offset; ent 732 tools/perf/util/auxtrace.c auxtrace_index->entries[nr].sz = ent.sz; ent 768 tools/perf/util/auxtrace.c struct auxtrace_index_entry *ent) ent 771 tools/perf/util/auxtrace.c ent->file_offset, ent->sz); ent 778 tools/perf/util/auxtrace.c struct auxtrace_index_entry *ent; ent 787 tools/perf/util/auxtrace.c ent = &auxtrace_index->entries[i]; ent 790 tools/perf/util/auxtrace.c ent); ent 293 tools/perf/util/genelf_debug.c static ubyte get_special_opcode(struct debug_entry *ent, ent 303 tools/perf/util/genelf_debug.c temp = (ent->lineno - last_line) - default_debug_line_header.line_base; ent 311 tools/perf/util/genelf_debug.c delta_addr = (ent->addr - last_vma) / default_debug_line_header.minimum_instruction_length; ent 328 tools/perf/util/genelf_debug.c struct debug_entry *ent, size_t nr_entry, ent 352 tools/perf/util/genelf_debug.c for (i = 0; i < nr_entry; i++, ent = debug_entry_next(ent)) { ent 359 tools/perf/util/genelf_debug.c if (!cur_filename || strcmp(cur_filename, ent->name)) { ent 360 tools/perf/util/genelf_debug.c emit_lne_define_filename(be, ent->name); ent 361 tools/perf/util/genelf_debug.c cur_filename = ent->name; ent 366 tools/perf/util/genelf_debug.c special_opcode = get_special_opcode(ent, last_line, last_vma); ent 368 tools/perf/util/genelf_debug.c last_line = ent->lineno; ent 369 tools/perf/util/genelf_debug.c last_vma = ent->addr; ent 375 tools/perf/util/genelf_debug.c if (last_line != ent->lineno) { ent 376 tools/perf/util/genelf_debug.c emit_advance_lineno(be, ent->lineno - last_line); ent 377 tools/perf/util/genelf_debug.c last_line = ent->lineno; ent 383 tools/perf/util/genelf_debug.c if (last_vma != ent->addr) { ent 384 tools/perf/util/genelf_debug.c emit_advance_pc(be, ent->addr - last_vma); ent 385 tools/perf/util/genelf_debug.c last_vma = ent->addr; ent 398 tools/perf/util/genelf_debug.c struct debug_entry *ent, size_t nr_entry, ent 421 tools/perf/util/genelf_debug.c emit_lineno_info(be, ent, nr_entry, code_addr); ent 468 tools/perf/util/genelf_debug.c struct debug_entry *ent = debug; ent 472 tools/perf/util/genelf_debug.c ent->addr = ent->addr - code_addr; ent 473 tools/perf/util/genelf_debug.c ent = debug_entry_next(ent); ent 1207 tools/perf/util/header.c struct dirent *ent; ent 1211 tools/perf/util/header.c while ((ent = readdir(dir))) \ ent 1212 tools/perf/util/header.c if (strcmp(ent->d_name, ".") && \ ent 1213 tools/perf/util/header.c strcmp(ent->d_name, "..") && \ ent 1214 tools/perf/util/header.c sscanf(ent->d_name, "memory%u", &mem) == 1) ent 1262 tools/perf/util/header.c struct dirent *ent; ent 1277 tools/perf/util/header.c while (!ret && (ent = readdir(dir))) { ent 1281 tools/perf/util/header.c if (!strcmp(ent->d_name, ".") || ent 1282 tools/perf/util/header.c !strcmp(ent->d_name, "..")) ent 1285 tools/perf/util/header.c r = sscanf(ent->d_name, "node%u", &idx); ent 126 tools/perf/util/jitdump.h debug_entry_next(struct debug_entry *ent) ent 128 tools/perf/util/jitdump.h void *a = ent + 1; ent 129 tools/perf/util/jitdump.h size_t l = strlen(ent->name) + 1; ent 134 tools/perf/util/jitdump.h debug_entry_file(struct debug_entry *ent) ent 136 tools/perf/util/jitdump.h void *a = ent + 1; ent 2258 tools/perf/util/machine.c u8 *cpumode, int ent) ent 2262 tools/perf/util/machine.c while (--ent >= 0) { ent 2263 
tools/perf/util/machine.c u64 ip = chain->ips[ent]; ent 2370 tools/perf/util/parse-events.c struct probe_cache_entry *ent; ent 2392 tools/perf/util/parse-events.c list_for_each_entry(ent, &pcache->entries, node) { ent 2393 tools/perf/util/parse-events.c if (!ent->sdt) ent 2396 tools/perf/util/parse-events.c !strglobmatch(ent->pev.group, subsys_glob)) ent 2399 tools/perf/util/parse-events.c !strglobmatch(ent->pev.event, event_glob)) ent 2401 tools/perf/util/parse-events.c ret = asprintf(&buf, "%s:%s@%s", ent->pev.group, ent 2402 tools/perf/util/parse-events.c ent->pev.event, nd->s); ent 2529 tools/perf/util/probe-event.c struct str_node *ent; ent 2538 tools/perf/util/probe-event.c strlist__for_each_entry(ent, rawlist) { ent 2539 tools/perf/util/probe-event.c ret = parse_probe_trace_command(ent->s, &tev); ent 188 tools/perf/util/probe-file.c struct str_node *ent; ent 197 tools/perf/util/probe-file.c strlist__for_each_entry(ent, rawlist) { ent 198 tools/perf/util/probe-file.c ret = parse_probe_trace_command(ent->s, &tev); ent 254 tools/perf/util/probe-file.c static int __del_trace_probe_event(int fd, struct str_node *ent) ent 261 tools/perf/util/probe-file.c ret = e_snprintf(buf, 128, "-:%s", ent->s); ent 268 tools/perf/util/probe-file.c ent->s); ent 292 tools/perf/util/probe-file.c struct str_node *ent; ent 303 tools/perf/util/probe-file.c strlist__for_each_entry(ent, namelist) { ent 304 tools/perf/util/probe-file.c p = strchr(ent->s, ':'); ent 306 tools/perf/util/probe-file.c strfilter__compare(filter, ent->s)) { ent 307 tools/perf/util/probe-file.c strlist__add(plist, ent->s); ent 319 tools/perf/util/probe-file.c struct str_node *ent; ent 321 tools/perf/util/probe-file.c strlist__for_each_entry(ent, namelist) { ent 322 tools/perf/util/probe-file.c ret = __del_trace_probe_event(fd, ent); ent 131 tools/testing/selftests/gpio/gpio-mockup-chardev.c const struct dirent *ent; ent 150 tools/testing/selftests/gpio/gpio-mockup-chardev.c while (ent = readdir(dp), ent) { ent 151 tools/testing/selftests/gpio/gpio-mockup-chardev.c if (check_prefix(ent->d_name, "gpiochip")) { ent 152 tools/testing/selftests/gpio/gpio-mockup-chardev.c *ret = asprintf(&chrdev_name, "/dev/%s", ent->d_name); ent 25 tools/testing/selftests/vm/transhuge-stress.c #define PAGEMAP_PRESENT(ent) (((ent) & (1ull << 63)) != 0) ent 26 tools/testing/selftests/vm/transhuge-stress.c #define PAGEMAP_PFN(ent) ((ent) & ((1ull << 55) - 1)) ent 32 tools/testing/selftests/vm/transhuge-stress.c uint64_t ent[2]; ent 46 tools/testing/selftests/vm/transhuge-stress.c if (pread(pagemap_fd, ent, sizeof(ent), ent 47 tools/testing/selftests/vm/transhuge-stress.c (uintptr_t)ptr >> (PAGE_SHIFT - 3)) != sizeof(ent)) ent 50 tools/testing/selftests/vm/transhuge-stress.c if (PAGEMAP_PRESENT(ent[0]) && PAGEMAP_PRESENT(ent[1]) && ent 51 tools/testing/selftests/vm/transhuge-stress.c PAGEMAP_PFN(ent[0]) + 1 == PAGEMAP_PFN(ent[1]) && ent 52 tools/testing/selftests/vm/transhuge-stress.c !(PAGEMAP_PFN(ent[0]) & ((1 << (HPAGE_SHIFT - PAGE_SHIFT)) - 1))) ent 53 tools/testing/selftests/vm/transhuge-stress.c return PAGEMAP_PFN(ent[0]);
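The tools/gpio, tools/iio and tools/perf test entries above repeatedly bind `ent` to a `struct dirent` and walk a directory with `readdir()`, matching `ent->d_name` against a name prefix. As a reference point only, here is a minimal, self-contained sketch of that scanning idiom in plain C; the `/dev` path, the `"gpiochip"` prefix and the simplified `check_prefix()` helper are illustrative assumptions, not code copied from any of the files listed.

```c
/*
 * Minimal sketch of the readdir()-based scanning pattern seen in the
 * tools/gpio and tools/iio entries above: open a directory, walk its
 * struct dirent records through "ent", and act on names that match a
 * prefix.  Path and prefix are placeholders for illustration.
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the check_prefix() helpers in the listed tools. */
static int check_prefix(const char *name, const char *prefix)
{
	return strncmp(name, prefix, strlen(prefix)) == 0;
}

int main(void)
{
	const struct dirent *ent;
	DIR *dp;

	dp = opendir("/dev");	/* illustrative directory */
	if (!dp) {
		perror("opendir");
		return 1;
	}

	/* Comma-operator loop form used throughout the listed tools code. */
	while (ent = readdir(dp), ent) {
		if (check_prefix(ent->d_name, "gpiochip"))
			printf("%s\n", ent->d_name);
	}

	closedir(dp);
	return 0;
}
```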