entries 263 arch/arc/include/asm/arcregs.h unsigned int pad:16, entries:8, ver:8;
entries 265 arch/arc/include/asm/arcregs.h unsigned int ver:8, entries:8, pad:16;
entries 331 arch/arc/kernel/setup.c lpb.entries,
entries 179 arch/arc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = address;
entries 197 arch/arc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = address;
entries 86 arch/arm/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr;
entries 96 arch/arm/kernel/stacktrace.c trace->entries[trace->nr_entries++] = regs->ARM_pc;
entries 64 arch/arm/kernel/unwind.c int entries; /* number of entries left to interpret */
entries 214 arch/arm/kernel/unwind.c if (ctrl->entries <= 0) {
entries 223 arch/arm/kernel/unwind.c ctrl->entries--;
entries 342 arch/arm/kernel/unwind.c ctrl->entries = 0;
entries 420 arch/arm/kernel/unwind.c ctrl.entries = 1;
entries 423 arch/arm/kernel/unwind.c ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);
entries 432 arch/arm/kernel/unwind.c while (ctrl.entries > 0) {
entries 152 arch/arm64/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr;
entries 49 arch/csky/kernel/stacktrace.c trace->entries[trace->nr_entries++] = lpp;
entries 39 arch/hexagon/kernel/stacktrace.c trace->entries[trace->nr_entries++] = frame->rets;
entries 25 arch/ia64/kernel/stacktrace.c trace->entries[trace->nr_entries++] = ip;
entries 243 arch/microblaze/kernel/unwind.c trace->entries[trace->nr_entries++] = pc;
entries 390 arch/mips/alchemy/common/dbdma.c u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
entries 414 arch/mips/alchemy/common/dbdma.c desc_base = (u32)kmalloc_array(entries, sizeof(au1x_ddma_desc_t),
entries 425 arch/mips/alchemy/common/dbdma.c i = entries * sizeof(au1x_ddma_desc_t);
entries 563 arch/mips/alchemy/common/dbdma.c for (i = 0; i < entries; i++) {
entries 51 arch/mips/generic/yamon-dt.c unsigned int entries = 0;
entries 54 arch/mips/generic/yamon-dt.c if (entries >= max_entries) {
entries 67 arch/mips/generic/yamon-dt.c ++entries;
entries 72 arch/mips/generic/yamon-dt.c return entries;
entries 358 arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries);
entries 30 arch/mips/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr;
entries 58 arch/mips/kernel/stacktrace.c trace->entries[trace->nr_entries++] = pc;
entries 74 arch/mips/mti-malta/malta-dtshim.c unsigned entries;
entries 76 arch/mips/mti-malta/malta-dtshim.c entries = 1;
entries 108 arch/mips/mti-malta/malta-dtshim.c entries++;
entries 121 arch/mips/mti-malta/malta-dtshim.c entries++;
entries 127 arch/mips/mti-malta/malta-dtshim.c BUG_ON(entries > MAX_MEM_ARRAY_ENTRIES);
entries 128 arch/mips/mti-malta/malta-dtshim.c return entries;
entries 45 arch/nds32/kernel/stacktrace.c trace->entries[trace->nr_entries++] = lpp;
entries 38 arch/openrisc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr;
entries 64 arch/openrisc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr;
entries 75 arch/parisc/include/asm/pdc.h struct pdc_memory_table *tbl, unsigned long entries);
entries 1085 arch/parisc/kernel/firmware.c struct pdc_memory_table *tbl, unsigned long entries)
entries 1091 arch/parisc/kernel/firmware.c retval = mem_pdc_call(PDC_MEM, PDC_MEM_TABLE, __pa(pdc_result), __pa(pdc_result2), entries);
entries 1094 arch/parisc/kernel/firmware.c memcpy(tbl, pdc_result2, entries * sizeof(*tbl));
entries 1604 arch/parisc/kernel/firmware.c unsigned long flags, entries;
entries 1614 arch/parisc/kernel/firmware.c entries = min(pdc_result[0], max_entries);
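The stacktrace.c hits above all share one idiom: each walked frame address is appended to a caller-supplied entries[] buffer, with nr_entries doubling as the write cursor. A minimal standalone sketch of that pattern (the save_addr() helper is illustrative; struct stack_trace mirrors the kernel's shape):

```c
#include <stdio.h>

/* Illustrative mirror of the kernel's struct stack_trace. */
struct stack_trace {
	unsigned int nr_entries, max_entries;
	unsigned long *entries;
	unsigned int skip;	/* frames to skip before recording */
};

/* Append one return address, honoring skip and capacity. */
static int save_addr(struct stack_trace *trace, unsigned long addr)
{
	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}
	if (trace->nr_entries >= trace->max_entries)
		return -1;	/* buffer full: stop walking */
	trace->entries[trace->nr_entries++] = addr;
	return 0;
}

int main(void)
{
	unsigned long buf[8];
	struct stack_trace t = { 0, 8, buf, 1 };

	save_addr(&t, 0x1000);	/* skipped */
	save_addr(&t, 0x2000);
	save_addr(&t, 0x3000);
	printf("%u entries, first 0x%lx\n", t.nr_entries, t.entries[0]);
	return 0;
}
```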
entries 1615 arch/parisc/kernel/firmware.c pret->pdt_entries = entries;
entries 1616 arch/parisc/kernel/firmware.c pret->actual_count_bytes = entries * sizeof(unsigned long);
entries 1637 arch/parisc/kernel/firmware.c unsigned long flags, entries;
entries 1645 arch/parisc/kernel/firmware.c entries = min(pdc_result[0], count);
entries 1646 arch/parisc/kernel/firmware.c pret->actual_count_bytes = entries;
entries 1647 arch/parisc/kernel/firmware.c pret->pdt_entries = entries / sizeof(unsigned long);
entries 309 arch/parisc/kernel/inventory.c int entries;
entries 331 arch/parisc/kernel/inventory.c entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);
entries 333 arch/parisc/kernel/inventory.c if (entries > PAT_MAX_RANGES) {
entries 346 arch/parisc/kernel/inventory.c for (i = 0; i < entries; i++,mtbl_ptr++) {
entries 403 arch/parisc/kernel/inventory.c int entries;
entries 426 arch/parisc/kernel/inventory.c entries = (int)r_addr.entries_returned;
entries 431 arch/parisc/kernel/inventory.c for (i = 0; i < entries; i++,mtbl_ptr++) {
entries 148 arch/parisc/kernel/pdt.c unsigned long entries;
entries 172 arch/parisc/kernel/pdt.c entries = pdt_status.pdt_entries;
entries 173 arch/parisc/kernel/pdt.c if (WARN_ON(entries > MAX_PDT_ENTRIES))
entries 174 arch/parisc/kernel/pdt.c entries = pdt_status.pdt_entries = MAX_PDT_ENTRIES;
entries 185 arch/parisc/kernel/pdt.c if (entries == 0) {
entries 195 arch/parisc/kernel/pdt.c entries);
entries 29 arch/parisc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = info.ip;
entries 25 arch/powerpc/include/asm/mmu_context.h unsigned long ua, unsigned long entries,
entries 28 arch/powerpc/include/asm/mmu_context.h unsigned long entries, unsigned long dev_hpa,
entries 39 arch/powerpc/include/asm/mmu_context.h unsigned long ua, unsigned long entries);
entries 44 arch/powerpc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = ip;
entries 198 arch/powerpc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = ip;
entries 2274 arch/powerpc/kernel/traps.c struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
entries 2289 arch/powerpc/kernel/traps.c for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
entries 2290 arch/powerpc/kernel/traps.c d = debugfs_create_u32(entries[i].name, 0644, dir,
entries 2291 arch/powerpc/kernel/traps.c (u32 *)&entries[i].val.counter);
entries 1053 arch/powerpc/kvm/book3s.c struct kvm_kernel_irq_routing_entry *entries, int gsi)
entries 1055 arch/powerpc/kvm/book3s.c entries->gsi = gsi;
entries 1056 arch/powerpc/kvm/book3s.c entries->type = KVM_IRQ_ROUTING_IRQCHIP;
entries 1057 arch/powerpc/kvm/book3s.c entries->set = kvmppc_book3s_set_irq;
entries 1058 arch/powerpc/kvm/book3s.c entries->irqchip.irqchip = 0;
entries 1059 arch/powerpc/kvm/book3s.c entries->irqchip.pin = gsi;
entries 54 arch/powerpc/kvm/e500.h int entries, ways, sets;
entries 73 arch/powerpc/kvm/e500_mmu.c esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
entries 83 arch/powerpc/kvm/e500_mmu.c int size = vcpu_e500->gtlb_params[tlbsel].entries;
entries 153 arch/powerpc/kvm/e500_mmu.c int size = vcpu_e500->gtlb_params[1].entries;
entries 233 arch/powerpc/kvm/e500_mmu.c for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
entries 236 arch/powerpc/kvm/e500_mmu.c for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
entries 258 arch/powerpc/kvm/e500_mmu.c for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
entries 282 arch/powerpc/kvm/e500_mmu.c for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
entries 828 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
entries 829 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];
entries 879 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[0] |= params[0].entries;
entries 884 arch/powerpc/kvm/e500_mmu.c vcpu->arch.tlbcfg[1] |= params[1].entries;
entries 909 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
entries 910 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;
entries 929 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_priv[0] = kcalloc(vcpu_e500->gtlb_params[0].entries,
entries 935 arch/powerpc/kvm/e500_mmu.c vcpu_e500->gtlb_priv[1] = kcalloc(vcpu_e500->gtlb_params[1].entries,
entries 941 arch/powerpc/kvm/e500_mmu.c vcpu_e500->g2h_tlb1_map = kcalloc(vcpu_e500->gtlb_params[1].entries,
entries 38 arch/powerpc/kvm/e500_mmu_host.c #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
entries 45 arch/powerpc/kvm/e500_mmu_host.c return host_tlb_params[1].entries - tlbcam_index - 1;
entries 275 arch/powerpc/kvm/e500_mmu_host.c sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
entries 278 arch/powerpc/kvm/e500_mmu_host.c sizeof(unsigned int) * host_tlb_params[1].entries);
entries 287 arch/powerpc/kvm/e500_mmu_host.c for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
entries 768 arch/powerpc/kvm/e500_mmu_host.c host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
entries 769 arch/powerpc/kvm/e500_mmu_host.c host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
entries 776 arch/powerpc/kvm/e500_mmu_host.c if (host_tlb_params[0].entries == 0 ||
entries 777 arch/powerpc/kvm/e500_mmu_host.c host_tlb_params[1].entries == 0) {
entries 784 arch/powerpc/kvm/e500_mmu_host.c host_tlb_params[1].ways = host_tlb_params[1].entries;
entries 786 arch/powerpc/kvm/e500_mmu_host.c if (!is_power_of_2(host_tlb_params[0].entries) ||
entries 788 arch/powerpc/kvm/e500_mmu_host.c host_tlb_params[0].entries < host_tlb_params[0].ways ||
entries 791 arch/powerpc/kvm/e500_mmu_host.c __func__, host_tlb_params[0].entries,
entries 797 arch/powerpc/kvm/e500_mmu_host.c host_tlb_params[0].entries / host_tlb_params[0].ways;
entries 799 arch/powerpc/kvm/e500_mmu_host.c vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries,
entries 34 arch/powerpc/mm/book3s64/iommu_api.c u64 entries; /* number of entries in hpas/hpages[] */
entries 57 arch/powerpc/mm/book3s64/iommu_api.c unsigned long entries, unsigned long dev_hpa,
entries 66 arch/powerpc/mm/book3s64/iommu_api.c ret = account_locked_vm(mm, entries, true);
entries 70 arch/powerpc/mm/book3s64/iommu_api.c locked_entries = entries;
entries 80 arch/powerpc/mm/book3s64/iommu_api.c mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
entries 91 arch/powerpc/mm/book3s64/iommu_api.c mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
entries 92 arch/powerpc/mm/book3s64/iommu_api.c mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
entries 102 arch/powerpc/mm/book3s64/iommu_api.c chunk = min(chunk, entries);
entries 103 arch/powerpc/mm/book3s64/iommu_api.c for (entry = 0; entry < entries; entry += chunk) {
entries 104 arch/powerpc/mm/book3s64/iommu_api.c unsigned long n = min(entries - entry, chunk);
entries 118 arch/powerpc/mm/book3s64/iommu_api.c if (pinned != entries) {
entries 125 arch/powerpc/mm/book3s64/iommu_api.c for (i = 0; i < entries; ++i) {
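The e500 KVM hits work because gtlb_params[].entries is validated to be a power of two (see the is_power_of_2() check at e500_mmu_host.c:786), so an entry selector can be wrapped into range with a mask instead of a division, as at e500_mmu.c:73. A small standalone sketch of that invariant, with is_power_of_2() reimplemented here for illustration:

```c
#include <stdio.h>
#include <stdbool.h>

/* True iff n is a nonzero power of two (mirrors the kernel helper). */
static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long entries = 512;	/* e.g. a TLB0 with 512 entries */

	/* The e500 code rejects configurations that break the mask trick. */
	if (!is_power_of_2(entries)) {
		fprintf(stderr, "entries must be a power of 2\n");
		return 1;
	}

	/* With a power-of-two size, "esel % entries" reduces to a mask. */
	for (unsigned long esel = 510; esel < 515; esel++)
		printf("esel %lu -> slot %lu\n", esel, esel & (entries - 1));
	return 0;
}
```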
entries 146 arch/powerpc/mm/book3s64/iommu_api.c mem->entries = entries;
entries 152 arch/powerpc/mm/book3s64/iommu_api.c if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
entries 154 arch/powerpc/mm/book3s64/iommu_api.c (mem2->entries << PAGE_SHIFT)))) {
entries 183 arch/powerpc/mm/book3s64/iommu_api.c long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
entries 186 arch/powerpc/mm/book3s64/iommu_api.c return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA,
entries 192 arch/powerpc/mm/book3s64/iommu_api.c unsigned long entries, unsigned long dev_hpa,
entries 195 arch/powerpc/mm/book3s64/iommu_api.c return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem);
entries 207 arch/powerpc/mm/book3s64/iommu_api.c for (i = 0; i < mem->entries; ++i) {
entries 270 arch/powerpc/mm/book3s64/iommu_api.c unlock_entries = mem->entries;
entries 292 arch/powerpc/mm/book3s64/iommu_api.c (mem->entries << PAGE_SHIFT))) {
entries 311 arch/powerpc/mm/book3s64/iommu_api.c (mem->entries << PAGE_SHIFT))) {
entries 321 arch/powerpc/mm/book3s64/iommu_api.c unsigned long ua, unsigned long entries)
entries 328 arch/powerpc/mm/book3s64/iommu_api.c if ((mem->ua == ua) && (mem->entries == entries)) {
entries 347 arch/powerpc/mm/book3s64/iommu_api.c if (entry >= mem->entries)
entries 371 arch/powerpc/mm/book3s64/iommu_api.c if (entry >= mem->entries)
entries 425 arch/powerpc/mm/book3s64/iommu_api.c end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
entries 235 arch/powerpc/platforms/powernv/pci-ioda-tce.c unsigned int entries = 1UL << (shift - 3);
entries 247 arch/powerpc/platforms/powernv/pci-ioda-tce.c for (i = 0; i < entries; ++i) {
entries 980 arch/powerpc/platforms/pseries/hotplug-memory.c u32 entries;
entries 1003 arch/powerpc/platforms/pseries/hotplug-memory.c entries = be32_to_cpu(*p++);
entries 1010 arch/powerpc/platforms/pseries/hotplug-memory.c for (i = 0; i < entries; i++) {
entries 137 arch/powerpc/sysdev/fsl_rio.h void *dev_id, int mbox, int entries);
entries 140 arch/powerpc/sysdev/fsl_rio.h void *dev_id, int mbox, int entries);
entries 720 arch/powerpc/sysdev/fsl_rmu.c fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
entries 726 arch/powerpc/sysdev/fsl_rmu.c if ((entries < RIO_MIN_TX_RING_SIZE) ||
entries 727 arch/powerpc/sysdev/fsl_rmu.c (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
entries 734 arch/powerpc/sysdev/fsl_rmu.c rmu->msg_tx_ring.size = entries;
entries 793 arch/powerpc/sysdev/fsl_rmu.c ((get_bitmask_order(entries) - 2) << 12));
entries 852 arch/powerpc/sysdev/fsl_rmu.c fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
entries 858 arch/powerpc/sysdev/fsl_rmu.c if ((entries < RIO_MIN_RX_RING_SIZE) ||
entries 859 arch/powerpc/sysdev/fsl_rmu.c (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
entries 866 arch/powerpc/sysdev/fsl_rmu.c rmu->msg_rx_ring.size = entries;
entries 907 arch/powerpc/sysdev/fsl_rmu.c setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
entries 145 arch/riscv/kernel/stacktrace.c trace->entries[trace->nr_entries++] = pc;
entries 20 arch/s390/boot/ipl_report.c for (entry = rb->entries; \
entries 38 arch/s390/boot/mem_detect.c return &mem_detect.entries[n];
entries 33 arch/s390/include/asm/mem_detect.h struct mem_detect_block entries[MEM_INLINED_ENTRIES];
entries 50 arch/s390/include/asm/mem_detect.h *start = (unsigned long)mem_detect.entries[n].start;
entries 51 arch/s390/include/asm/mem_detect.h *end = (unsigned long)mem_detect.entries[n].end;
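fsl_rmu.c accepts a mailbox ring size only if it lies in the supported range and is a power of two, then programs the hardware with a log2-derived size code via get_bitmask_order(entries) - 2. A hedged sketch (get_bitmask_order() is reimplemented here; reading the result as a hardware size code is inferred from the register writes at the call sites above):

```c
#include <stdio.h>

/* Mirrors the kernel's get_bitmask_order(): one past the highest set bit. */
static int get_bitmask_order(unsigned int u)
{
	int order = 0;

	while (u) {
		order++;
		u >>= 1;
	}
	return order;
}

int main(void)
{
	/* For a power-of-two ring, get_bitmask_order(n) - 2 == log2(n) - 1,
	 * which appears to be the size field written into the IMR/OMR
	 * registers above (shifted into place with << 12). */
	for (unsigned int entries = 4; entries <= 2048; entries <<= 1)
		printf("entries %4u -> size code %d\n",
		       entries, get_bitmask_order(entries) - 2);
	return 0;
}
```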
entries 131 arch/s390/include/uapi/asm/ipl.h struct ipl_rb_certificate_entry entries[];
entries 151 arch/s390/include/uapi/asm/ipl.h struct ipl_rb_component_entry entries[];
entries 107 arch/s390/include/uapi/asm/qeth.h char *entries;
entries 1124 arch/s390/kvm/priv.c int r1, r2, nappended, entries;
entries 1136 arch/s390/kvm/priv.c entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
entries 1170 arch/s390/kvm/priv.c cbrlo[entries] = gfn << PAGE_SHIFT;
entries 1187 arch/s390/kvm/priv.c int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
entries 1192 arch/s390/kvm/priv.c VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
entries 1245 arch/s390/kvm/priv.c entries += i;
entries 1250 arch/s390/kvm/priv.c for (i = 0; i < entries; ++i)
entries 326 arch/s390/pci/pci_clp.c int entries, i, rc;
entries 349 arch/s390/pci/pci_clp.c entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
entries 353 arch/s390/pci/pci_clp.c for (i = 0; i < entries; i++)
entries 70 arch/sh/include/asm/processor.h unsigned int entries;
entries 39 arch/sh/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr;
entries 72 arch/sh/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr;
entries 23 arch/sh/mm/tlb-sh5.c cpu_data->dtlb.entries = 64;
entries 30 arch/sh/mm/tlb-sh5.c ((cpu_data->dtlb.entries - 1) *
entries 34 arch/sh/mm/tlb-sh5.c cpu_data->itlb.entries = 64;
entries 40 arch/sh/mm/tlb-sh5.c ((cpu_data->itlb.entries - 1) *
entries 58 arch/sparc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = pc;
entries 68 arch/sparc/kernel/stacktrace.c trace->entries[trace->nr_entries++] = pc;
entries 53 arch/um/kernel/stacktrace.c trace->entries[trace->nr_entries++] = address;
entries 90 arch/unicore32/kernel/stacktrace.c trace->entries[trace->nr_entries++] = addr;
entries 2411 arch/x86/events/core.c desc = &ldt->entries[idx];
entries 130 arch/x86/include/asm/desc.h static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
entries 134 arch/x86/include/asm/desc.h static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
entries 196 arch/x86/include/asm/desc.h static inline void native_set_ldt(const void *addr, unsigned int entries)
entries 198 arch/x86/include/asm/desc.h if (likely(entries == 0))
entries 205 arch/x86/include/asm/desc.h entries * LDT_ENTRY_SIZE - 1);
entries 88 arch/x86/include/asm/e820/types.h struct e820_entry entries[E820_MAX_ENTRIES];
entries 45 arch/x86/include/asm/io_apic.h entries : 8,
entries 55 arch/x86/include/asm/mmu_context.h struct desc_struct *entries;
entries 142 arch/x86/include/asm/mmu_context.h set_ldt(ldt->entries, ldt->nr_entries);
entries 237 arch/x86/include/asm/paravirt.h static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
entries 239 arch/x86/include/asm/paravirt.h PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
entries 242 arch/x86/include/asm/paravirt.h static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
entries 244 arch/x86/include/asm/paravirt.h PVOP_VCALL2(cpu.free_ldt, ldt, entries);
entries 259 arch/x86/include/asm/paravirt.h static inline void set_ldt(const void *addr, unsigned entries)
entries 261 arch/x86/include/asm/paravirt.h PVOP_VCALL2(cpu.set_ldt, addr, entries);
entries 126 arch/x86/include/asm/paravirt_types.h void (*set_ldt)(const void *desc, unsigned entries);
entries 138 arch/x86/include/asm/paravirt_types.h void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
entries 139 arch/x86/include/asm/paravirt_types.h void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
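The LDT hits rely on x86 descriptor-table limits being inclusive byte offsets: a table of n 8-byte entries is described by the limit n * LDT_ENTRY_SIZE - 1, and the empty table is special-cased rather than computed (the entries == 0 branch at desc.h:198). A minimal sketch of that arithmetic:

```c
#include <stdio.h>

#define LDT_ENTRY_SIZE 8	/* each x86 descriptor is 8 bytes */

/* An x86 descriptor-table limit is an inclusive byte offset, so a
 * table of n entries gets the limit n * 8 - 1; n == 0 must be handled
 * separately, as native_set_ldt() does above. */
static unsigned int ldt_limit(unsigned int entries)
{
	return entries * LDT_ENTRY_SIZE - 1;
}

int main(void)
{
	printf("1 entry   -> limit %u\n", ldt_limit(1));	/* 7 */
	printf("16 entries -> limit %u\n", ldt_limit(16));	/* 127 */
	return 0;
}
```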
entries 120 arch/x86/include/asm/setup.h #define RESERVE_BRK_ARRAY(type, name, entries) \
entries 122 arch/x86/include/asm/setup.h RESERVE_BRK(name, sizeof(type) * entries)
entries 272 arch/x86/include/asm/xen/hypercall.h HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
entries 274 arch/x86/include/asm/xen/hypercall.h return _hypercall2(int, set_gdt, frame_list, entries);
entries 186 arch/x86/include/uapi/asm/kvm.h struct kvm_msr_entry entries[0];
entries 209 arch/x86/include/uapi/asm/kvm.h struct kvm_cpuid_entry entries[0];
entries 231 arch/x86/include/uapi/asm/kvm.h struct kvm_cpuid_entry2 entries[0];
entries 1288 arch/x86/kernel/apic/io_apic.c reg_01.bits.entries);
entries 1316 arch/x86/kernel/apic/io_apic.c io_apic_print_entries(ioapic_idx, reg_01.bits.entries);
entries 2435 arch/x86/kernel/apic/io_apic.c return reg_01.bits.entries + 1;
entries 2798 arch/x86/kernel/apic/io_apic.c int idx, ioapic, entries;
entries 2836 arch/x86/kernel/apic/io_apic.c entries = io_apic_get_redir_entries(idx);
entries 2837 arch/x86/kernel/apic/io_apic.c gsi_end = gsi_base + entries - 1;
entries 2877 arch/x86/kernel/apic/io_apic.c ioapics[idx].nr_registers = entries;
entries 34 arch/x86/kernel/cpu/cpu.h unsigned int entries;
entries 871 arch/x86/kernel/cpu/intel.c if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
entries 872 arch/x86/kernel/cpu/intel.c tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
entries 873 arch/x86/kernel/cpu/intel.c if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
entries 874 arch/x86/kernel/cpu/intel.c tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
entries 877 arch/x86/kernel/cpu/intel.c if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
entries 878 arch/x86/kernel/cpu/intel.c tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
entries 879 arch/x86/kernel/cpu/intel.c if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
entries 880 arch/x86/kernel/cpu/intel.c tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
entries 881 arch/x86/kernel/cpu/intel.c if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
entries 882 arch/x86/kernel/cpu/intel.c tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
entries 883 arch/x86/kernel/cpu/intel.c if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
entries 884 arch/x86/kernel/cpu/intel.c tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
entries 885 arch/x86/kernel/cpu/intel.c if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
entries 886 arch/x86/kernel/cpu/intel.c tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
entries 887 arch/x86/kernel/cpu/intel.c if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
entries 888 arch/x86/kernel/cpu/intel.c tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
entries 891 arch/x86/kernel/cpu/intel.c if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
entries 892 arch/x86/kernel/cpu/intel.c tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
entries 893 arch/x86/kernel/cpu/intel.c if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
entries 894 arch/x86/kernel/cpu/intel.c tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
entries 895 arch/x86/kernel/cpu/intel.c if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
entries 896 arch/x86/kernel/cpu/intel.c tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
entries 899 arch/x86/kernel/cpu/intel.c if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
entries 900 arch/x86/kernel/cpu/intel.c tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
entries 903 arch/x86/kernel/cpu/intel.c if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
entries 904 arch/x86/kernel/cpu/intel.c tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
entries 907 arch/x86/kernel/cpu/intel.c if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
entries 908 arch/x86/kernel/cpu/intel.c tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
entries 909 arch/x86/kernel/cpu/intel.c if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
entries 910 arch/x86/kernel/cpu/intel.c tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
entries 914 arch/x86/kernel/cpu/intel.c if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
entries 915 arch/x86/kernel/cpu/intel.c tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
entries 919 arch/x86/kernel/cpu/intel.c if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
entries 920 arch/x86/kernel/cpu/intel.c tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
entries 924 arch/x86/kernel/cpu/intel.c if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
entries 925 arch/x86/kernel/cpu/intel.c tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
entries 926 arch/x86/kernel/cpu/intel.c if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
entries 927 arch/x86/kernel/cpu/intel.c tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
entries 930 arch/x86/kernel/cpu/intel.c if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
entries 931 arch/x86/kernel/cpu/intel.c tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
entries 932 arch/x86/kernel/cpu/intel.c if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
entries 933 arch/x86/kernel/cpu/intel.c tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
entries 936 arch/x86/kernel/cpu/intel.c if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
entries 937 arch/x86/kernel/cpu/intel.c tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
entries 83 arch/x86/kernel/e820.c struct e820_entry *entry = &table->entries[i];
entries 118 arch/x86/kernel/e820.c struct e820_entry *entry = &e820_table->entries[i];
entries 170 arch/x86/kernel/e820.c if (x >= ARRAY_SIZE(table->entries)) {
entries 176 arch/x86/kernel/e820.c table->entries[x].addr = start;
entries 177 arch/x86/kernel/e820.c table->entries[x].size = size;
entries 178 arch/x86/kernel/e820.c table->entries[x].type = type;
entries 209 arch/x86/kernel/e820.c e820_table->entries[i].addr,
entries 210 arch/x86/kernel/e820.c e820_table->entries[i].addr + e820_table->entries[i].size - 1);
entries 212 arch/x86/kernel/e820.c e820_print_type(e820_table->entries[i].type);
entries 309 arch/x86/kernel/e820.c struct e820_entry *entries = table->entries;
entries 310 arch/x86/kernel/e820.c u32 max_nr_entries = ARRAY_SIZE(table->entries);
entries 324 arch/x86/kernel/e820.c if (entries[i].addr + entries[i].size < entries[i].addr)
entries 338 arch/x86/kernel/e820.c if (entries[i].size != 0) {
entries 339 arch/x86/kernel/e820.c change_point[chg_idx]->addr = entries[i].addr;
entries 340 arch/x86/kernel/e820.c change_point[chg_idx++]->entry = &entries[i];
entries 341 arch/x86/kernel/e820.c change_point[chg_idx]->addr = entries[i].addr + entries[i].size;
entries 342 arch/x86/kernel/e820.c change_point[chg_idx++]->entry = &entries[i];
entries 401 arch/x86/kernel/e820.c memcpy(entries, new_entries, new_nr_entries*sizeof(*entries));
entries 407 arch/x86/kernel/e820.c static int __init __append_e820_table(struct boot_e820_entry *entries, u32 nr_entries)
entries 409 arch/x86/kernel/e820.c struct boot_e820_entry *entry = entries;
entries 438 arch/x86/kernel/e820.c static int __init append_e820_table(struct boot_e820_entry *entries, u32 nr_entries)
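The long run of intel.c hits is one table-driven pattern: every CPUID TLB descriptor contributes an entry count, and each per-page-size capacity keeps the maximum seen across all descriptors. A reduced sketch (the descriptor IDs and counts below are hypothetical):

```c
#include <stdio.h>

/* Illustrative reduction of the intel_tlb_table[] update loop. */
struct tlb_desc {
	unsigned char id;
	unsigned int entries;
};

static const struct tlb_desc tlb_table[] = {
	{ 0x03, 64 },	/* hypothetical: data TLB, 4K pages, 64 entries */
	{ 0x63, 32 },	/* hypothetical: data TLB, 4K pages, 32 entries */
};

int main(void)
{
	unsigned int tlb_lld_4k = 0;

	/* Keep the maximum entry count reported by any descriptor. */
	for (unsigned int k = 0; k < sizeof(tlb_table) / sizeof(tlb_table[0]); k++)
		if (tlb_lld_4k < tlb_table[k].entries)
			tlb_lld_4k = tlb_table[k].entries;
	printf("4K data TLB entries: %u\n", tlb_lld_4k);	/* 64 */
	return 0;
}
```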
entries 444 arch/x86/kernel/e820.c return __append_e820_table(entries, nr_entries);
entries 467 arch/x86/kernel/e820.c struct e820_entry *entry = &table->entries[i];
entries 542 arch/x86/kernel/e820.c struct e820_entry *entry = &e820_table->entries[i];
entries 613 arch/x86/kernel/e820.c unsigned long long start = e820_table->entries[i].addr;
entries 614 arch/x86/kernel/e820.c unsigned long long end = start + e820_table->entries[i].size;
entries 687 arch/x86/kernel/e820.c size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table->nr_entries;
entries 692 arch/x86/kernel/e820.c size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_kexec->nr_entries;
entries 697 arch/x86/kernel/e820.c size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_firmware->nr_entries;
entries 711 arch/x86/kernel/e820.c int entries;
entries 716 arch/x86/kernel/e820.c entries = sdata->len / sizeof(*extmap);
entries 719 arch/x86/kernel/e820.c __append_e820_table(extmap, entries);
entries 744 arch/x86/kernel/e820.c struct e820_entry *entry = &e820_table->entries[i];
entries 769 arch/x86/kernel/e820.c struct e820_entry *entry = &e820_table->entries[i];
entries 822 arch/x86/kernel/e820.c struct e820_entry *entry = &e820_table->entries[i];
entries 1119 arch/x86/kernel/e820.c struct e820_entry *entry = e820_table->entries + i;
entries 1146 arch/x86/kernel/e820.c struct e820_entry *entry = e820_table_firmware->entries + i;
entries 1190 arch/x86/kernel/e820.c struct e820_entry *entry = &e820_table->entries[i];
entries 1282 arch/x86/kernel/e820.c struct e820_entry *entry = &e820_table->entries[i];
entries 108 arch/x86/kernel/kexec-bzimage64.c memcpy(&params->e820_table, &e820_table_kexec->entries, nr_e820_entries*sizeof(struct e820_entry));
entries 87 arch/x86/kernel/ldt.c new_ldt->entries = vzalloc(alloc_size);
entries 89 arch/x86/kernel/ldt.c new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL);
entries 91 arch/x86/kernel/ldt.c if (!new_ldt->entries) {
entries 223 arch/x86/kernel/ldt.c is_vmalloc = is_vmalloc_addr(ldt->entries);
entries 229 arch/x86/kernel/ldt.c const void *src = (char *)ldt->entries + offset;
entries 326 arch/x86/kernel/ldt.c paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
entries 347 arch/x86/kernel/ldt.c paravirt_free_ldt(ldt->entries, ldt->nr_entries);
entries 349 arch/x86/kernel/ldt.c vfree_atomic(ldt->entries);
entries 351 arch/x86/kernel/ldt.c free_page((unsigned long)ldt->entries);
entries 377 arch/x86/kernel/ldt.c memcpy(new_ldt->entries, old_mm->context.ldt->entries,
entries 430 arch/x86/kernel/ldt.c if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
entries 518 arch/x86/kernel/ldt.c memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);
entries 520 arch/x86/kernel/ldt.c new_ldt->entries[ldt_info.entry_number] = ldt;
entries 322 arch/x86/kernel/process_64.c base = get_desc_base(ldt->entries + idx);
entries 32 arch/x86/kernel/resource.c entry = &e820_table->entries[i];
entries 41 arch/x86/kernel/step.c desc = &child->mm->context.ldt->entries[seg];
entries 194 arch/x86/kernel/tboot.c if ((e820_table->entries[i].type != E820_TYPE_RAM)
entries 195 arch/x86/kernel/tboot.c && (e820_table->entries[i].type != E820_TYPE_RESERVED_KERN))
entries 198 arch/x86/kernel/tboot.c add_mac_region(e820_table->entries[i].addr, e820_table->entries[i].size);
entries 200 arch/x86/kvm/cpuid.c struct kvm_cpuid_entry __user *entries)
entries 216 arch/x86/kvm/cpuid.c if (copy_from_user(cpuid_entries, entries,
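The e820 hits at e820.c:170-178 show the table's append discipline: a fixed entries[] array plus an nr_entries fill count, with the add refused when the array is full. A compact sketch of that shape (types simplified for the sketch):

```c
#include <stdio.h>
#include <stdint.h>

#define E820_MAX 8	/* small stand-in for E820_MAX_ENTRIES */

/* Illustrative shapes of e820_entry/e820_table. */
struct e820_entry { uint64_t addr, size; int type; };
struct e820_table {
	uint32_t nr_entries;
	struct e820_entry entries[E820_MAX];
};

static int e820_range_add(struct e820_table *t, uint64_t start,
			  uint64_t size, int type)
{
	uint32_t x = t->nr_entries;

	if (x >= sizeof(t->entries) / sizeof(t->entries[0])) {
		fprintf(stderr, "error: too many entries\n");
		return -1;
	}
	t->entries[x].addr = start;
	t->entries[x].size = size;
	t->entries[x].type = type;
	t->nr_entries++;
	return 0;
}

int main(void)
{
	struct e820_table t = { 0 };

	e820_range_add(&t, 0x0, 0x9fc00, 1);		/* type 1 = usable RAM */
	e820_range_add(&t, 0x100000, 0x3ff00000, 1);
	printf("%u entries\n", t.nr_entries);
	return 0;
}
```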
entries 245 arch/x86/kvm/cpuid.c struct kvm_cpuid_entry2 __user *entries)
entries 253 arch/x86/kvm/cpuid.c if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
entries 266 arch/x86/kvm/cpuid.c struct kvm_cpuid_entry2 __user *entries)
entries 274 arch/x86/kvm/cpuid.c if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
entries 838 arch/x86/kvm/cpuid.c static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
entries 856 arch/x86/kvm/cpuid.c if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
entries 866 arch/x86/kvm/cpuid.c struct kvm_cpuid_entry2 __user *entries,
entries 884 arch/x86/kvm/cpuid.c if (sanity_check_entries(entries, cpuid->nent, type))
entries 916 arch/x86/kvm/cpuid.c if (copy_to_user(entries, cpuid_entries,
entries 14 arch/x86/kvm/cpuid.h struct kvm_cpuid_entry2 __user *entries,
entries 18 arch/x86/kvm/cpuid.h struct kvm_cpuid_entry __user *entries);
entries 21 arch/x86/kvm/cpuid.h struct kvm_cpuid_entry2 __user *entries);
entries 24 arch/x86/kvm/cpuid.h struct kvm_cpuid_entry2 __user *entries);
entries 1787 arch/x86/kvm/hyperv.c struct kvm_cpuid_entry2 __user *entries)
entries 1909 arch/x86/kvm/hyperv.c if (copy_to_user(entries, cpuid_entries,
entries 98 arch/x86/kvm/hyperv.h struct kvm_cpuid_entry2 __user *entries);
entries 3200 arch/x86/kvm/x86.c struct kvm_msr_entry *entries,
entries 3207 arch/x86/kvm/x86.c if (do_msr(vcpu, entries[i].index, &entries[i].data))
entries 3224 arch/x86/kvm/x86.c struct kvm_msr_entry *entries;
entries 3237 arch/x86/kvm/x86.c entries = memdup_user(user_msrs->entries, size);
entries 3238 arch/x86/kvm/x86.c if (IS_ERR(entries)) {
entries 3239 arch/x86/kvm/x86.c r = PTR_ERR(entries);
entries 3243 arch/x86/kvm/x86.c r = n = __msr_io(vcpu, &msrs, entries, do_msr);
entries 3248 arch/x86/kvm/x86.c if (writeback && copy_to_user(user_msrs->entries, entries, size))
entries 3254 arch/x86/kvm/x86.c kfree(entries);
entries 3432 arch/x86/kvm/x86.c r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
entries 4246 arch/x86/kvm/x86.c r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
entries 4257 arch/x86/kvm/x86.c cpuid_arg->entries);
entries 4268 arch/x86/kvm/x86.c cpuid_arg->entries);
entries 4534 arch/x86/kvm/x86.c cpuid_arg->entries);
entries 589 arch/x86/lib/insn-eval.c *out = ldt->entries[sel];
entries 32 arch/x86/math-emu/fpu_system.h ret = current->mm->context.ldt->entries[seg];
entries 245 arch/x86/mm/mem_encrypt_identity.c unsigned long entries = 0, tables = 0;
entries 262 arch/x86/mm/mem_encrypt_identity.c entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
entries 263 arch/x86/mm/mem_encrypt_identity.c entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
entries 264 arch/x86/mm/mem_encrypt_identity.c entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
entries 265 arch/x86/mm/mem_encrypt_identity.c entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;
entries 273 arch/x86/mm/mem_encrypt_identity.c tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
entries 274 arch/x86/mm/mem_encrypt_identity.c tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
entries 275 arch/x86/mm/mem_encrypt_identity.c tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;
entries 277 arch/x86/mm/mem_encrypt_identity.c return entries + tables;
entries 870 arch/x86/pci/irq.c int entries = (rt->size - sizeof(struct irq_routing_table)) /
entries 874 arch/x86/pci/irq.c for (info = rt->slots; entries--; info++)
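The KVM ioctl hits around x86.c:3224-3254 follow one flow: read a count-bearing header from userspace, bound-check the count, duplicate the flexible entries[] array (memdup_user), process it, and optionally copy results back. A userspace sketch with malloc/memcpy standing in for memdup_user (KVM_MAX_MSR_ENTRIES here is a hypothetical bound; the real code uses its own limit):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Illustrative shapes of kvm_msr_entry and the ioctl header. */
struct msr_entry { uint32_t index; uint64_t data; };
struct msrs_hdr { uint32_t nmsrs; struct msr_entry entries[]; };

#define KVM_MAX_MSR_ENTRIES 256	/* hypothetical bound for the sketch */

static int msr_io(const struct msrs_hdr *user_msrs)
{
	struct msr_entry *entries;
	size_t size;

	if (user_msrs->nmsrs > KVM_MAX_MSR_ENTRIES)
		return -1;	/* reject oversized requests up front */
	size = sizeof(*entries) * user_msrs->nmsrs;
	entries = malloc(size);	/* stands in for memdup_user() */
	if (!entries)
		return -1;
	memcpy(entries, user_msrs->entries, size);
	for (uint32_t i = 0; i < user_msrs->nmsrs; i++)
		printf("msr 0x%x = 0x%llx\n", entries[i].index,
		       (unsigned long long)entries[i].data);
	free(entries);
	return 0;
}

int main(void)
{
	struct msrs_hdr *req = malloc(sizeof(*req) + 2 * sizeof(struct msr_entry));

	if (!req)
		return 1;
	req->nmsrs = 2;
	req->entries[0] = (struct msr_entry){ 0x10, 123 };
	req->entries[1] = (struct msr_entry){ 0x1b, 456 };
	msr_io(req);
	free(req);
	return 0;
}
```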
entries 571 arch/x86/pci/mmconfig-shared.c int entries;
entries 580 arch/x86/pci/mmconfig-shared.c entries = 0;
entries 583 arch/x86/pci/mmconfig-shared.c entries++;
entries 586 arch/x86/pci/mmconfig-shared.c if (entries == 0) {
entries 592 arch/x86/pci/mmconfig-shared.c for (i = 0; i < entries; i++) {
entries 95 arch/x86/power/hibernate.c size = offsetof(struct e820_table, entries) +
entries 33 arch/x86/um/asm/mm_context.h struct ldt_entry entries[LDT_DIRECT_ENTRIES];
entries 71 arch/x86/um/ldt.c if (copy_to_user(ptr, ldt->u.entries, size))
entries 159 arch/x86/um/ldt.c memcpy(&entry0, ldt->u.entries,
entries 173 arch/x86/um/ldt.c memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
entries 183 arch/x86/um/ldt.c ldt_p = ldt->u.entries + ldt_info.entry_number;
entries 337 arch/x86/um/ldt.c memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
entries 338 arch/x86/um/ldt.c sizeof(new_mm->arch.ldt.u.entries));
entries 404 arch/x86/xen/enlighten_pv.c static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
entries 420 arch/x86/xen/enlighten_pv.c for (i = 0; i < entries; i += entries_per_page)
entries 424 arch/x86/xen/enlighten_pv.c static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
entries 429 arch/x86/xen/enlighten_pv.c for (i = 0; i < entries; i += entries_per_page)
entries 433 arch/x86/xen/enlighten_pv.c static void xen_set_ldt(const void *addr, unsigned entries)
entries 438 arch/x86/xen/enlighten_pv.c trace_xen_cpu_set_ldt(addr, entries);
entries 443 arch/x86/xen/enlighten_pv.c op->arg2.nr_ents = entries;
entries 41 arch/x86/xen/multicalls.c struct multicall_entry entries[MC_BATCH];
entries 73 arch/x86/xen/multicalls.c memcpy(b->debug, b->entries,
entries 86 arch/x86/xen/multicalls.c mc = &b->entries[0];
entries 95 arch/x86/xen/multicalls.c if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
entries 98 arch/x86/xen/multicalls.c if (b->entries[i].result < 0)
entries 106 arch/x86/xen/multicalls.c if (b->entries[i].result < 0) {
entries 112 arch/x86/xen/multicalls.c b->entries[i].result,
entries 117 arch/x86/xen/multicalls.c b->entries[i].op,
entries 118 arch/x86/xen/multicalls.c b->entries[i].args[0],
entries 119 arch/x86/xen/multicalls.c b->entries[i].result);
entries 157 arch/x86/xen/multicalls.c ret.mc = &b->entries[b->mcidx];
entries 178 arch/x86/xen/multicalls.c b->entries[b->mcidx - 1].op != op)) {
entries 188 arch/x86/xen/multicalls.c ret.mc = &b->entries[b->mcidx - 1];
entries 202 arch/x86/xen/setup.c const struct e820_entry *entry = xen_e820_table.entries;
entries 459 arch/x86/xen/setup.c const struct e820_entry *entry = xen_e820_table.entries;
entries 608 arch/x86/xen/setup.c struct e820_entry *entry = xen_e820_table.entries;
entries 627 arch/x86/xen/setup.c entry = xen_e820_table.entries;
entries 652 arch/x86/xen/setup.c struct e820_entry *entry = xen_e820_table.entries;
entries 757 arch/x86/xen/setup.c memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
entries 758 arch/x86/xen/setup.c set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
entries 771 arch/x86/xen/setup.c xen_e820_table.entries[0].addr = 0ULL;
entries 772 arch/x86/xen/setup.c xen_e820_table.entries[0].size = mem_end;
entries 774 arch/x86/xen/setup.c xen_e820_table.entries[0].size += 8ULL << 20;
entries 775 arch/x86/xen/setup.c xen_e820_table.entries[0].type = E820_TYPE_RAM;
entries 821 arch/x86/xen/setup.c addr = xen_e820_table.entries[0].addr;
entries 822 arch/x86/xen/setup.c size = xen_e820_table.entries[0].size;
entries 827 arch/x86/xen/setup.c type = xen_e820_table.entries[i].type;
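multicalls.c batches hypercalls in a fixed entries[MC_BATCH] array indexed by mcidx and issues them together in one HYPERVISOR_multicall() when the batch fills. A sketch of that batching shape (flush_batch() prints instead of trapping into the hypervisor):

```c
#include <stdio.h>

#define MC_BATCH 4	/* small stand-in for the kernel's batch size */

/* Illustrative shape of the multicall batch buffer. */
struct multicall_entry { unsigned long op, arg; long result; };

static struct {
	struct multicall_entry entries[MC_BATCH];
	unsigned int mcidx;
} b;

static void flush_batch(void)
{
	/* A real flush is a single HYPERVISOR_multicall(b.entries, b.mcidx). */
	for (unsigned int i = 0; i < b.mcidx; i++)
		printf("issue op %lu(arg %lu)\n", b.entries[i].op,
		       b.entries[i].arg);
	b.mcidx = 0;
}

static void queue_call(unsigned long op, unsigned long arg)
{
	if (b.mcidx == MC_BATCH)
		flush_batch();	/* full: issue what we have first */
	b.entries[b.mcidx].op = op;
	b.entries[b.mcidx].arg = arg;
	b.mcidx++;
}

int main(void)
{
	for (unsigned long i = 0; i < 6; i++)
		queue_call(i, i * 10);
	flush_batch();	/* drain the tail */
	return 0;
}
```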
entries 851 arch/x86/xen/setup.c addr = xen_e820_table.entries[i].addr;
entries 852 arch/x86/xen/setup.c size = xen_e820_table.entries[i].size;
entries 218 arch/xtensa/kernel/stacktrace.c trace->entries[trace->nr_entries++] = frame->pc;
entries 41 drivers/acpi/acpi_extlog.c u32 entries; /* Valid L1 Directory entries per logical processor */
entries 250 drivers/acpi/acpi_extlog.c l1_percpu_entry = l1_head->entries;
entries 30 drivers/acpi/acpi_watchdog.c const struct acpi_wdat_entry *entries;
entries 33 drivers/acpi/acpi_watchdog.c entries = (struct acpi_wdat_entry *)(wdat + 1);
entries 34 drivers/acpi/acpi_watchdog.c for (i = 0; i < wdat->entries; i++) {
entries 37 drivers/acpi/acpi_watchdog.c gas = &entries[i].register_region;
entries 103 drivers/acpi/acpi_watchdog.c const struct acpi_wdat_entry *entries;
entries 129 drivers/acpi/acpi_watchdog.c entries = (struct acpi_wdat_entry *)(wdat + 1);
entries 130 drivers/acpi/acpi_watchdog.c for (i = 0; i < wdat->entries; i++) {
entries 136 drivers/acpi/acpi_watchdog.c gas = &entries[i].register_region;
entries 48 drivers/acpi/apei/apei-base.c u32 entries)
entries 53 drivers/acpi/apei/apei-base.c ctx->entries = entries;
entries 166 drivers/acpi/apei/apei-base.c for (i = 0; i < ctx->entries; i++) {
entries 208 drivers/acpi/apei/apei-base.c for (i = 0; i < ctx->entries; i++) {
entries 252 drivers/acpi/apei/apei-base.c ctx_unmap.entries = end;
entries 35 drivers/acpi/apei/apei-internal.h u32 entries;
entries 42 drivers/acpi/apei/apei-internal.h u32 entries);
entries 143 drivers/acpi/apei/einj.c EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
entries 211 drivers/acpi/apei/einj.c for (i = 0; i < einj_tab->entries; i++) {
entries 661 drivers/acpi/apei/einj.c if (einj_tab->entries !=
entries 371 drivers/acpi/apei/erst.c ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
entries 429 drivers/acpi/apei/erst.c u64 *entries;
entries 478 drivers/acpi/apei/erst.c u64 *entries;
entries 499 drivers/acpi/apei/erst.c entries = erst_record_id_cache.entries;
entries 501 drivers/acpi/apei/erst.c if (entries[i] == id)
entries 519 drivers/acpi/apei/erst.c new_entries = kvmalloc_array(new_size, sizeof(entries[0]),
entries 523 drivers/acpi/apei/erst.c memcpy(new_entries, entries,
entries 524 drivers/acpi/apei/erst.c erst_record_id_cache.len * sizeof(entries[0]));
entries 525 drivers/acpi/apei/erst.c kvfree(entries);
entries 526 drivers/acpi/apei/erst.c erst_record_id_cache.entries = entries = new_entries;
entries 529 drivers/acpi/apei/erst.c entries[i] = id;
entries 543 drivers/acpi/apei/erst.c u64 *entries;
entries 553 drivers/acpi/apei/erst.c entries = erst_record_id_cache.entries;
entries 555 drivers/acpi/apei/erst.c if (entries[*pos] != APEI_ERST_INVALID_RECORD_ID)
entries 559 drivers/acpi/apei/erst.c *record_id = entries[*pos];
entries 570 drivers/acpi/apei/erst.c *record_id = erst_record_id_cache.entries[*pos];
entries 588 drivers/acpi/apei/erst.c u64 *entries;
entries 593 drivers/acpi/apei/erst.c entries = erst_record_id_cache.entries;
entries 595 drivers/acpi/apei/erst.c if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
entries 598 drivers/acpi/apei/erst.c entries[wpos] = entries[i];
entries 863 drivers/acpi/apei/erst.c u64 *entries;
entries 879 drivers/acpi/apei/erst.c entries = erst_record_id_cache.entries;
entries 881 drivers/acpi/apei/erst.c if (entries[i] == record_id)
entries 882 drivers/acpi/apei/erst.c entries[i] = APEI_ERST_INVALID_RECORD_ID;
entries 907 drivers/acpi/apei/erst.c if (erst_tab->entries !=
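The erst.c hits describe a grow-on-demand record-id cache: the array is reallocated and copied when full (erst.c:519-526), deletions become APEI_ERST_INVALID_RECORD_ID tombstones (erst.c:882), and a compaction pass squeezes them out (erst.c:595-598). A standalone sketch of that scheme:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define INVALID_ID UINT64_MAX	/* stands in for APEI_ERST_INVALID_RECORD_ID */

/* Illustrative shape of the ERST record-id cache. */
static struct { uint64_t *entries; int len, size; } cache;

static int cache_add(uint64_t id)
{
	if (cache.len >= cache.size) {
		int new_size = cache.size ? cache.size * 2 : 4;
		uint64_t *n = malloc(new_size * sizeof(*n));

		if (!n)
			return -1;
		/* Grow by allocate-copy-free, like kvmalloc_array/kvfree. */
		memcpy(n, cache.entries, cache.len * sizeof(*n));
		free(cache.entries);
		cache.entries = n;
		cache.size = new_size;
	}
	cache.entries[cache.len++] = id;
	return 0;
}

static void cache_compact(void)
{
	int wpos = 0;

	/* Squeeze tombstones out in place. */
	for (int i = 0; i < cache.len; i++)
		if (cache.entries[i] != INVALID_ID)
			cache.entries[wpos++] = cache.entries[i];
	cache.len = wpos;
}

int main(void)
{
	for (uint64_t id = 1; id <= 5; id++)
		cache_add(id);
	cache.entries[2] = INVALID_ID;	/* "delete" record 3 */
	cache_compact();
	printf("%d ids remain\n", cache.len);	/* 4 */
	return 0;
}
```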
entries 271 drivers/acpi/hmat/hmat.c u16 *entries;
entries 284 drivers/acpi/hmat/hmat.c total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
entries 298 drivers/acpi/hmat/hmat.c entries = (u16 *)(targs + tpds);
entries 302 drivers/acpi/hmat/hmat.c value = hmat_normalize(entries[init * tpds + targ],
entries 465 drivers/acpi/hmat/hmat.c u16 *entries;
entries 471 drivers/acpi/hmat/hmat.c entries = (u16 *)(targs + tpds);
entries 492 drivers/acpi/hmat/hmat.c return hmat_normalize(entries[idx * tpds + tdx],
entries 948 drivers/acpi/processor_idle.c struct acpi_lpi_state *entries;
entries 1003 drivers/acpi/processor_idle.c info->entries = lpi_state;
entries 1124 drivers/acpi/processor_idle.c struct acpi_lpi_state *p, *t = curr_level->entries;
entries 1160 drivers/acpi/processor_idle.c kfree(curr_level->entries);
entries 708 drivers/atm/idt77252.c int entries;
entries 713 drivers/atm/idt77252.c entries = atomic_read(&scq->used);
entries 714 drivers/atm/idt77252.c if (entries > (SCQ_ENTRIES - 1)) {
entries 463 drivers/atm/iphase.c int entries;
entries 480 drivers/atm/iphase.c entries = rate / dev->Granularity;
entries 482 drivers/atm/iphase.c entries, rate, dev->Granularity);)
entries 483 drivers/atm/iphase.c if (entries < 1)
entries 485 drivers/atm/iphase.c rateLow = entries * dev->Granularity;
entries 486 drivers/atm/iphase.c rateHigh = (entries + 1) * dev->Granularity;
entries 488 drivers/atm/iphase.c entries++;
entries 489 drivers/atm/iphase.c if (entries > dev->CbrRemEntries) {
entries 492 drivers/atm/iphase.c entries, dev->CbrRemEntries);)
entries 497 drivers/atm/iphase.c ia_vcc->NumCbrEntry = entries;
entries 498 drivers/atm/iphase.c dev->sum_mcr += entries * dev->Granularity;
entries 503 drivers/atm/iphase.c spacing = dev->CbrTotEntries / entries;
entries 504 drivers/atm/iphase.c sp_mod = dev->CbrTotEntries % entries; // get modulo
entries 505 drivers/atm/iphase.c toBeAssigned = entries;
entries 513 drivers/atm/iphase.c if (toBeAssigned == entries)
entries 522 drivers/atm/iphase.c fracSlot = ((sp_mod + sp_mod2) / entries); // get new integer part
entries 523 drivers/atm/iphase.c sp_mod2 = ((sp_mod + sp_mod2) % entries); // calc new fractional part
entries 34 drivers/char/xillybus/xillybus.h int entries;
entries 404 drivers/char/xillybus/xillybus_core.c int entries)
entries 482 drivers/char/xillybus/xillybus_core.c for (entry = 0; entry < entries; entry++, chandesc += 4) {
entries 601 drivers/char/xillybus/xillybus_core.c idt_handle->entries = len >> 2;
entries 2026 drivers/char/xillybus/xillybus_core.c idt_handle.entries);
entries 47 drivers/clk/tegra/clk-tegra124-dfll-fcpu.c .entries = {
entries 99 drivers/clk/tegra/clk-tegra124-dfll-fcpu.c .entries = { \
entries 126 drivers/clk/tegra/clk-tegra124-dfll-fcpu.c .entries = { \
entries 149 drivers/clk/tegra/clk-tegra124-dfll-fcpu.c .entries = { \
entries 172 drivers/clk/tegra/clk-tegra124-dfll-fcpu.c .entries = { \
entries 194 drivers/clk/tegra/clk-tegra124-dfll-fcpu.c .entries = { \
entries 215 drivers/clk/tegra/clk-tegra124-dfll-fcpu.c .entries = { \
entries 65 drivers/clk/tegra/cvb.c const struct cvb_table_freq_entry *entry = &table->entries[i];
entries 134 drivers/clk/tegra/cvb.c const struct cvb_table_freq_entry *entry = &table->entries[i];
entries 47 drivers/clk/tegra/cvb.h struct cvb_table_freq_entry entries[MAX_DVFS_FREQS];
entries 159 drivers/crypto/nx/nx-842-pseries.c struct nx842_slentry *entries; /* ptr to array of slentries */
entries 177 drivers/crypto/nx/nx-842-pseries.c entry = sl->entries;
entries 313 drivers/crypto/nx/nx-842-pseries.c slin.entries = (struct nx842_slentry *)workmem->slin;
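The iphase.c CBR hits implement even slot placement by integer division with fractional carry: spacing = total / entries, with the remainder accumulated and spilled one extra slot at a time (iphase.c:503-523). A sketch of that arithmetic:

```c
#include <stdio.h>

/* Illustrative version of the CBR scheduling arithmetic: place
 * `entries` slots as evenly as possible in a table of `total`
 * positions by carrying the fractional part of the stride forward. */
int main(void)
{
	int total = 10, entries = 3;
	int spacing = total / entries;	/* integer part of the stride */
	int sp_mod = total % entries;	/* fractional part, as a modulo */
	int sp_mod2 = 0, pos = 0;

	for (int assigned = 0; assigned < entries; assigned++) {
		int frac;

		printf("slot at position %d\n", pos);	/* 0, 3, 6 */
		frac = (sp_mod + sp_mod2) / entries;	/* carry overflow */
		sp_mod2 = (sp_mod + sp_mod2) % entries;	/* keep remainder */
		pos += spacing + frac;
	}
	return 0;
}
```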
entries 314 drivers/crypto/nx/nx-842-pseries.c slout.entries = (struct nx842_slentry *)workmem->slout;
entries 330 drivers/crypto/nx/nx-842-pseries.c op.in = nx842_get_pa(slin.entries);
entries 342 drivers/crypto/nx/nx-842-pseries.c op.out = nx842_get_pa(slout.entries);
entries 445 drivers/crypto/nx/nx-842-pseries.c slin.entries = (struct nx842_slentry *)workmem->slin;
entries 446 drivers/crypto/nx/nx-842-pseries.c slout.entries = (struct nx842_slentry *)workmem->slout;
entries 462 drivers/crypto/nx/nx-842-pseries.c op.in = nx842_get_pa(slin.entries);
entries 474 drivers/crypto/nx/nx-842-pseries.c op.out = nx842_get_pa(slout.entries);
entries 97 drivers/crypto/qat/qat_common/adf_accel_devices.h struct msix_entry *entries;
entries 74 drivers/crypto/qat/qat_common/adf_isr.c pci_dev_info->msix_entries.entries[i].entry = i;
entries 76 drivers/crypto/qat/qat_common/adf_isr.c pci_dev_info->msix_entries.entries[0].entry =
entries 81 drivers/crypto/qat/qat_common/adf_isr.c pci_dev_info->msix_entries.entries,
entries 167 drivers/crypto/qat/qat_common/adf_isr.c struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
entries 215 drivers/crypto/qat/qat_common/adf_isr.c struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
entries 233 drivers/crypto/qat/qat_common/adf_isr.c struct msix_entry *entries;
entries 241 drivers/crypto/qat/qat_common/adf_isr.c entries = kcalloc_node(msix_num_entries, sizeof(*entries),
entries 243 drivers/crypto/qat/qat_common/adf_isr.c if (!entries)
entries 248 drivers/crypto/qat/qat_common/adf_isr.c kfree(entries);
entries 257 drivers/crypto/qat/qat_common/adf_isr.c accel_dev->accel_pci_dev.msix_entries.entries = entries;
entries 263 drivers/crypto/qat/qat_common/adf_isr.c kfree(entries);
entries 273 drivers/crypto/qat/qat_common/adf_isr.c kfree(accel_dev->accel_pci_dev.msix_entries.entries);
entries 97 drivers/dma/dw-edma/dw-edma-v0-debugfs.c static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
entries 103 drivers/dma/dw-edma/dw-edma-v0-debugfs.c if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir,
entries 104 drivers/dma/dw-edma/dw-edma-v0-debugfs.c entries[i].reg, &fops_x32))
entries 264 drivers/firmware/efi/efi.c LIST_HEAD(entries);
entries 273 drivers/firmware/efi/efi.c ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
entries 275 drivers/firmware/efi/efi.c list_for_each_entry_safe(entry, aux, &entries, list) {
entries 54 drivers/firmware/efi/esrt.c u8 entries[];
entries 341 drivers/firmware/efi/esrt.c struct efi_system_resource_entry_v1 *v1_entries = (void *)esrt->entries;
entries 1365 drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
entries 1377 drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
entries 1527 drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c voltage_table->entries[i].value =
entries 1529 drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c voltage_table->entries[i].smio_low =
entries 132 drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
entries 234 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
entries 235 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c if (!amdgpu_table->entries)
entries 238 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c entry = &atom_table->entries[0];
entries 240 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
entries 242 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
entries 381 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c le16_to_cpu(clk_v->entries[0].usSclkLow) |
entries 382 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c (clk_v->entries[0].ucSclkHigh << 16);
entries 384 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c le16_to_cpu(clk_v->entries[0].usMclkLow) |
entries 385 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c (clk_v->entries[0].ucMclkHigh << 16);
entries 387 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c le16_to_cpu(clk_v->entries[0].usVddc);
entries 389 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c le16_to_cpu(clk_v->entries[0].usVddci);
entries 399 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
entries 403 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
entries 408 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c entry = &psl->entries[0];
entries 410 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
entries 412 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
entries 414 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
entries 446 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
entries 447 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
entries 451 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c entry = &cac_table->entries[0];
entries 454 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
entries 456 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
entries 458 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
entries 461 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
entries 463 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
entries 500 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
entries 502 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
entries 508 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c entry = &limits->entries[0];
entries 509 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c state_entry = &states->entries[0];
entries 512 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c ((u8 *)&array->entries[0] +
entries 514 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
entries 516 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
entries 518 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
entries 528 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c ((u8 *)&array->entries[0] +
entries 555 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
entries 557 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
entries 563 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c entry = &limits->entries[0];
entries 566 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c ((u8 *)&array->entries[0] +
entries 568 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
entries 570 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
entries 572 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
entries 587 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
entries 589 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
entries 595 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c entry = &limits->entries[0];
entries 597 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
entries 599 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
entries 645 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
entries 647 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
entries 653 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c entry = &limits->entries[0];
entries 655 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
entries 657 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
entries 710 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
entries 723 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(dyn_state->vddc_dependency_on_sclk.entries);
entries 724 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(dyn_state->vddci_dependency_on_mclk.entries);
entries 725 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(dyn_state->vddc_dependency_on_mclk.entries);
entries 726 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(dyn_state->mvdd_dependency_on_mclk.entries);
entries 727 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(dyn_state->cac_leakage_table.entries);
entries 728 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(dyn_state->phase_shedding_limits_table.entries);
entries 731 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
entries 732 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
entries 733 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
entries 734 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
entries 735 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
entries 130 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h struct amdgpu_clock_voltage_dependency_entry *entries;
entries 147 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h union amdgpu_cac_leakage_entry *entries;
entries 158 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h struct amdgpu_phase_shedding_limits_entry *entries;
entries 169 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
entries 180 drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h struct amdgpu_vce_clock_voltage_dependency_entry *entries;
entries 771 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c vce_clk_table.entries[i].sclk = vce_state->sclk;
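The amdgpu_dpm.c hits repeat one conversion: allocate a host-side entries[] array, then widen each packed little-endian ATOM firmware entry into it (e.g. amdgpu_dpm.c:234-242). A sketch with a simplified, hypothetical ATOM layout:

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Hypothetical, simplified ATOM entry: 16-bit LE clock low word,
 * 8-bit clock high byte, 16-bit LE voltage. */
struct atom_entry { uint8_t clk_lo[2]; uint8_t clk_hi; uint8_t v[2]; };
struct dep_entry { uint32_t clk; uint16_t v; };	/* host-order table */

static uint32_t le16(const uint8_t *p) { return p[0] | (p[1] << 8); }

int main(void)
{
	struct atom_entry atom[2] = {
		{ { 0x00, 0x7d }, 0x00, { 0x84, 0x03 } },	/* 32000, 900 */
		{ { 0x00, 0xfa }, 0x00, { 0xb0, 0x04 } },	/* 64000, 1200 */
	};
	int count = 2;
	struct dep_entry *entries = calloc(count, sizeof(*entries));

	if (!entries)
		return 1;
	for (int i = 0; i < count; i++) {
		/* Widen the split LE clock field, as the le16_to_cpu |
		 * (high << 16) lines above do. */
		entries[i].clk = le16(atom[i].clk_lo) | (atom[i].clk_hi << 16);
		entries[i].v = (uint16_t)le16(atom[i].v);
		printf("entry %d: clk %u, v %u\n", i, entries[i].clk, entries[i].v);
	}
	free(entries);
	return 0;
}
```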
entries 772 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c vce_clk_table.entries[i].mclk = vce_state->mclk;
entries 773 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c vce_clk_table.entries[i].eclk = vce_state->evclk;
entries 389 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (!cursor->entry->entries)
entries 399 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c cursor->entry = &cursor->entry->entries[idx];
entries 426 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (cursor->entry == &cursor->parent->entries[num_entries - 1])
entries 710 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c unsigned entries, ats_entries;
entries 723 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c entries = amdgpu_bo_size(bo) / 8;
entries 729 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c ats_entries = min(ats_entries, entries);
entries 730 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c entries -= ats_entries;
entries 737 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if ((pt - vm->root.entries) >= ats_entries) {
entries 740 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c ats_entries = entries;
entries 741 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c entries = 0;
entries 787 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (entries) {
entries 802 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c r = vm->update_funcs->update(&params, bo, addr, 0, entries,
entries 860 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
entries 864 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c entry->entries = kvmalloc_array(num_entries,
entries 865 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c sizeof(*entry->entries),
entries 867 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (!entry->entries)
entries 911 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c kvfree(entry->entries);
entries 912 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c entry->entries = NULL;
entries 1204 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c pde = (entry - parent->entries) * 8;
entries 2791 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c unsigned int entries = amdgpu_vm_num_entries(adev, root);
entries 2794 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (!(vm->root.entries))
entries 2797 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c for (i = 0; i < entries; i++) {
entries 2798 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (vm->root.entries[i].base.bo)
entries 151 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h struct amdgpu_vm_pt *entries;
entries 81 drivers/gpu/drm/amd/amdgpu/kv_dpm.c return vddc_sclk_table->entries[vid_2bit].v;
entries 83 drivers/gpu/drm/amd/amdgpu/kv_dpm.c return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
entries 86 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
entries 87 drivers/gpu/drm/amd/amdgpu/kv_dpm.c return vid_mapping_table->entries[i].vid_7bit;
entries 89 drivers/gpu/drm/amd/amdgpu/kv_dpm.c return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
entries 103 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (vddc_sclk_table->entries[i].v == vid_7bit)
entries 109 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
entries 110 drivers/gpu/drm/amd/amdgpu/kv_dpm.c return vid_mapping_table->entries[i].vid_2bit;
entries 113 drivers/gpu/drm/amd/amdgpu/kv_dpm.c return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
entries 146 drivers/gpu/drm/amd/amdgpu/kv_dpm.c sclk_voltage_mapping_table->entries[n].sclk_frequency =
entries 148 drivers/gpu/drm/amd/amdgpu/kv_dpm.c sclk_voltage_mapping_table->entries[n].vid_2bit =
entries 166 drivers/gpu/drm/amd/amdgpu/kv_dpm.c vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
entries 168 drivers/gpu/drm/amd/amdgpu/kv_dpm.c vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
entries 174 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (vid_mapping_table->entries[i].vid_7bit == 0) {
entries 176 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (vid_mapping_table->entries[j].vid_7bit != 0) {
entries 177 drivers/gpu/drm/amd/amdgpu/kv_dpm.c vid_mapping_table->entries[i] =
entries 178 drivers/gpu/drm/amd/amdgpu/kv_dpm.c vid_mapping_table->entries[j];
entries 179 drivers/gpu/drm/amd/amdgpu/kv_dpm.c vid_mapping_table->entries[j].vid_7bit = 0;
entries 807 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].clk == pi->boot_pl.sclk)
entries 821 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
entries 916 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (pi->high_voltage_t < table->entries[i].v))
entries 919 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
entries 920 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
entries 921 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
entries 924 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
entries 926 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (u8)kv_get_clk_bypass(adev, table->entries[i].dclk);
entries 929 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].vclk, false, &dividers);
entries 935 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].dclk, false, &dividers);
entries 987 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->high_voltage_t < table->entries[i].v)
entries 990 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
entries 991 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
entries 994 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (u8)kv_get_clk_bypass(adev, table->entries[i].evclk);
entries 997 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].evclk, false, &dividers);
entries 1050 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->high_voltage_t < table->entries[i].v)
entries 1053 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
entries 1054 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
entries 1057 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (u8)kv_get_clk_bypass(adev, table->entries[i].clk);
entries 1060 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].clk, false, &dividers);
entries 1115 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
entries 1116 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
entries 1119 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].clk, false, &dividers);
entries 1169 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
entries 1171 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
entries 1173 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
entries 1175 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
entries 1177 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
entries 1190 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
entries 1192 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
entries 1194 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
entries 1196 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
entries 1198 drivers/gpu/drm/amd/amdgpu/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
entries 1536 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].evclk >= evclk)
entries 1617 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].clk >= 0) /* XXX */
entries 1782 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
entries 1790 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
entries 1796 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
entries 1797 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
entries 1807 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
entries 1815 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (table->entries[i].sclk_frequency <=
entries 1823 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[pi->highest_valid].sclk_frequency) >
entries 1824 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (table->entries[pi->lowest_valid].sclk_frequency -
entries 2042 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
entries 2045 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
entries 2065 drivers/gpu/drm/amd/amdgpu/kv_dpm.c uvd_table->entries[i].v =
entries 2067 drivers/gpu/drm/amd/amdgpu/kv_dpm.c uvd_table->entries[i].v);
entries 2072 drivers/gpu/drm/amd/amdgpu/kv_dpm.c vce_table->entries[i].v =
entries 2074 drivers/gpu/drm/amd/amdgpu/kv_dpm.c vce_table->entries[i].v);
entries 2079 drivers/gpu/drm/amd/amdgpu/kv_dpm.c samu_table->entries[i].v =
entries 2081 drivers/gpu/drm/amd/amdgpu/kv_dpm.c samu_table->entries[i].v);
entries 2086 drivers/gpu/drm/amd/amdgpu/kv_dpm.c acp_table->entries[i].v =
entries 2088 drivers/gpu/drm/amd/amdgpu/kv_dpm.c acp_table->entries[i].v);
entries 2180 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
entries 2192 drivers/gpu/drm/amd/amdgpu/kv_dpm.c (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
entries 2235 drivers/gpu/drm/amd/amdgpu/kv_dpm.c if (stable_p_state_sclk >= table->entries[i].clk) {
entries 2236 drivers/gpu/drm/amd/amdgpu/kv_dpm.c stable_p_state_sclk = table->entries[i].clk;
entries 2242 drivers/gpu/drm/amd/amdgpu/kv_dpm.c stable_p_state_sclk = table->entries[0].clk;
entries 2265 drivers/gpu/drm/amd/amdgpu/kv_dpm.c ps->levels[i].sclk = table->entries[limit].clk;
entries 2277 drivers/gpu/drm/amd/amdgpu/kv_dpm.c ps->levels[i].sclk = table->entries[limit].sclk_frequency;
entries 2428 drivers/gpu/drm/amd/amdgpu/kv_dpm.c kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
entries 2431 drivers/gpu/drm/amd/amdgpu/kv_dpm.c kv_set_divider_value(adev, i, table->entries[i].clk);
entries 2434 drivers/gpu/drm/amd/amdgpu/kv_dpm.c table->entries[i].v);
entries 2448 drivers/gpu/drm/amd/amdgpu/kv_dpm.c kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
entries 2451 drivers/gpu/drm/amd/amdgpu/kv_dpm.c kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
entries 2452 drivers/gpu/drm/amd/amdgpu/kv_dpm.c kv_set_vid(adev, i, table->entries[i].vid_2bit);
entries 44 drivers/gpu/drm/amd/amdgpu/kv_dpm.h struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES];
entries 55 drivers/gpu/drm/amd/amdgpu/kv_dpm.h struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS];
entries 2648 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (table->entries[i].vddc > *max)
entries 2649 drivers/gpu/drm/amd/amdgpu/si_dpm.c *max = table->entries[i].vddc;
entries 2650 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (table->entries[i].vddc < *min)
entries 2651 drivers/gpu/drm/amd/amdgpu/si_dpm.c *min = table->entries[i].vddc;
entries 3025 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
entries 3026 drivers/gpu/drm/amd/amdgpu/si_dpm.c highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
entries 3050 drivers/gpu/drm/amd/amdgpu/si_dpm.c if ((evclk <= table->entries[i].evclk) &&
entries 3051 drivers/gpu/drm/amd/amdgpu/si_dpm.c (ecclk <= table->entries[i].ecclk)) {
entries 3052 drivers/gpu/drm/amd/amdgpu/si_dpm.c *voltage = table->entries[i].v;
entries 3060 drivers/gpu/drm/amd/amdgpu/si_dpm.c *voltage = table->entries[table->count - 1].v;
entries 3213 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (voltage <= table->entries[i].value)
entries 3214 drivers/gpu/drm/amd/amdgpu/si_dpm.c return table->entries[i].value;
entries 3216 drivers/gpu/drm/amd/amdgpu/si_dpm.c return table->entries[table->count - 1].value;
entries 3261 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (clock < table->entries[i].clk)
entries 3262 drivers/gpu/drm/amd/amdgpu/si_dpm.c clock = table->entries[i].clk;
entries 3276 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (clock <= table->entries[i].clk) {
entries 3277 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (*voltage < table->entries[i].v)
entries 3278 drivers/gpu/drm/amd/amdgpu/si_dpm.c *voltage = (u16)((table->entries[i].v < max_voltage) ?
entries 3279 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->entries[i].v : max_voltage);
entries 3702 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_pi->leakage_voltage.entries[count].voltage = vddc;
entries 3703 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_pi->leakage_voltage.entries[count].leakage_index =
entries 3730 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
entries 3731 drivers/gpu/drm/amd/amdgpu/si_dpm.c *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
entries 4394 drivers/gpu/drm/amd/amdgpu/si_dpm.c voltage_table->entries[i] = voltage_table->entries[i + diff];
entries 4413 drivers/gpu/drm/amd/amdgpu/si_dpm.c voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
entries 4414 drivers/gpu/drm/amd/amdgpu/si_dpm.c voltage_table->entries[i].smio_low = 0;
entries 4507 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
entries 4532 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
entries 4580 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (value <= table->entries[i].value) {
entries 4582 drivers/gpu/drm/amd/amdgpu/si_dpm.c voltage->value = cpu_to_be16(table->entries[i].value);
entries 4605 drivers/gpu/drm/amd/amdgpu/si_dpm.c voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
entries 4618 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
entries 4620 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
entries 4625 drivers/gpu/drm/amd/amdgpu/si_dpm.c (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
entries 4629 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
entries 4632 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
entries 4640 drivers/gpu/drm/amd/amdgpu/si_dpm.c (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
entries 4644 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
entries 4647 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
entries 4654 drivers/gpu/drm/amd/amdgpu/si_dpm.c *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
entries 4679 drivers/gpu/drm/amd/amdgpu/si_dpm.c if ((voltage <= limits->entries[i].voltage) &&
entries 4680 drivers/gpu/drm/amd/amdgpu/si_dpm.c (sclk <= limits->entries[i].sclk) &&
entries 4681 drivers/gpu/drm/amd/amdgpu/si_dpm.c (mclk <= limits->entries[i].mclk))
entries 5624 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
entries 5626 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
entries 6332 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->entries[i].v,
entries 6335 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->entries[i].v = leakage_voltage;
entries 6346 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
entries 6347 drivers/gpu/drm/amd/amdgpu/si_dpm.c table->entries[j].v : table->entries[j + 1].v;
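
Aside: the si_dpm.c hits at source lines 3276-3279 above show the standard dependency-table lookup: scan entries[] for the first entry whose clock covers the request, raise the output voltage to that entry's value, and clamp it to a ceiling. A hedged sketch of the same pattern with illustrative type names, not the kernel's structs:

    #include <stdint.h>

    struct cv_entry { uint32_t clk; uint16_t v; };

    /* raise *voltage to the table's value for 'clock', clamped to max_voltage */
    static void raise_voltage_for_clock(const struct cv_entry *entries, int count,
                                        uint32_t clock, uint16_t max_voltage,
                                        uint16_t *voltage)
    {
            int i;

            for (i = 0; i < count; i++) {
                    if (clock <= entries[i].clk) {
                            if (*voltage < entries[i].v)
                                    *voltage = entries[i].v < max_voltage ?
                                               entries[i].v : max_voltage;
                            break;
                    }
            }
    }
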
entries 7350 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
entries 7354 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
entries 7359 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
entries 7360 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
entries 7361 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
entries 7362 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
entries 7363 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
entries 7364 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
entries 7365 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
entries 7366 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
entries 7478 drivers/gpu/drm/amd/amdgpu/si_dpm.c kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
entries 945 drivers/gpu/drm/amd/amdgpu/si_dpm.h struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
entries 938 drivers/gpu/drm/amd/amdkfd/kfd_crat.c uint32_t entries = 0;
entries 1003 drivers/gpu/drm/amd/amdkfd/kfd_crat.c &entries,
entries 1007 drivers/gpu/drm/amd/amdkfd/kfd_crat.c crat_table->length += (sub_type_hdr->length * entries);
entries 1008 drivers/gpu/drm/amd/amdkfd/kfd_crat.c crat_table->total_entries += entries;
entries 1011 drivers/gpu/drm/amd/amdkfd/kfd_crat.c sub_type_hdr->length * entries);
entries 138 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c gamma->entries.red[i] = dc_fixpt_from_int(r);
entries 139 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c gamma->entries.green[i] = dc_fixpt_from_int(g);
entries 140 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c gamma->entries.blue[i] = dc_fixpt_from_int(b);
entries 151 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c gamma->entries.red[i] = dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE);
entries 152 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c gamma->entries.green[i] = dc_fixpt_from_fraction(g, MAX_DRM_LUT_VALUE);
entries 153 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c gamma->entries.blue[i] = dc_fixpt_from_fraction(b, MAX_DRM_LUT_VALUE);
entries 350 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c .entries = {
entries 385 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c .entries = {
entries 422 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c if (!bw_params->wm_table.entries[i].valid)
entries 425 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
entries 426 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;;
entries 437 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
entries 439 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
entries 481 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i].Freq;
entries 482 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[i].Freq;
entries 483 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[i].Freq;
entries 484 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i].Freq;
entries 485 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->clk_table.entries[i].voltage = clock_table->FClocks[i].Vol;
entries 493 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->wm_table.entries[i].wm_inst = i;
entries 496 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->wm_table.entries[i].valid = false;
entries 500 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
entries 501 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->wm_table.entries[i].valid = true;
entries 508 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->wm_table.entries[WM_D].pstate_latency_us = LPDDR_MEM_RETRAIN_LATENCY;
entries 509 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->wm_table.entries[WM_D].wm_inst = WM_D;
entries 510 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->wm_table.entries[WM_D].wm_type = WM_TYPE_RETRAINING;
entries 511 drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c bw_params->wm_table.entries[WM_D].valid = true;
entries 1744 drivers/gpu/drm/amd/display/dc/core/dc.c memcpy(&surface->gamma_correction->entries,
entries 1745 drivers/gpu/drm/amd/display/dc/core/dc.c &srf_update->gamma->entries,
entries 474 drivers/gpu/drm/amd/display/dc/dc_hw_types.h } entries;
entries 201 drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c gamma->entries.red[i]));
entries 204 drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c gamma->entries.green[i]));
entries 207 drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c gamma->entries.blue[i]));
entries 808 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c gamma->entries.red[i]));
entries 811 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c gamma->entries.green[i]));
entries 814 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c gamma->entries.blue[i]));
entries 845 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c uint32_t entries)
entries 850 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c for (i = 0 ; i < entries; i += 2) {
entries 879 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c uint32_t entries)
entries 884 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c for (i = 0; i < entries; i++) {
entries 1036 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c table_entry = &bw_params->wm_table.entries[WM_D];
entries 1044 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c table_entry = &bw_params->wm_table.entries[WM_C];
entries 1049 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c table_entry = &bw_params->wm_table.entries[WM_B];
entries 1055 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c table_entry = &bw_params->wm_table.entries[WM_A];
entries 1288 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
entries 1289 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
entries 1290 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
entries 1292 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000;
entries 64 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h struct clk_limit_table_entry entries[MAX_NUM_DPM_LVL];
entries 146 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h struct wm_range_table_entry entries[WM_SET_COUNT];
entries 1087 drivers/gpu/drm/amd/display/modules/color/color_gamma.c if (dc_fixpt_lt(max_os, ramp->entries.red[i]) ||
entries 1088 drivers/gpu/drm/amd/display/modules/color/color_gamma.c dc_fixpt_lt(max_os, ramp->entries.green[i]) ||
entries 1089 drivers/gpu/drm/amd/display/modules/color/color_gamma.c dc_fixpt_lt(max_os, ramp->entries.blue[i])) {
entries 1100 drivers/gpu/drm/amd/display/modules/color/color_gamma.c ramp->entries.red[i], scaler);
entries 1102 drivers/gpu/drm/amd/display/modules/color/color_gamma.c ramp->entries.green[i], scaler);
entries 1104 drivers/gpu/drm/amd/display/modules/color/color_gamma.c ramp->entries.blue[i], scaler);
entries 1148 drivers/gpu/drm/amd/display/modules/color/color_gamma.c if (dc_fixpt_lt(ramp->entries.red[i], min))
entries 1149 drivers/gpu/drm/amd/display/modules/color/color_gamma.c min = ramp->entries.red[i];
entries 1151 drivers/gpu/drm/amd/display/modules/color/color_gamma.c if (dc_fixpt_lt(ramp->entries.green[i], min))
entries 1152 drivers/gpu/drm/amd/display/modules/color/color_gamma.c min = ramp->entries.green[i];
entries 1154 drivers/gpu/drm/amd/display/modules/color/color_gamma.c if (dc_fixpt_lt(ramp->entries.blue[i], min))
entries 1155 drivers/gpu/drm/amd/display/modules/color/color_gamma.c min = ramp->entries.blue[i];
entries 1157 drivers/gpu/drm/amd/display/modules/color/color_gamma.c if (dc_fixpt_lt(max, ramp->entries.red[i]))
entries 1158 drivers/gpu/drm/amd/display/modules/color/color_gamma.c max = ramp->entries.red[i];
entries 1160 drivers/gpu/drm/amd/display/modules/color/color_gamma.c if (dc_fixpt_lt(max, ramp->entries.green[i]))
entries 1161 drivers/gpu/drm/amd/display/modules/color/color_gamma.c max = ramp->entries.green[i];
entries 1163 drivers/gpu/drm/amd/display/modules/color/color_gamma.c if (dc_fixpt_lt(max, ramp->entries.blue[i]))
entries 1164 drivers/gpu/drm/amd/display/modules/color/color_gamma.c max = ramp->entries.blue[i];
entries 1175 drivers/gpu/drm/amd/display/modules/color/color_gamma.c ramp->entries.red[i], delta), offset);
entries 1178 drivers/gpu/drm/amd/display/modules/color/color_gamma.c ramp->entries.green[i], delta), offset);
entries 1181 drivers/gpu/drm/amd/display/modules/color/color_gamma.c ramp->entries.blue[i], delta), offset);
entries 1326 drivers/gpu/drm/amd/display/modules/color/color_gamma.c lut1 = ramp->entries.red[index];
entries 1327 drivers/gpu/drm/amd/display/modules/color/color_gamma.c lut2 = ramp->entries.red[index_next];
entries 1329 drivers/gpu/drm/amd/display/modules/color/color_gamma.c lut1 = ramp->entries.green[index];
entries 1330 drivers/gpu/drm/amd/display/modules/color/color_gamma.c lut2 = ramp->entries.green[index_next];
entries 1332 drivers/gpu/drm/amd/display/modules/color/color_gamma.c lut1 = ramp->entries.blue[index];
entries 1333 drivers/gpu/drm/amd/display/modules/color/color_gamma.c lut2 = ramp->entries.blue[index_next];
entries 81 drivers/gpu/drm/amd/display/modules/stats/stats.c unsigned int entries;
entries 135 drivers/gpu/drm/amd/display/modules/stats/stats.c core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
entries 140 drivers/gpu/drm/amd/display/modules/stats/stats.c core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
entries 142 drivers/gpu/drm/amd/display/modules/stats/stats.c core_stats->entries = reg_data;
entries 144 drivers/gpu/drm/amd/display/modules/stats/stats.c core_stats->time = kcalloc(core_stats->entries,
entries 160 drivers/gpu/drm/amd/display/modules/stats/stats.c core_stats->entries = 0;
entries 284 drivers/gpu/drm/amd/display/modules/stats/stats.c sizeof(struct stats_time_cache) * core_stats->entries);
entries 340 drivers/gpu/drm/amd/display/modules/stats/stats.c if (core_stats->index >= core_stats->entries)
entries 385 drivers/gpu/drm/amd/display/modules/stats/stats.c if (core_stats->index >= core_stats->entries)
entries 433 drivers/gpu/drm/amd/display/modules/stats/stats.c if (core_stats->index >= core_stats->entries)
entries 517 drivers/gpu/drm/amd/include/pptable.h ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
entries 533 drivers/gpu/drm/amd/include/pptable.h ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
entries 557 drivers/gpu/drm/amd/include/pptable.h ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
entries 572 drivers/gpu/drm/amd/include/pptable.h ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
entries 584 drivers/gpu/drm/amd/include/pptable.h VCEClockInfo entries[1];
entries 596 drivers/gpu/drm/amd/include/pptable.h ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
entries 608 drivers/gpu/drm/amd/include/pptable.h ATOM_PPLIB_VCE_State_Record entries[1];
entries 630 drivers/gpu/drm/amd/include/pptable.h UVDClockInfo entries[1];
entries 642 drivers/gpu/drm/amd/include/pptable.h ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
entries 661 drivers/gpu/drm/amd/include/pptable.h ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
entries 679 drivers/gpu/drm/amd/include/pptable.h ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
entries 748 drivers/gpu/drm/amd/include/pptable.h ATOM_PPLIB_VQ_Budgeting_Record entries[1];
entries 51 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h phm_ppt_v1_clock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */
entries 74 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h phm_ppt_v1_mm_clock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */
entries 89 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h phm_ppt_v1_voltage_lookup_record entries[1]; /* Dynamically allocate count entries. */
entries 106 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h phm_ppt_v1_pcie_record entries[1]; /* Dynamically allocate count entries. */
entries 553 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c voltage_table->entries[i].value =
entries 555 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c voltage_table->entries[i].smio_low =
entries 1131 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
entries 1146 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
entries 211 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h pp_atomctrl_voltage_table_entry entries[PP_ATOMCTRL_MAX_VOLTAGE_ENTRIES];
entries 127 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c voltage_table->entries[i].value =
entries 130 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c voltage_table->entries[i].smio_low =
entries 52 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h struct pp_atomfwctrl_voltage_table_entry entries[PP_ATOMFWCTRL_MAX_VOLTAGE_ENTRIES];
entries 167 drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */
entries 182 drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h ATOM_Tonga_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
entries 197 drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
entries 213 drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
entries 225 drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. */
entries 238 drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h ATOM_Polaris10_PCIE_Record entries[1]; /* Dynamically allocate entries. */
entries 255 drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h ATOM_Tonga_MM_Dependency_Record entries[1]; /* Dynamically allocate entries. */
entries 268 drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h ATOM_Tonga_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries. */
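
Aside: every pptable.h, hwmgr_ppt.h and pptable_v1_0.h hit above declares a trailing entries[1] with a "Dynamically allocate entries" comment. That is the pre-C99 variable-length-tail idiom: the struct is over-allocated and the one-element array is indexed past its declared bound. A sketch of how such a table is sized, with illustrative names rather than the ATOM layouts:

    #include <stdlib.h>

    struct record { unsigned short usVoltage; unsigned long ulClk; };

    struct dep_table {
            unsigned char ucNumEntries;
            struct record entries[1];       /* dynamically allocated tail */
    };

    static struct dep_table *dep_table_alloc(unsigned char n)
    {
            /* the header already contains one record, so add room for n - 1 */
            struct dep_table *t = calloc(1, sizeof(*t) +
                                         (size_t)(n - 1) * sizeof(struct record));

            if (t)
                    t->ucNumEntries = n;
            return t;
    }

Newer code in the same directory replaces this with a true flexible array member; see the struct_size() hits in smu10_hwmgr.c and smu8_hwmgr.c further down.
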
entries 340 drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h ATOM_Tonga_VCE_State_Record entries[1];
entries 419 drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h ATOM_Tonga_Hard_Limit_Record entries[1];
entries 181 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, table, i);
entries 184 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, vddc_lookup_pp_tables, i);
entries 341 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, clk_volt_pp_table, i);
entries 358 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c limits->sclk = le32_to_cpu(limitable->entries[0].ulSCLKLimit);
entries 359 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c limits->mclk = le32_to_cpu(limitable->entries[0].ulMCLKLimit);
entries 360 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c limits->vddc = le16_to_cpu(limitable->entries[0].usVddcLimit);
entries 361 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c limits->vddci = le16_to_cpu(limitable->entries[0].usVddciLimit);
entries 362 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c limits->vddgfx = le16_to_cpu(limitable->entries[0].usVddgfxLimit);
entries 394 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, mclk_table, i);
entries 397 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, mclk_dep_table, i);
entries 441 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, tonga_table, i);
entries 444 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, sclk_table, i);
entries 473 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, polaris_table, i);
entries 476 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, sclk_table, i);
entries 532 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, pcie_table, i);
entries 535 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, atom_pcie_table, i);
entries 572 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, pcie_table, i);
entries 575 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, atom_pcie_table, i);
entries 713 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, mm_dependency_table, i);
entries 716 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, mm_table, i);
entries 1248 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, vce_state_table, i);
entries 1251 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, sclk_dep_table,
entries 1255 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, mm_dep_table,
entries 1266 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, mclk_dep_table,
entries 1271 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c entries, mclk_dep_table,
entries 1314 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c ATOM_Tonga_State, entries,
entries 394 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c dep_table->entries[i].clk =
entries 395 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c ((unsigned long)table->entries[i].ucClockHigh << 16) |
entries 396 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c le16_to_cpu(table->entries[i].usClockLow);
entries 397 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c dep_table->entries[i].v =
entries 398 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
entries 421 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c clock_table->values[i] = (unsigned long)table->entries[i].clk;
entries 432 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c limits->sclk = ((unsigned long)table->entries[0].ucSclkHigh << 16) |
entries 433 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c le16_to_cpu(table->entries[0].usSclkLow);
entries 434 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c limits->mclk = ((unsigned long)table->entries[0].ucMclkHigh << 16) |
entries 435 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c le16_to_cpu(table->entries[0].usMclkLow);
entries 436 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c limits->vddc = (unsigned long)le16_to_cpu(table->entries[0].usVddc);
entries 437 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c limits->vddci = (unsigned long)le16_to_cpu(table->entries[0].usVddci);
entries 1127 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c &array->entries[table->entries[i].ucUVDClockInfoIndex];
entries 1128 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c uvd_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
entries 1129 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c uvd_table->entries[i].vclk = ((unsigned long)entry->ucVClkHigh << 16)
entries 1131 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c uvd_table->entries[i].dclk = ((unsigned long)entry->ucDClkHigh << 16)
entries 1158 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const VCEClockInfo *entry = &array->entries[table->entries[i].ucVCEClockInfoIndex];
entries 1160 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c vce_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
entries 1161 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c vce_table->entries[i].evclk = ((unsigned long)entry->ucEVClkHigh << 16)
entries 1163 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c vce_table->entries[i].ecclk = ((unsigned long)entry->ucECClkHigh << 16)
entries 1190 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c samu_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
entries 1191 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c samu_table->entries[i].samclk = ((unsigned long)table->entries[i].ucSAMClockHigh << 16)
entries 1192 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | le16_to_cpu(table->entries[i].usSAMClockLow);
entries 1218 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c acp_table->entries[i].v = (unsigned long)le16_to_cpu(table->entries[i].usVoltage);
entries 1219 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c acp_table->entries[i].acpclk = ((unsigned long)table->entries[i].ucACPClockHigh << 16)
entries 1220 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | le16_to_cpu(table->entries[i].usACPClockLow);
entries 1418 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c cac_leakage_table->entries[i].Vddc1 = le16_to_cpu(table->entries[i].usVddc1);
entries 1419 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c cac_leakage_table->entries[i].Vddc2 = le16_to_cpu(table->entries[i].usVddc2);
entries 1420 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c cac_leakage_table->entries[i].Vddc3 = le16_to_cpu(table->entries[i].usVddc3);
entries 1422 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c cac_leakage_table->entries[i].Vddc = le16_to_cpu(table->entries[i].usVddc);
entries 1423 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c cac_leakage_table->entries[i].Leakage = le32_to_cpu(table->entries[i].ulLeakageValue);
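
Aside: the processpptables.c hits above all rebuild a clock that ATOM stores split as an 8-bit high part plus a little-endian 16-bit low word. A self-contained sketch of the reassembly; the le16 helper is a stand-in for the kernel's le16_to_cpu on a little-endian host:

    #include <stdint.h>

    /* stand-in for le16_to_cpu(); a no-op on little-endian hosts */
    static inline uint16_t le16(uint16_t v)
    {
            return v;
    }

    /* ucClockHigh:usClockLow -> 24-bit clock, as in entries 394-396 above */
    static unsigned long atom_clock(uint8_t uc_high, uint16_t us_low)
    {
            return ((unsigned long)uc_high << 16) | le16(us_low);
    }
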
entries 1558 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table->entries[i].Voltage = (unsigned long)le16_to_cpu(ptable->entries[i].usVoltage);
entries 1559 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table->entries[i].Sclk = ((unsigned long)ptable->entries[i].ucSclkHigh << 16)
entries 1560 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | le16_to_cpu(ptable->entries[i].usSclkLow);
entries 1561 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c table->entries[i].Mclk = ((unsigned long)ptable->entries[i].ucMclkHigh << 16)
entries 1562 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | le16_to_cpu(ptable->entries[i].usMclkLow);
entries 1602 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const ATOM_PPLIB_VCE_State_Record *record = &vce_state_table->entries[i];
entries 1604 drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c const VCEClockInfo *vce_clock_info = &vce_clock_info_array->entries[record->ucVCEClockInfoIndex];
entries 140 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7),
entries 149 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
entries 150 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[0].v = 0;
entries 151 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
entries 152 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[1].v = 1;
entries 153 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
entries 154 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[2].v = 2;
entries 155 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
entries 156 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[3].v = 3;
entries 157 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
entries 158 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[4].v = 4;
entries 159 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
entries 160 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[5].v = 5;
entries 161 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
entries 162 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[6].v = 6;
entries 163 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
entries 164 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c table_clk_vlt->entries[7].v = 7;
entries 425 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c ptable->entries[i].clk = pclk_dependency_table->Freq * 100;
entries 426 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c ptable->entries[i].vol = pclk_dependency_table->Vol;
entries 720 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
entries 722 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c return data->clock_vol_info.vdd_dep_on_fclk->entries[
entries 867 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c mclk_table->entries[low].clk/100);
entries 871 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c mclk_table->entries[high].clk/100);
entries 919 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c mclk_table->entries[i].clk / 100,
entries 920 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c ((mclk_table->entries[i].clk / 100)
entries 942 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
entries 945 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[
entries 1029 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c if (pclk_vol_table->entries[i].clk) {
entries 1031 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c pclk_vol_table->entries[i].clk * 10;
entries 1034 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c pclk_vol_table->entries[i].clk) :
entries 1083 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c if (pclk_vol_table->entries[i].clk) {
entries 1084 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
entries 1085 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol;
entries 165 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h struct smu10_display_phy_info_entry entries[SMU10_MAX_DISPLAYPHY_IDS];
entries 184 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h struct smu10_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER];
entries 195 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h struct smu10_clock_voltage_dependency_record entries[1];
entries 235 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c voltage_table->entries[i].value =
entries 236 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c voltage_dependency_table->entries[i].v;
entries 237 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c voltage_table->entries[i].smio_low = 0;
entries 567 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c pcie_table->entries[i].gen_speed),
entries 569 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c pcie_table->entries[i].lane_width));
entries 695 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c allowed_vdd_sclk_table->entries[i].clk) {
entries 697 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c allowed_vdd_sclk_table->entries[i].clk;
entries 709 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c allowed_vdd_mclk_table->entries[i].clk) {
entries 711 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c allowed_vdd_mclk_table->entries[i].clk;
entries 719 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
entries 720 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
entries 731 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
entries 745 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
entries 788 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c dep_sclk_table->entries[i].clk) {
entries 791 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c dep_sclk_table->entries[i].clk;
entries 799 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
entries 805 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c dep_mclk_table->entries[i].clk) {
entries 807 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c dep_mclk_table->entries[i].clk;
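
Aside: the smu10_hwmgr.c hit at source line 140 above (and the matching smu8_hwmgr.c hit at line 277 below) allocates its clock/voltage table with kzalloc(struct_size(table_clk_vlt, entries, 7), ...). A userspace sketch of what struct_size() computes for that layout; the types are illustrative:

    #include <stdlib.h>

    struct clk_volt { unsigned long clk; unsigned long v; };

    struct clk_volt_table {
            unsigned long count;
            struct clk_volt entries[1];     /* one record lives in the header */
    };

    /*
     * struct_size(tbl, entries, 7) expands to
     * sizeof(*tbl) + 7 * sizeof(tbl->entries[0]); with the one-element
     * declaration above that leaves room for eight usable records (0..7),
     * matching the entries[0]..entries[7] initialisation in the hits.
     */
    static struct clk_volt_table *table_alloc(void)
    {
            return calloc(1, sizeof(struct clk_volt_table) +
                             7 * sizeof(struct clk_volt));
    }
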
entries 815 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
entries 829 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c struct phm_odn_performance_level *entries;
entries 839 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c entries = odn_table->odn_core_clock_dpm_levels.entries;
entries 841 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
entries 842 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c entries[i].enabled = true;
entries 843 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c entries[i].vddc = dep_sclk_table->entries[i].vddc;
entries 851 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c entries = odn_table->odn_memory_clock_dpm_levels.entries;
entries 853 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
entries 854 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c entries[i].enabled = true;
entries 855 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c entries[i].vddc = dep_mclk_table->entries[i].vddc;
entries 881 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c || min_vddc > dep_sclk_table->entries[0].vddc)
entries 882 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c min_vddc = dep_sclk_table->entries[0].vddc;
entries 885 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
entries 886 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
entries 907 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
entries 915 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
entries 926 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
entries 935 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
entries 1718 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (sclk_table->entries[j].clk == sclk &&
entries 1719 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c sclk_table->entries[j].cks_enable == 0) {
entries 1752 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (sclk_table->entries[j].clk == sclk &&
entries 1753 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c sclk_table->entries[j].cks_enable == 0) {
entries 1825 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c &lookup_table->entries[i].us_vdd, leakage_table);
entries 1860 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c voltage_id = sclk_table->entries[entry_id].vddInd;
entries 1861 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c sclk_table->entries[entry_id].vddgfx =
entries 1862 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
entries 1866 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c voltage_id = sclk_table->entries[entry_id].vddInd;
entries 1867 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c sclk_table->entries[entry_id].vddc =
entries 1868 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
entries 1873 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c voltage_id = mclk_table->entries[entry_id].vddInd;
entries 1874 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c mclk_table->entries[entry_id].vddc =
entries 1875 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
entries 1879 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c voltage_id = mm_table->entries[entry_id].vddcInd;
entries 1880 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c mm_table->entries[entry_id].vddc =
entries 1881 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
entries 1905 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (look_up_table->entries[i].us_vdd == record->us_vdd) {
entries 1906 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (look_up_table->entries[i].us_calculated == 1)
entries 1912 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c look_up_table->entries[i].us_calculated = 1;
entries 1913 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c look_up_table->entries[i].us_vdd = record->us_vdd;
entries 1914 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c look_up_table->entries[i].us_cac_low = record->us_cac_low;
entries 1915 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
entries 1916 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c look_up_table->entries[i].us_cac_high = record->us_cac_high;
entries 1937 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
entries 1938 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
entries 1939 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
entries 1941 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
entries 1942 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c sclk_table->entries[entry_id].vdd_offset;
entries 1944 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c sclk_table->entries[entry_id].vddc =
entries 1952 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
entries 1953 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c v_record.us_vdd = mclk_table->entries[entry_id].vddc +
entries 1954 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
entries 1956 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c v_record.us_vdd = mclk_table->entries[entry_id].vddc +
entries 1957 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c mclk_table->entries[entry_id].vdd_offset;
entries 1959 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
entries 1977 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
entries 1978 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c v_record.us_vdd = mm_table->entries[entry_id].vddc +
entries 1979 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
entries 1981 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c v_record.us_vdd = mm_table->entries[entry_id].vddc +
entries 1982 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c mm_table->entries[entry_id].vddgfx_offset;
entries 1985 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
entries 2006 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (lookup_table->entries[j].us_vdd <
entries 2007 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c lookup_table->entries[j - 1].us_vdd) {
entries 2008 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c tmp_voltage_lookup_record = lookup_table->entries[j - 1];
entries 2009 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c lookup_table->entries[j - 1] = lookup_table->entries[j];
entries 2010 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c lookup_table->entries[j] = tmp_voltage_lookup_record;
entries 2095 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
entries 2097 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
entries 2099 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
entries 2101 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
entries 2135 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
entries 2139 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
entries 2140 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
entries 2270 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
entries 2284 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
entries 2298 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
entries 2313 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
entries 2327 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
entries 2341 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
entries 2355 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
entries 2389 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c vddc = (uint32_t)(tab->entries[i].Vddc);
entries 2391 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c tab->entries[i].Vddc = (uint16_t)vddc;
entries 2476 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
entries 2477 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
entries 2480 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
entries 2482 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
entries 2484 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
entries 2487 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
entries 2488 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
entries 2492 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
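
Aside: the smu7_hwmgr.c hits at source lines 2006-2010 above are an in-place adjacent-swap sort that keeps the voltage lookup table ordered by us_vdd. A compact sketch of the same pass; the record type is illustrative:

    #include <stdint.h>

    struct vdd_record { uint16_t us_vdd; uint16_t us_cac_low; };

    /* bubble each new entry leftward until the table is ordered by us_vdd */
    static void sort_by_vdd(struct vdd_record *entries, int count)
    {
            struct vdd_record tmp;
            int i, j;

            for (i = 0; i < count - 1; i++)
                    for (j = i + 1; j > 0; j--)
                            if (entries[j].us_vdd < entries[j - 1].us_vdd) {
                                    tmp = entries[j - 1];
                                    entries[j - 1] = entries[j];
                                    entries[j] = tmp;
                            }
    }
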
entries 2757 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
entries 2758 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
entries 2765 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
entries 2775 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
entries 2776 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
entries 2783 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
entries 2935 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c table_info->vdd_dep_on_sclk->entries[count].clk) {
entries 2937 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c table_info->vdd_dep_on_sclk->entries[count].clk;
entries 2943 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
entries 3183 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c performance_level->memory_clock = mclk_dep_table->entries
entries 3186 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
entries 3189 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
entries 3198 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c performance_level->memory_clock = mclk_dep_table->entries
entries 3202 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
entries 3205 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
entries 3239 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (dep_mclk_table->entries[0].clk !=
entries 3243 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (dep_mclk_table->entries[0].vddci !=
entries 3387 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (dep_mclk_table->entries[0].clk !=
entries 3391 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c if (dep_mclk_table->entries[0].v !=
entries 3770 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
entries 3771 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
entries 3777 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
entries 3778 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
entries 4508 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c i, odn_sclk_table->entries[i].clock/100,
entries 4509 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c odn_sclk_table->entries[i].vddc);
entries 4517 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c i, odn_mclk_table->entries[i].clock/100,
entries 4518 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c odn_mclk_table->entries[i].vddc);
entries 4664 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
entries 4669 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c clocks->clock[i] = sclk_table->entries[i].clk * 10;
entries 4701 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
entries 4703 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c dep_mclk_table->entries[i].clk);
entries 4709 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c clocks->clock[i] = mclk_table->entries[i].clk * 10;
entries 4902 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
entries 4903 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
entries 4904 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
entries 4905 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
entries 4906 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
entries 179 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h phm_ppt_v1_clock_voltage_dependency_record entries[MAX_REGULAR_DPM_NUMBER];
entries 79 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (clock <= ptable->entries[i].ecclk)
entries 87 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (clock >= ptable->entries[i].ecclk)
entries 110 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (clock <= table->entries[i].clk)
entries 118 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (clock >= table->entries[i].clk)
entries 140 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (clock <= ptable->entries[i].vclk)
entries 148 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (clock >= ptable->entries[i].vclk)
entries 263 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table->sclk = dep_table->entries[dep_table->count-1].clk;
entries 265 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (uint16_t)dep_table->entries[dep_table->count-1].v);
entries 277 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7),
entries 286 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
entries 287 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[0].v = 0;
entries 288 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
entries 289 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[1].v = 1;
entries 290 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
entries 291 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[2].v = 2;
entries 292 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
entries 293 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[3].v = 3;
entries 294 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
entries 295 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[4].v = 4;
entries 296 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
entries 297 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[5].v = 5;
entries 298 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
entries 299 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[6].v = 6;
entries 300 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
entries 301 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c table_clk_vlt->entries[7].v = 7;
entries 478 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
entries 480 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
entries 491 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;
entries 495 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
entries 497 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (i < acp_table->count) ? acp_table->entries[i].acpclk : 0;
entries 509 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
entries 511 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
entries 521 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
entries 523 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
entries 534 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
entries 536 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
entries 562 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.soft_min_clk = table->entries[0].clk;
entries 563 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.hard_min_clk = table->entries[0].clk;
entries 568 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[level].clk;
entries 570 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[table->count - 1].clk;
entries 595 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[level].vclk;
entries 597 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[table->count - 1].vclk;
entries 622 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[level].ecclk;
entries 624 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[table->count - 1].ecclk;
entries 649 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[level].acpclk;
entries 651 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[table->count - 1].acpclk;
entries 692 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.soft_min_clk = table->entries[0].clk;
entries 696 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.soft_max_clk = table->entries[level].clk;
entries 698 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
entries 1151 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.soft_min_clk = table->entries[0].clk;
entries 1152 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c data->sclk_dpm.hard_min_clk = table->entries[0].clk;
entries 1153 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c hwmgr->pstate_sclk = table->entries[0].clk;
entries 1159 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[level].clk;
entries 1161 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clock = table->entries[table->count - 1].clk;
entries 1256 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c ptable->entries[ptable->count - 1].ecclk;
entries 1350 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
entries 1351 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
entries 1485 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c if (limits->vddc >= table->entries[i].v) {
entries 1486 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c info->level = table->entries[i].clk;
entries 1530 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c i, sclk_table->entries[i].clk / 100,
entries 1617 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clocks->clock[i] = table->entries[i].clk * 10;
entries 1645 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clocks->engine_max_clock = table->entries[level].clk;
entries 1647 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c clocks->engine_max_clock = table->entries[table->count - 1].clk;
entries 1702 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c sclk = table->entries[sclk_index].clk;
entries 1724 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c vclk = uvd_table->entries[uvd_index].vclk;
entries 1736 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c dclk = uvd_table->entries[uvd_index].dclk;
entries 1748 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c ecclk = vce_table->entries[vce_index].ecclk;
entries 1851 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c ptable->entries[ptable->count - 1].vclk;
entries 96 drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.h struct smu8_display_phy_info_entry entries[SMU8_MAX_DISPLAYPHY_IDS];
entries 223 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c vvalue = vol_table->entries[i].value;
entries 227 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (vvalue == table->entries[j].value) {
entries 234 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->entries[table->count].value = vvalue;
entries 235 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table->entries[table->count].smio_low =
entries 236 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c vol_table->entries[i].smio_low;
entries 264 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c vol_table->entries[i].value = dep_table->entries[i].mvdd;
entries 265 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c vol_table->entries[i].smio_low = 0;
entries 292 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c vol_table->entries[i].value = dep_table->entries[i].vddci;
entries 293 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c vol_table->entries[i].smio_low = 0;
entries 320 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
entries 321 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c vol_table->entries[i].smio_low = 0;
entries 338 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c vol_table->entries[i] = vol_table->entries[i + diff];
entries 401 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (lookup_table->entries[i].us_vdd >= voltage)
entries 421 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (voltage_table->entries[i].value >= voltage)
entries 434 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (vddci_table->entries[i].value >= vddci)
entries 435 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c return vddci_table->entries[i].value;
vddci_table->entries[i].value; entries 439 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c return vddci_table->entries[i-1].value; entries 472 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd; entries 473 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id) entries 482 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk; entries 507 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW; entries 508 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table_clk_vlt->entries[0].v = 0; entries 509 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW; entries 510 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table_clk_vlt->entries[1].v = 720; entries 511 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL; entries 512 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table_clk_vlt->entries[2].v = 810; entries 513 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; entries 514 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c table_clk_vlt->entries[3].v = 900; entries 549 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (dal_power_level == table->entries[i].clk) { entries 550 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c req_vddc = table->entries[i].v; entries 557 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c if (req_vddc <= vddc_table->entries[i].vddc) { entries 558 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE); entries 689 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c dep_table->entries[i].clk = allowed_dep_table->entries[i].clk; entries 690 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd; entries 691 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset; entries 692 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc; entries 693 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx; entries 694 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci; entries 695 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd; entries 696 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c dep_table->entries[i].phases = allowed_dep_table->entries[i].phases; entries 697 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable; entries 698 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset; entries 325 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd; entries 340 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc; entries 342 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c odn_table->min_vddc = dep_table[0]->entries[0].vddc; entries 345 
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ? entries 347 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c od_table[2]->entries[i].clk; entries 348 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ? entries 350 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c od_table[2]->entries[i].vddc; entries 530 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd; entries 531 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id) entries 539 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk; entries 571 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (socclk_table->entries[j].clk == sclk && entries 572 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c socclk_table->entries[j].cks_enable == 0) { entries 643 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c &lookup_table->entries[i].us_vdd, leakage_table); entries 682 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c voltage_id = vdt->entries[entry_id].vddInd; entries 683 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c vdt->entries[entry_id].vddc = entries 684 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table_info->vddc_lookup_table->entries[voltage_id].us_vdd; entries 689 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c voltage_id = mm_table->entries[entry_id].vddcInd; entries 690 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c mm_table->entries[entry_id].vddc = entries 691 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table_info->vddc_lookup_table->entries[voltage_id].us_vdd; entries 695 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c voltage_id = mclk_table->entries[entry_id].vddInd; entries 696 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c mclk_table->entries[entry_id].vddc = entries 697 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table_info->vddc_lookup_table->entries[voltage_id].us_vdd; entries 698 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c voltage_id = mclk_table->entries[entry_id].vddciInd; entries 699 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c mclk_table->entries[entry_id].vddci = entries 700 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table_info->vddci_lookup_table->entries[voltage_id].us_vdd; entries 701 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c voltage_id = mclk_table->entries[entry_id].mvddInd; entries 702 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c mclk_table->entries[entry_id].mvdd = entries 703 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table_info->vddmem_lookup_table->entries[voltage_id].us_vdd; entries 725 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (lookup_table->entries[j].us_vdd < entries 726 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c lookup_table->entries[j - 1].us_vdd) { entries 727 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c tmp_voltage_lookup_record = lookup_table->entries[j - 1]; entries 728 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c lookup_table->entries[j - 1] = lookup_table->entries[j]; entries 729 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c lookup_table->entries[j] = tmp_voltage_lookup_record; entries 788 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c 
allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; entries 790 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; entries 792 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; entries 794 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; entries 1025 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c vvalue = vol_table->entries[i].value; entries 1029 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (vvalue == table->entries[j].value) { entries 1036 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table->entries[table->count].value = vvalue; entries 1037 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table->entries[table->count].smio_low = entries 1038 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c vol_table->entries[i].smio_low; entries 1064 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c vol_table->entries[i].value = dep_table->entries[i].mvdd; entries 1065 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c vol_table->entries[i].smio_low = 0; entries 1091 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c vol_table->entries[i].value = dep_table->entries[i].vddci; entries 1092 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c vol_table->entries[i].smio_low = 0; entries 1117 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c vol_table->entries[i].value = dep_table->entries[i].vddc; entries 1118 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c vol_table->entries[i].smio_low = 0; entries 1142 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c vol_table->entries[i] = vol_table->entries[i + diff]; entries 1233 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_table->entries[i].clk) { entries 1235 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_table->entries[i].clk; entries 1261 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c bios_pcie_table->entries[i].gen_speed; entries 1268 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c bios_pcie_table->entries[i].lane_width); entries 1274 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c bios_pcie_table->entries[i].pcie_sclk; entries 1369 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_mm_table->entries[i].eclk) { entries 1371 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_mm_table->entries[i].eclk; entries 1385 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_mm_table->entries[i].vclk) { entries 1387 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_mm_table->entries[i].vclk; entries 1399 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_mm_table->entries[i].dclk) { entries 1401 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_mm_table->entries[i].dclk; entries 1576 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (dep_on_sclk->entries[i].clk == gfx_clock) entries 1629 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (dep_on_soc->entries[i].clk >= soc_clock) entries 1635 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (dep_on_soc->entries[i].clk == soc_clock) entries 1651 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd); entries 1734 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd); entries 1777 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (dep_on_mclk->entries[i].clk == mem_clock) 
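The vega10_hwmgr.c hits at lines 1025-1038 above spell out the voltage-table trimming loop several of these hwmgrs share: each source value is appended to the output table only if no earlier output entry already holds it. A minimal sketch of that dedup follows, with slimmed-down stand-in types (the real tables are pp_atomctrl_voltage_table-like); it is an illustration, not the driver's literal code.

        #include <linux/types.h>

        struct voltage_entry { u32 value; u32 smio_low; };

        struct voltage_table {
                u32 count;
                struct voltage_entry entries[32];   /* capacity is illustrative */
        };

        /* Copy src into dst, skipping any voltage dst already contains. */
        static void trim_voltage_table(const struct voltage_table *src,
                                       struct voltage_table *dst)
        {
                u32 i, j;

                dst->count = 0;
                for (i = 0; i < src->count; i++) {
                        u32 vvalue = src->entries[i].value;
                        bool found = false;

                        for (j = 0; j < dst->count; j++) {
                                if (vvalue == dst->entries[j].value) {
                                        found = true;
                                        break;
                                }
                        }
                        if (!found) {
                                dst->entries[dst->count].value = vvalue;
                                dst->entries[dst->count].smio_low =
                                        src->entries[i].smio_low;
                                dst->count++;
                        }
                }
        }
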
entries 1791 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd)); entries 1793 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c (uint8_t)(dep_on_mclk->entries[i].vddInd); entries 1887 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c clk = (uint16_t)(dep_table->entries[i].clk / 100); entries 1889 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c entries[dep_table->entries[i].vddInd].us_vdd; entries 1941 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (dep_table->entries[i].eclk == eclock) entries 1942 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c *current_soc_vol = dep_table->entries[i].vddcInd; entries 2063 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (dep_table->entries[i].vclk == entries 2065 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_table->entries[i].dclk == entries 2068 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_table->entries[i].vddcInd; entries 2075 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd; entries 2093 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c pp_table->CksEnable[i] = dep_table->entries[i].cks_enable; entries 2094 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset entries 2177 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset)); entries 2469 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { entries 2478 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { entries 3073 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c performance_level->soc_clock = socclk_dep_table->entries entries 3075 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c performance_level->gfx_clock = gfxclk_dep_table->entries entries 3077 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c performance_level->mem_clock = mclk_dep_table->entries entries 3082 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c performance_level->soc_clock = socclk_dep_table->entries entries 3085 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c performance_level->gfx_clock = gfxclk_dep_table->entries entries 3088 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; entries 3092 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c performance_level->mem_clock = mclk_dep_table->entries entries 3198 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table_info->vdd_dep_on_sclk->entries[count].clk) { entries 3200 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c table_info->vdd_dep_on_sclk->entries[count].clk; entries 3206 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; entries 3258 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if ((data->mclk_latency_table.entries[i].latency <= latency) && entries 3259 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c (data->mclk_latency_table.entries[i].frequency >= entries 3261 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c (data->mclk_latency_table.entries[i].frequency <= entries 3263 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c mclk = data->mclk_latency_table.entries[i].frequency; entries 3343 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dpm_table->gfx_table.dpm_levels[count].value = 
odn_clk_table->entries[count].clk; entries 3349 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk; entries 3484 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1; entries 3903 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if(mclk_table->entries[i].clk >= frequency) entries 4043 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk; entries 4044 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk; entries 4219 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (dep_table->entries[i].clk) { entries 4221 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_table->entries[i].clk * 10; entries 4240 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (dep_table->entries[i].clk) { entries 4243 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_table->entries[i].clk * 10; entries 4244 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c data->mclk_latency_table.entries[j].frequency = entries 4245 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_table->entries[i].clk; entries 4247 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c data->mclk_latency_table.entries[j].latency = 25; entries 4264 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10; entries 4280 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10; entries 4340 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10; entries 4342 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c entries[dep_table->entries[i].vddInd].us_vdd); entries 4548 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c i, podn_vdd_dep->entries[i].clk / 100, entries 4549 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep->entries[i].vddc); entries 4558 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c i, podn_vdd_dep->entries[i].clk/100, entries 4559 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep->entries[i].vddc); entries 5102 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; entries 5107 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (od_vddc_lookup_table->entries[j].us_vdd > entries 5108 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep->entries[i].vddc) entries 5113 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c od_vddc_lookup_table->entries[j].us_vdd = entries 5114 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep->entries[i].vddc; entries 5117 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep->entries[i].vddInd = j; entries 5121 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[podn_vdd_dep->count-1].vddInd && entries 5122 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count-1].clk) { entries 5125 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c (dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk); i++) { entries 5126 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep_on_socclk->entries[i].clk = 
podn_vdd_dep->entries[podn_vdd_dep->count-1].clk; entries 5127 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk; entries 5131 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c dpm_table->dpm_levels[i].value = dep_table->entries[i].clk; entries 5132 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep_on_socclk->entries[i].vddc = dep_table->entries[i].vddc; entries 5133 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep_on_socclk->entries[i].vddInd = dep_table->entries[i].vddInd; entries 5134 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep_on_socclk->entries[i].clk = dep_table->entries[i].clk; entries 5137 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk < entries 5138 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk) { entries 5140 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = entries 5141 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk; entries 5143 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk; entries 5145 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd < entries 5146 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd) { entries 5148 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = entries 5149 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd; entries 5211 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep_table->entries[input_level].clk = input_clk; entries 5212 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c podn_vdd_dep_table->entries[input_level].vddc = input_vol; entries 221 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h struct vega10_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER]; entries 286 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h struct phm_ppt_v1_clock_voltage_dependency_record entries[MAX_REGULAR_DPM_NUMBER]; entries 291 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h struct phm_ppt_v1_voltage_lookup_record entries[MAX_REGULAR_DPM_NUMBER]; entries 166 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_GFXCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ entries 172 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ entries 178 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ entries 184 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ entries 190 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ entries 196 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ entries 202 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. 
*/ entries 216 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_MM_Dependency_Record entries[1]; /* Dynamically allocate entries */ entries 228 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_PCIE_Record entries[1]; /* Dynamically allocate entries. */ entries 238 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries */ entries 332 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_VCE_State_Record entries[1]; entries 435 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h ATOM_Vega10_Hard_Limit_Record entries[1]; entries 320 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; entries 365 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mm_dependency_record = &mm_dependency_table->entries[i]; entries 366 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd; entries 367 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mm_table->entries[i].samclock = entries 369 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mm_table->entries[i].eclk = le32_to_cpu(mm_dependency_record->ulEClk); entries 370 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mm_table->entries[i].vclk = le32_to_cpu(mm_dependency_record->ulVClk); entries 371 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mm_table->entries[i].dclk = le32_to_cpu(mm_dependency_record->ulDClk); entries 592 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].vddInd = entries 593 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_dep_table->entries[i].ucVddInd; entries 594 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].clk = entries 595 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c le32_to_cpu(clk_dep_table->entries[i].ulClk); entries 626 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mclk_table->entries[i].vddInd = entries 627 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mclk_dep_table->entries[i].ucVddInd; entries 628 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mclk_table->entries[i].vddciInd = entries 629 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mclk_dep_table->entries[i].ucVddciInd; entries 630 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mclk_table->entries[i].mvddInd = entries 631 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mclk_dep_table->entries[i].ucVddMemInd; entries 632 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c mclk_table->entries[i].clk = entries 633 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c le32_to_cpu(mclk_dep_table->entries[i].ulMemClk); entries 668 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].vddInd = entries 669 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_dep_table->entries[i].ucVddInd; entries 670 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].clk = entries 671 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c le32_to_cpu(clk_dep_table->entries[i].ulClk); entries 672 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].cks_enable = entries 673 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c 
(((le16_to_cpu(clk_dep_table->entries[i].usCKSVOffsetandDisable) & 0x8000) entries 675 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].cks_voffset = entries 676 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c le16_to_cpu(clk_dep_table->entries[i].usCKSVOffsetandDisable) & 0x7F; entries 677 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].sclk_offset = entries 678 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c le16_to_cpu(clk_dep_table->entries[i].usAVFSOffset); entries 681 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c patom_record_v2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)clk_dep_table->entries; entries 683 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].vddInd = entries 685 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].clk = entries 687 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].cks_enable = entries 690 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].cks_voffset = entries 692 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].sclk_offset = entries 733 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].vddInd = entries 734 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_dep_table->entries[i].ucVddInd; entries 735 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].clk = entries 736 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c le32_to_cpu(clk_dep_table->entries[i].ulClk); entries 771 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_dep_table->entries[clk_dep_table->ucNumEntries - 1].ulClk < 90000) entries 790 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].vddInd = entries 791 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_dep_table->entries[i].ucVddInd; entries 792 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].clk = entries 793 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c le32_to_cpu(clk_dep_table->entries[i].ulClk); entries 797 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].vddInd = clk_dep_table->entries[i-1].ucVddInd; entries 798 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c clk_table->entries[i].clk = 90000; entries 841 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c pcie_table->entries[i].gen_speed = entries 842 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c atom_pcie_table->entries[i].ucPCIEGenSpeed; entries 843 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c pcie_table->entries[i].lane_width = entries 844 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c atom_pcie_table->entries[i].ucPCIELaneWidth; entries 845 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c pcie_table->entries[i].pcie_sclk = entries 846 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c atom_pcie_table->entries[i].ulLCLK; entries 863 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c limits->sclk = le32_to_cpu(limit_table->entries[0].ulSOCCLKLimit); entries 864 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c limits->mclk = le32_to_cpu(limit_table->entries[0].ulMCLKLimit); entries 865 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c limits->gfxclk = 
le32_to_cpu(limit_table->entries[0].ulGFXCLKLimit); entries 866 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c limits->vddc = le16_to_cpu(limit_table->entries[0].usVddcLimit); entries 867 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c limits->vddci = le16_to_cpu(limit_table->entries[0].usVddciLimit); entries 868 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c limits->vddmem = le16_to_cpu(limit_table->entries[0].usVddMemLimit); entries 895 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk; entries 1086 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c table->entries[i].us_vdd = entries 1087 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c le16_to_cpu(vddc_lookup_pp_tables->entries[i].usVdd); entries 1759 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100; entries 1761 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c data->mclk_latency_table.entries[i].latency = entries 2240 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c if (data->mclk_latency_table.entries[i].latency <= latency) { entries 115 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h uint32_t entries[MAX_REGULAR_DPM_NUMBER]; entries 212 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h struct vega12_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER]; entries 285 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h entries[MAX_REGULAR_DPM_NUMBER]; entries 2797 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c data->mclk_latency_table.entries[i].frequency = entries 2800 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c data->mclk_latency_table.entries[i].latency = entries 3699 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c if (data->mclk_latency_table.entries[i].latency <= latency) { entries 167 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h uint32_t entries[MAX_REGULAR_DPM_NUMBER]; entries 272 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h struct vega20_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER]; entries 347 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h entries[MAX_REGULAR_DPM_NUMBER]; entries 322 drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM]; entries 396 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h struct phm_odn_performance_level entries[8]; entries 126 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h struct phm_clock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ entries 143 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h struct phm_uvd_clock_voltage_dependency_record entries[1]; entries 153 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h struct phm_acp_clock_voltage_dependency_record entries[1]; entries 164 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h struct phm_phase_shedding_limits_record entries[1]; entries 169 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h struct phm_vceclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ entries 174 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h struct phm_uvdclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ entries 179 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h struct phm_samuclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. 
*/ entries 184 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h struct phm_acpclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ entries 189 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h struct phm_vce_clock_voltage_dependency_record entries[1]; entries 386 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h union phm_cac_leakage_record entries[1]; entries 397 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h struct phm_samu_clock_voltage_dependency_record entries[1]; entries 507 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h struct phm_vq_budgeting_record entries[1]; entries 179 drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h SMU71_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; entries 166 drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h SMU72_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; entries 156 drivers/gpu/drm/amd/powerplay/inc/smu73_discrete.h SMU73_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; entries 179 drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h SMU74_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; entries 192 drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h SMU75_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; entries 235 drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS]; entries 335 drivers/gpu/drm/amd/powerplay/smu_v11_0.c struct smc_soft_pptable_entry *entries; entries 340 drivers/gpu/drm/amd/powerplay/smu_v11_0.c entries = (struct smc_soft_pptable_entry *) entries 344 drivers/gpu/drm/amd/powerplay/smu_v11_0.c if (le32_to_cpu(entries[i].id) == pptable_id) { entries 345 drivers/gpu/drm/amd/powerplay/smu_v11_0.c *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes)); entries 346 drivers/gpu/drm/amd/powerplay/smu_v11_0.c *size = le32_to_cpu(entries[i].ppt_size_bytes); entries 285 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (allowed_clock_voltage_table->entries[i].clk >= clock) { entries 286 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c *vol = allowed_clock_voltage_table->entries[i].v; entries 291 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c *vol = allowed_clock_voltage_table->entries[i - 1].v; entries 379 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (sclk < pl->entries[i].Sclk) { entries 592 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1); entries 593 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2); entries 594 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3); entries 596 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc); entries 597 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage); entries 616 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value); entries 781 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { entries 784 
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; entries 785 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE); entries 788 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; entries 789 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); entries 797 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { entries 800 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; entries 801 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE; entries 804 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; entries 805 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); entries 849 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c &(data->vddc_voltage_table.entries[count]), entries 856 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->Smio[count] |= data->vddc_voltage_table.entries[count].smio_low; entries 857 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SmioMaskVddcVid |= data->vddc_voltage_table.entries[count].smio_low; entries 879 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c &(data->vddci_voltage_table.entries[count]), entries 884 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->Smio[count] |= data->vddci_voltage_table.entries[count].smio_low; entries 885 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SmioMaskVddciVid |= data->vddci_voltage_table.entries[count].smio_low; entries 907 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c &(data->mvdd_voltage_table.entries[count]), entries 912 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->Smio[count] |= data->mvdd_voltage_table.entries[count].smio_low; entries 913 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->SmioMaskMvddVid |= data->mvdd_voltage_table.entries[count].smio_low; entries 965 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) entries 969 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage); entries 972 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) entries 976 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) entries 1163 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (memory_clock < pl->entries[i].Mclk) { entries 1358 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) { entries 1360 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c voltage->Voltage = data->mvdd_voltage_table.entries[i].value; entries 1529 
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c uvd_table->entries[count].vclk; entries 1531 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c uvd_table->entries[count].dclk; entries 1533 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c uvd_table->entries[count].v * VOLTAGE_SCALE; entries 1570 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->VceLevel[count].Frequency = vce_table->entries[count].evclk; entries 1572 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c vce_table->entries[count].v * VOLTAGE_SCALE; entries 1602 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk; entries 1603 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c table->AcpLevel[count].MinVoltage = acp_table->entries[count].v; entries 1663 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c &arb_regs.entries[i][j]); entries 1861 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk entries 1871 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk entries 2879 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (uvd_table->entries[i].v <= max_vddc) entries 2910 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (vce_table->entries[i].v <= max_vddc) entries 370 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c if (dep_table->entries[i].clk >= clock) { entries 371 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c *voltage |= (dep_table->entries[i].vddc * entries 376 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c else if (dep_table->entries[i].vddci) entries 377 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c *voltage |= (dep_table->entries[i].vddci * entries 381 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (dep_table->entries[i].vddc - entries 389 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c else if (dep_table->entries[i].mvdd) entries 390 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c *mvdd = (uint32_t) dep_table->entries[i].mvdd * entries 399 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; entries 404 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c else if (dep_table->entries[i-1].vddci) { entries 406 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (dep_table->entries[i].vddc - entries 413 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c else if (dep_table->entries[i].mvdd) entries 414 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; entries 774 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c data->vddc_voltage_table.entries[count].value); entries 776 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c convert_to_vid(lookup_table->entries[index].us_cac_low); entries 778 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c convert_to_vid(lookup_table->entries[index].us_cac_high); entries 1286 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { entries 1287 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; entries 1437 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->VceLevel[count].Frequency = mm_table->entries[count].eclk; entries 1440 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; entries 1442 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c 
((mm_table->entries[count].vddc - VDDC_VDDCI_DELTA) * entries 1476 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcpLevel[count].Frequency = mm_table->entries[count].aclk; entries 1477 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc * entries 1479 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - entries 1542 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c &arb_regs.entries[i][j]); entries 1574 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; entries 1575 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; entries 1576 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * entries 1578 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc - entries 1647 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c if (table_info->vdd_dep_on_sclk->entries[level].clk >= entries 1656 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c if (table_info->vdd_dep_on_mclk->entries[level].clk >= entries 1708 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c sclk_table->entries[i].cks_enable << i; entries 1710 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / entries 1711 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); entries 1713 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / entries 1714 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); entries 1717 drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); entries 405 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1); entries 406 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2); entries 427 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value); entries 518 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (allowed_clock_voltage_table->entries[i].clk >= clock) { entries 519 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c *vol = allowed_clock_voltage_table->entries[i].v; entries 525 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c *vol = allowed_clock_voltage_table->entries[i - 1].v; entries 555 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { entries 558 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; entries 559 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE); entries 562 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; entries 563 
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); entries 575 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) { entries 578 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE; entries 579 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE; entries 582 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE; entries 583 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE); entries 627 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c &(data->vddc_voltage_table.entries[count]), entries 633 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low; entries 654 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c &(data->vddci_voltage_table.entries[count]), entries 658 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low; entries 679 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c &(data->mvdd_voltage_table.entries[count]), entries 683 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low; entries 734 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) entries 738 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage); entries 741 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) entries 745 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) entries 883 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (sclk < pl->entries[i].Sclk) { entries 1218 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (memory_clock < pl->entries[i].Mclk) { entries 1405 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) { entries 1407 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c voltage->Voltage = data->mvdd_voltage_table.entries[i].value; entries 1626 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c &arb_regs.entries[i][j]); entries 1829 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk entries 1839 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk entries 368 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c if (dep_table->entries[i].clk >= clock) { entries 369 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c *voltage |= (dep_table->entries[i].vddc * entries 374 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c else if (dep_table->entries[i].vddci) 
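The ci_smumgr.c and iceland_smumgr.c hits around lines 781-805 and 555-583 above all share one shape: scan the vddc-dependency table for the requested voltage, emit the CAC leakage pair stored at the same index, and clamp to the table's last entry when nothing matches. A condensed sketch, using invented cut-down types; VOLTAGE_SCALE mirrors the constant used in the code above.

        #include <linux/types.h>

        #define VOLTAGE_SCALE 4   /* mV -> SMC units, per the code above */

        struct cac_leakage_entry { u16 Vddc; u32 Leakage; };
        struct cac_leakage_table { u32 count; struct cac_leakage_entry entries[16]; };

        struct clk_volt_entry { u32 clk; u16 v; };
        struct clk_volt_table { u32 count; struct clk_volt_entry entries[16]; };

        /* Look up the leakage pair for vddc; fall back to the last entry on a miss. */
        static void leakage_for_vddc(const struct clk_volt_table *dep,
                                     const struct cac_leakage_table *cac,
                                     u16 vddc, u16 *lo, u16 *hi)
        {
                u32 i;

                for (i = 0; i < dep->count && i < cac->count; i++) {
                        if (vddc <= dep->entries[i].v) {
                                *lo = cac->entries[i].Vddc * VOLTAGE_SCALE;
                                *hi = (u16)(cac->entries[i].Leakage * VOLTAGE_SCALE);
                                return;
                        }
                }

                /* No match: clamp to the highest-voltage pair, as the code above does. */
                *lo = cac->entries[cac->count - 1].Vddc * VOLTAGE_SCALE;
                *hi = (u16)(cac->entries[cac->count - 1].Leakage * VOLTAGE_SCALE);
        }
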
entries 375 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c *voltage |= (dep_table->entries[i].vddci * entries 379 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c (dep_table->entries[i].vddc - entries 387 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c else if (dep_table->entries[i].mvdd) entries 388 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c *mvdd = (uint32_t) dep_table->entries[i].mvdd * entries 397 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; entries 402 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c else if (dep_table->entries[i-1].vddci) { entries 404 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c (dep_table->entries[i].vddc - entries 411 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c else if (dep_table->entries[i].mvdd) entries 412 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; entries 658 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE); entries 663 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c data->mvdd_voltage_table.entries[level].smio_low; entries 686 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE); entries 689 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low; entries 715 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c data->vddc_voltage_table.entries[count].value); entries 716 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low); entries 717 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid); entries 718 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high); entries 1185 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { entries 1186 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; entries 1303 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->VceLevel[count].Frequency = mm_table->entries[count].eclk; entries 1306 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; entries 1310 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); entries 1312 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; entries 1375 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c &arb_regs.entries[i][j]); entries 1410 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; entries 1411 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; entries 1412 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * entries 1417 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c 
mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); entries 1419 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; entries 1492 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c if (table_info->vdd_dep_on_sclk->entries[level].clk >= entries 1501 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c if (table_info->vdd_dep_on_mclk->entries[level].clk >= entries 1558 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c sclk_table->entries[i].cks_enable << i; entries 1560 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \ entries 1561 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000)); entries 1562 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \ entries 1563 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c (2522480 - sclk_table->entries[i].clk/100 * 115764/100)); entries 1565 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \ entries 1566 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000))); entries 1567 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \ entries 1568 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000))); entries 1573 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c sclk_table->entries[i].cks_voffset) * 100 + 624) / 625); entries 1744 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); entries 1745 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); entries 2254 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk; entries 261 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c if (allowed_clock_voltage_table->entries[i].clk >= clock) { entries 264 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c allowed_clock_voltage_table->entries[i].vddgfx); entries 267 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c allowed_clock_voltage_table->entries[i].vddc); entries 269 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c if (allowed_clock_voltage_table->entries[i].vddci) entries 271 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c phm_get_voltage_id(&data->vddci_voltage_table, allowed_clock_voltage_table->entries[i].vddci); entries 275 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c allowed_clock_voltage_table->entries[i].vddc - VDDC_VDDCI_DELTA); entries 278 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c if (allowed_clock_voltage_table->entries[i].mvdd) entries 279 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i].mvdd; entries 288 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c allowed_clock_voltage_table->entries[i-1].vddgfx); entries 290 
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c allowed_clock_voltage_table->entries[i-1].vddc); entries 292 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c if (allowed_clock_voltage_table->entries[i-1].vddci) entries 294 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c allowed_clock_voltage_table->entries[i-1].vddci); entries 296 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c if (allowed_clock_voltage_table->entries[i-1].mvdd) entries 297 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c *mvdd = (uint32_t) allowed_clock_voltage_table->entries[i-1].mvdd; entries 312 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c PP_HOST_TO_SMC_US(data->vddc_voltage_table.entries[count].value * VOLTAGE_SCALE); entries 329 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c PP_HOST_TO_SMC_US(data->vddgfx_voltage_table.entries[count].value * VOLTAGE_SCALE); entries 346 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); entries 349 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); entries 354 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c data->vddci_voltage_table.entries[count].smio_low; entries 356 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[count].value * VOLTAGE_SCALE); entries 376 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); entries 381 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c data->mvdd_voltage_table.entries[count].smio_low; entries 413 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c data->vddc_voltage_table.entries[count].value); entries 415 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); entries 417 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); entries 419 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); entries 426 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_mid)); entries 428 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c convert_to_vid(vddgfx_lookup_table->entries[index].us_cac_high); entries 433 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c data->vddc_voltage_table.entries[count].value); entries 435 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c convert_to_vid(vddc_lookup_table->entries[index].us_cac_low); entries 437 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c convert_to_vid(vddc_lookup_table->entries[index].us_cac_mid); entries 439 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c convert_to_vid(vddc_lookup_table->entries[index].us_cac_high); entries 1154 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { entries 1157 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c data->mvdd_voltage_table.entries[i].value; entries 1324 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; entries 1325 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; entries 1328 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c mm_table->entries[count].vddc); entries 1332 
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c mm_table->entries[count].vddgfx) : 0; entries 1335 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); entries 1385 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c mm_table->entries[count].eclk; entries 1388 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c mm_table->entries[count].vddc); entries 1392 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c mm_table->entries[count].vddgfx) : 0; entries 1395 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); entries 1430 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c pptable_info->mm_dep_table->entries[count].aclk; entries 1433 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c mm_table->entries[count].vddc); entries 1437 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c mm_table->entries[count].vddgfx) : 0; entries 1440 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); entries 1502 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c &arb_regs.entries[i][j]); entries 1622 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c sclk_table->entries[i].cks_enable << i; entries 1625 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (sclk_table->entries[i].clk/100) / 10000) * 1000 / entries 1626 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (8730 - (5301 * (sclk_table->entries[i].clk/100) / 1000))); entries 1628 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (sclk_table->entries[i].clk/100) / 100000) * 1000 / entries 1629 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (6146 - (3193 * (sclk_table->entries[i].clk/100) / 1000))); entries 1632 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / entries 1633 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); entries 1635 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / entries 1636 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); entries 1640 drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); entries 408 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk; entries 459 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE); entries 464 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c data->mvdd_voltage_table.entries[level].smio_low; entries 487 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE); entries 490 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low; entries 516 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c data->vddc_voltage_table.entries[count].value); entries 518 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c convert_to_vid(lookup_table->entries[index].us_cac_low); entries 520 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c convert_to_vid(lookup_table->entries[index].us_cac_mid); entries 522 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c convert_to_vid(lookup_table->entries[index].us_cac_high); entries 615 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c if 
(dep_table->entries[i].clk >= clock) { entries 616 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c *voltage |= (dep_table->entries[i].vddc * entries 621 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c else if (dep_table->entries[i].vddci) entries 622 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c *voltage |= (dep_table->entries[i].vddci * entries 626 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c (dep_table->entries[i].vddc - entries 634 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c else if (dep_table->entries[i].mvdd) entries 635 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c *mvdd = (uint32_t) dep_table->entries[i].mvdd * entries 644 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; entries 646 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c (dep_table->entries[i - 1].vddc - entries 652 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c else if (dep_table->entries[i - 1].vddci) entries 653 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c *voltage |= (dep_table->entries[i - 1].vddci * entries 660 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c else if (dep_table->entries[i].mvdd) entries 661 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; entries 1093 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { entries 1094 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; entries 1220 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->VceLevel[count].Frequency = mm_table->entries[count].eclk; entries 1223 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; entries 1227 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); entries 1229 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; entries 1301 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c &arb_regs.entries[i][j]); entries 1334 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; entries 1335 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; entries 1337 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; entries 1341 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); entries 1343 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; entries 1416 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c if (table_info->vdd_dep_on_sclk->entries[level].clk >= entries 1425 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c if (table_info->vdd_dep_on_mclk->entries[level].clk >= entries 1516 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c sclk_table->entries[i].cks_enable << i; entries 1517 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * entries 1519 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000)); entries 1520 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c volt_with_cks = (uint32_t)((2797202000U + 
sclk_table->entries[i].clk/100 * entries 1522 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c (2522480 - sclk_table->entries[i].clk/100 * 115764/100)); entries 1526 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c sclk_table->entries[i].cks_voffset) * 100 + 624) / 625); entries 1637 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); entries 1640 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c (sclk_table->entries[i].sclk_offset) / 100); entries 2168 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (smu_dpm_ctx->mclk_latency_table->entries[i].latency <= latency) { entries 184 drivers/gpu/drm/drm_debugfs_crc.c kfree(crc->entries); entries 186 drivers/gpu/drm/drm_debugfs_crc.c crc->entries = NULL; entries 197 drivers/gpu/drm/drm_debugfs_crc.c struct drm_crtc_crc_entry *entries = NULL; entries 224 drivers/gpu/drm/drm_debugfs_crc.c entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL); entries 225 drivers/gpu/drm/drm_debugfs_crc.c if (!entries) entries 231 drivers/gpu/drm/drm_debugfs_crc.c crc->entries = entries; entries 239 drivers/gpu/drm/drm_debugfs_crc.c kfree(entries); entries 310 drivers/gpu/drm/drm_debugfs_crc.c entry = &crc->entries[crc->tail]; entries 399 drivers/gpu/drm/drm_debugfs_crc.c if (!crc->entries) { entries 419 drivers/gpu/drm/drm_debugfs_crc.c entry = &crc->entries[head]; entries 109 drivers/gpu/drm/drm_mm.c unsigned long entries[STACKDEPTH]; entries 112 drivers/gpu/drm/drm_mm.c n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); entries 115 drivers/gpu/drm/drm_mm.c node->stack = stack_depot_save(entries, n, GFP_NOWAIT); entries 121 drivers/gpu/drm/drm_mm.c unsigned long *entries; entries 136 drivers/gpu/drm/drm_mm.c nr_entries = stack_depot_fetch(node->stack, &entries); entries 137 drivers/gpu/drm/drm_mm.c stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0); entries 876 drivers/gpu/drm/drm_syncobj.c struct syncobj_wait_entry *entries; entries 894 drivers/gpu/drm/drm_syncobj.c entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); entries 895 drivers/gpu/drm/drm_syncobj.c if (!entries) { entries 908 drivers/gpu/drm/drm_syncobj.c entries[i].task = current; entries 909 drivers/gpu/drm/drm_syncobj.c entries[i].point = points[i]; entries 922 drivers/gpu/drm/drm_syncobj.c entries[i].fence = fence; entries 924 drivers/gpu/drm/drm_syncobj.c entries[i].fence = dma_fence_get_stub(); entries 927 drivers/gpu/drm/drm_syncobj.c dma_fence_is_signaled(entries[i].fence)) { entries 948 drivers/gpu/drm/drm_syncobj.c drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]); entries 956 drivers/gpu/drm/drm_syncobj.c fence = entries[i].fence; entries 962 drivers/gpu/drm/drm_syncobj.c (!entries[i].fence_cb.func && entries 964 drivers/gpu/drm/drm_syncobj.c &entries[i].fence_cb, entries 998 drivers/gpu/drm/drm_syncobj.c drm_syncobj_remove_wait(syncobjs[i], &entries[i]); entries 999 drivers/gpu/drm/drm_syncobj.c if (entries[i].fence_cb.func) entries 1000 drivers/gpu/drm/drm_syncobj.c dma_fence_remove_callback(entries[i].fence, entries 1001 drivers/gpu/drm/drm_syncobj.c &entries[i].fence_cb); entries 1002 drivers/gpu/drm/drm_syncobj.c dma_fence_put(entries[i].fence); entries 1004 drivers/gpu/drm/drm_syncobj.c kfree(entries); entries 13798 drivers/gpu/drm/i915/display/intel_display.c struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; entries 13803 drivers/gpu/drm/i915/display/intel_display.c entries[i] = old_crtc_state->wm.skl.ddb; entries 13828 drivers/gpu/drm/i915/display/intel_display.c entries, entries 13833 
drivers/gpu/drm/i915/display/intel_display.c entries[i] = new_crtc_state->wm.skl.ddb; entries 2244 drivers/gpu/drm/i915/i915_gem_gtt.c gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; entries 2249 drivers/gpu/drm/i915/i915_gem_gtt.c iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); entries 59 drivers/gpu/drm/i915/i915_vma.c unsigned long *entries; entries 69 drivers/gpu/drm/i915/i915_vma.c nr_entries = stack_depot_fetch(vma->node.stack, &entries); entries 70 drivers/gpu/drm/i915/i915_vma.c stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0); entries 772 drivers/gpu/drm/i915/intel_pm.c int entries, wm_size; entries 780 drivers/gpu/drm/i915/intel_pm.c entries = intel_wm_method1(pixel_rate, cpp, entries 782 drivers/gpu/drm/i915/intel_pm.c entries = DIV_ROUND_UP(entries, wm->cacheline_size) + entries 784 drivers/gpu/drm/i915/intel_pm.c DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries); entries 786 drivers/gpu/drm/i915/intel_pm.c wm_size = fifo_size - entries; entries 2229 drivers/gpu/drm/i915/intel_pm.c int entries; entries 2231 drivers/gpu/drm/i915/intel_pm.c entries = intel_wm_method2(clock, htotal, entries 2233 drivers/gpu/drm/i915/intel_pm.c entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); entries 2234 drivers/gpu/drm/i915/intel_pm.c srwm = I965_FIFO_SIZE - entries; entries 2239 drivers/gpu/drm/i915/intel_pm.c entries, srwm); entries 2241 drivers/gpu/drm/i915/intel_pm.c entries = intel_wm_method2(clock, htotal, entries 2244 drivers/gpu/drm/i915/intel_pm.c entries = DIV_ROUND_UP(entries, entries 2248 drivers/gpu/drm/i915/intel_pm.c cursor_sr = i965_cursor_wm_info.fifo_size - entries; entries 2385 drivers/gpu/drm/i915/intel_pm.c int entries; entries 2392 drivers/gpu/drm/i915/intel_pm.c entries = intel_wm_method2(clock, htotal, hdisplay, cpp, entries 2394 drivers/gpu/drm/i915/intel_pm.c entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); entries 2395 drivers/gpu/drm/i915/intel_pm.c DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); entries 2396 drivers/gpu/drm/i915/intel_pm.c srwm = wm_info->fifo_size - entries; entries 5252 drivers/gpu/drm/i915/intel_pm.c const struct skl_ddb_entry *entries, entries 5259 drivers/gpu/drm/i915/intel_pm.c skl_ddb_entries_overlap(ddb, &entries[i])) entries 64 drivers/gpu/drm/i915/intel_pm.h const struct skl_ddb_entry *entries, entries 62 drivers/gpu/drm/i915/intel_runtime_pm.c unsigned long entries[STACKDEPTH]; entries 65 drivers/gpu/drm/i915/intel_runtime_pm.c n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); entries 66 drivers/gpu/drm/i915/intel_runtime_pm.c return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN); entries 72 drivers/gpu/drm/i915/intel_runtime_pm.c unsigned long *entries; entries 75 drivers/gpu/drm/i915/intel_runtime_pm.c nr_entries = stack_depot_fetch(stack, &entries); entries 76 drivers/gpu/drm/i915/intel_runtime_pm.c stack_trace_snprint(buf, sz, entries, nr_entries, indent); entries 380 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c .entries = sdm845_qos_linear entries 383 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c .entries = sdm845_qos_macrotile entries 386 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c .entries = sdm845_qos_nrt entries 272 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h struct dpu_qos_lut_entry *entries; entries 203 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c if (!tbl || !tbl->nentry || !tbl->entries) entries 207 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c if (total_fl <= tbl->entries[i].fl) entries 208 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c 
return tbl->entries[i].lut; entries 211 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c if (!tbl->entries[i-1].fl) entries 212 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c return tbl->entries[i-1].lut; entries 227 drivers/gpu/drm/nouveau/dispnv04/disp.c for (i = 0; i < dcb->entries; i++) { entries 2382 drivers/gpu/drm/nouveau/dispnv50/disp.c for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) { entries 6 drivers/gpu/drm/nouveau/include/nvif/clb069.h __u32 entries; entries 974 drivers/gpu/drm/nouveau/nouveau_bios.c u8 entries, *entry; entries 979 drivers/gpu/drm/nouveau/nouveau_bios.c entries = bios->data[bios->offset + 10]; entries 981 drivers/gpu/drm/nouveau/nouveau_bios.c while (entries--) { entries 1373 drivers/gpu/drm/nouveau/nouveau_bios.c struct dcb_output *entry = &dcb->entry[dcb->entries]; entries 1376 drivers/gpu/drm/nouveau/nouveau_bios.c entry->index = dcb->entries++; entries 1515 drivers/gpu/drm/nouveau/nouveau_bios.c dcb->entries--; entries 1607 drivers/gpu/drm/nouveau/nouveau_bios.c for (i = 0; i < dcb->entries; i++) { entries 1611 drivers/gpu/drm/nouveau/nouveau_bios.c for (j = i + 1; j < dcb->entries; j++) { entries 1631 drivers/gpu/drm/nouveau/nouveau_bios.c for (i = 0; i < dcb->entries; i++) { entries 1642 drivers/gpu/drm/nouveau/nouveau_bios.c dcb->entries = newentries; entries 1829 drivers/gpu/drm/nouveau/nouveau_bios.c for (i = 0; i < dcbt->entries; i++) { entries 1841 drivers/gpu/drm/nouveau/nouveau_bios.c for (i = 0; i < dcbt->entries; i++) { entries 56 drivers/gpu/drm/nouveau/nouveau_bios.h int entries; entries 1325 drivers/gpu/drm/nouveau/nouveau_connector.c for (i = 0; i < dcbt->entries; i++) { entries 544 drivers/gpu/drm/nouveau/nouveau_display.c if (nouveau_modeset != 2 && drm->vbios.dcb.entries) { entries 47 drivers/gpu/drm/nouveau/nouveau_svm.c u32 entries; entries 561 drivers/gpu/drm/nouveau/nouveau_svm.c if (++buffer->get == buffer->entries) entries 766 drivers/gpu/drm/nouveau/nouveau_svm.c for (i = 0; buffer->fault[i] && i < buffer->entries; i++) entries 796 drivers/gpu/drm/nouveau/nouveau_svm.c buffer->entries = args.entries; entries 806 drivers/gpu/drm/nouveau/nouveau_svm.c buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL); entries 73 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h struct nvkm_memory *, int entries); entries 31 drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c u8 entries = nvbios_rd08(bios, bios->bit_offset + 10); entries 33 drivers/gpu/drm/nouveau/nvkm/subdev/bios/bit.c while (entries--) { entries 102 drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries); entries 104 drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries * entries 54 drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78); entries 53 drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c if (++get == buffer->entries) entries 112 drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c buffer->entries = nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff; entries 14 drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h int entries; entries 67 drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c buffer->entries = nvkm_rd32(device, 0xb83010 + foff) & 0x000fffff; entries 97 drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c args->v0.entries = buffer->entries; entries 107 drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c u8 entries = 0; entries 124 
drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c entries = (ROM32(desc[0]) & 0x01f00000) >> 20; entries 133 drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c entries = (desc[1] & 0xf0) >> 4; entries 141 drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c entries = desc[1] & 0x07; entries 162 drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c for (i = 0; i < entries; i++, dump += recordlen) { entries 174 drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.c desc += headerlen + (entries * recordlen); entries 105 drivers/gpu/drm/r128/r128_cce.c static int r128_do_wait_for_fifo(drm_r128_private_t *dev_priv, int entries) entries 111 drivers/gpu/drm/r128/r128_cce.c if (slots >= entries) entries 1186 drivers/gpu/drm/radeon/btc_dpm.c if (clock < table->entries[i].clk) entries 1187 drivers/gpu/drm/radeon/btc_dpm.c clock = table->entries[i].clk; entries 1201 drivers/gpu/drm/radeon/btc_dpm.c if (clock <= table->entries[i].clk) { entries 1202 drivers/gpu/drm/radeon/btc_dpm.c if (*voltage < table->entries[i].v) entries 1203 drivers/gpu/drm/radeon/btc_dpm.c *voltage = (u16)((table->entries[i].v < max_voltage) ? entries 1204 drivers/gpu/drm/radeon/btc_dpm.c table->entries[i].v : max_voltage); entries 1302 drivers/gpu/drm/radeon/btc_dpm.c if (voltage <= table->entries[i].value) entries 1303 drivers/gpu/drm/radeon/btc_dpm.c return table->entries[i].value; entries 1306 drivers/gpu/drm/radeon/btc_dpm.c return table->entries[table->count - 1].value; entries 2585 drivers/gpu/drm/radeon/btc_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = entries 2589 drivers/gpu/drm/radeon/btc_dpm.c if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { entries 2594 drivers/gpu/drm/radeon/btc_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; entries 2595 drivers/gpu/drm/radeon/btc_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; entries 2596 drivers/gpu/drm/radeon/btc_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; entries 2597 drivers/gpu/drm/radeon/btc_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 800; entries 2598 drivers/gpu/drm/radeon/btc_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; entries 2599 drivers/gpu/drm/radeon/btc_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 800; entries 2600 drivers/gpu/drm/radeon/btc_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; entries 2601 drivers/gpu/drm/radeon/btc_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 800; entries 2732 drivers/gpu/drm/radeon/btc_dpm.c kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); entries 283 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL) entries 293 drivers/gpu/drm/radeon/ci_dpm.c lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1); entries 294 drivers/gpu/drm/radeon/ci_dpm.c hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2); entries 295 drivers/gpu/drm/radeon/ci_dpm.c hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3); entries 297 drivers/gpu/drm/radeon/ci_dpm.c lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc); entries 298 drivers/gpu/drm/radeon/ci_dpm.c hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage); entries 314 drivers/gpu/drm/radeon/ci_dpm.c vid[i] = 
ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value); entries 2127 drivers/gpu/drm/radeon/ci_dpm.c voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; entries 2128 drivers/gpu/drm/radeon/ci_dpm.c voltage_table->entries[i].smio_low = 0; entries 2227 drivers/gpu/drm/radeon/ci_dpm.c &pi->vddc_voltage_table.entries[count], entries 2232 drivers/gpu/drm/radeon/ci_dpm.c pi->vddc_voltage_table.entries[count].smio_low; entries 2250 drivers/gpu/drm/radeon/ci_dpm.c &pi->vddci_voltage_table.entries[count], entries 2255 drivers/gpu/drm/radeon/ci_dpm.c pi->vddci_voltage_table.entries[count].smio_low; entries 2273 drivers/gpu/drm/radeon/ci_dpm.c &pi->mvdd_voltage_table.entries[count], entries 2278 drivers/gpu/drm/radeon/ci_dpm.c pi->mvdd_voltage_table.entries[count].smio_low; entries 2315 drivers/gpu/drm/radeon/ci_dpm.c if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) { entries 2316 drivers/gpu/drm/radeon/ci_dpm.c voltage->Voltage = pi->mvdd_voltage_table.entries[i].value; entries 2337 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) entries 2340 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { entries 2343 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { entries 2350 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; entries 2352 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; entries 2360 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { entries 2367 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; entries 2369 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; entries 2389 drivers/gpu/drm/radeon/ci_dpm.c if (sclk < limits->entries[i].sclk) { entries 2406 drivers/gpu/drm/radeon/ci_dpm.c if (mclk < limits->entries[i].mclk) { entries 2441 drivers/gpu/drm/radeon/ci_dpm.c if (allowed_clock_voltage_table->entries[i].clk >= clock) { entries 2442 drivers/gpu/drm/radeon/ci_dpm.c *voltage = allowed_clock_voltage_table->entries[i].v; entries 2447 drivers/gpu/drm/radeon/ci_dpm.c *voltage = allowed_clock_voltage_table->entries[i-1].v; entries 2560 drivers/gpu/drm/radeon/ci_dpm.c &arb_regs.entries[i][j]); entries 2594 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >= entries 2602 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >= entries 2660 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk; entries 2662 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk; entries 2664 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; entries 2703 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk; entries 2705 drivers/gpu/drm/radeon/ci_dpm.c (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; entries 2736 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk; entries 2738 
drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v; entries 2768 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk; entries 2770 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; entries 2883 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) { entries 2891 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) { entries 2899 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) { entries 3137 drivers/gpu/drm/radeon/ci_dpm.c if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) entries 3141 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage; entries 3143 drivers/gpu/drm/radeon/ci_dpm.c if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) entries 3147 drivers/gpu/drm/radeon/ci_dpm.c ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) * entries 3483 drivers/gpu/drm/radeon/ci_dpm.c allowed_sclk_vddc_table->entries[i].clk)) { entries 3485 drivers/gpu/drm/radeon/ci_dpm.c allowed_sclk_vddc_table->entries[i].clk; entries 3496 drivers/gpu/drm/radeon/ci_dpm.c allowed_mclk_table->entries[i].clk)) { entries 3498 drivers/gpu/drm/radeon/ci_dpm.c allowed_mclk_table->entries[i].clk; entries 3507 drivers/gpu/drm/radeon/ci_dpm.c allowed_sclk_vddc_table->entries[i].v; entries 3509 drivers/gpu/drm/radeon/ci_dpm.c std_voltage_table->entries[i].leakage; entries 3518 drivers/gpu/drm/radeon/ci_dpm.c allowed_mclk_table->entries[i].v; entries 3528 drivers/gpu/drm/radeon/ci_dpm.c allowed_mclk_table->entries[i].v; entries 3798 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk) entries 3799 drivers/gpu/drm/radeon/ci_dpm.c requested_voltage = disp_voltage_table->entries[i].v; entries 3803 drivers/gpu/drm/radeon/ci_dpm.c if (requested_voltage <= vddc_table->entries[i].v) { entries 3804 drivers/gpu/drm/radeon/ci_dpm.c requested_voltage = vddc_table->entries[i].v; entries 3946 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { entries 3994 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { entries 4027 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { entries 4058 drivers/gpu/drm/radeon/ci_dpm.c if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { entries 4107 drivers/gpu/drm/radeon/ci_dpm.c if (table->entries[i].evclk >= min_evclk) entries 4940 drivers/gpu/drm/radeon/ci_dpm.c pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v; entries 4942 drivers/gpu/drm/radeon/ci_dpm.c allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; entries 4944 drivers/gpu/drm/radeon/ci_dpm.c pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v; entries 4946 drivers/gpu/drm/radeon/ci_dpm.c allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; entries 4949 drivers/gpu/drm/radeon/ci_dpm.c allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; entries 4951 drivers/gpu/drm/radeon/ci_dpm.c 
allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; entries 4953 drivers/gpu/drm/radeon/ci_dpm.c allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; entries 4955 drivers/gpu/drm/radeon/ci_dpm.c allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; entries 4995 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); entries 5006 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddci_leakage(rdev, &table->entries[i].v); entries 5017 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); entries 5028 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddc_leakage(rdev, &table->entries[i].v); entries 5039 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage); entries 5059 drivers/gpu/drm/radeon/ci_dpm.c ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc); entries 5671 drivers/gpu/drm/radeon/ci_dpm.c kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); entries 5788 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = entries 5792 drivers/gpu/drm/radeon/ci_dpm.c if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { entries 5797 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; entries 5798 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; entries 5799 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; entries 5800 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; entries 5801 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; entries 5802 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; entries 5803 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; entries 5804 drivers/gpu/drm/radeon/ci_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; entries 411 drivers/gpu/drm/radeon/cypress_dpm.c if (value <= table->entries[i].value) { entries 413 drivers/gpu/drm/radeon/cypress_dpm.c voltage->value = cpu_to_be16(table->entries[i].value); entries 1479 drivers/gpu/drm/radeon/cypress_dpm.c voltage_table->entries[i] = voltage_table->entries[i + diff]; entries 1520 drivers/gpu/drm/radeon/cypress_dpm.c table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); entries 1542 drivers/gpu/drm/radeon/cypress_dpm.c eg_pi->vddc_voltage_table.entries[i].value) { entries 563 drivers/gpu/drm/radeon/kv_dpm.c return vddc_sclk_table->entries[vid_2bit].v; entries 565 drivers/gpu/drm/radeon/kv_dpm.c return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; entries 568 drivers/gpu/drm/radeon/kv_dpm.c if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) entries 569 drivers/gpu/drm/radeon/kv_dpm.c return vid_mapping_table->entries[i].vid_7bit; entries 571 drivers/gpu/drm/radeon/kv_dpm.c return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; entries 585 drivers/gpu/drm/radeon/kv_dpm.c if (vddc_sclk_table->entries[i].v == vid_7bit) entries 591 drivers/gpu/drm/radeon/kv_dpm.c if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) entries 592 drivers/gpu/drm/radeon/kv_dpm.c return vid_mapping_table->entries[i].vid_2bit; entries 595 drivers/gpu/drm/radeon/kv_dpm.c return 
vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; entries 725 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].clk == pi->boot_pl.sclk) entries 739 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) entries 834 drivers/gpu/drm/radeon/kv_dpm.c (pi->high_voltage_t < table->entries[i].v)) entries 837 drivers/gpu/drm/radeon/kv_dpm.c pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); entries 838 drivers/gpu/drm/radeon/kv_dpm.c pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); entries 839 drivers/gpu/drm/radeon/kv_dpm.c pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); entries 842 drivers/gpu/drm/radeon/kv_dpm.c (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk); entries 844 drivers/gpu/drm/radeon/kv_dpm.c (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk); entries 847 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].vclk, false, &dividers); entries 853 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].dclk, false, &dividers); entries 905 drivers/gpu/drm/radeon/kv_dpm.c pi->high_voltage_t < table->entries[i].v) entries 908 drivers/gpu/drm/radeon/kv_dpm.c pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); entries 909 drivers/gpu/drm/radeon/kv_dpm.c pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); entries 912 drivers/gpu/drm/radeon/kv_dpm.c (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk); entries 915 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].evclk, false, &dividers); entries 968 drivers/gpu/drm/radeon/kv_dpm.c pi->high_voltage_t < table->entries[i].v) entries 971 drivers/gpu/drm/radeon/kv_dpm.c pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); entries 972 drivers/gpu/drm/radeon/kv_dpm.c pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); entries 975 drivers/gpu/drm/radeon/kv_dpm.c (u8)kv_get_clk_bypass(rdev, table->entries[i].clk); entries 978 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].clk, false, &dividers); entries 1033 drivers/gpu/drm/radeon/kv_dpm.c pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); entries 1034 drivers/gpu/drm/radeon/kv_dpm.c pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); entries 1037 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].clk, false, &dividers); entries 1087 drivers/gpu/drm/radeon/kv_dpm.c if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) entries 1089 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) entries 1091 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) entries 1093 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) entries 1095 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) entries 1108 drivers/gpu/drm/radeon/kv_dpm.c if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) entries 1110 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) entries 1112 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) entries 1114 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) entries 1116 drivers/gpu/drm/radeon/kv_dpm.c else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) entries 1468 drivers/gpu/drm/radeon/kv_dpm.c if
(table->entries[i].evclk >= evclk) entries 1556 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].clk >= 0) /* XXX */ entries 1718 drivers/gpu/drm/radeon/kv_dpm.c if ((table->entries[i].clk >= new_ps->levels[0].sclk) || entries 1726 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) entries 1732 drivers/gpu/drm/radeon/kv_dpm.c if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > entries 1733 drivers/gpu/drm/radeon/kv_dpm.c (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) entries 1743 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || entries 1751 drivers/gpu/drm/radeon/kv_dpm.c if (table->entries[i].sclk_frequency <= entries 1759 drivers/gpu/drm/radeon/kv_dpm.c table->entries[pi->highest_valid].sclk_frequency) > entries 1760 drivers/gpu/drm/radeon/kv_dpm.c (table->entries[pi->lowest_valid].sclk_frequency - entries 1976 drivers/gpu/drm/radeon/kv_dpm.c pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; entries 1979 drivers/gpu/drm/radeon/kv_dpm.c pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); entries 1999 drivers/gpu/drm/radeon/kv_dpm.c uvd_table->entries[i].v = entries 2001 drivers/gpu/drm/radeon/kv_dpm.c uvd_table->entries[i].v); entries 2006 drivers/gpu/drm/radeon/kv_dpm.c vce_table->entries[i].v = entries 2008 drivers/gpu/drm/radeon/kv_dpm.c vce_table->entries[i].v); entries 2013 drivers/gpu/drm/radeon/kv_dpm.c samu_table->entries[i].v = entries 2015 drivers/gpu/drm/radeon/kv_dpm.c samu_table->entries[i].v); entries 2020 drivers/gpu/drm/radeon/kv_dpm.c acp_table->entries[i].v = entries 2022 drivers/gpu/drm/radeon/kv_dpm.c acp_table->entries[i].v); entries 2115 drivers/gpu/drm/radeon/kv_dpm.c (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <= entries 2127 drivers/gpu/drm/radeon/kv_dpm.c (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <= entries 2170 drivers/gpu/drm/radeon/kv_dpm.c if (stable_p_state_sclk >= table->entries[i].clk) { entries 2171 drivers/gpu/drm/radeon/kv_dpm.c stable_p_state_sclk = table->entries[i].clk; entries 2177 drivers/gpu/drm/radeon/kv_dpm.c stable_p_state_sclk = table->entries[0].clk; entries 2200 drivers/gpu/drm/radeon/kv_dpm.c ps->levels[i].sclk = table->entries[limit].clk; entries 2212 drivers/gpu/drm/radeon/kv_dpm.c ps->levels[i].sclk = table->entries[limit].sclk_frequency; entries 2363 drivers/gpu/drm/radeon/kv_dpm.c kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v))) entries 2366 drivers/gpu/drm/radeon/kv_dpm.c kv_set_divider_value(rdev, i, table->entries[i].clk); entries 2369 drivers/gpu/drm/radeon/kv_dpm.c table->entries[i].v); entries 2383 drivers/gpu/drm/radeon/kv_dpm.c kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit)) entries 2386 drivers/gpu/drm/radeon/kv_dpm.c kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency); entries 2387 drivers/gpu/drm/radeon/kv_dpm.c kv_set_vid(rdev, i, table->entries[i].vid_2bit); entries 999 drivers/gpu/drm/radeon/ni_dpm.c if (0xff01 == table->entries[i].v) { entries 1002 drivers/gpu/drm/radeon/ni_dpm.c table->entries[i].v = pi->max_vddc; entries 1267 drivers/gpu/drm/radeon/ni_dpm.c table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); entries 1285 drivers/gpu/drm/radeon/ni_dpm.c if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) { entries 1309 drivers/gpu/drm/radeon/ni_dpm.c if (value <= table->entries[i].value) { entries 
1311 drivers/gpu/drm/radeon/ni_dpm.c voltage->value = cpu_to_be16(table->entries[i].value); entries 1348 drivers/gpu/drm/radeon/ni_dpm.c if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries && entries 1350 drivers/gpu/drm/radeon/ni_dpm.c *std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc; entries 3073 drivers/gpu/drm/radeon/ni_dpm.c eg_pi->vddc_voltage_table.entries[j].value, entries 3121 drivers/gpu/drm/radeon/ni_dpm.c smc_leakage = leakage_table->entries[j].leakage; entries 4080 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = entries 4084 drivers/gpu/drm/radeon/ni_dpm.c if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { entries 4089 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; entries 4090 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; entries 4091 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; entries 4092 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; entries 4093 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; entries 4094 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; entries 4095 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; entries 4096 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; entries 4278 drivers/gpu/drm/radeon/ni_dpm.c kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); entries 476 drivers/gpu/drm/radeon/pptable.h ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. entries 492 drivers/gpu/drm/radeon/pptable.h ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. entries 516 drivers/gpu/drm/radeon/pptable.h ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries. entries 531 drivers/gpu/drm/radeon/pptable.h ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries. 
entries 543 drivers/gpu/drm/radeon/pptable.h VCEClockInfo entries[1]; entries 555 drivers/gpu/drm/radeon/pptable.h ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1]; entries 567 drivers/gpu/drm/radeon/pptable.h ATOM_PPLIB_VCE_State_Record entries[1]; entries 589 drivers/gpu/drm/radeon/pptable.h UVDClockInfo entries[1]; entries 601 drivers/gpu/drm/radeon/pptable.h ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1]; entries 620 drivers/gpu/drm/radeon/pptable.h ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1]; entries 638 drivers/gpu/drm/radeon/pptable.h ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1]; entries 828 drivers/gpu/drm/radeon/r600_dpm.c radeon_table->entries = kzalloc(size, GFP_KERNEL); entries 829 drivers/gpu/drm/radeon/r600_dpm.c if (!radeon_table->entries) entries 832 drivers/gpu/drm/radeon/r600_dpm.c entry = &atom_table->entries[0]; entries 834 drivers/gpu/drm/radeon/r600_dpm.c radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | entries 836 drivers/gpu/drm/radeon/r600_dpm.c radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage); entries 938 drivers/gpu/drm/radeon/r600_dpm.c kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); entries 949 drivers/gpu/drm/radeon/r600_dpm.c kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); entries 950 drivers/gpu/drm/radeon/r600_dpm.c kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); entries 961 drivers/gpu/drm/radeon/r600_dpm.c kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); entries 962 drivers/gpu/drm/radeon/r600_dpm.c kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); entries 963 drivers/gpu/drm/radeon/r600_dpm.c kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); entries 974 drivers/gpu/drm/radeon/r600_dpm.c le16_to_cpu(clk_v->entries[0].usSclkLow) | entries 975 drivers/gpu/drm/radeon/r600_dpm.c (clk_v->entries[0].ucSclkHigh << 16); entries 977 drivers/gpu/drm/radeon/r600_dpm.c le16_to_cpu(clk_v->entries[0].usMclkLow) | entries 978 drivers/gpu/drm/radeon/r600_dpm.c (clk_v->entries[0].ucMclkHigh << 16); entries 980 drivers/gpu/drm/radeon/r600_dpm.c le16_to_cpu(clk_v->entries[0].usVddc); entries 982 drivers/gpu/drm/radeon/r600_dpm.c le16_to_cpu(clk_v->entries[0].usVddci); entries 992 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = entries 996 drivers/gpu/drm/radeon/r600_dpm.c if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { entries 1001 drivers/gpu/drm/radeon/r600_dpm.c entry = &psl->entries[0]; entries 1003 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = entries 1005 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = entries 1007 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = entries 1039 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); entries 1040 drivers/gpu/drm/radeon/r600_dpm.c if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { entries 1044 drivers/gpu/drm/radeon/r600_dpm.c entry = &cac_table->entries[0]; entries 1047 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = entries 1049 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = entries 1051 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = entries 1054 
drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = entries 1056 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = entries 1093 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = entries 1095 drivers/gpu/drm/radeon/r600_dpm.c if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { entries 1101 drivers/gpu/drm/radeon/r600_dpm.c entry = &limits->entries[0]; entries 1102 drivers/gpu/drm/radeon/r600_dpm.c state_entry = &states->entries[0]; entries 1105 drivers/gpu/drm/radeon/r600_dpm.c ((u8 *)&array->entries[0] + entries 1107 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = entries 1109 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = entries 1111 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = entries 1120 drivers/gpu/drm/radeon/r600_dpm.c ((u8 *)&array->entries[0] + entries 1147 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = entries 1149 drivers/gpu/drm/radeon/r600_dpm.c if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { entries 1155 drivers/gpu/drm/radeon/r600_dpm.c entry = &limits->entries[0]; entries 1158 drivers/gpu/drm/radeon/r600_dpm.c ((u8 *)&array->entries[0] + entries 1160 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = entries 1162 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = entries 1164 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = entries 1179 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = entries 1181 drivers/gpu/drm/radeon/r600_dpm.c if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { entries 1187 drivers/gpu/drm/radeon/r600_dpm.c entry = &limits->entries[0]; entries 1189 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = entries 1191 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = entries 1237 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = entries 1239 drivers/gpu/drm/radeon/r600_dpm.c if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { entries 1245 drivers/gpu/drm/radeon/r600_dpm.c entry = &limits->entries[0]; entries 1247 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = entries 1249 drivers/gpu/drm/radeon/r600_dpm.c rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = entries 1302 drivers/gpu/drm/radeon/r600_dpm.c kfree(dyn_state->vddc_dependency_on_sclk.entries); entries 1303 drivers/gpu/drm/radeon/r600_dpm.c kfree(dyn_state->vddci_dependency_on_mclk.entries); entries 1304 drivers/gpu/drm/radeon/r600_dpm.c kfree(dyn_state->vddc_dependency_on_mclk.entries); entries 1305 drivers/gpu/drm/radeon/r600_dpm.c kfree(dyn_state->mvdd_dependency_on_mclk.entries); entries 1306 drivers/gpu/drm/radeon/r600_dpm.c kfree(dyn_state->cac_leakage_table.entries); entries 1307 drivers/gpu/drm/radeon/r600_dpm.c 
kfree(dyn_state->phase_shedding_limits_table.entries); entries 1310 drivers/gpu/drm/radeon/r600_dpm.c kfree(dyn_state->vce_clock_voltage_dependency_table.entries); entries 1311 drivers/gpu/drm/radeon/r600_dpm.c kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); entries 1312 drivers/gpu/drm/radeon/r600_dpm.c kfree(dyn_state->samu_clock_voltage_dependency_table.entries); entries 1313 drivers/gpu/drm/radeon/r600_dpm.c kfree(dyn_state->acp_clock_voltage_dependency_table.entries); entries 1392 drivers/gpu/drm/radeon/radeon.h struct radeon_clock_voltage_dependency_entry *entries; entries 1409 drivers/gpu/drm/radeon/radeon.h union radeon_cac_leakage_entry *entries; entries 1420 drivers/gpu/drm/radeon/radeon.h struct radeon_phase_shedding_limits_entry *entries; entries 1431 drivers/gpu/drm/radeon/radeon.h struct radeon_uvd_clock_voltage_dependency_entry *entries; entries 1442 drivers/gpu/drm/radeon/radeon.h struct radeon_vce_clock_voltage_dependency_entry *entries; entries 3320 drivers/gpu/drm/radeon/radeon_atombios.c if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v == entries 3332 drivers/gpu/drm/radeon/radeon_atombios.c cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk); entries 3756 drivers/gpu/drm/radeon/radeon_atombios.c voltage_table->entries[i].value = entries 3759 drivers/gpu/drm/radeon/radeon_atombios.c voltage_table->entries[i].value, entries 3761 drivers/gpu/drm/radeon/radeon_atombios.c &voltage_table->entries[i].smio_low, entries 3791 drivers/gpu/drm/radeon/radeon_atombios.c voltage_table->entries[i].value = entries 3793 drivers/gpu/drm/radeon/radeon_atombios.c voltage_table->entries[i].smio_low = entries 2764 drivers/gpu/drm/radeon/radeon_combios.c u8 entries = RBIOS8(offset + 0x5 + 0xb); entries 2766 drivers/gpu/drm/radeon/radeon_combios.c if (entries && voltage_table_offset) { entries 684 drivers/gpu/drm/radeon/radeon_mode.h struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; entries 392 drivers/gpu/drm/radeon/radeon_vm.c unsigned entries; entries 405 drivers/gpu/drm/radeon/radeon_vm.c entries = radeon_bo_size(bo) / 8; entries 413 drivers/gpu/drm/radeon/radeon_vm.c radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0); entries 2551 drivers/gpu/drm/radeon/si_dpm.c if (table->entries[i].vddc > *max) entries 2552 drivers/gpu/drm/radeon/si_dpm.c *max = table->entries[i].vddc; entries 2553 drivers/gpu/drm/radeon/si_dpm.c if (table->entries[i].vddc < *min) entries 2554 drivers/gpu/drm/radeon/si_dpm.c *min = table->entries[i].vddc; entries 2926 drivers/gpu/drm/radeon/si_dpm.c if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage) entries 2927 drivers/gpu/drm/radeon/si_dpm.c highest_leakage = si_pi->leakage_voltage.entries[i].voltage; entries 2951 drivers/gpu/drm/radeon/si_dpm.c if ((evclk <= table->entries[i].evclk) && entries 2952 drivers/gpu/drm/radeon/si_dpm.c (ecclk <= table->entries[i].ecclk)) { entries 2953 drivers/gpu/drm/radeon/si_dpm.c *voltage = table->entries[i].v; entries 2961 drivers/gpu/drm/radeon/si_dpm.c *voltage = table->entries[table->count - 1].v; entries 3243 drivers/gpu/drm/radeon/si_dpm.c si_pi->leakage_voltage.entries[count].voltage = vddc; entries 3244 drivers/gpu/drm/radeon/si_dpm.c si_pi->leakage_voltage.entries[count].leakage_index = entries 3271 drivers/gpu/drm/radeon/si_dpm.c if (si_pi->leakage_voltage.entries[i].leakage_index == index) { entries 3272 drivers/gpu/drm/radeon/si_dpm.c *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage; entries 3932 
drivers/gpu/drm/radeon/si_dpm.c voltage_table->entries[i] = voltage_table->entries[i + diff]; entries 3951 drivers/gpu/drm/radeon/si_dpm.c voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; entries 3952 drivers/gpu/drm/radeon/si_dpm.c voltage_table->entries[i].smio_low = 0; entries 4045 drivers/gpu/drm/radeon/si_dpm.c table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); entries 4070 drivers/gpu/drm/radeon/si_dpm.c if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) { entries 4118 drivers/gpu/drm/radeon/si_dpm.c if (value <= table->entries[i].value) { entries 4120 drivers/gpu/drm/radeon/si_dpm.c voltage->value = cpu_to_be16(table->entries[i].value); entries 4143 drivers/gpu/drm/radeon/si_dpm.c voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value); entries 4156 drivers/gpu/drm/radeon/si_dpm.c if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { entries 4158 drivers/gpu/drm/radeon/si_dpm.c if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) entries 4163 drivers/gpu/drm/radeon/si_dpm.c (u16)rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { entries 4167 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; entries 4170 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[rdev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; entries 4178 drivers/gpu/drm/radeon/si_dpm.c (u16)rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { entries 4182 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; entries 4185 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.cac_leakage_table.entries[rdev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; entries 4192 drivers/gpu/drm/radeon/si_dpm.c *std_voltage = rdev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc; entries 4217 drivers/gpu/drm/radeon/si_dpm.c if ((voltage <= limits->entries[i].voltage) && entries 4218 drivers/gpu/drm/radeon/si_dpm.c (sclk <= limits->entries[i].sclk) && entries 4219 drivers/gpu/drm/radeon/si_dpm.c (mclk <= limits->entries[i].mclk)) entries 5162 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) { entries 5164 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v) entries 5880 drivers/gpu/drm/radeon/si_dpm.c table->entries[i].v, entries 5883 drivers/gpu/drm/radeon/si_dpm.c table->entries[i].v = leakage_voltage; entries 5894 drivers/gpu/drm/radeon/si_dpm.c table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ? 
entries 5895 drivers/gpu/drm/radeon/si_dpm.c table->entries[j].v : table->entries[j + 1].v;
entries 6960 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
entries 6964 drivers/gpu/drm/radeon/si_dpm.c if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
entries 6969 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
entries 6970 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
entries 6971 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
entries 6972 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
entries 6973 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
entries 6974 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
entries 6975 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
entries 6976 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
entries 7088 drivers/gpu/drm/radeon/si_dpm.c kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
entries 137 drivers/gpu/drm/radeon/si_dpm.h struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
entries 235 drivers/gpu/drm/radeon/smu7_discrete.h SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
entries 1035 drivers/gpu/drm/radeon/sumo_dpm.c if (pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency >= lower_limit)
entries 1036 drivers/gpu/drm/radeon/sumo_dpm.c return pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency;
entries 1039 drivers/gpu/drm/radeon/sumo_dpm.c return pi->sys_info.sclk_voltage_mapping_table.entries[pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1].sclk_frequency;
entries 1535 drivers/gpu/drm/radeon/sumo_dpm.c if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
entries 1536 drivers/gpu/drm/radeon/sumo_dpm.c return vid_mapping_table->entries[i].vid_7bit;
entries 1539 drivers/gpu/drm/radeon/sumo_dpm.c return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
entries 1550 drivers/gpu/drm/radeon/sumo_dpm.c if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
entries 1551 drivers/gpu/drm/radeon/sumo_dpm.c return vid_mapping_table->entries[i].vid_2bit;
entries 1554 drivers/gpu/drm/radeon/sumo_dpm.c return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
entries 1602 drivers/gpu/drm/radeon/sumo_dpm.c sclk_voltage_mapping_table->entries[n].sclk_frequency =
entries 1604 drivers/gpu/drm/radeon/sumo_dpm.c sclk_voltage_mapping_table->entries[n].vid_2bit =
entries 1622 drivers/gpu/drm/radeon/sumo_dpm.c vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
entries 1624 drivers/gpu/drm/radeon/sumo_dpm.c vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
entries 1630 drivers/gpu/drm/radeon/sumo_dpm.c if (vid_mapping_table->entries[i].vid_7bit == 0) {
entries 1632 drivers/gpu/drm/radeon/sumo_dpm.c if (vid_mapping_table->entries[j].vid_7bit != 0) {
entries 1633 drivers/gpu/drm/radeon/sumo_dpm.c vid_mapping_table->entries[i] =
entries 1634 drivers/gpu/drm/radeon/sumo_dpm.c vid_mapping_table->entries[j];
entries 1635 drivers/gpu/drm/radeon/sumo_dpm.c vid_mapping_table->entries[j].vid_7bit = 0;
entries 67 drivers/gpu/drm/radeon/sumo_dpm.h struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES];
entries 78 drivers/gpu/drm/radeon/sumo_dpm.h struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS];
entries 1389 drivers/gpu/drm/radeon/trinity_dpm.c if (pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency >= lower_limit)
entries 1390 drivers/gpu/drm/radeon/trinity_dpm.c return pi->sys_info.sclk_voltage_mapping_table.entries[i].sclk_frequency;
entries 1520 drivers/gpu/drm/radeon/trinity_dpm.c if ((evclk <= table->entries[i].evclk) &&
entries 1521 drivers/gpu/drm/radeon/trinity_dpm.c (ecclk <= table->entries[i].ecclk)) {
entries 1522 drivers/gpu/drm/radeon/trinity_dpm.c *voltage = table->entries[i].v;
entries 1530 drivers/gpu/drm/radeon/trinity_dpm.c *voltage = table->entries[table->count - 1].v;
entries 2291 drivers/hwmon/pmbus/pmbus_core.c struct pmbus_debugfs_entry *entries;
entries 2308 drivers/hwmon/pmbus/pmbus_core.c entries = devm_kcalloc(data->dev,
entries 2309 drivers/hwmon/pmbus/pmbus_core.c data->info->pages * 10, sizeof(*entries),
entries 2311 drivers/hwmon/pmbus/pmbus_core.c if (!entries)
entries 2318 drivers/hwmon/pmbus/pmbus_core.c entries[idx].client = client;
entries 2319 drivers/hwmon/pmbus/pmbus_core.c entries[idx].page = i;
entries 2322 drivers/hwmon/pmbus/pmbus_core.c &entries[idx++],
entries 2327 drivers/hwmon/pmbus/pmbus_core.c entries[idx].client = client;
entries 2328 drivers/hwmon/pmbus/pmbus_core.c entries[idx].page = i;
entries 2329 drivers/hwmon/pmbus/pmbus_core.c entries[idx].reg = PMBUS_STATUS_VOUT;
entries 2332 drivers/hwmon/pmbus/pmbus_core.c &entries[idx++],
entries 2337 drivers/hwmon/pmbus/pmbus_core.c entries[idx].client = client;
entries 2338 drivers/hwmon/pmbus/pmbus_core.c entries[idx].page = i;
entries 2339 drivers/hwmon/pmbus/pmbus_core.c entries[idx].reg = PMBUS_STATUS_IOUT;
entries 2342 drivers/hwmon/pmbus/pmbus_core.c &entries[idx++],
entries 2347 drivers/hwmon/pmbus/pmbus_core.c entries[idx].client = client;
entries 2348 drivers/hwmon/pmbus/pmbus_core.c entries[idx].page = i;
entries 2349 drivers/hwmon/pmbus/pmbus_core.c entries[idx].reg = PMBUS_STATUS_INPUT;
entries 2352 drivers/hwmon/pmbus/pmbus_core.c &entries[idx++],
entries 2357 drivers/hwmon/pmbus/pmbus_core.c entries[idx].client = client;
entries 2358 drivers/hwmon/pmbus/pmbus_core.c entries[idx].page = i;
entries 2359 drivers/hwmon/pmbus/pmbus_core.c entries[idx].reg = PMBUS_STATUS_TEMPERATURE;
entries 2362 drivers/hwmon/pmbus/pmbus_core.c &entries[idx++],
entries 2367 drivers/hwmon/pmbus/pmbus_core.c entries[idx].client = client;
entries 2368 drivers/hwmon/pmbus/pmbus_core.c entries[idx].page = i;
entries 2369 drivers/hwmon/pmbus/pmbus_core.c entries[idx].reg = PMBUS_STATUS_CML;
entries 2372 drivers/hwmon/pmbus/pmbus_core.c &entries[idx++],
entries 2377 drivers/hwmon/pmbus/pmbus_core.c entries[idx].client = client;
entries 2378 drivers/hwmon/pmbus/pmbus_core.c entries[idx].page = i;
entries 2379 drivers/hwmon/pmbus/pmbus_core.c entries[idx].reg = PMBUS_STATUS_OTHER;
entries 2382 drivers/hwmon/pmbus/pmbus_core.c &entries[idx++],
entries 2388 drivers/hwmon/pmbus/pmbus_core.c entries[idx].client = client;
entries 2389 drivers/hwmon/pmbus/pmbus_core.c entries[idx].page = i;
entries 2390 drivers/hwmon/pmbus/pmbus_core.c entries[idx].reg = PMBUS_STATUS_MFR_SPECIFIC;
entries 2393 drivers/hwmon/pmbus/pmbus_core.c &entries[idx++],
entries 2398 drivers/hwmon/pmbus/pmbus_core.c entries[idx].client = client;
entries 2399 drivers/hwmon/pmbus/pmbus_core.c entries[idx].page = i;
entries 2400 drivers/hwmon/pmbus/pmbus_core.c entries[idx].reg = PMBUS_STATUS_FAN_12;
entries 2403 drivers/hwmon/pmbus/pmbus_core.c &entries[idx++],
entries 2408 drivers/hwmon/pmbus/pmbus_core.c entries[idx].client = client;
entries 2409 drivers/hwmon/pmbus/pmbus_core.c entries[idx].page = i;
entries 2410 drivers/hwmon/pmbus/pmbus_core.c entries[idx].reg = PMBUS_STATUS_FAN_34;
entries 2413 drivers/hwmon/pmbus/pmbus_core.c &entries[idx++],
entries 424 drivers/hwmon/pmbus/ucd9000.c struct ucd9000_debugfs_entry *entries;
entries 443 drivers/hwmon/pmbus/ucd9000.c entries = devm_kcalloc(&client->dev,
entries 444 drivers/hwmon/pmbus/ucd9000.c UCD9000_GPI_COUNT, sizeof(*entries),
entries 446 drivers/hwmon/pmbus/ucd9000.c if (!entries)
entries 450 drivers/hwmon/pmbus/ucd9000.c entries[i].client = client;
entries 451 drivers/hwmon/pmbus/ucd9000.c entries[i].index = i;
entries 455 drivers/hwmon/pmbus/ucd9000.c &entries[i],
entries 1011 drivers/infiniband/hw/bnxt_re/ib_verbs.c int rc, entries;
entries 1086 drivers/infiniband/hw/bnxt_re/ib_verbs.c entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
entries 1087 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
entries 1103 drivers/infiniband/hw/bnxt_re/ib_verbs.c entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
entries 1104 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
entries 1148 drivers/infiniband/hw/bnxt_re/ib_verbs.c entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
entries 1150 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
entries 1350 drivers/infiniband/hw/bnxt_re/ib_verbs.c int rc, entries;
entries 1369 drivers/infiniband/hw/bnxt_re/ib_verbs.c entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
entries 1370 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (entries > dev_attr->max_srq_wqes + 1)
entries 1371 drivers/infiniband/hw/bnxt_re/ib_verbs.c entries = dev_attr->max_srq_wqes + 1;
entries 1373 drivers/infiniband/hw/bnxt_re/ib_verbs.c srq->qplib_srq.max_wqe = entries;
entries 1542 drivers/infiniband/hw/bnxt_re/ib_verbs.c int rc, entries;
entries 1737 drivers/infiniband/hw/bnxt_re/ib_verbs.c entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
entries 1738 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
entries 1750 drivers/infiniband/hw/bnxt_re/ib_verbs.c entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
entries 1752 drivers/infiniband/hw/bnxt_re/ib_verbs.c min_t(u32, entries, dev_attr->max_qp_wqes + 1);
entries 2539 drivers/infiniband/hw/bnxt_re/ib_verbs.c int rc, entries;
entries 2553 drivers/infiniband/hw/bnxt_re/ib_verbs.c entries = roundup_pow_of_two(cqe + 1);
entries 2554 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (entries > dev_attr->max_cq_wqes + 1)
entries 2555 drivers/infiniband/hw/bnxt_re/ib_verbs.c entries = dev_attr->max_cq_wqes + 1;
entries 2567 drivers/infiniband/hw/bnxt_re/ib_verbs.c entries * sizeof(struct cq_base),
entries 2578 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
entries 2594 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->qplib_cq.max_wqe = entries;
entries 2604 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->ib_cq.cqe = entries;
entries 110 drivers/infiniband/hw/cxgb3/iwch_provider.c int entries = attr->cqe;
entries 118 drivers/infiniband/hw/cxgb3/iwch_provider.c pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
entries 142 drivers/infiniband/hw/cxgb3/iwch_provider.c entries += 16;
entries 144 drivers/infiniband/hw/cxgb3/iwch_provider.c entries = roundup_pow_of_two(entries);
entries 145 drivers/infiniband/hw/cxgb3/iwch_provider.c chp->cq.size_log2 = ilog2(entries);
entries 994 drivers/infiniband/hw/cxgb4/cq.c int entries = attr->cqe;
entries 1006 drivers/infiniband/hw/cxgb4/cq.c pr_debug("ib_dev %p entries %d\n", ibdev, entries);
entries 1033 drivers/infiniband/hw/cxgb4/cq.c entries++;
entries 1036 drivers/infiniband/hw/cxgb4/cq.c entries++;
entries 1041 drivers/infiniband/hw/cxgb4/cq.c entries = roundup(entries, 16);
entries 1046 drivers/infiniband/hw/cxgb4/cq.c hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);
entries 1076 drivers/infiniband/hw/cxgb4/cq.c chp->ibcq.cqe = entries - 2;
entries 139 drivers/infiniband/hw/efa/efa_com.c u16 size = aq->depth * sizeof(*sq->entries);
entries 144 drivers/infiniband/hw/efa/efa_com.c sq->entries =
entries 146 drivers/infiniband/hw/efa/efa_com.c if (!sq->entries)
entries 177 drivers/infiniband/hw/efa/efa_com.c u16 size = aq->depth * sizeof(*cq->entries);
entries 182 drivers/infiniband/hw/efa/efa_com.c cq->entries =
entries 184 drivers/infiniband/hw/efa/efa_com.c if (!cq->entries)
entries 223 drivers/infiniband/hw/efa/efa_com.c size = EFA_ASYNC_QUEUE_DEPTH * sizeof(*aenq->entries);
entries 224 drivers/infiniband/hw/efa/efa_com.c aenq->entries = dma_alloc_coherent(edev->dmadev, size, &aenq->dma_addr,
entries 226 drivers/infiniband/hw/efa/efa_com.c if (!aenq->entries)
entries 354 drivers/infiniband/hw/efa/efa_com.c aqe = &aq->sq.entries[pi];
entries 463 drivers/infiniband/hw/efa/efa_com.c cqe = &aq->cq.entries[ci];
entries 482 drivers/infiniband/hw/efa/efa_com.c cqe = &aq->cq.entries[ci];
entries 686 drivers/infiniband/hw/efa/efa_com.c size = aq->depth * sizeof(*sq->entries);
entries 687 drivers/infiniband/hw/efa/efa_com.c dma_free_coherent(edev->dmadev, size, sq->entries, sq->dma_addr);
entries 689 drivers/infiniband/hw/efa/efa_com.c size = aq->depth * sizeof(*cq->entries);
entries 690 drivers/infiniband/hw/efa/efa_com.c dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr);
entries 692 drivers/infiniband/hw/efa/efa_com.c size = aenq->depth * sizeof(*aenq->entries);
entries 693 drivers/infiniband/hw/efa/efa_com.c dma_free_coherent(edev->dmadev, size, aenq->entries, aenq->dma_addr);
entries 796 drivers/infiniband/hw/efa/efa_com.c dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries),
entries 797 drivers/infiniband/hw/efa/efa_com.c aq->cq.entries, aq->cq.dma_addr);
entries 799 drivers/infiniband/hw/efa/efa_com.c dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries),
entries 800 drivers/infiniband/hw/efa/efa_com.c aq->sq.entries, aq->sq.dma_addr);
entries 859 drivers/infiniband/hw/efa/efa_com.c aenq_e = &aenq->entries[ci]; /* Get first entry */
entries 884 drivers/infiniband/hw/efa/efa_com.c aenq_e = &aenq->entries[ci];
entries 25 drivers/infiniband/hw/efa/efa_com.h struct efa_admin_acq_entry *entries;
entries 34 drivers/infiniband/hw/efa/efa_com.h struct efa_admin_aq_entry *entries;
entries 84 drivers/infiniband/hw/efa/efa_com.h struct efa_admin_aenq_entry *entries;
entries 928 drivers/infiniband/hw/efa/efa_verbs.c int entries = attr->cqe;
entries 931 drivers/infiniband/hw/efa/efa_verbs.c ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
entries 933 drivers/infiniband/hw/efa/efa_verbs.c if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
entries 936 drivers/infiniband/hw/efa/efa_verbs.c entries, dev->dev_attr.max_cq_depth);
entries 987 drivers/infiniband/hw/efa/efa_verbs.c cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
entries 996 drivers/infiniband/hw/efa/efa_verbs.c params.cq_depth = entries;
entries 1007 drivers/infiniband/hw/efa/efa_verbs.c WARN_ON_ONCE(entries != result.actual_depth);
entries 573 drivers/infiniband/hw/hfi1/init.c cce = cc_state->cct.entries[max_ccti].entry;
entries 618 drivers/infiniband/hw/hfi1/init.c ccti_min = cc_state->cong_setting.entries[sl].ccti_min;
entries 619 drivers/infiniband/hw/hfi1/init.c ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
entries 3738 drivers/infiniband/hw/hfi1/mad.c struct opa_congestion_setting_entry_shadow *entries;
entries 3755 drivers/infiniband/hw/hfi1/mad.c entries = cc_state->cong_setting.entries;
entries 3759 drivers/infiniband/hw/hfi1/mad.c p->entries[i].ccti_increase = entries[i].ccti_increase;
entries 3760 drivers/infiniband/hw/hfi1/mad.c p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
entries 3761 drivers/infiniband/hw/hfi1/mad.c p->entries[i].trigger_threshold =
entries 3762 drivers/infiniband/hw/hfi1/mad.c entries[i].trigger_threshold;
entries 3763 drivers/infiniband/hw/hfi1/mad.c p->entries[i].ccti_min = entries[i].ccti_min;
entries 3807 drivers/infiniband/hw/hfi1/mad.c memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
entries 3812 drivers/infiniband/hw/hfi1/mad.c memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
entries 3830 drivers/infiniband/hw/hfi1/mad.c struct opa_congestion_setting_entry_shadow *entries;
entries 3845 drivers/infiniband/hw/hfi1/mad.c entries = ppd->congestion_entries;
entries 3847 drivers/infiniband/hw/hfi1/mad.c entries[i].ccti_increase = p->entries[i].ccti_increase;
entries 3848 drivers/infiniband/hw/hfi1/mad.c entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
entries 3849 drivers/infiniband/hw/hfi1/mad.c entries[i].trigger_threshold =
entries 3850 drivers/infiniband/hw/hfi1/mad.c p->entries[i].trigger_threshold;
entries 3851 drivers/infiniband/hw/hfi1/mad.c entries[i].ccti_min = p->entries[i].ccti_min;
entries 3938 drivers/infiniband/hw/hfi1/mad.c struct ib_cc_table_entry_shadow *entries;
entries 3965 drivers/infiniband/hw/hfi1/mad.c entries = cc_state->cct.entries;
entries 3970 drivers/infiniband/hw/hfi1/mad.c cpu_to_be16(entries[i].entry);
entries 3989 drivers/infiniband/hw/hfi1/mad.c struct ib_cc_table_entry_shadow *entries;
entries 4019 drivers/infiniband/hw/hfi1/mad.c entries = ppd->ccti_entries;
entries 4021 drivers/infiniband/hw/hfi1/mad.c entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
entries 298 drivers/infiniband/hw/hfi1/mad.h struct opa_congestion_setting_entry entries[OPA_MAX_SLS];
entries 304 drivers/infiniband/hw/hfi1/mad.h struct opa_congestion_setting_entry_shadow entries[OPA_MAX_SLS];
entries 337 drivers/infiniband/hw/hfi1/mad.h struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
entries 62 drivers/infiniband/hw/hfi1/msix.c struct hfi1_msix_entry *entries;
entries 83 drivers/infiniband/hw/hfi1/msix.c entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),
entries 85 drivers/infiniband/hw/hfi1/msix.c if (!entries) {
entries 90 drivers/infiniband/hw/hfi1/msix.c dd->msix_info.msix_entries = entries;
entries 2765 drivers/infiniband/hw/hfi1/rc.c ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
entries 2766 drivers/infiniband/hw/hfi1/rc.c ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
entries 2768 drivers/infiniband/hw/hfi1/rc.c cc_state->cong_setting.entries[sl].trigger_threshold;
entries 759 drivers/infiniband/hw/hns/hns_roce_device.h u32 entries;
entries 3864 drivers/infiniband/hw/hns/hns_roce_hw_v1.c unsigned long off = (entry & (eq->entries - 1)) *
entries 3877 drivers/infiniband/hw/hns/hns_roce_hw_v1.c !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
entries 3972 drivers/infiniband/hw/hns/hns_roce_hw_v1.c unsigned long off = (entry & (eq->entries - 1)) *
entries 3986 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
entries 4181 drivers/infiniband/hw/hns/hns_roce_hw_v1.c int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
entries 4236 drivers/infiniband/hw/hns/hns_roce_hw_v1.c num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
entries 4239 drivers/infiniband/hw/hns/hns_roce_hw_v1.c if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
entries 4241 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
entries 4345 drivers/infiniband/hw/hns/hns_roce_hw_v1.c eq->entries = hr_dev->caps.ceqe_depth;
entries 4346 drivers/infiniband/hw/hns/hns_roce_hw_v1.c eq->log_entries = ilog2(eq->entries);
entries 4355 drivers/infiniband/hw/hns/hns_roce_hw_v1.c eq->entries = hr_dev->caps.aeqe_depth;
entries 4356 drivers/infiniband/hw/hns/hns_roce_hw_v1.c eq->log_entries = ilog2(eq->entries);
entries 4987 drivers/infiniband/hw/hns/hns_roce_hw_v2.c off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
entries 5000 drivers/infiniband/hw/hns/hns_roce_hw_v2.c off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
entries 5020 drivers/infiniband/hw/hns/hns_roce_hw_v2.c !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
entries 5099 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (eq->cons_index > (2 * eq->entries - 1))
entries 5117 drivers/infiniband/hw/hns/hns_roce_hw_v2.c off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
entries 5130 drivers/infiniband/hw/hns/hns_roce_hw_v2.c off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
entries 5150 drivers/infiniband/hw/hns/hns_roce_hw_v2.c (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
entries 5176 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) {
entries 5327 drivers/infiniband/hw/hns/hns_roce_hw_v2.c dma_free_coherent(dev, (unsigned int)(eq->entries *
entries 5337 drivers/infiniband/hw/hns/hns_roce_hw_v2.c size = (eq->entries - eqe_alloc) * eq->eqe_size;
entries 5356 drivers/infiniband/hw/hns/hns_roce_hw_v2.c size = (eq->entries - eqe_alloc)
entries 5413 drivers/infiniband/hw/hns/hns_roce_hw_v2.c eq->shift = ilog2((unsigned int)eq->entries);
entries 5567 drivers/infiniband/hw/hns/hns_roce_hw_v2.c ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
entries 5572 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (eq->entries > buf_chk_sz / eq->eqe_size) {
entries 5574 drivers/infiniband/hw/hns/hns_roce_hw_v2.c eq->entries);
entries 5577 drivers/infiniband/hw/hns/hns_roce_hw_v2.c eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
entries 5621 drivers/infiniband/hw/hns/hns_roce_hw_v2.c size = (eq->entries - eqe_alloc) * eq->eqe_size;
entries 5656 drivers/infiniband/hw/hns/hns_roce_hw_v2.c size = (eq->entries - eqe_alloc)
entries 5945 drivers/infiniband/hw/hns/hns_roce_hw_v2.c eq->entries = hr_dev->caps.ceqe_depth;
entries 5954 drivers/infiniband/hw/hns/hns_roce_hw_v2.c eq->entries = hr_dev->caps.aeqe_depth;
entries 1096 drivers/infiniband/hw/i40iw/i40iw_verbs.c int entries = attr->cqe;
entries 1101 drivers/infiniband/hw/i40iw/i40iw_verbs.c if (entries > iwdev->max_cqe)
entries 1115 drivers/infiniband/hw/i40iw/i40iw_verbs.c ukinfo->cq_size = max(entries, 4);
entries 179 drivers/infiniband/hw/mlx4/cq.c int entries = attr->cqe;
entries 189 drivers/infiniband/hw/mlx4/cq.c if (entries < 1 || entries > dev->dev->caps.max_cqes)
entries 195 drivers/infiniband/hw/mlx4/cq.c entries = roundup_pow_of_two(entries + 1);
entries 196 drivers/infiniband/hw/mlx4/cq.c cq->ibcq.cqe = entries - 1;
entries 215 drivers/infiniband/hw/mlx4/cq.c ucmd.buf_addr, entries);
entries 235 drivers/infiniband/hw/mlx4/cq.c err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
entries 248 drivers/infiniband/hw/mlx4/cq.c err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
entries 292 drivers/infiniband/hw/mlx4/cq.c int entries)
entries 303 drivers/infiniband/hw/mlx4/cq.c err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
entries 310 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf->cqe = entries - 1;
entries 316 drivers/infiniband/hw/mlx4/cq.c int entries, struct ib_udata *udata)
entries 332 drivers/infiniband/hw/mlx4/cq.c &cq->resize_umem, ucmd.buf_addr, entries);
entries 339 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf->cqe = entries - 1;
entries 380 drivers/infiniband/hw/mlx4/cq.c int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
entries 389 drivers/infiniband/hw/mlx4/cq.c if (entries < 1 || entries > dev->dev->caps.max_cqes) {
entries 394 drivers/infiniband/hw/mlx4/cq.c entries = roundup_pow_of_two(entries + 1);
entries 395 drivers/infiniband/hw/mlx4/cq.c if (entries == ibcq->cqe + 1) {
entries 400 drivers/infiniband/hw/mlx4/cq.c if (entries > dev->dev->caps.max_cqes + 1) {
entries 406 drivers/infiniband/hw/mlx4/cq.c err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
entries 412 drivers/infiniband/hw/mlx4/cq.c if (entries < outst_cqe + 1) {
entries 417 drivers/infiniband/hw/mlx4/cq.c err = mlx4_alloc_resize_buf(dev, cq, entries);
entries 424 drivers/infiniband/hw/mlx4/cq.c err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
entries 745 drivers/infiniband/hw/mlx4/mlx4_ib.h int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
entries 705 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq, int entries, u32 **cqb,
entries 735 drivers/infiniband/hw/mlx5/cq.c ib_umem_get(udata, ucmd.buf_addr, entries * ucmd.cqe_size,
entries 749 drivers/infiniband/hw/mlx5/cq.c ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
entries 846 drivers/infiniband/hw/mlx5/cq.c int entries, int cqe_size,
entries 861 drivers/infiniband/hw/mlx5/cq.c err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
entries 914 drivers/infiniband/hw/mlx5/cq.c int entries = attr->cqe;
entries 928 drivers/infiniband/hw/mlx5/cq.c if (entries < 0 ||
entries 929 drivers/infiniband/hw/mlx5/cq.c (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
entries 935 drivers/infiniband/hw/mlx5/cq.c entries = roundup_pow_of_two(entries + 1);
entries 936 drivers/infiniband/hw/mlx5/cq.c if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
entries 939 drivers/infiniband/hw/mlx5/cq.c cq->ibcq.cqe = entries - 1;
entries 949 drivers/infiniband/hw/mlx5/cq.c err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
entries 955 drivers/infiniband/hw/mlx5/cq.c err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
entries 974 drivers/infiniband/hw/mlx5/cq.c MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
entries 1116 drivers/infiniband/hw/mlx5/cq.c int entries, struct ib_udata *udata, int *npas,
entries 1132 drivers/infiniband/hw/mlx5/cq.c if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
entries 1136 drivers/infiniband/hw/mlx5/cq.c (size_t)ucmd.cqe_size * entries,
entries 1153 drivers/infiniband/hw/mlx5/cq.c int entries, int cqe_size)
entries 1161 drivers/infiniband/hw/mlx5/cq.c err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
entries 1229 drivers/infiniband/hw/mlx5/cq.c int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
entries 1248 drivers/infiniband/hw/mlx5/cq.c if (entries < 1 ||
entries 1249 drivers/infiniband/hw/mlx5/cq.c entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
entries 1251 drivers/infiniband/hw/mlx5/cq.c entries,
entries 1256 drivers/infiniband/hw/mlx5/cq.c entries = roundup_pow_of_two(entries + 1);
entries 1257 drivers/infiniband/hw/mlx5/cq.c if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
entries 1260 drivers/infiniband/hw/mlx5/cq.c if (entries == ibcq->cqe + 1)
entries 1265 drivers/infiniband/hw/mlx5/cq.c err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
entries 1269 drivers/infiniband/hw/mlx5/cq.c err = resize_kernel(dev, cq, entries, cqe_size);
entries 1311 drivers/infiniband/hw/mlx5/cq.c MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
entries 1321 drivers/infiniband/hw/mlx5/cq.c cq->ibcq.cqe = entries - 1;
entries 1340 drivers/infiniband/hw/mlx5/cq.c cq->ibcq.cqe = entries - 1;
entries 1146 drivers/infiniband/hw/mlx5/mlx5_ib.h int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
entries 609 drivers/infiniband/hw/mthca/mthca_provider.c int entries = attr->cqe;
entries 620 drivers/infiniband/hw/mthca/mthca_provider.c if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
entries 648 drivers/infiniband/hw/mthca/mthca_provider.c for (nent = 1; nent <= entries; nent <<= 1)
entries 681 drivers/infiniband/hw/mthca/mthca_provider.c int entries)
entries 707 drivers/infiniband/hw/mthca/mthca_provider.c ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
entries 716 drivers/infiniband/hw/mthca/mthca_provider.c cq->resize_buf->cqe = entries - 1;
entries 725 drivers/infiniband/hw/mthca/mthca_provider.c static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
entries 733 drivers/infiniband/hw/mthca/mthca_provider.c if (entries < 1 || entries > dev->limits.max_cqes)
entries 738 drivers/infiniband/hw/mthca/mthca_provider.c entries = roundup_pow_of_two(entries + 1);
entries 739 drivers/infiniband/hw/mthca/mthca_provider.c if (entries == ibcq->cqe + 1) {
entries 745 drivers/infiniband/hw/mthca/mthca_provider.c ret = mthca_alloc_resize_buf(dev, cq, entries);
entries 757 drivers/infiniband/hw/mthca/mthca_provider.c ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));
entries 793 drivers/infiniband/hw/mthca/mthca_provider.c ibcq->cqe = entries - 1;
entries 1785 drivers/infiniband/hw/ocrdma/ocrdma_hw.c int entries, int dpp_cq, u16 pd_id)
entries 1793 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (entries > dev->attr.max_cqe) {
entries 1795 drivers/infiniband/hw/ocrdma/ocrdma_hw.c __func__, dev->id, dev->attr.max_cqe, entries);
entries 124 drivers/infiniband/hw/ocrdma/ocrdma_hw.h int entries, int dpp_cq, u16 pd_id);
entries 983 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c int entries = attr->cqe;
entries 1009 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
entries 687 drivers/infiniband/hw/qedr/verbs.c static inline int qedr_align_cq_entries(int entries)
entries 692 drivers/infiniband/hw/qedr/verbs.c size = (entries + 1) * QEDR_CQE_SIZE;
entries 822 drivers/infiniband/hw/qedr/verbs.c int entries = attr->cqe;
entries 832 drivers/infiniband/hw/qedr/verbs.c udata ? "User Lib" : "Kernel", entries, vector);
entries 834 drivers/infiniband/hw/qedr/verbs.c if (entries > QEDR_MAX_CQES) {
entries 837 drivers/infiniband/hw/qedr/verbs.c entries, QEDR_MAX_CQES);
entries 841 drivers/infiniband/hw/qedr/verbs.c chain_entries = qedr_align_cq_entries(entries);
entries 2139 drivers/infiniband/hw/qib/qib_mad.c struct ib_cc_congestion_entry_shadow *entries;
entries 2145 drivers/infiniband/hw/qib/qib_mad.c entries = ppd->congestion_entries_shadow->entries;
entries 2151 drivers/infiniband/hw/qib/qib_mad.c p->entries[i].ccti_increase = entries[i].ccti_increase;
entries 2152 drivers/infiniband/hw/qib/qib_mad.c p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
entries 2153 drivers/infiniband/hw/qib/qib_mad.c p->entries[i].trigger_threshold = entries[i].trigger_threshold;
entries 2154 drivers/infiniband/hw/qib/qib_mad.c p->entries[i].ccti_min = entries[i].ccti_min;
entries 2172 drivers/infiniband/hw/qib/qib_mad.c struct ib_cc_table_entry_shadow *entries;
entries 2200 drivers/infiniband/hw/qib/qib_mad.c entries = &ppd->ccti_entries_shadow->
entries 2201 drivers/infiniband/hw/qib/qib_mad.c entries[IB_CCT_ENTRIES * cct_block_index];
entries 2205 drivers/infiniband/hw/qib/qib_mad.c p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);
entries 2228 drivers/infiniband/hw/qib/qib_mad.c p->entries[i].ccti_increase;
entries 2231 drivers/infiniband/hw/qib/qib_mad.c be16_to_cpu(p->entries[i].ccti_timer);
entries 2234 drivers/infiniband/hw/qib/qib_mad.c p->entries[i].trigger_threshold;
entries 2237 drivers/infiniband/hw/qib/qib_mad.c p->entries[i].ccti_min;
entries 2252 drivers/infiniband/hw/qib/qib_mad.c struct ib_cc_table_entry_shadow *entries;
entries 2275 drivers/infiniband/hw/qib/qib_mad.c entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);
entries 2278 drivers/infiniband/hw/qib/qib_mad.c entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);
entries 2283 drivers/infiniband/hw/qib/qib_mad.c memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
entries 2288 drivers/infiniband/hw/qib/qib_mad.c memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
entries 245 drivers/infiniband/hw/qib/qib_mad.h struct ib_cc_congestion_entry entries[IB_CC_CCS_ENTRIES];
entries 251 drivers/infiniband/hw/qib/qib_mad.h struct ib_cc_congestion_entry_shadow entries[IB_CC_CCS_ENTRIES];
entries 284 drivers/infiniband/hw/qib/qib_mad.h struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
entries 105 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c int entries = attr->cqe;
entries 122 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c entries = roundup_pow_of_two(entries);
entries 123 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c if (entries < 1 || entries > dev->dsr->caps.max_cqe)
entries 129 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ibcq.cqe = entries;
entries 148 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
entries 183 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cmd->cqe = entries;
entries 209 drivers/infiniband/sw/rdmavt/cq.c unsigned int entries = attr->cqe;
entries 216 drivers/infiniband/sw/rdmavt/cq.c if (entries < 1 || entries > rdi->dparms.props.max_cqe)
entries 232 drivers/infiniband/sw/rdmavt/cq.c sz = sizeof(struct ib_uverbs_wc) * (entries + 1);
entries 238 drivers/infiniband/sw/rdmavt/cq.c sz = sizeof(struct ib_wc) * (entries + 1);
entries 291 drivers/infiniband/sw/rdmavt/cq.c cq->ibcq.cqe = entries;
entries 153 drivers/infiniband/sw/rdmavt/qp.c kfree(wss->entries);
entries 154 drivers/infiniband/sw/rdmavt/qp.c wss->entries = NULL;
entries 218 drivers/infiniband/sw/rdmavt/qp.c wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
entries 220 drivers/infiniband/sw/rdmavt/qp.c if (!wss->entries) {
entries 276 drivers/infiniband/sw/rdmavt/qp.c bits = xchg(&wss->entries[entry], 0);
entries 293 drivers/infiniband/sw/rdmavt/qp.c if (!test_and_set_bit(nr, &wss->entries[entry]))
entries 306 drivers/infiniband/ulp/ipoib/ipoib.h atomic_t entries;
entries 1433 drivers/infiniband/ulp/ipoib/ipoib_main.c atomic_inc(&ntbl->entries);
entries 1459 drivers/infiniband/ulp/ipoib/ipoib_main.c if (atomic_dec_and_test(&priv->ntbl.entries)) {
entries 1533 drivers/infiniband/ulp/ipoib/ipoib_main.c atomic_set(&ntbl->entries, 0);
entries 1613 drivers/infiniband/ulp/ipoib/ipoib_main.c wait_flushed = atomic_read(&priv->ntbl.entries);
entries 229 drivers/iommu/intel-pasid.c struct pasid_entry *entries;
entries 243 drivers/iommu/intel-pasid.c entries = get_pasid_table_from_pde(&dir[dir_index]);
entries 244 drivers/iommu/intel-pasid.c if (!entries) {
entries 245 drivers/iommu/intel-pasid.c entries = alloc_pgtable_page(info->iommu->node);
entries 246 drivers/iommu/intel-pasid.c if (!entries) {
entries 252 drivers/iommu/intel-pasid.c (u64)virt_to_phys(entries) | PASID_PTE_PRESENT);
entries 256 drivers/iommu/intel-pasid.c return &entries[index];
entries 488 drivers/iommu/iova.c if (fq->entries[idx].counter >= counter)
entries 492 drivers/iommu/iova.c iovad->entry_dtor(fq->entries[idx].data);
entries 495 drivers/iommu/iova.c fq->entries[idx].iova_pfn,
entries 496 drivers/iommu/iova.c fq->entries[idx].pages);
entries 526 drivers/iommu/iova.c iovad->entry_dtor(fq->entries[idx].data);
entries 573 drivers/iommu/iova.c fq->entries[idx].iova_pfn = pfn;
entries 574 drivers/iommu/iova.c fq->entries[idx].pages = pages;
entries 575 drivers/iommu/iova.c fq->entries[idx].data = data;
entries 576 drivers/iommu/iova.c fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
entries 3494 drivers/irqchip/irq-gic-v3-its.c int entries;
entries 3504 drivers/irqchip/irq-gic-v3-its.c entries = roundup_pow_of_two(nr_cpu_ids);
entries 3505 drivers/irqchip/irq-gic-v3-its.c vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
entries 3514 drivers/irqchip/irq-gic-v3-its.c vpe_proxy.dev = its_create_device(its, devid, entries, false);
entries 3521 drivers/irqchip/irq-gic-v3-its.c BUG_ON(entries > vpe_proxy.dev->nr_ites);
entries 42 drivers/lightnvm/pblk-rb.c vfree(rb->entries);
entries 76 drivers/lightnvm/pblk-rb.c struct pblk_rb_entry *entries;
entries 84 drivers/lightnvm/pblk-rb.c entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
entries 85 drivers/lightnvm/pblk-rb.c if (!entries)
entries 92 drivers/lightnvm/pblk-rb.c rb->entries = entries;
entries 123 drivers/lightnvm/pblk-rb.c vfree(entries);
entries 133 drivers/lightnvm/pblk-rb.c vfree(entries);
entries 138 drivers/lightnvm/pblk-rb.c entry = &rb->entries[init_entry];
entries 145 drivers/lightnvm/pblk-rb.c entry = &rb->entries[init_entry];
entries 248 drivers/lightnvm/pblk-rb.c entry = &rb->entries[rb->l2p_update];
entries 344 drivers/lightnvm/pblk-rb.c entry = &rb->entries[ring_pos];
entries 368 drivers/lightnvm/pblk-rb.c entry = &rb->entries[ring_pos];
entries 405 drivers/lightnvm/pblk-rb.c entry = &rb->entries[flush_point];
entries 577 drivers/lightnvm/pblk-rb.c entry = &rb->entries[pos];
entries 661 drivers/lightnvm/pblk-rb.c entry = &rb->entries[pos];
entries 688 drivers/lightnvm/pblk-rb.c return &rb->entries[entry].w_ctx;
entries 778 drivers/lightnvm/pblk-rb.c if (!rb->entries) {
entries 784 drivers/lightnvm/pblk-rb.c entry = &rb->entries[i];
entries 160 drivers/lightnvm/pblk-write.c entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
entries 164 drivers/lightnvm/pblk.h struct pblk_rb_entry *entries; /* Ring buffer entries */
entries 743 drivers/lightnvm/pblk.h unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
entries 360 drivers/md/bcache/journal.c int ret = 0, keys = 0, entries = 0;
entries 403 drivers/md/bcache/journal.c entries++;
entries 407 drivers/md/bcache/journal.c keys, entries, end);
entries 302 drivers/md/dm-bio-prison-v1.c struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
entries 318 drivers/md/dm-bio-prison-v1.c ds->entries[i].ds = ds;
entries 319 drivers/md/dm-bio-prison-v1.c ds->entries[i].count = 0;
entries 320 drivers/md/dm-bio-prison-v1.c INIT_LIST_HEAD(&ds->entries[i].work_items);
entries 339 drivers/md/dm-bio-prison-v1.c entry = ds->entries + ds->current_entry;
entries 355 drivers/md/dm-bio-prison-v1.c !ds->entries[ds->sweeper].count) {
entries 356 drivers/md/dm-bio-prison-v1.c list_splice_init(&ds->entries[ds->sweeper].work_items, head);
entries 360 drivers/md/dm-bio-prison-v1.c if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
entries 361 drivers/md/dm-bio-prison-v1.c list_splice_init(&ds->entries[ds->sweeper].work_items, head);
entries 387 drivers/md/dm-bio-prison-v1.c !ds->entries[ds->current_entry].count)
entries 390 drivers/md/dm-bio-prison-v1.c list_add(work, &ds->entries[ds->current_entry].work_items);
entries 392 drivers/md/dm-bio-prison-v1.c if (!ds->entries[next_entry].count)
entries 114 drivers/md/dm-integrity.c __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
entries 2573 drivers/md/dm-integrity.c memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
entries 74 drivers/md/dm-writecache.c struct wc_memory_entry entries[0];
entries 146 drivers/md/dm-writecache.c struct wc_entry *entries;
entries 351 drivers/md/dm-writecache.c return &sb(wc)->entries[e->index];
entries 866 drivers/md/dm-writecache.c if (wc->entries)
entries 868 drivers/md/dm-writecache.c wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
entries 869 drivers/md/dm-writecache.c if (!wc->entries)
entries 872 drivers/md/dm-writecache.c struct wc_entry *e = &wc->entries[b];
entries 916 drivers/md/dm-writecache.c sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
entries 941 drivers/md/dm-writecache.c struct wc_entry *e = &wc->entries[b];
entries 962 drivers/md/dm-writecache.c struct wc_entry *e = &wc->entries[b];
entries 1764 drivers/md/dm-writecache.c offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
entries 1804 drivers/md/dm-writecache.c write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
entries 1843 drivers/md/dm-writecache.c if (wc->entries)
entries 1844 drivers/md/dm-writecache.c vfree(wc->entries);
entries 40 drivers/md/persistent-data/dm-block-manager.c unsigned long entries[MAX_STACK];
entries 86 drivers/md/persistent-data/dm-block-manager.c t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2);
entries 107 drivers/md/persistent-data/dm-block-manager.c stack_trace_print(lock->traces[i].entries,
entries 316 drivers/md/raid5-ppl.c &pplhdr->entries[io->entries_count - 1];
entries 336 drivers/md/raid5-ppl.c e = &pplhdr->entries[io->entries_count++];
entries 446 drivers/md/raid5-ppl.c struct ppl_header_entry *e = &pplhdr->entries[i];
entries 988 drivers/md/raid5-ppl.c struct ppl_header_entry *e = &pplhdr->entries[i];
entries 1166 drivers/md/raid5-ppl.c le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
entries 66 drivers/media/dvb-frontends/dvb-pll.c } entries[];
entries 79 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 102 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 125 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 143 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 158 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 180 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 194 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 220 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 253 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 277 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 294 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 311 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 330 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 399 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 444 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 463 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 477 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 498 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 519 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 537 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 552 drivers/media/dvb-frontends/dvb-pll.c .entries = {
entries 605 drivers/media/dvb-frontends/dvb-pll.c if (frequency > desc->entries[i].limit)
entries 617 drivers/media/dvb-frontends/dvb-pll.c desc->entries[i].stepsize/2) / desc->entries[i].stepsize;
entries 620 drivers/media/dvb-frontends/dvb-pll.c buf[2] = desc->entries[i].config;
entries 621 drivers/media/dvb-frontends/dvb-pll.c buf[3] = desc->entries[i].cb;
entries 631 drivers/media/dvb-frontends/dvb-pll.c return (div * desc->entries[i].stepsize) - desc->iffreq;
entries 679 drivers/media/pci/cx18/cx18-ioctl.c e_idx = &idx->entry[idx->entries];
entries 683 drivers/media/pci/cx18/cx18-ioctl.c idx->entries < V4L2_ENC_IDX_ENTRIES) {
entries 698 drivers/media/pci/cx18/cx18-ioctl.c idx->entries++;
entries 699 drivers/media/pci/cx18/cx18-ioctl.c e_idx = &idx->entry[idx->entries];
entries 743 drivers/media/pci/cx18/cx18-ioctl.c if (idx->entries >= V4L2_ENC_IDX_ENTRIES ||
entries 770 drivers/media/pci/cx18/cx18-ioctl.c idx->entries = 0;
entries 791 drivers/media/pci/cx18/cx18-ioctl.c } while (idx->entries < V4L2_ENC_IDX_ENTRIES);
entries 1258 drivers/media/pci/ivtv/ivtv-ioctl.c int entries;
entries 1261 drivers/media/pci/ivtv/ivtv-ioctl.c entries = (itv->pgm_info_write_idx + IVTV_MAX_PGM_INDEX - itv->pgm_info_read_idx) %
entries 1263 drivers/media/pci/ivtv/ivtv-ioctl.c if (entries > V4L2_ENC_IDX_ENTRIES)
entries 1264 drivers/media/pci/ivtv/ivtv-ioctl.c entries = V4L2_ENC_IDX_ENTRIES;
entries 1265 drivers/media/pci/ivtv/ivtv-ioctl.c idx->entries = 0;
entries 1269 drivers/media/pci/ivtv/ivtv-ioctl.c for (i = 0; i < entries; i++) {
entries 1272 drivers/media/pci/ivtv/ivtv-ioctl.c idx->entries++;
entries 1276 drivers/media/pci/ivtv/ivtv-ioctl.c itv->pgm_info_read_idx = (itv->pgm_info_read_idx + idx->entries) % IVTV_MAX_PGM_INDEX;
entries 581 drivers/media/pci/saa7134/saa7134-video.c struct cliplist *cl, int entries, char *name)
entries 586 drivers/media/pci/saa7134/saa7134-video.c for (i = 0; i < entries; i++) {
entries 229 drivers/media/pci/saa7164/saa7164-core.c u32 entries = 0;
entries 242 drivers/media/pci/saa7164/saa7164-core.c entries++;
entries 244 drivers/media/pci/saa7164/saa7164-core.c printk(KERN_ERR "Total: %d\n", entries);
entries 151 drivers/media/platform/qcom/venus/hfi_parser.c u32 entries = fmt->format_entries;
entries 155 drivers/media/platform/qcom/venus/hfi_parser.c while (entries) {
entries 167 drivers/media/platform/qcom/venus/hfi_parser.c entries--;
entries 112 drivers/media/platform/vsp1/vsp1_dl.c struct vsp1_dl_entry *entries;
entries 294 drivers/media/platform/vsp1/vsp1_dl.c dlb->entries = pool->mem + i * dlb_size;
entries 386 drivers/media/platform/vsp1/vsp1_dl.c dlb->entries[dlb->num_entries].addr = reg;
entries 387 drivers/media/platform/vsp1/vsp1_dl.c dlb->entries[dlb->num_entries].data = data;
entries 565 drivers/media/platform/vsp1/vsp1_dl.c header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
entries 567 drivers/media/platform/vsp1/vsp1_dl.c dl->header = ((void *)dl->body0->entries) + header_offset;
entries 653 drivers/media/v4l2-core/v4l2-ioctl.c p->entries, p->entries_cap);
entries 65 drivers/memstick/core/mspro_block.c struct mspro_attr_entry entries[];
entries 1040 drivers/memstick/core/mspro_block.c addr = be32_to_cpu(attr->entries[cnt].address);
entries 1041 drivers/memstick/core/mspro_block.c s_attr->size = be32_to_cpu(attr->entries[cnt].size);
entries 1043 drivers/memstick/core/mspro_block.c "size %zx\n", cnt, attr->entries[cnt].id, addr,
entries 1045 drivers/memstick/core/mspro_block.c s_attr->id = attr->entries[cnt].id;
entries 1048 drivers/memstick/core/mspro_block.c mspro_block_attr_name(attr->entries[cnt].id));
entries 1051 drivers/memstick/core/mspro_block.c "attr_x%02x", attr->entries[cnt].id);
entries 233 drivers/misc/cxl/flash.c unsigned int entries = 0, i;
entries 257 drivers/misc/cxl/flash.c entries = len_chunk / CXL_AI_BUFFER_SIZE;
entries 260 drivers/misc/cxl/flash.c entries++;
entries 262 drivers/misc/cxl/flash.c if (entries > CXL_AI_MAX_ENTRIES) {
entries 280 drivers/misc/cxl/flash.c for (i = 0; i < entries; i++) {
entries 290 drivers/misc/cxl/flash.c if ((i == (entries - 1)) && mod)
entries 300 drivers/misc/cxl/flash.c if ((i == (entries - 1)) && mod)
entries 305 drivers/misc/cxl/flash.c __func__, operation, need_header, entries, continue_token);
entries 311 drivers/misc/cxl/flash.c rc = fct(adapter->guest->handle, virt_to_phys(le), entries,
entries 67 drivers/misc/cxl/guest.c unsigned int entries, mod;
entries 77 drivers/misc/cxl/guest.c entries = len / SG_BUFFER_SIZE;
entries 80 drivers/misc/cxl/guest.c entries++;
entries 82 drivers/misc/cxl/guest.c if (entries > SG_MAX_ENTRIES) {
entries 83 drivers/misc/cxl/guest.c entries = SG_MAX_ENTRIES;
entries 88 drivers/misc/cxl/guest.c vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
entries 98 drivers/misc/cxl/guest.c for (i = 0; i < entries; i++) {
entries 106 drivers/misc/cxl/guest.c if ((i == (entries - 1)) && mod)
entries 112 drivers/misc/cxl/guest.c virt_to_phys(le), entries, &out);
entries 115 drivers/misc/cxl/guest.c virt_to_phys(le), entries, &out);
entries 117 drivers/misc/cxl/guest.c entries, out);
entries 128 drivers/misc/cxl/guest.c for (i = 0; i < entries; i++) {
entries 140 drivers/misc/cxl/guest.c for (i = 0; i < entries; i++) {
entries 380 drivers/misc/genwqe/card_base.c cd->ffdc[type].entries = e;
entries 466 drivers/misc/genwqe/card_base.c cd->ffdc[GENWQE_DBG_REGS].entries, 0);
entries 470 drivers/misc/genwqe/card_base.c cd->ffdc[GENWQE_DBG_UNIT0].entries);
entries 474 drivers/misc/genwqe/card_base.c cd->ffdc[GENWQE_DBG_UNIT1].entries);
entries 478 drivers/misc/genwqe/card_base.c cd->ffdc[GENWQE_DBG_UNIT2].entries);
entries 244 drivers/misc/genwqe/card_base.h unsigned int entries;
entries 29 drivers/misc/genwqe/card_debugfs.c int entries)
entries 34 drivers/misc/genwqe/card_debugfs.c for (i = 0; i < entries; i++) {
entries 46 drivers/misc/genwqe/card_debugfs.c int entries;
entries 49 drivers/misc/genwqe/card_debugfs.c entries = genwqe_ffdc_buff_size(cd, uid);
entries 50 drivers/misc/genwqe/card_debugfs.c if (entries < 0)
entries 53 drivers/misc/genwqe/card_debugfs.c if (entries == 0)
entries 56 drivers/misc/genwqe/card_debugfs.c regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL);
entries 61 drivers/misc/genwqe/card_debugfs.c genwqe_ffdc_buff_read(cd, uid, regs, entries);
entries 64 drivers/misc/genwqe/card_debugfs.c dbg_uidn_show(s, regs, entries);
entries 94 drivers/misc/genwqe/card_debugfs.c dbg_uidn_show(s, cd->ffdc[uid].regs, cd->ffdc[uid].entries);
entries 848 drivers/misc/genwqe/card_utils.c int entries = 0, ring, traps, traces, trace_entries;
entries 871 drivers/misc/genwqe/card_utils.c entries += d_len;
entries 873 drivers/misc/genwqe/card_utils.c entries += d_len >> 3;
entries 891 drivers/misc/genwqe/card_utils.c entries += traps + (traces * trace_entries);
entries 893 drivers/misc/genwqe/card_utils.c return entries;
entries 67 drivers/misc/mic/host/mic_smpt.c int entries, struct mic_device *mdev)
entries 72 drivers/misc/mic/host/mic_smpt.c for (i = spt; i < spt + entries; i++,
entries 88 drivers/misc/mic/host/mic_smpt.c int entries, s64 *ref, size_t size)
entries 108 drivers/misc/mic/host/mic_smpt.c if (ae == entries)
entries 115 drivers/misc/mic/host/mic_smpt.c if (ae == entries)
entries 124 drivers/misc/mic/host/mic_smpt.c spt = i - entries + 1;
entries 126 drivers/misc/mic/host/mic_smpt.c mic_add_smpt_entry(spt, ref, dma_addr, entries, mdev);
entries 260 drivers/misc/mic/scif/scif_fd.c int entries;
entries 270 drivers/misc/mic/scif/scif_fd.c entries = min_t(int, scif_info.maxid, node_ids.len);
entries 271 drivers/misc/mic/scif/scif_fd.c nodes = kmalloc_array(entries, sizeof(u16), GFP_KERNEL);
entries 272 drivers/misc/mic/scif/scif_fd.c if (entries && !nodes) {
entries 276 drivers/misc/mic/scif/scif_fd.c node_ids.len = scif_get_node_ids(nodes, entries, &self);
entries 279 drivers/misc/mic/scif/scif_fd.c if (copy_to_user(unodes, nodes, sizeof(u16) * entries)) {
entries 48 drivers/misc/vmw_vmci/vmci_doorbell.c struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE];
entries 123 drivers/misc/vmw_vmci/vmci_doorbell.c hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],
entries 188 drivers/misc/vmw_vmci/vmci_doorbell.c hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
entries 360 drivers/misc/vmw_vmci/vmci_doorbell.c hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {
entries 68 drivers/misc/vmw_vmci/vmci_handle_array.c array->entries[array->size] = handle;
entries 84 drivers/misc/vmw_vmci/vmci_handle_array.c if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
entries 85 drivers/misc/vmw_vmci/vmci_handle_array.c handle = array->entries[i];
entries 87 drivers/misc/vmw_vmci/vmci_handle_array.c array->entries[i] = array->entries[array->size];
entries 88 drivers/misc/vmw_vmci/vmci_handle_array.c array->entries[array->size] = VMCI_INVALID_HANDLE;
entries 105 drivers/misc/vmw_vmci/vmci_handle_array.c handle = array->entries[array->size];
entries 106 drivers/misc/vmw_vmci/vmci_handle_array.c array->entries[array->size] = VMCI_INVALID_HANDLE;
entries 121 drivers/misc/vmw_vmci/vmci_handle_array.c return array->entries[index];
entries 130 drivers/misc/vmw_vmci/vmci_handle_array.c if (vmci_handle_is_equal(array->entries[i], entry_handle))
entries 143 drivers/misc/vmw_vmci/vmci_handle_array.c return array->entries;
entries 20 drivers/misc/vmw_vmci/vmci_handle_array.h struct vmci_handle entries[];
entries 24 drivers/misc/vmw_vmci/vmci_handle_array.h offsetof(struct vmci_handle_arr, entries)
entries 23 drivers/misc/vmw_vmci/vmci_resource.c struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS];
entries 46 drivers/misc/vmw_vmci/vmci_resource.c &vmci_resource_table.entries[idx], node) {
entries 128 drivers/misc/vmw_vmci/vmci_resource.c hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);
entries 146 drivers/misc/vmw_vmci/vmci_resource.c hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
entries 35 drivers/mmc/host/dw_mmc-pci.c const struct pci_device_id *entries)
entries 60 drivers/mtd/ubi/eba.c struct ubi_eba_entry *entries;
entries 111 drivers/mtd/ubi/eba.c ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
entries 134 drivers/mtd/ubi/eba.c tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
entries 136 drivers/mtd/ubi/eba.c if (!tbl->entries)
entries 140 drivers/mtd/ubi/eba.c tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
entries 145 drivers/mtd/ubi/eba.c kfree(tbl->entries);
entries 162 drivers/mtd/ubi/eba.c kfree(tbl->entries);
entries 185 drivers/mtd/ubi/eba.c dst->entries[i].pnum = src->entries[i].pnum;
entries 438 drivers/mtd/ubi/eba.c return vol->eba_tbl->entries[lnum].pnum >= 0;
entries 463 drivers/mtd/ubi/eba.c pnum = vol->eba_tbl->entries[lnum].pnum;
entries 471 drivers/mtd/ubi/eba.c vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
entries 533 drivers/mtd/ubi/eba.c vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
entries 608 drivers/mtd/ubi/eba.c pnum = vol->eba_tbl->entries[lnum].pnum;
entries 867 drivers/mtd/ubi/eba.c vol->eba_tbl->entries[lnum].pnum = new_pnum;
entries 958 drivers/mtd/ubi/eba.c opnum = vol->eba_tbl->entries[lnum].pnum;
entries 980 drivers/mtd/ubi/eba.c vol->eba_tbl->entries[lnum].pnum = pnum;
entries 1022 drivers/mtd/ubi/eba.c pnum = vol->eba_tbl->entries[lnum].pnum;
entries 1150 drivers/mtd/ubi/eba.c ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);
entries 1366 drivers/mtd/ubi/eba.c if (vol->eba_tbl->entries[lnum].pnum != from) {
entries 1368 drivers/mtd/ubi/eba.c vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
entries 1453 drivers/mtd/ubi/eba.c ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
entries 1454 drivers/mtd/ubi/eba.c vol->eba_tbl->entries[lnum].pnum = to;
entries 1658 drivers/mtd/ubi/eba.c entry = &vol->eba_tbl->entries[aeb->lnum];
entries 289 drivers/net/dsa/microchip/ksz8795.c u8 *timestamp, u16 *entries)
entries 304 drivers/net/dsa/microchip/ksz8795.c *entries = 0;
entries 306 drivers/net/dsa/microchip/ksz8795.c *entries = 0;
entries 321 drivers/net/dsa/microchip/ksz8795.c *entries = cnt + 1;
entries 252 drivers/net/dsa/microchip/ksz_common.c u16 entries = 0;
entries 262 drivers/net/dsa/microchip/ksz_common.c &entries);
entries 269 drivers/net/dsa/microchip/ksz_common.c } while (i < entries);
entries 270 drivers/net/dsa/microchip/ksz_common.c if (i >= entries)
entries 131 drivers/net/dsa/microchip/ksz_common.h u16 *entries);
entries 475 drivers/net/dsa/sja1105/sja1105_clocking.c mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
entries 657 drivers/net/dsa/sja1105/sja1105_clocking.c mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
entries 812 drivers/net/dsa/sja1105/sja1105_dynamic_config.c priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries;
entries 110 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries);
entries 114 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_NUM_PORTS,
entries 116 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries)
entries 121 drivers/net/dsa/sja1105/sja1105_main.c mac = table->entries;
entries 150 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries);
entries 154 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
entries 156 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries)
entries 162 drivers/net/dsa/sja1105/sja1105_main.c mii = table->entries;
entries 198 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries);
entries 244 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries);
entries 248 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
entries 250 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries)
entries 256 drivers/net/dsa/sja1105/sja1105_main.c ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
entries 282 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries);
entries 286 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(1, table->ops->unpacked_entry_size,
entries 288 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries)
entries 302 drivers/net/dsa/sja1105/sja1105_main.c ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
entries 315 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries);
entries 319 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
entries 321 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries)
entries 326 drivers/net/dsa/sja1105/sja1105_main.c l2fwd = table->entries;
entries 364 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries);
entries 368 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
entries 370 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries)
entries 376 drivers/net/dsa/sja1105/sja1105_main.c ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
entries 441 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries);
entries 445 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
entries 447 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries)
entries 453 drivers/net/dsa/sja1105/sja1105_main.c ((struct sja1105_general_params_entry *)table->entries)[0] =
entries 482 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries);
entries 486 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
entries 488 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries)
entries 493 drivers/net/dsa/sja1105/sja1105_main.c policing = table->entries;
entries 520 drivers/net/dsa/sja1105/sja1105_main.c kfree(table->entries);
entries 528 drivers/net/dsa/sja1105/sja1105_main.c table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
entries 530 drivers/net/dsa/sja1105/sja1105_main.c if (!table->entries)
entries 535 drivers/net/dsa/sja1105/sja1105_main.c avb = table->entries;
entries 719 drivers/net/dsa/sja1105/sja1105_main.c mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
entries 720 drivers/net/dsa/sja1105/sja1105_main.c mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
entries 788 drivers/net/dsa/sja1105/sja1105_main.c mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
entries 850 drivers/net/dsa/sja1105/sja1105_main.c mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
entries 886 drivers/net/dsa/sja1105/sja1105_main.c l2_lookup = table->entries;
entries 928 drivers/net/dsa/sja1105/sja1105_main.c l2_lookup = table->entries;
entries 1290 drivers/net/dsa/sja1105/sja1105_main.c l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
entries 1330 drivers/net/dsa/sja1105/sja1105_main.c mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
entries 1394 drivers/net/dsa/sja1105/sja1105_main.c mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
entries 1434 drivers/net/dsa/sja1105/sja1105_main.c mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
entries 1447 drivers/net/dsa/sja1105/sja1105_main.c vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
entries 1479 drivers/net/dsa/sja1105/sja1105_main.c vlan = table->entries;
entries 1572 drivers/net/dsa/sja1105/sja1105_main.c general_params = table->entries;
entries 1602 drivers/net/dsa/sja1105/sja1105_main.c l2_lookup_params = table->entries;
entries 1892 drivers/net/dsa/sja1105/sja1105_main.c l2_lookup_params = table->entries;
entries 1915 drivers/net/dsa/sja1105/sja1105_main.c general_params = table->entries;
entries 564 drivers/net/dsa/sja1105/sja1105_static_config.c l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries;
entries 648 drivers/net/dsa/sja1105/sja1105_static_config.c u8 *entry_ptr = table->entries;
entries 1193 drivers/net/dsa/sja1105/sja1105_static_config.c kfree(config->tables[i].entries);
entries 1202 drivers/net/dsa/sja1105/sja1105_static_config.c u8 *entries = table->entries;
entries 1207 drivers/net/dsa/sja1105/sja1105_static_config.c memmove(entries + i * entry_size, entries + (i + 1) * entry_size, entries 1219 drivers/net/dsa/sja1105/sja1105_static_config.c void *new_entries, *old_entries = table->entries; entries 1231 drivers/net/dsa/sja1105/sja1105_static_config.c table->entries = new_entries; entries 280 drivers/net/dsa/sja1105/sja1105_static_config.h void *entries; entries 107 drivers/net/dsa/sja1105/sja1105_tas.c kfree(table->entries); entries 114 drivers/net/dsa/sja1105/sja1105_tas.c kfree(table->entries); entries 121 drivers/net/dsa/sja1105/sja1105_tas.c kfree(table->entries); entries 128 drivers/net/dsa/sja1105/sja1105_tas.c kfree(table->entries); entries 148 drivers/net/dsa/sja1105/sja1105_tas.c table->entries = kcalloc(num_entries, table->ops->unpacked_entry_size, entries 150 drivers/net/dsa/sja1105/sja1105_tas.c if (!table->entries) entries 153 drivers/net/dsa/sja1105/sja1105_tas.c schedule = table->entries; entries 157 drivers/net/dsa/sja1105/sja1105_tas.c table->entries = kcalloc(SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT, entries 159 drivers/net/dsa/sja1105/sja1105_tas.c if (!table->entries) entries 166 drivers/net/dsa/sja1105/sja1105_tas.c schedule_entry_points_params = table->entries; entries 170 drivers/net/dsa/sja1105/sja1105_tas.c table->entries = kcalloc(SJA1105_MAX_SCHEDULE_PARAMS_COUNT, entries 172 drivers/net/dsa/sja1105/sja1105_tas.c if (!table->entries) entries 175 drivers/net/dsa/sja1105/sja1105_tas.c schedule_params = table->entries; entries 179 drivers/net/dsa/sja1105/sja1105_tas.c table->entries = kcalloc(num_cycles, table->ops->unpacked_entry_size, entries 181 drivers/net/dsa/sja1105/sja1105_tas.c if (!table->entries) entries 184 drivers/net/dsa/sja1105/sja1105_tas.c schedule_entry_points = table->entries; entries 220 drivers/net/dsa/sja1105/sja1105_tas.c s64 delta_ns = offload->entries[i].interval; entries 226 drivers/net/dsa/sja1105/sja1105_tas.c ~offload->entries[i].gate_mask; entries 299 drivers/net/dsa/sja1105/sja1105_tas.c delta1 += offload->entries[i].interval, i++) { entries 305 drivers/net/dsa/sja1105/sja1105_tas.c delta2 += admin->entries[j].interval, j++) { entries 375 drivers/net/dsa/sja1105/sja1105_tas.c s64 delta_ns = admin->entries[i].interval; entries 114 drivers/net/ethernet/amazon/ena/ena_com.c sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr, entries 117 drivers/net/ethernet/amazon/ena/ena_com.c if (!sq->entries) { entries 136 drivers/net/ethernet/amazon/ena/ena_com.c cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr, entries 139 drivers/net/ethernet/amazon/ena/ena_com.c if (!cq->entries) { entries 159 drivers/net/ethernet/amazon/ena/ena_com.c aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr, entries 162 drivers/net/ethernet/amazon/ena/ena_com.c if (!aenq->entries) { entries 269 drivers/net/ethernet/amazon/ena/ena_com.c memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); entries 494 drivers/net/ethernet/amazon/ena/ena_com.c cqe = &admin_queue->cq.entries[head_masked]; entries 512 drivers/net/ethernet/amazon/ena/ena_com.c cqe = &admin_queue->cq.entries[head_masked]; entries 1643 drivers/net/ethernet/amazon/ena/ena_com.c if (sq->entries) entries 1644 drivers/net/ethernet/amazon/ena/ena_com.c dma_free_coherent(ena_dev->dmadev, size, sq->entries, entries 1646 drivers/net/ethernet/amazon/ena/ena_com.c sq->entries = NULL; entries 1649 drivers/net/ethernet/amazon/ena/ena_com.c if (cq->entries) entries 1650 
drivers/net/ethernet/amazon/ena/ena_com.c dma_free_coherent(ena_dev->dmadev, size, cq->entries, entries 1652 drivers/net/ethernet/amazon/ena/ena_com.c cq->entries = NULL; entries 1655 drivers/net/ethernet/amazon/ena/ena_com.c if (ena_dev->aenq.entries) entries 1656 drivers/net/ethernet/amazon/ena/ena_com.c dma_free_coherent(ena_dev->dmadev, size, aenq->entries, entries 1658 drivers/net/ethernet/amazon/ena/ena_com.c aenq->entries = NULL; entries 2023 drivers/net/ethernet/amazon/ena/ena_com.c aenq_e = &aenq->entries[masked_head]; /* Get first entry */ entries 2053 drivers/net/ethernet/amazon/ena/ena_com.c aenq_e = &aenq->entries[masked_head]; entries 211 drivers/net/ethernet/amazon/ena/ena_com.h struct ena_admin_acq_entry *entries; entries 219 drivers/net/ethernet/amazon/ena/ena_com.h struct ena_admin_aq_entry *entries; entries 272 drivers/net/ethernet/amazon/ena/ena_com.h struct ena_admin_aenq_entry *entries; entries 487 drivers/net/ethernet/amd/pcnet32.c unsigned int entries = BIT(size); entries 493 drivers/net/ethernet/amd/pcnet32.c sizeof(struct pcnet32_tx_head) * entries, entries 498 drivers/net/ethernet/amd/pcnet32.c new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC); entries 502 drivers/net/ethernet/amd/pcnet32.c new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC); entries 512 drivers/net/ethernet/amd/pcnet32.c lp->tx_ring_size = entries; entries 525 drivers/net/ethernet/amd/pcnet32.c sizeof(struct pcnet32_tx_head) * entries, entries 549 drivers/net/ethernet/amd/pcnet32.c unsigned int entries = BIT(size); entries 553 drivers/net/ethernet/amd/pcnet32.c sizeof(struct pcnet32_rx_head) * entries, entries 558 drivers/net/ethernet/amd/pcnet32.c new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC); entries 562 drivers/net/ethernet/amd/pcnet32.c new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC); entries 567 drivers/net/ethernet/amd/pcnet32.c overlap = min(entries, lp->rx_ring_size); entries 574 drivers/net/ethernet/amd/pcnet32.c for (; new < entries; new++) { entries 620 drivers/net/ethernet/amd/pcnet32.c lp->rx_ring_size = entries; entries 646 drivers/net/ethernet/amd/pcnet32.c sizeof(struct pcnet32_rx_head) * entries, entries 14962 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c u32 offset, entries; entries 14993 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c entries = tbl->fc_npiv_cfg.num_of_npiv; entries 14994 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c entries = (__force u32)be32_to_cpu((__force __be32)entries); entries 14995 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c tbl->fc_npiv_cfg.num_of_npiv = entries; entries 6549 drivers/net/ethernet/broadcom/bnxt/bnxt.c req.qp_num_entries = cpu_to_le32(ctx_pg->entries); entries 6559 drivers/net/ethernet/broadcom/bnxt/bnxt.c req.srq_num_entries = cpu_to_le32(ctx_pg->entries); entries 6568 drivers/net/ethernet/broadcom/bnxt/bnxt.c req.cq_num_entries = cpu_to_le32(ctx_pg->entries); entries 6595 drivers/net/ethernet/broadcom/bnxt/bnxt.c req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); entries 6606 drivers/net/ethernet/broadcom/bnxt/bnxt.c req.tim_num_entries = cpu_to_le32(ctx_pg->entries); entries 6622 drivers/net/ethernet/broadcom/bnxt/bnxt.c *num_entries = cpu_to_le32(ctx_pg->entries); entries 6761 drivers/net/ethernet/broadcom/bnxt/bnxt.c u32 mem_size, ena, entries; entries 6785 drivers/net/ethernet/broadcom/bnxt/bnxt.c ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + entries 6787 drivers/net/ethernet/broadcom/bnxt/bnxt.c mem_size = 
ctx->qp_entry_size * ctx_pg->entries; entries 6793 drivers/net/ethernet/broadcom/bnxt/bnxt.c ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; entries 6794 drivers/net/ethernet/broadcom/bnxt/bnxt.c mem_size = ctx->srq_entry_size * ctx_pg->entries; entries 6800 drivers/net/ethernet/broadcom/bnxt/bnxt.c ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; entries 6801 drivers/net/ethernet/broadcom/bnxt/bnxt.c mem_size = ctx->cq_entry_size * ctx_pg->entries; entries 6807 drivers/net/ethernet/broadcom/bnxt/bnxt.c ctx_pg->entries = ctx->vnic_max_vnic_entries + entries 6809 drivers/net/ethernet/broadcom/bnxt/bnxt.c mem_size = ctx->vnic_entry_size * ctx_pg->entries; entries 6815 drivers/net/ethernet/broadcom/bnxt/bnxt.c ctx_pg->entries = ctx->stat_max_entries; entries 6816 drivers/net/ethernet/broadcom/bnxt/bnxt.c mem_size = ctx->stat_entry_size * ctx_pg->entries; entries 6831 drivers/net/ethernet/broadcom/bnxt/bnxt.c ctx_pg->entries = num_mr + num_ah; entries 6832 drivers/net/ethernet/broadcom/bnxt/bnxt.c mem_size = ctx->mrav_entry_size * ctx_pg->entries; entries 6838 drivers/net/ethernet/broadcom/bnxt/bnxt.c ctx_pg->entries = entries 6843 drivers/net/ethernet/broadcom/bnxt/bnxt.c ctx_pg->entries = ctx->qp_mem.entries; entries 6844 drivers/net/ethernet/broadcom/bnxt/bnxt.c mem_size = ctx->tim_entry_size * ctx_pg->entries; entries 6851 drivers/net/ethernet/broadcom/bnxt/bnxt.c entries = ctx->qp_max_l2_entries + extra_qps; entries 6852 drivers/net/ethernet/broadcom/bnxt/bnxt.c entries = roundup(entries, ctx->tqm_entries_multiple); entries 6853 drivers/net/ethernet/broadcom/bnxt/bnxt.c entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, entries 6857 drivers/net/ethernet/broadcom/bnxt/bnxt.c ctx_pg->entries = entries; entries 6858 drivers/net/ethernet/broadcom/bnxt/bnxt.c mem_size = ctx->tqm_entry_size * entries; entries 1308 drivers/net/ethernet/broadcom/bnxt/bnxt.h u32 entries; entries 2114 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) entries 2126 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c *entries = le32_to_cpu(output->entries); entries 7531 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h __le32 entries; entries 188 drivers/net/ethernet/chelsio/cxgb/sge.c struct cmdQ_e *entries; /* HW command descriptor Q */ entries 203 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ_e *entries; /* HW freelist descriptor Q */ entries 213 drivers/net/ethernet/chelsio/cxgb/sge.c struct respQ_e *entries; /* HW response descriptor Q */ entries 530 drivers/net/ethernet/chelsio/cxgb/sge.c if (sge->respQ.entries) { entries 532 drivers/net/ethernet/chelsio/cxgb/sge.c pci_free_consistent(pdev, size, sge->respQ.entries, entries 543 drivers/net/ethernet/chelsio/cxgb/sge.c if (q->entries) { entries 545 drivers/net/ethernet/chelsio/cxgb/sge.c pci_free_consistent(pdev, size, q->entries, entries 567 drivers/net/ethernet/chelsio/cxgb/sge.c q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); entries 568 drivers/net/ethernet/chelsio/cxgb/sge.c if (!q->entries) entries 603 drivers/net/ethernet/chelsio/cxgb/sge.c sge->respQ.entries = entries 605 drivers/net/ethernet/chelsio/cxgb/sge.c if (!sge->respQ.entries) entries 664 drivers/net/ethernet/chelsio/cxgb/sge.c if (q->entries) { entries 666 drivers/net/ethernet/chelsio/cxgb/sge.c pci_free_consistent(pdev, size, q->entries, entries 692 drivers/net/ethernet/chelsio/cxgb/sge.c q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); entries 693 
drivers/net/ethernet/chelsio/cxgb/sge.c if (!q->entries) entries 828 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ_e *e = &q->entries[q->pidx]; entries 859 drivers/net/ethernet/chelsio/cxgb/sge.c e = q->entries; entries 1004 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ_e *from = &fl->entries[idx]; entries 1005 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ_e *to = &fl->entries[fl->pidx]; entries 1185 drivers/net/ethernet/chelsio/cxgb/sge.c e1 = q->entries; entries 1209 drivers/net/ethernet/chelsio/cxgb/sge.c e = e1 = &q->entries[pidx]; entries 1238 drivers/net/ethernet/chelsio/cxgb/sge.c e1 = q->entries; entries 1261 drivers/net/ethernet/chelsio/cxgb/sge.c e1 = q->entries; entries 1470 drivers/net/ethernet/chelsio/cxgb/sge.c struct respQ_e *e = &q->entries[q->cidx]; entries 1524 drivers/net/ethernet/chelsio/cxgb/sge.c e = q->entries; entries 1543 drivers/net/ethernet/chelsio/cxgb/sge.c const struct respQ_e *e = &Q->entries[Q->cidx]; entries 1560 drivers/net/ethernet/chelsio/cxgb/sge.c struct respQ_e *e = &q->entries[q->cidx]; entries 1579 drivers/net/ethernet/chelsio/cxgb/sge.c e = q->entries; entries 3119 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct msix_entry entries[SGE_QSETS + 1]; entries 3123 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c vectors = ARRAY_SIZE(entries); entries 3125 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c entries[i].entry = i; entries 3127 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c vectors = pci_enable_msix_range(adap->pdev, entries, entries 3133 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c adap->msix_info[i].vec = entries[i].vector; entries 1693 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int t4_read_rss(struct adapter *adapter, u16 *entries); entries 3019 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c int entries = sge_queue_entries(seq->private); entries 3021 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; entries 3030 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c int entries = sge_queue_entries(seq->private); entries 3033 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; entries 5279 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c struct msix_entry *entries; entries 5286 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c entries = kmalloc_array(max_ingq + 1, sizeof(*entries), entries 5288 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (!entries) entries 5298 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c entries[i].entry = i; entries 5317 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c allocated = pci_enable_msix_range(adap->pdev, entries, need, want); entries 5321 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c kfree(entries); entries 5343 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->msix_info[i].vec = entries[i].vector; entries 5346 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->msix_info_ulds[j].vec = entries[i].vector; entries 5355 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c kfree(entries); entries 2158 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c int entries = sge_queue_entries(seq->private); entries 2160 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; entries 2169 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c int entries = sge_queue_entries(seq->private); entries 2172 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c return *pos < entries ? 
(void *)((uintptr_t)*pos + 1) : NULL; entries 2304 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c int entries = sge_qstats_entries(seq->private); entries 2306 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; entries 2315 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c int entries = sge_qstats_entries(seq->private); entries 2318 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL; entries 2869 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c struct msix_entry entries[MSIX_ENTRIES]; entries 2873 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c entries[i].entry = i; entries 2885 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c want = pci_enable_msix_range(adapter->pdev, entries, need, want); entries 2898 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c adapter->msix_info[i].vec = entries[i].vector; entries 59 drivers/net/ethernet/cisco/enic/vnic_rq.h #define VNIC_RQ_BUF_BLK_ENTRIES(entries) \ entries 60 drivers/net/ethernet/cisco/enic/vnic_rq.h ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \ entries 62 drivers/net/ethernet/cisco/enic/vnic_rq.h #define VNIC_RQ_BUF_BLK_SZ(entries) \ entries 63 drivers/net/ethernet/cisco/enic/vnic_rq.h (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf)) entries 64 drivers/net/ethernet/cisco/enic/vnic_rq.h #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ entries 65 drivers/net/ethernet/cisco/enic/vnic_rq.h DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries)) entries 71 drivers/net/ethernet/cisco/enic/vnic_wq.h #define VNIC_WQ_BUF_BLK_ENTRIES(entries) \ entries 72 drivers/net/ethernet/cisco/enic/vnic_wq.h ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \ entries 74 drivers/net/ethernet/cisco/enic/vnic_wq.h #define VNIC_WQ_BUF_BLK_SZ(entries) \ entries 75 drivers/net/ethernet/cisco/enic/vnic_wq.h (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf)) entries 76 drivers/net/ethernet/cisco/enic/vnic_wq.h #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ entries 77 drivers/net/ethernet/cisco/enic/vnic_wq.h DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries)) entries 554 drivers/net/ethernet/cortina/gemini.c size_t entries = 1 << port->txq_order; entries 557 drivers/net/ethernet/cortina/gemini.c size_t len = n_txq * entries; entries 600 drivers/net/ethernet/cortina/gemini.c desc_ring += entries; entries 601 drivers/net/ethernet/cortina/gemini.c skb_tab += entries; entries 307 drivers/net/ethernet/freescale/fec_main.c int entries; entries 309 drivers/net/ethernet/freescale/fec_main.c entries = (((const char *)txq->dirty_tx - entries 312 drivers/net/ethernet/freescale/fec_main.c return entries >= 0 ? 
entries : entries + txq->bd.ring_size; entries 134 drivers/net/ethernet/ibm/ehea/ehea.h u8 entries[PAGE_SIZE]; entries 203 drivers/net/ethernet/ibm/ehea/ehea_qmr.h return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)]; entries 361 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) entries 373 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); entries 380 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c vfinfo->num_vf_mc_hashes = entries; entries 386 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c for (i = 0; i < entries; i++) { entries 1092 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c act = &rule->flow->action.entries[0]; entries 1308 drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c act = &flow->action.entries[0]; entries 203 drivers/net/ethernet/mellanox/mlx4/alloc.c struct list_head entries; entries 231 drivers/net/ethernet/mellanox/mlx4/alloc.c INIT_LIST_HEAD(&zones->entries); entries 291 drivers/net/ethernet/mellanox/mlx4/alloc.c if (!list_is_last(&entry->list, &zone_alloc->entries)) { entries 325 drivers/net/ethernet/mellanox/mlx4/alloc.c list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) { entries 361 drivers/net/ethernet/mellanox/mlx4/alloc.c list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) { entries 375 drivers/net/ethernet/mellanox/mlx4/alloc.c list_for_each_entry_from(it, &zone_alloc->entries, list) { entries 400 drivers/net/ethernet/mellanox/mlx4/alloc.c list_for_each_entry_from(curr_node, &zone_alloc->entries, list) { entries 430 drivers/net/ethernet/mellanox/mlx4/alloc.c list_for_each_entry(zone, &zones->entries, list) { entries 490 drivers/net/ethernet/mellanox/mlx4/alloc.c list_for_each_entry(zone, &zones->entries, list) { entries 192 drivers/net/ethernet/mellanox/mlx4/cq.c int entries, struct mlx4_mtt *mtt) entries 204 drivers/net/ethernet/mellanox/mlx4/cq.c cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24); entries 290 drivers/net/ethernet/mellanox/mlx4/cq.c static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size) entries 307 drivers/net/ethernet/mellanox/mlx4/cq.c if (entries_per_copy < entries) { entries 308 drivers/net/ethernet/mellanox/mlx4/cq.c for (i = 0; i < entries / entries_per_copy; i++) { entries 317 drivers/net/ethernet/mellanox/mlx4/cq.c err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ?
entries 328 drivers/net/ethernet/mellanox/mlx4/cq.c int entries, entries 334 drivers/net/ethernet/mellanox/mlx4/cq.c memset(buf->direct.buf, 0xcc, entries * cqe_size); entries 48 drivers/net/ethernet/mellanox/mlx4/en_cq.c int entries, int ring, enum cq_type mode, entries 61 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->size = entries; entries 2934 drivers/net/ethernet/mellanox/mlx4/main.c struct msix_entry *entries; entries 2947 drivers/net/ethernet/mellanox/mlx4/main.c entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL); entries 2948 drivers/net/ethernet/mellanox/mlx4/main.c if (!entries) entries 2952 drivers/net/ethernet/mellanox/mlx4/main.c entries[i].entry = i; entries 2954 drivers/net/ethernet/mellanox/mlx4/main.c nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, entries 2958 drivers/net/ethernet/mellanox/mlx4/main.c kfree(entries); entries 2964 drivers/net/ethernet/mellanox/mlx4/main.c priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector; entries 2973 drivers/net/ethernet/mellanox/mlx4/main.c entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; entries 3009 drivers/net/ethernet/mellanox/mlx4/main.c kfree(entries); entries 745 drivers/net/ethernet/mellanox/mlx4/mlx4.h __be64 entries[MLX4_MAX_MAC_NUM]; entries 768 drivers/net/ethernet/mellanox/mlx4/mlx4.h __be32 entries[MLX4_MAX_VLAN_NUM]; entries 691 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h int entries, int ring, enum cq_type mode, int node); entries 68 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[i] = 0; entries 82 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[i] = 0; entries 105 drivers/net/ethernet/mellanox/mlx4/port.c if (index < 0 || index >= table->max || !table->entries[index]) { entries 120 drivers/net/ethernet/mellanox/mlx4/port.c (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) entries 128 drivers/net/ethernet/mellanox/mlx4/port.c __be64 *entries) entries 138 drivers/net/ethernet/mellanox/mlx4/port.c memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); entries 160 drivers/net/ethernet/mellanox/mlx4/port.c if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { entries 216 drivers/net/ethernet/mellanox/mlx4/port.c if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))) entries 218 drivers/net/ethernet/mellanox/mlx4/port.c if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i])))) entries 243 drivers/net/ethernet/mellanox/mlx4/port.c ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port])))) entries 262 drivers/net/ethernet/mellanox/mlx4/port.c (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { entries 267 drivers/net/ethernet/mellanox/mlx4/port.c u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]); entries 299 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); entries 301 drivers/net/ethernet/mellanox/mlx4/port.c err = mlx4_set_port_mac_table(dev, port, table->entries); entries 305 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[free] = 0; entries 314 drivers/net/ethernet/mellanox/mlx4/port.c dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); entries 316 drivers/net/ethernet/mellanox/mlx4/port.c err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries); entries 320 drivers/net/ethernet/mellanox/mlx4/port.c dup_table->entries[free] = 0; entries 420 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[index] = 0; entries 421 drivers/net/ethernet/mellanox/mlx4/port.c if (mlx4_set_port_mac_table(dev, port, 
table->entries)) entries 429 drivers/net/ethernet/mellanox/mlx4/port.c dup_table->entries[index] = 0; entries 430 drivers/net/ethernet/mellanox/mlx4/port.c if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries)) entries 501 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); entries 503 drivers/net/ethernet/mellanox/mlx4/port.c err = mlx4_set_port_mac_table(dev, port, table->entries); entries 507 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[index] = 0; entries 510 drivers/net/ethernet/mellanox/mlx4/port.c dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); entries 512 drivers/net/ethernet/mellanox/mlx4/port.c err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries); entries 516 drivers/net/ethernet/mellanox/mlx4/port.c dup_table->entries[index] = 0; entries 537 drivers/net/ethernet/mellanox/mlx4/port.c __be32 *entries) entries 547 drivers/net/ethernet/mellanox/mlx4/port.c memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); entries 566 drivers/net/ethernet/mellanox/mlx4/port.c be32_to_cpu(table->entries[i])))) { entries 617 drivers/net/ethernet/mellanox/mlx4/port.c if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))) entries 619 drivers/net/ethernet/mellanox/mlx4/port.c if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]))) entries 643 drivers/net/ethernet/mellanox/mlx4/port.c (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port])))) entries 662 drivers/net/ethernet/mellanox/mlx4/port.c be32_to_cpu(table->entries[i])))) { entries 668 drivers/net/ethernet/mellanox/mlx4/port.c u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]); entries 699 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); entries 701 drivers/net/ethernet/mellanox/mlx4/port.c err = mlx4_set_port_vlan_table(dev, port, table->entries); entries 705 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[free] = 0; entries 712 drivers/net/ethernet/mellanox/mlx4/port.c dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); entries 714 drivers/net/ethernet/mellanox/mlx4/port.c err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries); entries 718 drivers/net/ethernet/mellanox/mlx4/port.c dup_table->entries[free] = 0; entries 799 drivers/net/ethernet/mellanox/mlx4/port.c table->entries[index] = 0; entries 800 drivers/net/ethernet/mellanox/mlx4/port.c if (mlx4_set_port_vlan_table(dev, port, table->entries)) entries 807 drivers/net/ethernet/mellanox/mlx4/port.c dup_table->entries[index] = 0; entries 808 drivers/net/ethernet/mellanox/mlx4/port.c if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries)) entries 854 drivers/net/ethernet/mellanox/mlx4/port.c if ((t1->entries[i] != t2->entries[i]) && entries 855 drivers/net/ethernet/mellanox/mlx4/port.c t1->entries[i] && t2->entries[i]) { entries 863 drivers/net/ethernet/mellanox/mlx4/port.c if (t1->entries[i] && !t2->entries[i]) { entries 864 drivers/net/ethernet/mellanox/mlx4/port.c t2->entries[i] = t1->entries[i]; entries 867 drivers/net/ethernet/mellanox/mlx4/port.c } else if (!t1->entries[i] && t2->entries[i]) { entries 868 drivers/net/ethernet/mellanox/mlx4/port.c t1->entries[i] = t2->entries[i]; entries 871 drivers/net/ethernet/mellanox/mlx4/port.c } else if (t1->entries[i] && t2->entries[i]) { entries 878 drivers/net/ethernet/mellanox/mlx4/port.c ret = mlx4_set_port_mac_table(dev, 1, t1->entries); entries 883 drivers/net/ethernet/mellanox/mlx4/port.c 
ret = mlx4_set_port_mac_table(dev, 2, t2->entries); entries 909 drivers/net/ethernet/mellanox/mlx4/port.c if (t1->entries[i] != t2->entries[i]) { entries 917 drivers/net/ethernet/mellanox/mlx4/port.c if (!t1->entries[i]) entries 921 drivers/net/ethernet/mellanox/mlx4/port.c t1->entries[i] = 0; entries 926 drivers/net/ethernet/mellanox/mlx4/port.c t2->entries[i] = 0; entries 932 drivers/net/ethernet/mellanox/mlx4/port.c ret = mlx4_set_port_mac_table(dev, 1, t1->entries); entries 937 drivers/net/ethernet/mellanox/mlx4/port.c ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries); entries 961 drivers/net/ethernet/mellanox/mlx4/port.c if ((t1->entries[i] != t2->entries[i]) && entries 962 drivers/net/ethernet/mellanox/mlx4/port.c t1->entries[i] && t2->entries[i]) { entries 970 drivers/net/ethernet/mellanox/mlx4/port.c if (t1->entries[i] && !t2->entries[i]) { entries 971 drivers/net/ethernet/mellanox/mlx4/port.c t2->entries[i] = t1->entries[i]; entries 974 drivers/net/ethernet/mellanox/mlx4/port.c } else if (!t1->entries[i] && t2->entries[i]) { entries 975 drivers/net/ethernet/mellanox/mlx4/port.c t1->entries[i] = t2->entries[i]; entries 978 drivers/net/ethernet/mellanox/mlx4/port.c } else if (t1->entries[i] && t2->entries[i]) { entries 985 drivers/net/ethernet/mellanox/mlx4/port.c ret = mlx4_set_port_vlan_table(dev, 1, t1->entries); entries 990 drivers/net/ethernet/mellanox/mlx4/port.c ret = mlx4_set_port_vlan_table(dev, 2, t2->entries); entries 1016 drivers/net/ethernet/mellanox/mlx4/port.c if (t1->entries[i] != t2->entries[i]) { entries 1024 drivers/net/ethernet/mellanox/mlx4/port.c if (!t1->entries[i]) entries 1028 drivers/net/ethernet/mellanox/mlx4/port.c t1->entries[i] = 0; entries 1033 drivers/net/ethernet/mellanox/mlx4/port.c t2->entries[i] = 0; entries 1039 drivers/net/ethernet/mellanox/mlx4/port.c ret = mlx4_set_port_vlan_table(dev, 1, t1->entries); entries 1044 drivers/net/ethernet/mellanox/mlx4/port.c ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries); entries 8 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c const struct flow_action_entry *entries, entries 14 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c ids[i] = entries[i].id; entries 18 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h const struct flow_action_entry *entries, entries 38 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h f->rule->action.entries, entries 1437 drivers/net/ethernet/mellanox/mlxsw/spectrum.c act = &f->rule->action.entries[0]; entries 165 drivers/net/ethernet/mellanox/mlxsw/spectrum.h struct mlxsw_sp_span_entry *entries; entries 70 drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c struct mlxsw_sp_nve_mc_entry entries[0]; entries 270 drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c mc_record = kzalloc(struct_size(mc_record, entries, num_max_entries), entries 342 drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c if (mc_record->entries[i].valid) entries 344 drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c return &mc_record->entries[i]; entries 379 drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c mc_entry = &mc_record->entries[i]; entries 415 drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c mc_entry = &mc_record->entries[i]; entries 681 drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c struct mlxsw_sp_nve_mc_entry *mc_entry = &mc_record->entries[i]; entries 26 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count, entries 29 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c if 
(!mlxsw_sp->span.entries) entries 33 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; entries 47 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; entries 51 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c kfree(mlxsw_sp->span.entries); entries 628 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c if (!mlxsw_sp->span.entries[i].ref_count) { entries 629 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c span_entry = &mlxsw_sp->span.entries[i]; entries 656 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; entries 677 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; entries 719 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; entries 809 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c &mlxsw_sp->span.entries[i]; entries 966 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; entries 43 drivers/net/ethernet/mscc/ocelot_tc.c action = &f->rule->action.entries[0]; entries 3727 drivers/net/ethernet/neterion/s2io.c nic->entries = kzalloc(size, GFP_KERNEL); entries 3728 drivers/net/ethernet/neterion/s2io.c if (!nic->entries) { entries 3742 drivers/net/ethernet/neterion/s2io.c kfree(nic->entries); entries 3749 drivers/net/ethernet/neterion/s2io.c nic->entries[0].entry = 0; entries 3756 drivers/net/ethernet/neterion/s2io.c nic->entries[i].entry = ((i - 1) * 8) + 1; entries 3773 drivers/net/ethernet/neterion/s2io.c ret = pci_enable_msix_range(nic->pdev, nic->entries, entries 3778 drivers/net/ethernet/neterion/s2io.c kfree(nic->entries); entries 3784 drivers/net/ethernet/neterion/s2io.c nic->entries = NULL; entries 3819 drivers/net/ethernet/neterion/s2io.c err = request_irq(sp->entries[1].vector, s2io_test_intr, 0, entries 3847 drivers/net/ethernet/neterion/s2io.c free_irq(sp->entries[1].vector, sp); entries 3861 drivers/net/ethernet/neterion/s2io.c int vector = sp->entries[i].vector; entries 3867 drivers/net/ethernet/neterion/s2io.c kfree(sp->entries); entries 3869 drivers/net/ethernet/neterion/s2io.c sp->entries = NULL; entries 3932 drivers/net/ethernet/neterion/s2io.c if (sp->entries) { entries 3933 drivers/net/ethernet/neterion/s2io.c kfree(sp->entries); entries 6938 drivers/net/ethernet/neterion/s2io.c err = request_irq(sp->entries[i].vector, entries 6949 drivers/net/ethernet/neterion/s2io.c err = request_irq(sp->entries[i].vector, entries 946 drivers/net/ethernet/neterion/s2io.h struct msix_entry *entries; entries 2300 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry), entries 2302 drivers/net/ethernet/neterion/vxge/vxge-main.c if (!vdev->entries) { entries 2325 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries[j].entry = msix_intr_vect; entries 2331 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries[j].entry = msix_intr_vect + 1; entries 2338 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries[j].entry = VXGE_ALARM_MSIX_ID; entries 2343 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries, 3, vdev->intr_cnt); entries 2358 drivers/net/ethernet/neterion/vxge/vxge-main.c kfree(vdev->entries); entries 2360 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries = NULL; entries 2373 
drivers/net/ethernet/neterion/vxge/vxge-main.c kfree(vdev->entries); entries 2417 drivers/net/ethernet/neterion/vxge/vxge-main.c synchronize_irq(vdev->entries[intr_cnt].vector); entries 2418 drivers/net/ethernet/neterion/vxge/vxge-main.c free_irq(vdev->entries[intr_cnt].vector, entries 2424 drivers/net/ethernet/neterion/vxge/vxge-main.c kfree(vdev->entries); entries 2426 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries = NULL; entries 2474 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries[intr_cnt].entry, entries 2477 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries[intr_cnt].vector, entries 2489 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries[intr_cnt].entry, entries 2492 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries[intr_cnt].vector, entries 2535 drivers/net/ethernet/neterion/vxge/vxge-main.c vdev->entries[intr_cnt].entry, entries 2538 drivers/net/ethernet/neterion/vxge/vxge-main.c ret = request_irq(vdev->entries[intr_cnt].vector, entries 361 drivers/net/ethernet/neterion/vxge/vxge-main.h struct msix_entry *entries; entries 266 drivers/net/ethernet/netronome/nfp/flower/action.c struct flow_action_entry *act = flow->rule->action.entries; entries 1138 drivers/net/ethernet/netronome/nfp/flower/action.c current_act = flow_act->entries[current_act_idx]; entries 1145 drivers/net/ethernet/netronome/nfp/flower/action.c prev_act = flow_act->entries[current_act_idx - 1]; entries 1156 drivers/net/ethernet/netronome/nfp/flower/action.c current_act = flow_act->entries[current_act_idx]; entries 1163 drivers/net/ethernet/netronome/nfp/flower/action.c next_act = flow_act->entries[current_act_idx + 1]; entries 65 drivers/net/ethernet/netronome/nfp/flower/qos_conf.c struct flow_action_entry *action = &flow->rule->action.entries[0]; entries 34 drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, entries 137 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c void *entries; entries 194 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c return state->entries; entries 203 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx) entries 205 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c state->entries = entries; entries 211 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c state->entries = NULL; entries 247 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c union eth_table_entry *entries; entries 251 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); entries 252 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c if (!entries) entries 255 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); entries 262 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c if (entries[i].port & NSP_ETH_PORT_LANES_MASK) entries 281 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c if (entries[i].port & NSP_ETH_PORT_LANES_MASK) entries 282 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c nfp_eth_port_translate(nsp, &entries[i], i, entries 289 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c kfree(entries); entries 294 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c kfree(entries); entries 300 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c union eth_table_entry *entries; entries 304 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c 
entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); entries 305 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c if (!entries) entries 310 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c kfree(entries); entries 314 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); entries 320 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) { entries 326 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c nfp_nsp_config_set_state(nsp, entries, idx); entries 331 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c kfree(entries); entries 337 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c union eth_table_entry *entries = nfp_nsp_config_entries(nsp); entries 342 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c kfree(entries); entries 361 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c union eth_table_entry *entries = nfp_nsp_config_entries(nsp); entries 365 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); entries 390 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c union eth_table_entry *entries; entries 398 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c entries = nfp_nsp_config_entries(nsp); entries 401 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c reg = le64_to_cpu(entries[idx].state); entries 403 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c reg = le64_to_cpu(entries[idx].control); entries 406 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c entries[idx].control = cpu_to_le64(reg); entries 429 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c union eth_table_entry *entries; entries 445 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c entries = nfp_nsp_config_entries(nsp); entries 448 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c reg = le64_to_cpu(entries[idx].state); entries 450 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c reg = le64_to_cpu(entries[idx].control); entries 453 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c entries[idx].control = cpu_to_le64(reg); entries 466 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c union eth_table_entry *entries = nfp_nsp_config_entries(nsp); entries 480 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c reg = le64_to_cpu(entries[idx].raw[raw_idx]); entries 486 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c entries[idx].raw[raw_idx] = cpu_to_le64(reg); entries 488 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c entries[idx].control |= cpu_to_le64(ctrl_bit); entries 2220 drivers/net/ethernet/nvidia/forcedeth.c u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); entries 2233 drivers/net/ethernet/nvidia/forcedeth.c entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + entries 2239 drivers/net/ethernet/nvidia/forcedeth.c if (unlikely(empty_slots <= entries)) { entries 2375 drivers/net/ethernet/nvidia/forcedeth.c u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 
1 : 0); entries 2389 drivers/net/ethernet/nvidia/forcedeth.c entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + entries 2395 drivers/net/ethernet/nvidia/forcedeth.c if (unlikely(empty_slots <= entries)) { entries 571 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c __le32 entries = cpu_to_le32(directory->num_entries); entries 573 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c for (i = 0; i < entries; i++) { entries 595 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c __le32 entries; entries 601 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c entries = cpu_to_le32(directory->num_entries); entries 603 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); entries 688 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c __le32 entries; entries 697 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c entries = cpu_to_le32(ptab_descr->num_entries); entries 699 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); entries 705 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c for (i = 0; i < entries; i++) { entries 965 drivers/net/ethernet/qlogic/qed/qed_cxt.c struct src_ent *entries = p_mngr->t2[i].p_virt; entries 971 drivers/net/ethernet/qlogic/qed/qed_cxt.c entries[j].next = cpu_to_be64(val); entries 978 drivers/net/ethernet/qlogic/qed/qed_cxt.c entries[j].next = cpu_to_be64(val); entries 52 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c u16 entries; entries 60 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c u16 entries; entries 1997 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c int index, entries; entries 2003 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c entries = p_dev->ahw->reset.hdr->entries; entries 2006 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) { entries 750 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c u32 i, entries; entries 752 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c entries = le32_to_cpu(directory->num_entries); entries 754 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c for (i = 0; i < entries; i++) { entries 774 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c u32 entries, entry_size, tab_size, fw_file_size; entries 781 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c entries = le32_to_cpu(directory->num_entries); entries 783 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c tab_size = le32_to_cpu(directory->findex) + (entries * entry_size); entries 866 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c u32 entries, entry_size, tab_size, i; entries 874 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c entries = le32_to_cpu(ptab_descr->num_entries); entries 876 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c tab_size = le32_to_cpu(ptab_descr->findex) + (entries * entry_size); entries 882 drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c for (i = 0; i < entries; i++) { entries 2411 drivers/net/ethernet/sfc/ef10.c size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; entries 2457 drivers/net/ethernet/sfc/ef10.c tx_queue->queue, entries, (u64)dma_addr); entries 2459 drivers/net/ethernet/sfc/ef10.c for (i = 0; i < entries; ++i) { entries 2464 drivers/net/ethernet/sfc/ef10.c inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); entries 3086 drivers/net/ethernet/sfc/ef10.c size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; entries 3112 drivers/net/ethernet/sfc/ef10.c efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); entries 
3114 drivers/net/ethernet/sfc/ef10.c for (i = 0; i < entries; ++i) { entries 3119 drivers/net/ethernet/sfc/ef10.c inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); entries 3265 drivers/net/ethernet/sfc/ef10.c size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; entries 3313 drivers/net/ethernet/sfc/ef10.c for (i = 0; i < entries; ++i) { entries 3318 drivers/net/ethernet/sfc/ef10.c inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); entries 375 drivers/net/ethernet/sfc/efx.c unsigned long entries; entries 382 drivers/net/ethernet/sfc/efx.c entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); entries 383 drivers/net/ethernet/sfc/efx.c EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE); entries 384 drivers/net/ethernet/sfc/efx.c channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; entries 827 drivers/net/ethernet/sfc/efx.c channel->eventq.entries); entries 831 drivers/net/ethernet/sfc/efx.c rx_queue->rxd.entries); entries 835 drivers/net/ethernet/sfc/efx.c tx_queue->txd.entries); entries 339 drivers/net/ethernet/sfc/falcon/efx.c unsigned long entries; entries 346 drivers/net/ethernet/sfc/falcon/efx.c entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); entries 347 drivers/net/ethernet/sfc/falcon/efx.c EF4_BUG_ON_PARANOID(entries > EF4_MAX_EVQ_SIZE); entries 348 drivers/net/ethernet/sfc/falcon/efx.c channel->eventq_mask = max(entries, EF4_MIN_EVQ_SIZE) - 1; entries 786 drivers/net/ethernet/sfc/falcon/efx.c channel->eventq.entries); entries 790 drivers/net/ethernet/sfc/falcon/efx.c rx_queue->rxd.entries); entries 794 drivers/net/ethernet/sfc/falcon/efx.c tx_queue->txd.entries); entries 178 drivers/net/ethernet/sfc/falcon/farch.c for (i = 0; i < buffer->entries; i++) { entries 198 drivers/net/ethernet/sfc/falcon/farch.c unsigned int end = (buffer->index + buffer->entries - 1); entries 200 drivers/net/ethernet/sfc/falcon/farch.c if (!buffer->entries) entries 204 drivers/net/ethernet/sfc/falcon/farch.c buffer->index, buffer->index + buffer->entries - 1); entries 231 drivers/net/ethernet/sfc/falcon/farch.c buffer->entries = len / EF4_BUF_SIZE; entries 236 drivers/net/ethernet/sfc/falcon/farch.c efx->next_buffer_table += buffer->entries; entries 241 drivers/net/ethernet/sfc/falcon/farch.c buffer->index + buffer->entries - 1, entries 257 drivers/net/ethernet/sfc/falcon/farch.c buffer->index + buffer->entries - 1, entries 262 drivers/net/ethernet/sfc/falcon/farch.c buffer->entries = 0; entries 366 drivers/net/ethernet/sfc/falcon/farch.c unsigned entries; entries 368 drivers/net/ethernet/sfc/falcon/farch.c entries = tx_queue->ptr_mask + 1; entries 370 drivers/net/ethernet/sfc/falcon/farch.c entries * sizeof(ef4_qword_t)); entries 392 drivers/net/ethernet/sfc/falcon/farch.c __ffs(tx_queue->txd.entries), entries 512 drivers/net/ethernet/sfc/falcon/farch.c unsigned entries; entries 514 drivers/net/ethernet/sfc/falcon/farch.c entries = rx_queue->ptr_mask + 1; entries 516 drivers/net/ethernet/sfc/falcon/farch.c entries * sizeof(ef4_qword_t)); entries 537 drivers/net/ethernet/sfc/falcon/farch.c rx_queue->rxd.index + rx_queue->rxd.entries - 1); entries 555 drivers/net/ethernet/sfc/falcon/farch.c __ffs(rx_queue->rxd.entries), entries 1331 drivers/net/ethernet/sfc/falcon/farch.c unsigned entries; entries 1333 drivers/net/ethernet/sfc/falcon/farch.c entries = channel->eventq_mask + 1; entries 1335 drivers/net/ethernet/sfc/falcon/farch.c entries * sizeof(ef4_qword_t)); entries 1346 drivers/net/ethernet/sfc/falcon/farch.c channel->eventq.index + channel->eventq.entries - 1); entries 
1357 drivers/net/ethernet/sfc/falcon/farch.c FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), entries 128 drivers/net/ethernet/sfc/falcon/net_driver.h unsigned int entries; entries 682 drivers/net/ethernet/sfc/falcon/rx.c unsigned int entries; entries 686 drivers/net/ethernet/sfc/falcon/rx.c entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE); entries 687 drivers/net/ethernet/sfc/falcon/rx.c EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE); entries 688 drivers/net/ethernet/sfc/falcon/rx.c rx_queue->ptr_mask = entries - 1; entries 696 drivers/net/ethernet/sfc/falcon/rx.c rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), entries 541 drivers/net/ethernet/sfc/falcon/tx.c unsigned int entries; entries 545 drivers/net/ethernet/sfc/falcon/tx.c entries = max(roundup_pow_of_two(efx->txq_entries), EF4_MIN_DMAQ_SIZE); entries 546 drivers/net/ethernet/sfc/falcon/tx.c EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE); entries 547 drivers/net/ethernet/sfc/falcon/tx.c tx_queue->ptr_mask = entries - 1; entries 554 drivers/net/ethernet/sfc/falcon/tx.c tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), entries 180 drivers/net/ethernet/sfc/farch.c for (i = 0; i < buffer->entries; i++) { entries 200 drivers/net/ethernet/sfc/farch.c unsigned int end = (buffer->index + buffer->entries - 1); entries 202 drivers/net/ethernet/sfc/farch.c if (!buffer->entries) entries 206 drivers/net/ethernet/sfc/farch.c buffer->index, buffer->index + buffer->entries - 1); entries 236 drivers/net/ethernet/sfc/farch.c buffer->entries = len / EFX_BUF_SIZE; entries 241 drivers/net/ethernet/sfc/farch.c efx->next_buffer_table += buffer->entries; entries 250 drivers/net/ethernet/sfc/farch.c buffer->index + buffer->entries - 1, entries 266 drivers/net/ethernet/sfc/farch.c buffer->index + buffer->entries - 1, entries 271 drivers/net/ethernet/sfc/farch.c buffer->entries = 0; entries 372 drivers/net/ethernet/sfc/farch.c unsigned entries; entries 374 drivers/net/ethernet/sfc/farch.c entries = tx_queue->ptr_mask + 1; entries 376 drivers/net/ethernet/sfc/farch.c entries * sizeof(efx_qword_t)); entries 399 drivers/net/ethernet/sfc/farch.c __ffs(tx_queue->txd.entries), entries 500 drivers/net/ethernet/sfc/farch.c unsigned entries; entries 502 drivers/net/ethernet/sfc/farch.c entries = rx_queue->ptr_mask + 1; entries 504 drivers/net/ethernet/sfc/farch.c entries * sizeof(efx_qword_t)); entries 519 drivers/net/ethernet/sfc/farch.c rx_queue->rxd.index + rx_queue->rxd.entries - 1); entries 537 drivers/net/ethernet/sfc/farch.c __ffs(rx_queue->rxd.entries), entries 1336 drivers/net/ethernet/sfc/farch.c unsigned entries; entries 1338 drivers/net/ethernet/sfc/farch.c entries = channel->eventq_mask + 1; entries 1340 drivers/net/ethernet/sfc/farch.c entries * sizeof(efx_qword_t)); entries 1351 drivers/net/ethernet/sfc/farch.c channel->eventq.index + channel->eventq.entries - 1); entries 1368 drivers/net/ethernet/sfc/farch.c FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), entries 132 drivers/net/ethernet/sfc/net_driver.h unsigned int entries; entries 680 drivers/net/ethernet/sfc/rx.c unsigned int entries; entries 684 drivers/net/ethernet/sfc/rx.c entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE); entries 685 drivers/net/ethernet/sfc/rx.c EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); entries 686 drivers/net/ethernet/sfc/rx.c rx_queue->ptr_mask = entries - 1; entries 694 drivers/net/ethernet/sfc/rx.c rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer), entries 798 
drivers/net/ethernet/sfc/tx.c unsigned int entries; entries 802 drivers/net/ethernet/sfc/tx.c entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); entries 803 drivers/net/ethernet/sfc/tx.c EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); entries 804 drivers/net/ethernet/sfc/tx.c tx_queue->ptr_mask = entries - 1; entries 811 drivers/net/ethernet/sfc/tx.c tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), entries 373 drivers/net/ethernet/stmicro/stmmac/dwmac5.c dwmac5_rxp_get_next_entry(struct stmmac_tc_entry *entries, unsigned int count, entries 382 drivers/net/ethernet/stmicro/stmmac/dwmac5.c entry = &entries[i]; entries 408 drivers/net/ethernet/stmicro/stmmac/dwmac5.c return &entries[min_prio_idx]; entries 412 drivers/net/ethernet/stmicro/stmmac/dwmac5.c int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, entries 432 drivers/net/ethernet/stmicro/stmmac/dwmac5.c entry = &entries[i]; entries 438 drivers/net/ethernet/stmicro/stmmac/dwmac5.c entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio); entries 474 drivers/net/ethernet/stmicro/stmmac/dwmac5.c entry = &entries[i]; entries 81 drivers/net/ethernet/stmicro/stmmac/dwmac5.h int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, entries 938 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries, entries 947 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c entry = &entries[i]; entries 973 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c return &entries[min_prio_idx]; entries 978 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c struct stmmac_tc_entry *entries, entries 998 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c entry = &entries[i]; entries 1004 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio); entries 1040 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c entry = &entries[i]; entries 347 drivers/net/ethernet/stmicro/stmmac/hwif.h int (*rxp_config)(void __iomem *ioaddr, struct stmmac_tc_entry *entries, entries 1333 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL); entries 1350 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c rule->action.entries[0].id = FLOW_ACTION_DROP; entries 1460 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL); entries 1478 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c rule->action.entries[0].id = FLOW_ACTION_DROP; entries 582 drivers/net/hippi/rrunner.c rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES; entries 588 drivers/net/hippi/rrunner.c rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES; entries 602 drivers/net/hippi/rrunner.c rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES; entries 665 drivers/net/hippi/rrunner.c rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES; entries 1360 drivers/net/hippi/rrunner.c rrpriv->info->tx_ctrl.entries = 0; entries 1363 drivers/net/hippi/rrunner.c rrpriv->rx_ctrl[4].entries = 0; entries 767 drivers/net/hippi/rrunner.h u16 entries; entries 775 drivers/net/hippi/rrunner.h u16 entries; entries 1854 drivers/net/virtio_net.c (2 * sizeof(mac_data->entries)), GFP_ATOMIC); entries 1862 drivers/net/virtio_net.c mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); entries 1868 drivers/net/virtio_net.c sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); entries 1873 drivers/net/virtio_net.c mac_data->entries = 
cpu_to_virtio32(vi->vdev, mc_count); entries 1879 drivers/net/virtio_net.c sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); entries 333 drivers/net/vmxnet3/vmxnet3_drv.c int entries = 0; entries 355 drivers/net/vmxnet3/vmxnet3_drv.c entries++; entries 359 drivers/net/vmxnet3/vmxnet3_drv.c return entries; entries 606 drivers/net/wireless/ath/ath10k/core.h struct ath10k_ce_crash_data entries[]; entries 1135 drivers/net/wireless/ath/ath10k/coredump.c CE_COUNT * sizeof(ce_hdr->entries[0]); entries 1195 drivers/net/wireless/ath/ath10k/coredump.c dump_tlv->tlv_len = cpu_to_le32(struct_size(ce_hdr, entries, entries 1200 drivers/net/wireless/ath/ath10k/coredump.c memcpy(ce_hdr->entries, crash_data->ce_crash_data, entries 1201 drivers/net/wireless/ath/ath10k/coredump.c CE_COUNT * sizeof(ce_hdr->entries[0])); entries 1203 drivers/net/wireless/ath/ath10k/coredump.c CE_COUNT * sizeof(ce_hdr->entries[0]); entries 122 drivers/net/wireless/ath/ath6kl/trace.h unsigned int entries, struct hif_scatter_item *list), entries 124 drivers/net/wireless/ath/ath6kl/trace.h TP_ARGS(addr, flags, total_len, entries, list), entries 130 drivers/net/wireless/ath/ath6kl/trace.h __field(unsigned int, entries) entries 132 drivers/net/wireless/ath/ath6kl/trace.h __dynamic_array(unsigned int, len_array, entries) entries 143 drivers/net/wireless/ath/ath6kl/trace.h __entry->entries = entries; entries 153 drivers/net/wireless/ath/ath6kl/trace.h for (i = 0; i < entries; i++) { entries 169 drivers/net/wireless/ath/ath6kl/trace.h __entry->entries, entries 2676 drivers/net/wireless/intel/ipw2x00/ipw2100.c if (r >= rxq->entries) { entries 2681 drivers/net/wireless/intel/ipw2x00/ipw2100.c i = (rxq->next + 1) % rxq->entries; entries 2755 drivers/net/wireless/intel/ipw2x00/ipw2100.c i = (i + 1) % rxq->entries; entries 2760 drivers/net/wireless/intel/ipw2x00/ipw2100.c rxq->next = (i ? 
i : rxq->entries) - 1; entries 2837 drivers/net/wireless/intel/ipw2x00/ipw2100.c e %= txq->entries; entries 2897 drivers/net/wireless/intel/ipw2x00/ipw2100.c i = (i + 1) % txq->entries; entries 2919 drivers/net/wireless/intel/ipw2x00/ipw2100.c tbd = &txq->drv[(packet->index + 1 + i) % txq->entries]; entries 2922 drivers/net/wireless/intel/ipw2x00/ipw2100.c (packet->index + 1 + i) % txq->entries, entries 2971 drivers/net/wireless/intel/ipw2x00/ipw2100.c txq->oldest = (e + 1) % txq->entries; entries 3043 drivers/net/wireless/intel/ipw2x00/ipw2100.c txq->next %= txq->entries; entries 3143 drivers/net/wireless/intel/ipw2x00/ipw2100.c txq->next %= txq->entries; entries 3190 drivers/net/wireless/intel/ipw2x00/ipw2100.c txq->next %= txq->entries; entries 4318 drivers/net/wireless/intel/ipw2x00/ipw2100.c static int status_queue_allocate(struct ipw2100_priv *priv, int entries) entries 4324 drivers/net/wireless/intel/ipw2x00/ipw2100.c q->size = entries * sizeof(struct ipw2100_status); entries 4351 drivers/net/wireless/intel/ipw2x00/ipw2100.c struct ipw2100_bd_queue *q, int entries) entries 4357 drivers/net/wireless/intel/ipw2x00/ipw2100.c q->entries = entries; entries 4358 drivers/net/wireless/intel/ipw2x00/ipw2100.c q->size = entries * sizeof(struct ipw2100_bd); entries 4396 drivers/net/wireless/intel/ipw2x00/ipw2100.c write_register(priv->net_dev, size, q->entries); entries 4509 drivers/net/wireless/intel/ipw2x00/ipw2100.c priv->tx_queue.available = priv->tx_queue.entries; entries 4631 drivers/net/wireless/intel/ipw2x00/ipw2100.c priv->rx_queue.available = priv->rx_queue.entries - 1; entries 4632 drivers/net/wireless/intel/ipw2x00/ipw2100.c priv->rx_queue.next = priv->rx_queue.entries - 1; entries 176 drivers/net/wireless/intel/ipw2x00/ipw2100.h u32 entries; entries 4086 drivers/net/wireless/intel/ipw2x00/ipw2200.c avg->sum -= avg->entries[avg->pos]; entries 4088 drivers/net/wireless/intel/ipw2x00/ipw2200.c avg->entries[avg->pos++] = val; entries 1070 drivers/net/wireless/intel/ipw2x00/ipw2200.h s16 entries[AVG_ENTRIES]; entries 418 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c int idx, entries; entries 427 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; entries 432 drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c for (idx = 0; idx < entries; idx++) { entries 734 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h struct iwl_mvm_reorder_buf_entry entries[]; entries 544 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c struct iwl_mvm_reorder_buf_entry *entries = entries 545 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c &baid_data->entries[reorder_buf->queue * entries 569 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c struct sk_buff_head *skb_list = &entries[index].e.frames; entries 595 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c while (skb_queue_empty(&entries[index].e.frames)) entries 599 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c entries[index].e.reorder_time + 1 + entries 611 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c struct iwl_mvm_reorder_buf_entry *entries = entries 612 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c &baid_data->entries[buf->queue * baid_data->entries_per_queue]; entries 628 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c if (skb_queue_empty(&entries[index].e.frames)) { entries 637 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c !time_after(jiffies, entries[index].e.reorder_time + entries 672 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c entries[index].e.reorder_time + entries 868 
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c struct iwl_mvm_reorder_buf_entry *entries; entries 921 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c entries = &baid_data->entries[queue * baid_data->entries_per_queue]; entries 1020 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c tail = skb_peek_tail(&entries[index].e.frames); entries 1028 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c __skb_queue_tail(&entries[index].e.frames, skb); entries 1030 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c entries[index].e.reorder_time = jiffies; entries 2464 drivers/net/wireless/intel/iwlwifi/mvm/sta.c struct iwl_mvm_reorder_buf_entry *entries = entries 2465 drivers/net/wireless/intel/iwlwifi/mvm/sta.c &data->entries[i * data->entries_per_queue]; entries 2481 drivers/net/wireless/intel/iwlwifi/mvm/sta.c __skb_queue_purge(&entries[j].e.frames); entries 2505 drivers/net/wireless/intel/iwlwifi/mvm/sta.c struct iwl_mvm_reorder_buf_entry *entries = entries 2506 drivers/net/wireless/intel/iwlwifi/mvm/sta.c &data->entries[i * data->entries_per_queue]; entries 2520 drivers/net/wireless/intel/iwlwifi/mvm/sta.c __skb_queue_head_init(&entries[j].e.frames); entries 2541 drivers/net/wireless/intel/iwlwifi/mvm/sta.c u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); entries 2551 drivers/net/wireless/intel/iwlwifi/mvm/sta.c BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) && entries 2552 drivers/net/wireless/intel/iwlwifi/mvm/sta.c sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES); entries 2578 drivers/net/wireless/intel/iwlwifi/mvm/sta.c reorder_buf_size / sizeof(baid_data->entries[0]); entries 365 drivers/net/wireless/intel/iwlwifi/pcie/internal.h struct iwl_pcie_txq_entry *entries; entries 1329 drivers/net/wireless/intel/iwlwifi/pcie/rx.c kzfree(txq->entries[cmd_index].free_buf); entries 1330 drivers/net/wireless/intel/iwlwifi/pcie/rx.c txq->entries[cmd_index].free_buf = NULL; entries 1555 drivers/net/wireless/intel/iwlwifi/pcie/rx.c struct msix_entry *entries = entry - queue; entries 1557 drivers/net/wireless/intel/iwlwifi/pcie/rx.c return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); entries 3323 drivers/net/wireless/intel/iwlwifi/pcie/trans.c memcpy(txcmd->data, cmdq->entries[idx].cmd, entries 196 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, entries 200 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c if (txq->entries) { entries 203 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c skb = txq->entries[idx].skb; entries 211 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c txq->entries[idx].skb = NULL; entries 632 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c txq->entries[idx].skb = skb; entries 633 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c txq->entries[idx].cmd = dev_cmd; entries 640 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c out_meta = &txq->entries[idx].meta; entries 791 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c out_cmd = txq->entries[idx].cmd; entries 792 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c out_meta = &txq->entries[idx].meta; entries 901 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c if (WARN_ON_ONCE(txq->entries[idx].free_buf)) entries 902 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c kzfree(txq->entries[idx].free_buf); entries 903 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c txq->entries[idx].free_buf = dup_buf; entries 1004 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; entries 1060 
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c struct sk_buff *skb = txq->entries[idx].skb; entries 1099 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c kfree(txq->entries); entries 1126 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c kzfree(txq->entries[i].cmd); entries 1127 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c kzfree(txq->entries[i].free_buf); entries 216 drivers/net/wireless/intel/iwlwifi/pcie/tx.c struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; entries 260 drivers/net/wireless/intel/iwlwifi/pcie/tx.c struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; entries 473 drivers/net/wireless/intel/iwlwifi/pcie/tx.c iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); entries 476 drivers/net/wireless/intel/iwlwifi/pcie/tx.c if (txq->entries) { entries 479 drivers/net/wireless/intel/iwlwifi/pcie/tx.c skb = txq->entries[idx].skb; entries 487 drivers/net/wireless/intel/iwlwifi/pcie/tx.c txq->entries[idx].skb = NULL; entries 531 drivers/net/wireless/intel/iwlwifi/pcie/tx.c if (WARN_ON(txq->entries || txq->tfds)) entries 542 drivers/net/wireless/intel/iwlwifi/pcie/tx.c txq->entries = kcalloc(slots_num, entries 546 drivers/net/wireless/intel/iwlwifi/pcie/tx.c if (!txq->entries) entries 551 drivers/net/wireless/intel/iwlwifi/pcie/tx.c txq->entries[i].cmd = entries 554 drivers/net/wireless/intel/iwlwifi/pcie/tx.c if (!txq->entries[i].cmd) entries 579 drivers/net/wireless/intel/iwlwifi/pcie/tx.c if (txq->entries && cmd_queue) entries 581 drivers/net/wireless/intel/iwlwifi/pcie/tx.c kfree(txq->entries[i].cmd); entries 582 drivers/net/wireless/intel/iwlwifi/pcie/tx.c kfree(txq->entries); entries 583 drivers/net/wireless/intel/iwlwifi/pcie/tx.c txq->entries = NULL; entries 666 drivers/net/wireless/intel/iwlwifi/pcie/tx.c struct sk_buff *skb = txq->entries[txq->read_ptr].skb; entries 721 drivers/net/wireless/intel/iwlwifi/pcie/tx.c kzfree(txq->entries[i].cmd); entries 722 drivers/net/wireless/intel/iwlwifi/pcie/tx.c kzfree(txq->entries[i].free_buf); entries 739 drivers/net/wireless/intel/iwlwifi/pcie/tx.c kfree(txq->entries); entries 740 drivers/net/wireless/intel/iwlwifi/pcie/tx.c txq->entries = NULL; entries 1153 drivers/net/wireless/intel/iwlwifi/pcie/tx.c struct sk_buff *skb = txq->entries[read_ptr].skb; entries 1162 drivers/net/wireless/intel/iwlwifi/pcie/tx.c txq->entries[read_ptr].skb = NULL; entries 1637 drivers/net/wireless/intel/iwlwifi/pcie/tx.c out_cmd = txq->entries[idx].cmd; entries 1638 drivers/net/wireless/intel/iwlwifi/pcie/tx.c out_meta = &txq->entries[idx].meta; entries 1764 drivers/net/wireless/intel/iwlwifi/pcie/tx.c if (WARN_ON_ONCE(txq->entries[idx].free_buf)) entries 1765 drivers/net/wireless/intel/iwlwifi/pcie/tx.c kzfree(txq->entries[idx].free_buf); entries 1766 drivers/net/wireless/intel/iwlwifi/pcie/tx.c txq->entries[idx].free_buf = dup_buf; entries 1829 drivers/net/wireless/intel/iwlwifi/pcie/tx.c cmd = txq->entries[cmd_index].cmd; entries 1830 drivers/net/wireless/intel/iwlwifi/pcie/tx.c meta = &txq->entries[cmd_index].meta; entries 1972 drivers/net/wireless/intel/iwlwifi/pcie/tx.c txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; entries 2378 drivers/net/wireless/intel/iwlwifi/pcie/tx.c txq->entries[txq->write_ptr].skb = skb; entries 2379 drivers/net/wireless/intel/iwlwifi/pcie/tx.c txq->entries[txq->write_ptr].cmd = dev_cmd; entries 2393 drivers/net/wireless/intel/iwlwifi/pcie/tx.c out_meta = &txq->entries[txq->write_ptr].meta; entries 343 drivers/net/wireless/intersil/hostap/hostap_ap.c seq_printf(m, "MAC entries: %u\n", ap->mac_restrictions.entries);
entries 391 drivers/net/wireless/intersil/hostap/hostap_ap.c mac_restrictions->entries++; entries 411 drivers/net/wireless/intersil/hostap/hostap_ap.c mac_restrictions->entries--; entries 451 drivers/net/wireless/intersil/hostap/hostap_ap.c if (mac_restrictions->entries == 0) entries 462 drivers/net/wireless/intersil/hostap/hostap_ap.c mac_restrictions->entries = 0; entries 145 drivers/net/wireless/intersil/hostap/hostap_ap.h unsigned int entries; entries 81 drivers/net/wireless/intersil/p54/eeprom.c size_t entries; entries 154 drivers/net/wireless/intersil/p54/eeprom.c if ((!list->entries) || (!list->band_channel_num[band])) entries 172 drivers/net/wireless/intersil/p54/eeprom.c (i < list->entries); i++) { entries 243 drivers/net/wireless/intersil/p54/eeprom.c for (i = list->entries; i >= 0; i--) { entries 250 drivers/net/wireless/intersil/p54/eeprom.c if ((i < 0) && (list->entries < list->max_entries)) { entries 259 drivers/net/wireless/intersil/p54/eeprom.c i = list->entries++; entries 328 drivers/net/wireless/intersil/p54/eeprom.c if ((priv->iq_autocal_len != priv->curve_data->entries) || entries 329 drivers/net/wireless/intersil/p54/eeprom.c (priv->iq_autocal_len != priv->output_limit->entries)) entries 334 drivers/net/wireless/intersil/p54/eeprom.c max_channel_num = max_t(unsigned int, priv->output_limit->entries, entries 337 drivers/net/wireless/intersil/p54/eeprom.c priv->curve_data->entries); entries 367 drivers/net/wireless/intersil/p54/eeprom.c if (i < priv->output_limit->entries) { entries 383 drivers/net/wireless/intersil/p54/eeprom.c if (i < priv->curve_data->entries) { entries 394 drivers/net/wireless/intersil/p54/eeprom.c sort(list->channels, list->entries, sizeof(struct p54_channel_entry), entries 437 drivers/net/wireless/intersil/p54/eeprom.c priv->curve_data->entries = curve_data->channels; entries 489 drivers/net/wireless/intersil/p54/eeprom.c priv->curve_data->entries = curve_data->channels; entries 522 drivers/net/wireless/intersil/p54/eeprom.c size_t db_len, entries; entries 526 drivers/net/wireless/intersil/p54/eeprom.c entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ?
1 : 2; entries 527 drivers/net/wireless/intersil/p54/eeprom.c if (len != sizeof(struct pda_rssi_cal_entry) * entries) { entries 539 drivers/net/wireless/intersil/p54/eeprom.c entries = (len - offset) / entries 544 drivers/net/wireless/intersil/p54/eeprom.c entries == 0) { entries 550 drivers/net/wireless/intersil/p54/eeprom.c db_len = sizeof(*entry) * entries; entries 556 drivers/net/wireless/intersil/p54/eeprom.c priv->rssi_db->entries = entries; entries 564 drivers/net/wireless/intersil/p54/eeprom.c for (i = 0; i < entries; i++) { entries 572 drivers/net/wireless/intersil/p54/eeprom.c for (i = 0; i < entries; i++) { entries 590 drivers/net/wireless/intersil/p54/eeprom.c sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL); entries 613 drivers/net/wireless/intersil/p54/eeprom.c for (i = 0; i < priv->rssi_db->entries; i++) { entries 689 drivers/net/wireless/intersil/p54/eeprom.c priv->output_limit->entries = data[1]; entries 693 drivers/net/wireless/intersil/p54/eeprom.c priv->output_limit->entries + entries 706 drivers/net/wireless/intersil/p54/eeprom.c size_t payload_len, entries, entry_size, offset; entries 709 drivers/net/wireless/intersil/p54/eeprom.c entries = le16_to_cpu(src->entries); entries 712 drivers/net/wireless/intersil/p54/eeprom.c if (((entries * entry_size + offset) != payload_len) || entries 720 drivers/net/wireless/intersil/p54/eeprom.c dst->entries = entries; entries 844 drivers/net/wireless/intersil/p54/eeprom.c for (i = 0; i < priv->rssi_db->entries; i++) entries 118 drivers/net/wireless/intersil/p54/eeprom.h __le16 entries; entries 440 drivers/net/wireless/intersil/p54/fwio.c for (i = 0; i < priv->output_limit->entries; i++) { entries 467 drivers/net/wireless/intersil/p54/fwio.c if (i == priv->output_limit->entries) entries 471 drivers/net/wireless/intersil/p54/fwio.c for (i = 0; i < priv->curve_data->entries; i++) { entries 495 drivers/net/wireless/intersil/p54/fwio.c if (i == priv->curve_data->entries) entries 125 drivers/net/wireless/intersil/p54/p54.h size_t entries; entries 175 drivers/net/wireless/mediatek/mt7601u/dma.c q->start = (q->start + 1) % q->entries; entries 208 drivers/net/wireless/mediatek/mt7601u/dma.c q->end = (q->end + 1) % q->entries; entries 260 drivers/net/wireless/mediatek/mt7601u/dma.c if (q->used == q->entries - q->entries / 8) entries 263 drivers/net/wireless/mediatek/mt7601u/dma.c q->start = (q->start + 1) % q->entries; entries 307 drivers/net/wireless/mediatek/mt7601u/dma.c if (WARN_ON(q->entries <= q->used)) { entries 329 drivers/net/wireless/mediatek/mt7601u/dma.c q->end = (q->end + 1) % q->entries; entries 332 drivers/net/wireless/mediatek/mt7601u/dma.c if (q->used >= q->entries) entries 383 drivers/net/wireless/mediatek/mt7601u/dma.c for (i = 0; i < dev->rx_q.entries; i++) entries 412 drivers/net/wireless/mediatek/mt7601u/dma.c for (i = 0; i < dev->rx_q.entries; i++) { entries 425 drivers/net/wireless/mediatek/mt7601u/dma.c for (i = 0; i < dev->rx_q.entries; i++) { entries 437 drivers/net/wireless/mediatek/mt7601u/dma.c dev->rx_q.entries = N_RX_ENTRIES; entries 454 drivers/net/wireless/mediatek/mt7601u/dma.c for (i = 0; i < q->entries; i++) { entries 479 drivers/net/wireless/mediatek/mt7601u/dma.c q->entries = N_TX_ENTRIES; entries 77 drivers/net/wireless/mediatek/mt7601u/mt7601u.h unsigned int entries; entries 93 drivers/net/wireless/mediatek/mt7601u/mt7601u.h unsigned int entries; entries 772 drivers/net/wireless/ralink/rt2x00/rt2400pci.c entry_priv = rt2x00dev->tx[1].entries[0].priv_data; entries 778 
drivers/net/wireless/ralink/rt2x00/rt2400pci.c entry_priv = rt2x00dev->tx[0].entries[0].priv_data; entries 784 drivers/net/wireless/ralink/rt2x00/rt2400pci.c entry_priv = rt2x00dev->atim->entries[0].priv_data; entries 790 drivers/net/wireless/ralink/rt2x00/rt2400pci.c entry_priv = rt2x00dev->bcn->entries[0].priv_data; entries 801 drivers/net/wireless/ralink/rt2x00/rt2400pci.c entry_priv = rt2x00dev->rx->entries[0].priv_data; entries 857 drivers/net/wireless/ralink/rt2x00/rt2500pci.c entry_priv = rt2x00dev->tx[1].entries[0].priv_data; entries 863 drivers/net/wireless/ralink/rt2x00/rt2500pci.c entry_priv = rt2x00dev->tx[0].entries[0].priv_data; entries 869 drivers/net/wireless/ralink/rt2x00/rt2500pci.c entry_priv = rt2x00dev->atim->entries[0].priv_data; entries 875 drivers/net/wireless/ralink/rt2x00/rt2500pci.c entry_priv = rt2x00dev->bcn->entries[0].priv_data; entries 886 drivers/net/wireless/ralink/rt2x00/rt2500pci.c entry_priv = rt2x00dev->rx->entries[0].priv_data; entries 1300 drivers/net/wireless/ralink/rt2x00/rt2800lib.c entry = &queue->entries[i]; entries 1872 drivers/net/wireless/ralink/rt2x00/rt2800lib.c entry = &queue->entries[i]; entries 665 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c entry_priv = rt2x00dev->tx[0].entries[0].priv_data; entries 673 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c entry_priv = rt2x00dev->tx[1].entries[0].priv_data; entries 681 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c entry_priv = rt2x00dev->tx[2].entries[0].priv_data; entries 689 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c entry_priv = rt2x00dev->tx[3].entries[0].priv_data; entries 707 drivers/net/wireless/ralink/rt2x00/rt2800mmio.c entry_priv = rt2x00dev->rx->entries[0].priv_data; entries 207 drivers/net/wireless/ralink/rt2x00/rt2x00mac.c entry = &queue->entries[i]; entries 121 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c entry_priv = queue->entries[i].priv_data; entries 133 drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c queue->entries[0].priv_data; entries 822 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c if (fn(&queue->entries[i], data)) entries 827 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c if (fn(&queue->entries[i], data)) entries 832 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c if (fn(&queue->entries[i], data)) entries 855 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c entry = &queue->entries[queue->index[index]]; entries 1094 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); entries 1100 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c struct queue_entry *entries; entries 1109 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c entry_size = sizeof(*entries) + queue->priv_size; entries 1110 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); entries 1111 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c if (!entries) entries 1119 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c entries[i].flags = 0; entries 1120 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c entries[i].queue = queue; entries 1121 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c entries[i].skb = NULL; entries 1122 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c entries[i].entry_idx = i; entries 1123 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c entries[i].priv_data = entries 1124 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, entries 1125 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c sizeof(*entries), queue->priv_size); entries 1130 
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c queue->entries = entries; entries 1139 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c if (!queue->entries) entries 1143 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c rt2x00queue_free_skb(&queue->entries[i]); entries 1153 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL); entries 1156 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c queue->entries[i].skb = skb; entries 1208 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c kfree(queue->entries); entries 1209 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c queue->entries = NULL; entries 453 drivers/net/wireless/ralink/rt2x00/rt2x00queue.h struct queue_entry *entries; entries 654 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c entry_priv = queue->entries[i].priv_data; entries 670 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c bcn_priv = queue->entries[i].priv_data; entries 686 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c if (!queue->entries) entries 690 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c entry_priv = queue->entries[i].priv_data; entries 705 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c bcn_priv = queue->entries[i].priv_data; entries 1351 drivers/net/wireless/ralink/rt2x00/rt61pci.c entry_priv = rt2x00dev->tx[0].entries[0].priv_data; entries 1357 drivers/net/wireless/ralink/rt2x00/rt61pci.c entry_priv = rt2x00dev->tx[1].entries[0].priv_data; entries 1363 drivers/net/wireless/ralink/rt2x00/rt61pci.c entry_priv = rt2x00dev->tx[2].entries[0].priv_data; entries 1369 drivers/net/wireless/ralink/rt2x00/rt61pci.c entry_priv = rt2x00dev->tx[3].entries[0].priv_data; entries 1382 drivers/net/wireless/ralink/rt2x00/rt61pci.c entry_priv = rt2x00dev->rx->entries[0].priv_data; entries 2103 drivers/net/wireless/ralink/rt2x00/rt61pci.c entry = &queue->entries[index]; entries 356 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c ring->idx = (ring->idx + 1) % ring->entries; entries 371 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (ring->entries - skb_queue_len(&ring->queue) == 2) entries 544 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; entries 574 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (ring->entries - skb_queue_len(&ring->queue) < 2) entries 1069 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c unsigned int prio, unsigned int entries) entries 1076 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries, entries 1087 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c priv->tx_ring[prio].entries = entries; entries 1090 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c for (i = 0; i < entries; i++) entries 1092 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c cpu_to_le32((u32)dma + ((i + 1) % entries) * sizeof(*ring)); entries 1109 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c ring->idx = (ring->idx + 1) % ring->entries; entries 1112 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c pci_free_consistent(priv->pdev, sizeof(*ring->desc)*ring->entries, entries 92 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h unsigned int entries; entries 504 drivers/net/wireless/realtek/rtlwifi/pci.c (ring->entries - skb_queue_len(&ring->queue) > entries 547 drivers/net/wireless/realtek/rtlwifi/pci.c ring->idx = (ring->idx + 1) % ring->entries; entries 610 drivers/net/wireless/realtek/rtlwifi/pci.c if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) { entries 1208 
drivers/net/wireless/realtek/rtlwifi/pci.c unsigned int prio, unsigned int entries) entries 1222 drivers/net/wireless/realtek/rtlwifi/pci.c sizeof(*buffer_desc) * entries, entries 1240 drivers/net/wireless/realtek/rtlwifi/pci.c sizeof(*desc) * entries, &desc_dma); entries 1251 drivers/net/wireless/realtek/rtlwifi/pci.c rtlpci->tx_ring[prio].entries = entries; entries 1259 drivers/net/wireless/realtek/rtlwifi/pci.c for (i = 0; i < entries; i++) { entries 1261 drivers/net/wireless/realtek/rtlwifi/pci.c ((i + 1) % entries) * entries 1356 drivers/net/wireless/realtek/rtlwifi/pci.c ring->idx = (ring->idx + 1) % ring->entries; entries 1361 drivers/net/wireless/realtek/rtlwifi/pci.c sizeof(*ring->desc) * ring->entries, entries 1366 drivers/net/wireless/realtek/rtlwifi/pci.c sizeof(*ring->buffer_desc) * ring->entries, entries 1538 drivers/net/wireless/realtek/rtlwifi/pci.c ring->idx = (ring->idx + 1) % ring->entries; entries 1547 drivers/net/wireless/realtek/rtlwifi/pci.c ring->entries = rtlpci->txringcount[i]; entries 1639 drivers/net/wireless/realtek/rtlwifi/pci.c ring->entries; entries 1687 drivers/net/wireless/realtek/rtlwifi/pci.c if ((ring->entries - skb_queue_len(&ring->queue)) < 2 && entries 149 drivers/net/wireless/realtek/rtlwifi/pci.h unsigned int entries; entries 84 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c ring->idx = (ring->idx + 1) % ring->entries; entries 910 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c u16 max_tx_desc = ring->entries; entries 123 drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; entries 46 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c ring->idx = (ring->idx + 1) % ring->entries; entries 42 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c ring->idx = (ring->idx + 1) % ring->entries; entries 2309 drivers/nvme/host/core.c table->entries[state] = target; entries 687 drivers/nvme/host/pci.c dma_addr_t dma_addr, int entries) entries 690 drivers/nvme/host/pci.c if (entries < SGES_PER_PAGE) { entries 691 drivers/nvme/host/pci.c sge->length = cpu_to_le32(entries * sizeof(*sge)); entries 700 drivers/nvme/host/pci.c struct request *req, struct nvme_rw_command *cmd, int entries) entries 712 drivers/nvme/host/pci.c if (entries == 1) { entries 717 drivers/nvme/host/pci.c if (entries <= (256 / sizeof(struct nvme_sgl_desc))) { entries 734 drivers/nvme/host/pci.c nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); entries 748 drivers/nvme/host/pci.c nvme_pci_sgl_set_seg(link, sgl_dma, entries); entries 753 drivers/nvme/host/pci.c } while (--entries > 0); entries 108 drivers/nvme/target/discovery.c struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec]; entries 148 drivers/nvme/target/discovery.c size_t entries = 0; entries 153 drivers/nvme/target/discovery.c entries++; entries 156 drivers/nvme/target/discovery.c entries++; entries 157 drivers/nvme/target/discovery.c return entries; entries 1305 drivers/nvmem/core.c void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) entries 1311 drivers/nvmem/core.c list_add_tail(&entries[i].node, &nvmem_lookup_list); entries 1323 drivers/nvmem/core.c void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) entries 1329 drivers/nvmem/core.c list_del(&entries[i].node); entries 676 drivers/of/dynamic.c INIT_LIST_HEAD(&ocs->entries); entries 692 drivers/of/dynamic.c list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node) entries 713 drivers/of/dynamic.c list_for_each_entry(ce, &ocs->entries, 
node) { entries 717 drivers/of/dynamic.c list_for_each_entry_continue_reverse(ce, &ocs->entries, entries 745 drivers/of/dynamic.c list_for_each_entry(ce, &ocs->entries, node) { entries 816 drivers/of/dynamic.c list_for_each_entry_reverse(ce, &ocs->entries, node) { entries 820 drivers/of/dynamic.c list_for_each_entry_continue(ce, &ocs->entries, node) { entries 845 drivers/of/dynamic.c list_for_each_entry_reverse(ce, &ocs->entries, node) { entries 926 drivers/of/dynamic.c list_add_tail(&ce->node, &ocs->entries); entries 544 drivers/of/overlay.c list_for_each_entry_continue(ce_2, &ovcs->cset.entries, node) { entries 578 drivers/of/overlay.c list_for_each_entry_continue(ce_2, &ovcs->cset.entries, node) { entries 616 drivers/of/overlay.c list_for_each_entry(ce_1, &ovcs->cset.entries, node) { entries 851 drivers/of/overlay.c if (ovcs->cset.entries.next) entries 1111 drivers/of/overlay.c list_for_each_entry(ce, &ovcs->cset.entries, node) { entries 1144 drivers/of/overlay.c list_for_each_entry(remove_ce, &remove_ovcs->cset.entries, node) { entries 229 drivers/pci/hotplug/rpaphp_core.c unsigned int entries; entries 239 drivers/pci/hotplug/rpaphp_core.c value = of_prop_next_u32(info, NULL, &entries); entries 245 drivers/pci/hotplug/rpaphp_core.c for (j = 0; j < entries; j++) { entries 695 drivers/pci/msi.c struct msix_entry *entries, int nvec, entries 720 drivers/pci/msi.c if (entries) entries 721 drivers/pci/msi.c entry->msi_attrib.entry_nr = entries[i].entry; entries 742 drivers/pci/msi.c struct msix_entry *entries) entries 749 drivers/pci/msi.c if (entries) entries 750 drivers/pci/msi.c entries[i++].vector = entry->irq; entries 774 drivers/pci/msi.c static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, entries 790 drivers/pci/msi.c ret = msix_setup_entries(dev, base, entries, nvec, affd); entries 811 drivers/pci/msi.c msix_program_entries(dev, entries); entries 970 drivers/pci/msi.c static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, entries 985 drivers/pci/msi.c if (entries) { entries 988 drivers/pci/msi.c if (entries[i].entry >= nr_entries) entries 991 drivers/pci/msi.c if (entries[i].entry == entries[j].entry) entries 1002 drivers/pci/msi.c return msix_capability_init(dev, entries, nvec, affd); entries 1117 drivers/pci/msi.c struct msix_entry *entries, int minvec, entries 1136 drivers/pci/msi.c rc = __pci_enable_msix(dev, entries, nvec, affd, flags); entries 1164 drivers/pci/msi.c int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, entries 1167 drivers/pci/msi.c return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL, 0); entries 175 drivers/platform/chrome/cros_usbpd_logger.c int entries = 0; entries 178 drivers/platform/chrome/cros_usbpd_logger.c while (entries++ < CROS_USBPD_MAX_LOG_ENTRIES) { entries 99 drivers/platform/chrome/wilco_ec/event.c struct ec_event *entries[0]; entries 110 drivers/platform/chrome/wilco_ec/event.c q = kzalloc(struct_size(q, entries, capacity), GFP_KERNEL); entries 122 drivers/platform/chrome/wilco_ec/event.c return q->head == q->tail && !q->entries[q->head]; entries 128 drivers/platform/chrome/wilco_ec/event.c return q->head == q->tail && q->entries[q->head]; entries 138 drivers/platform/chrome/wilco_ec/event.c ev = q->entries[q->tail]; entries 139 drivers/platform/chrome/wilco_ec/event.c q->entries[q->tail] = NULL; entries 156 drivers/platform/chrome/wilco_ec/event.c q->entries[q->head] = ev; entries 723 drivers/rapidio/devices/tsi721.c struct msix_entry entries[TSI721_VECT_MAX];
entries 727 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE); entries 728 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT; entries 737 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_IMB0_RCV + i].entry = entries 739 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_IMB0_INT + i].entry = entries 741 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_OMB0_DONE + i].entry = entries 743 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_OMB0_INT + i].entry = entries 754 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_DMA0_DONE + i].entry = entries 756 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_DMA0_INT + i].entry = entries 761 drivers/rapidio/devices/tsi721.c err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries)); entries 771 drivers/rapidio/devices/tsi721.c priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector; entries 774 drivers/rapidio/devices/tsi721.c priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector; entries 780 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_IMB0_RCV + i].vector; entries 786 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_IMB0_INT + i].vector; entries 792 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_OMB0_DONE + i].vector; entries 798 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_OMB0_INT + i].vector; entries 807 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_DMA0_DONE + i].vector; entries 813 drivers/rapidio/devices/tsi721.c entries[TSI721_VECT_DMA0_INT + i].vector; entries 1874 drivers/rapidio/devices/tsi721.c int mbox, int entries) entries 1880 drivers/rapidio/devices/tsi721.c if ((entries < TSI721_OMSGD_MIN_RING_SIZE) || entries 1881 drivers/rapidio/devices/tsi721.c (entries > (TSI721_OMSGD_RING_SIZE)) || entries 1882 drivers/rapidio/devices/tsi721.c (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) { entries 1893 drivers/rapidio/devices/tsi721.c priv->omsg_ring[mbox].size = entries; entries 1899 drivers/rapidio/devices/tsi721.c for (i = 0; i < entries; i++) { entries 1916 drivers/rapidio/devices/tsi721.c (entries + 1) * sizeof(struct tsi721_omsg_desc), entries 1928 drivers/rapidio/devices/tsi721.c priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); entries 1995 drivers/rapidio/devices/tsi721.c bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29); entries 1996 drivers/rapidio/devices/tsi721.c bd_ptr[entries].msg_info = 0; entries 1997 drivers/rapidio/devices/tsi721.c bd_ptr[entries].next_lo = entries 2000 drivers/rapidio/devices/tsi721.c bd_ptr[entries].next_hi = entries 2027 drivers/rapidio/devices/tsi721.c (entries + 1) * sizeof(struct tsi721_omsg_desc), entries 2161 drivers/rapidio/devices/tsi721.c int mbox, int entries) entries 2169 drivers/rapidio/devices/tsi721.c if ((entries < TSI721_IMSGD_MIN_RING_SIZE) || entries 2170 drivers/rapidio/devices/tsi721.c (entries > TSI721_IMSGD_RING_SIZE) || entries 2171 drivers/rapidio/devices/tsi721.c (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) { entries 2183 drivers/rapidio/devices/tsi721.c priv->imsg_ring[mbox].size = entries; entries 2194 drivers/rapidio/devices/tsi721.c entries * TSI721_MSG_BUFFER_SIZE, entries 2208 drivers/rapidio/devices/tsi721.c entries * 8, entries 2222 drivers/rapidio/devices/tsi721.c entries * sizeof(struct tsi721_imsg_desc), entries 2235 drivers/rapidio/devices/tsi721.c for (i = 0; i < entries; i++) entries 2263 drivers/rapidio/devices/tsi721.c
iowrite32(TSI721_DMAC_DSSZ_SIZE(entries), entries 2272 drivers/rapidio/devices/tsi721.c iowrite32(TSI721_DMAC_DSSZ_SIZE(entries), entries 2313 drivers/rapidio/devices/tsi721.c priv->imsg_ring[mbox].fq_wrptr = entries - 1; entries 2314 drivers/rapidio/devices/tsi721.c iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch)); entries 235 drivers/rapidio/rio.c int entries, entries 262 drivers/rapidio/rio.c rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries); entries 320 drivers/rapidio/rio.c int entries, entries 346 drivers/rapidio/rio.c rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries); entries 1619 drivers/rapidio/rio_cm.c u32 entries; entries 1625 drivers/rapidio/rio_cm.c if (copy_from_user(&entries, arg, sizeof(entries))) entries 1627 drivers/rapidio/rio_cm.c if (entries == 0 || entries > RIO_MAX_MPORTS) entries 1629 drivers/rapidio/rio_cm.c buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL); entries 1637 drivers/rapidio/rio_cm.c if (count++ < entries) { entries 44 drivers/rpmsg/qcom_glink_rpm.c struct rpm_toc_entry entries[]; entries 217 drivers/rpmsg/qcom_glink_rpm.c id = le32_to_cpu(toc->entries[i].id); entries 218 drivers/rpmsg/qcom_glink_rpm.c offset = le32_to_cpu(toc->entries[i].offset); entries 219 drivers/rpmsg/qcom_glink_rpm.c size = le32_to_cpu(toc->entries[i].size); entries 211 drivers/s390/char/sclp.h u32 entries[0]; entries 243 drivers/s390/char/sclp_cmd.c u32 entries[0]; entries 265 drivers/s390/char/sclp_cmd.c if (sccb->entries[i]) entries 266 drivers/s390/char/sclp_cmd.c sclp_unassign_storage(sccb->entries[i] >> 16); entries 499 drivers/s390/char/sclp_cmd.c if (!sccb->entries[i]) entries 502 drivers/s390/char/sclp_cmd.c insert_increment(sccb->entries[i] >> 16, 0, 1); entries 509 drivers/s390/char/sclp_cmd.c if (!sccb->entries[i]) entries 512 drivers/s390/char/sclp_cmd.c insert_increment(sccb->entries[i] >> 16, 1, 1); entries 334 drivers/s390/char/sclp_early_core.c if (!sccb->entries[sn]) entries 336 drivers/s390/char/sclp_early_core.c rn = sccb->entries[sn] >> 16; entries 246 drivers/s390/cio/chsc.h } entries; entries 1828 drivers/s390/cio/qdio_main.c &rr->entries.l3_ipv6[i]); entries 1832 drivers/s390/cio/qdio_main.c &rr->entries.l3_ipv4[i]); entries 1836 drivers/s390/cio/qdio_main.c &rr->entries.l2[i]); entries 618 drivers/s390/net/qeth_core.h struct list_head entries; entries 1357 drivers/s390/net/qeth_core_main.c INIT_LIST_HEAD(&card->ipato.entries); entries 123 drivers/s390/net/qeth_l3_main.c list_for_each_entry(ipatoe, &card->ipato.entries, entry) { entries 579 drivers/s390/net/qeth_l3_main.c list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { entries 598 drivers/s390/net/qeth_l3_main.c list_for_each_entry(ipatoe, &card->ipato.entries, entry) { entries 610 drivers/s390/net/qeth_l3_main.c list_add_tail(&new->entry, &card->ipato.entries); entries 630 drivers/s390/net/qeth_l3_main.c list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { entries 456 drivers/s390/net/qeth_l3_sys.c list_for_each_entry(ipatoe, &card->ipato.entries, entry) { entries 990 drivers/scsi/aacraid/aacraid.h u32 entries; /*Number of queue entries */ entries 272 drivers/scsi/aacraid/comminit.c q->entries = qsize; entries 810 drivers/scsi/aacraid/commsup.c if (le32_to_cpu(*q->headers.consumer) >= q->entries) entries 838 drivers/scsi/aacraid/commsup.c if (le32_to_cpu(*q->headers.consumer) >= q->entries) entries 56 drivers/scsi/arm/msgqueue.c msgq->free = &msgq->entries[0]; entries 59 drivers/scsi/arm/msgqueue.c msgq->entries[i].next = 
&msgq->entries[i + 1]; entries 61 drivers/scsi/arm/msgqueue.c msgq->entries[NR_MESSAGES - 1].next = NULL; entries 28 drivers/scsi/arm/msgqueue.h struct msgqueue_entry entries[NR_MESSAGES]; entries 78 drivers/scsi/fnic/vnic_rq.h #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ entries 79 drivers/scsi/fnic/vnic_rq.h DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES) entries 83 drivers/scsi/fnic/vnic_wq.h #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ entries 84 drivers/scsi/fnic/vnic_wq.h DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES) entries 409 drivers/scsi/gdth.h u32 entries; /* number of elements */ entries 645 drivers/scsi/gdth.h u32 entries; /* entry count */ entries 254 drivers/scsi/gdth_proc.c pds->entries = ha->raw[i].pdev_cnt; entries 257 drivers/scsi/gdth_proc.c if (pds->entries > cnt) entries 258 drivers/scsi/gdth_proc.c pds->entries = cnt; entries 490 drivers/scsi/gdth_proc.c phg->entries = MAX_HDRIVES; entries 497 drivers/scsi/gdth_proc.c for (j = 0; j < phg->entries; ++j) { entries 3505 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c int entries, entries_old = 0, time; entries 3508 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT); entries 3509 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c if (entries == entries_old) entries 3512 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c entries_old = entries; entries 2601 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c int entries, entries_old = 0, time; entries 2604 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT); entries 2605 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c if (entries == entries_old) entries 2608 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c entries_old = entries; entries 7796 drivers/scsi/ipr.c int entries, found, flag, i; entries 7812 drivers/scsi/ipr.c entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries); entries 7814 drivers/scsi/ipr.c entries = ioa_cfg->u.cfg_table->hdr.num_entries; entries 7816 drivers/scsi/ipr.c for (i = 0; i < entries; i++) { entries 219 drivers/scsi/lpfc/lpfc_ct.c uint32_t size, int *entries) entries 270 drivers/scsi/lpfc/lpfc_ct.c *entries = i; entries 5512 drivers/scsi/pmcraid.c cfgte = &pinstance->cfg_table->entries[i]; entries 368 drivers/scsi/pmcraid.h entries[PMCRAID_MAX_RESOURCES]; entries 2952 drivers/scsi/qla2xxx/qla_def.h } entries[1]; entries 2982 drivers/scsi/qla2xxx/qla_def.h entries[MAX_FIBRE_DEVICES_MAX]; entries 64 drivers/scsi/qla2xxx/qla_dfs.c uint16_t entries, loop_id; entries 80 drivers/scsi/qla2xxx/qla_dfs.c &entries); entries 88 drivers/scsi/qla2xxx/qla_dfs.c for (i = 0; i < entries; i++) { entries 1219 drivers/scsi/qla2xxx/qla_fw.h struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC]; entries 1549 drivers/scsi/qla2xxx/qla_fw.h uint16_t entries; entries 2013 drivers/scsi/qla2xxx/qla_fw.h struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC]; entries 335 drivers/scsi/qla2xxx/qla_gs.c gid_data = &ct_rsp->rsp.gid_pt.entries[i]; entries 1519 drivers/scsi/qla2xxx/qla_gs.c void *entries; entries 1541 drivers/scsi/qla2xxx/qla_gs.c entries = &ct_req->req; entries 1544 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1554 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1567 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1587 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1600 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1613 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1637 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1650 
drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1663 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1682 drivers/scsi/qla2xxx/qla_gs.c entries, size); entries 1729 drivers/scsi/qla2xxx/qla_gs.c void *entries; entries 1750 drivers/scsi/qla2xxx/qla_gs.c entries = &ct_req->req; entries 1753 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1765 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1805 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1848 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1861 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1874 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1898 drivers/scsi/qla2xxx/qla_gs.c entries, size); entries 1940 drivers/scsi/qla2xxx/qla_gs.c void *entries; entries 1963 drivers/scsi/qla2xxx/qla_gs.c entries = &ct_req->req; entries 1966 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1976 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 1990 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2010 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2023 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2036 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2060 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2073 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2087 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2100 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2120 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2131 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2144 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2154 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2164 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2174 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2187 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2206 drivers/scsi/qla2xxx/qla_gs.c entries, size); entries 2302 drivers/scsi/qla2xxx/qla_gs.c void *entries; entries 2322 drivers/scsi/qla2xxx/qla_gs.c entries = &ct_req->req; entries 2325 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2344 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2384 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2419 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2432 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2445 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2464 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2474 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2484 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2497 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2507 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2517 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2527 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2550 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2560 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2570 drivers/scsi/qla2xxx/qla_gs.c eiter = entries + size; entries 2585 drivers/scsi/qla2xxx/qla_gs.c entries, size); entries 3781 drivers/scsi/qla2xxx/qla_gs.c d = &ct_rsp->entries[i]; entries 5040 drivers/scsi/qla2xxx/qla_init.c uint16_t entries; entries 5092 drivers/scsi/qla2xxx/qla_init.c entries = MAX_FIBRE_DEVICES_LOOP; 
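
Note on the dense run of drivers/scsi/qla2xxx/qla_gs.c rows above: they all come from registration code that builds one response buffer by appending variable-length attribute entries at a running byte offset ("eiter = entries + size;"). A minimal userspace sketch of that append-at-offset shape follows; the attr_entry layout and the attr_put name are illustrative assumptions, not the driver's actual types or padding rules.

#include <stdint.h>
#include <string.h>

/* Hypothetical attribute record; the real qla2xxx types differ. */
struct attr_entry {
        uint16_t type;
        uint16_t len;      /* header + payload, padded to 4 bytes */
        uint8_t  value[];
};

/*
 * Append one attribute at byte offset 'size' and return the new
 * offset, mirroring the repeated "eiter = entries + size;" rows.
 */
static size_t attr_put(void *entries, size_t size, uint16_t type,
                       const void *val, uint16_t vlen)
{
        struct attr_entry *eiter = (void *)((uint8_t *)entries + size);
        uint16_t total = (sizeof(*eiter) + vlen + 3) & ~3u;

        eiter->type = type;
        eiter->len = total;
        memcpy(eiter->value, val, vlen);
        return size + total;   /* caller threads 'size' through each call */
}
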
entries 5097 drivers/scsi/qla2xxx/qla_init.c &entries); entries 5102 drivers/scsi/qla2xxx/qla_init.c "Entries in ID list (%d).\n", entries); entries 5104 drivers/scsi/qla2xxx/qla_init.c ha->gid_list, entries * sizeof(*ha->gid_list)); entries 5106 drivers/scsi/qla2xxx/qla_init.c if (entries == 0) { entries 5135 drivers/scsi/qla2xxx/qla_init.c for (index = 0; index < entries; index++) { entries 8728 drivers/scsi/qla2xxx/qla_init.c int i, entries; entries 8740 drivers/scsi/qla2xxx/qla_init.c entries = ha->fcp_prio_cfg->num_entries; entries 8743 drivers/scsi/qla2xxx/qla_init.c for (i = 0; i < entries; i++) { entries 2809 drivers/scsi/qla2xxx/qla_mbx.c uint16_t *entries) entries 2847 drivers/scsi/qla2xxx/qla_mbx.c *entries = mcp->mb[1]; entries 6429 drivers/scsi/qla2xxx/qla_mbx.c void *id_list, dma_addr_t id_list_dma, uint16_t *entries) entries 6451 drivers/scsi/qla2xxx/qla_mbx.c *entries = mc.mb[1]; entries 1566 drivers/scsi/qla2xxx/qla_nx.c __le32 entries = cpu_to_le32(directory->num_entries); entries 1568 drivers/scsi/qla2xxx/qla_nx.c for (i = 0; i < entries; i++) { entries 1850 drivers/scsi/qla2xxx/qla_nx.c __le32 entries; entries 1862 drivers/scsi/qla2xxx/qla_nx.c entries = cpu_to_le32(ptab_desc->num_entries); entries 1864 drivers/scsi/qla2xxx/qla_nx.c for (i = 0; i < entries; i++) { entries 944 drivers/scsi/qla2xxx/qla_nx2.c int index, entries; entries 950 drivers/scsi/qla2xxx/qla_nx2.c entries = vha->reset_tmplt.hdr->entries; entries 953 drivers/scsi/qla2xxx/qla_nx2.c for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) { entries 223 drivers/scsi/qla2xxx/qla_nx2.h uint16_t entries; entries 1123 drivers/scsi/qla2xxx/qla_sup.c le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), entries 1138 drivers/scsi/qla2xxx/qla_sup.c cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1; entries 1145 drivers/scsi/qla2xxx/qla_sup.c le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries), entries 1151 drivers/scsi/qla2xxx/qla_sup.c cnt = le16_to_cpu(hdr.entries); entries 1292 drivers/scsi/qla2xxx/qla_target.c uint16_t entries; entries 1304 drivers/scsi/qla2xxx/qla_target.c rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); entries 1315 drivers/scsi/qla2xxx/qla_target.c for (i = 0; i < entries; i++) { entries 1119 drivers/scsi/qla4xxx/ql4_83xx.c int index, entries; entries 1125 drivers/scsi/qla4xxx/ql4_83xx.c entries = ha->reset_tmplt.hdr->entries; entries 1128 drivers/scsi/qla4xxx/ql4_83xx.c for (; (!ha->reset_tmplt.seq_end) && (index < entries); index++) { entries 183 drivers/scsi/qla4xxx/ql4_83xx.h __le16 entries; entries 821 drivers/scsi/qlogicpti.c #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) entries 1388 drivers/scsi/qlogicpti.c #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) entries 1427 drivers/scsi/qlogicpti.c #define QSIZE(entries) (((entries) + 1) * QUEUE_ENTRY_LEN) entries 63 drivers/scsi/snic/vnic_wq.h #define VNIC_WQ_BUF_BLK_ENTRIES(entries) \ entries 64 drivers/scsi/snic/vnic_wq.h ((unsigned int)(entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? 
\ entries 68 drivers/scsi/snic/vnic_wq.h #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ entries 69 drivers/scsi/snic/vnic_wq.h DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES) entries 70 drivers/scsi/snic/vnic_wq.h #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ entries 71 drivers/scsi/snic/vnic_wq.h DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES) entries 203 drivers/sh/intc/virq.c struct intc_subgroup_entry *entries[32]; entries 212 drivers/sh/intc/virq.c (void ***)entries, 0, ARRAY_SIZE(entries), entries 219 drivers/sh/intc/virq.c entry = radix_tree_deref_slot((void **)entries[i]); entries 255 drivers/sh/intc/virq.c radix_tree_replace_slot(&d->tree, (void **)entries[i], entries 73 drivers/soc/qcom/smp2p.c } entries[SMP2P_MAX_ENTRY]; entries 203 drivers/soc/qcom/smp2p.c memcpy(buf, in->entries[i].name, sizeof(buf)); entries 205 drivers/soc/qcom/smp2p.c entry->value = &in->entries[i].value; entries 350 drivers/soc/qcom/smp2p.c memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME); entries 353 drivers/soc/qcom/smp2p.c entry->value = &out->entries[out->valid_entries].value; entries 89 drivers/soc/qcom/smsm.c struct smsm_entry *entries; entries 477 drivers/soc/qcom/smsm.c smsm->entries = devm_kcalloc(&pdev->dev, entries 481 drivers/soc/qcom/smsm.c if (!smsm->entries) entries 562 drivers/soc/qcom/smsm.c entry = &smsm->entries[id]; entries 582 drivers/soc/qcom/smsm.c if (smsm->entries[id].domain) entries 583 drivers/soc/qcom/smsm.c irq_domain_remove(smsm->entries[id].domain); entries 596 drivers/soc/qcom/smsm.c if (smsm->entries[id].domain) entries 597 drivers/soc/qcom/smsm.c irq_domain_remove(smsm->entries[id].domain); entries 908 drivers/staging/exfat/exfat.h struct uni_name_t *p_uniname, s32 *entries, entries 2564 drivers/staging/exfat/exfat_core.c struct uni_name_t *p_uniname, s32 *entries, entries 2597 drivers/staging/exfat/exfat_core.c *entries = num_entries; entries 180 drivers/staging/gasket/gasket_page_table.c struct gasket_page_table_entry *entries; entries 263 drivers/staging/gasket/gasket_page_table.c pg_tbl->entries = vzalloc(bytes); entries 264 drivers/staging/gasket/gasket_page_table.c if (!pg_tbl->entries) { entries 354 drivers/staging/gasket/gasket_page_table.c for (pte = pg_tbl->entries + pg_tbl->num_simple_entries, entries 356 drivers/staging/gasket/gasket_page_table.c pte < pg_tbl->entries + pg_tbl->config.total_entries; entries 383 drivers/staging/gasket/gasket_page_table.c vfree(pg_tbl->entries); entries 384 drivers/staging/gasket/gasket_page_table.c pg_tbl->entries = NULL; entries 407 drivers/staging/gasket/gasket_page_table.c if (pg_tbl->entries[i].status != PTE_FREE) { entries 578 drivers/staging/gasket/gasket_page_table.c if (!gasket_is_pte_range_free(pg_tbl->entries + entries 634 drivers/staging/gasket/gasket_page_table.c gasket_perform_unmapping(pg_tbl, pg_tbl->entries + slot, entries 651 drivers/staging/gasket/gasket_page_table.c pte = pg_tbl->entries + pg_tbl->num_simple_entries + entries 832 drivers/staging/gasket/gasket_page_table.c ret = gasket_perform_mapping(pg_tbl, pg_tbl->entries + slot_idx, entries 918 drivers/staging/gasket/gasket_page_table.c pte = pg_tbl->entries + pg_tbl->num_simple_entries + entries 978 drivers/staging/gasket/gasket_page_table.c pte = pg_tbl->entries + pg_tbl->num_simple_entries + entries 1101 drivers/staging/gasket/gasket_page_table.c pte = pg_tbl->entries + page_num; entries 1110 drivers/staging/gasket/gasket_page_table.c pte = pg_tbl->entries + pg_tbl->num_simple_entries + page_num; entries 2213 
drivers/staging/media/ipu3/include/intel-ipu3.h entries[IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS]; entries 2226 drivers/staging/media/ipu3/include/intel-ipu3.h __u16 entries[IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS]; entries 2236 drivers/staging/media/ipu3/include/intel-ipu3.h __u16 entries[IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS]; entries 2246 drivers/staging/media/ipu3/include/intel-ipu3.h __s16 entries[IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS]; entries 909 drivers/staging/media/ipu3/ipu3-abi.h u16 entries[IMGU_ABI_YUVP2_YTM_LUT_ENTRIES]; entries 2229 drivers/staging/media/ipu3/ipu3-css-params.c acc->ytm.entries[i] = i * 32; entries 2264 drivers/staging/media/ipu3/ipu3-css-params.c acc->tcc.macc_table.entries[i].a = 1024; entries 2265 drivers/staging/media/ipu3/ipu3-css-params.c acc->tcc.macc_table.entries[i].b = 0; entries 2266 drivers/staging/media/ipu3/ipu3-css-params.c acc->tcc.macc_table.entries[i].c = 0; entries 2267 drivers/staging/media/ipu3/ipu3-css-params.c acc->tcc.macc_table.entries[i].d = 1024; entries 2270 drivers/staging/media/ipu3/ipu3-css-params.c acc->tcc.inv_y_lut.entries[6] = 1023; entries 2272 drivers/staging/media/ipu3/ipu3-css-params.c acc->tcc.inv_y_lut.entries[i] = 1024 >> (i - 6); entries 265 drivers/staging/rtl8192e/rtl8192e/rtl_core.c if (ring->entries - skb_queue_len(&ring->queue) >= 2) entries 1589 drivers/staging/rtl8192e/rtl8192e/rtl_core.c ring->idx = (ring->idx + 1) % ring->entries; entries 1592 drivers/staging/rtl8192e/rtl8192e/rtl_core.c pci_free_consistent(priv->pdev, sizeof(*ring->desc) * ring->entries, entries 1676 drivers/staging/rtl8192e/rtl8192e/rtl_core.c ring->idx = (ring->idx + 1) % ring->entries; entries 1701 drivers/staging/rtl8192e/rtl8192e/rtl_core.c idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; entries 1751 drivers/staging/rtl8192e/rtl8192e/rtl_core.c idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; entries 1828 drivers/staging/rtl8192e/rtl8192e/rtl_core.c unsigned int entries) entries 1835 drivers/staging/rtl8192e/rtl8192e/rtl_core.c ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma); entries 1844 drivers/staging/rtl8192e/rtl8192e/rtl_core.c priv->tx_ring[prio].entries = entries; entries 1847 drivers/staging/rtl8192e/rtl8192e/rtl_core.c for (i = 0; i < entries; i++) entries 1849 drivers/staging/rtl8192e/rtl8192e/rtl_core.c (u32)dma + ((i + 1) % entries) * entries 1913 drivers/staging/rtl8192e/rtl8192e/rtl_core.c ring->idx = (ring->idx + 1) % ring->entries; entries 265 drivers/staging/rtl8192e/rtl8192e/rtl_core.h unsigned int entries; entries 37 drivers/staging/uwb/est.c u8 entries; entries 243 drivers/staging/uwb/est.c const struct uwb_est_entry *entry, size_t entries) entries 269 drivers/staging/uwb/est.c uwb_est[itr].entries = entries; entries 292 drivers/staging/uwb/est.c const struct uwb_est_entry *entry, size_t entries) entries 301 drivers/staging/uwb/est.c .entries = entries entries 348 drivers/staging/uwb/est.c if (event_low >= est->entries) { /* in range? 
*/ entries 351 drivers/staging/uwb/est.c est->entries, event_low); entries 359 drivers/staging/uwb/est.c est->entries, event_low); entries 380 drivers/staging/uwb/est.c est->product, est->entries); entries 81 drivers/staging/uwb/uwb.h size_t entries; entries 667 drivers/staging/uwb/uwb.h const struct uwb_est_entry *, size_t entries); entries 669 drivers/staging/uwb/uwb.h const struct uwb_est_entry *, size_t entries); entries 476 drivers/staging/wilc1000/wilc_wlan.c int i, entries = 0; entries 578 drivers/staging/wilc1000/wilc_wlan.c entries = ((reg >> 3) & 0x3f); entries 590 drivers/staging/wilc1000/wilc_wlan.c if (entries == 0) { entries 606 drivers/staging/wilc1000/wilc_wlan.c if (entries == 0) { entries 661 drivers/staging/wilc1000/wilc_wlan.c } while (--entries); entries 28 drivers/thunderbolt/property.c struct tb_property_entry entries[]; entries 33 drivers/thunderbolt/property.c struct tb_property_entry entries[]; entries 164 drivers/thunderbolt/property.c const struct tb_property_entry *entries; entries 187 drivers/thunderbolt/property.c entries = (const struct tb_property_entry *)&block[content_offset]; entries 188 drivers/thunderbolt/property.c nentries = content_len / (sizeof(*entries) / 4); entries 195 drivers/thunderbolt/property.c property = tb_property_parse(block, block_len, &entries[i]); entries 417 drivers/thunderbolt/property.c entry = pe->entries; entries 424 drivers/thunderbolt/property.c entry = re->entries; entries 297 drivers/tty/vt/vt_ioctl.c return con_set_unimap(vc, tmp.entry_ct, tmp.entries); entries 301 drivers/tty/vt/vt_ioctl.c return con_get_unimap(vc, tmp.entry_ct, &(user_ud->entry_ct), tmp.entries); entries 1162 drivers/tty/vt/vt_ioctl.c compat_caddr_t entries; entries 1174 drivers/tty/vt/vt_ioctl.c tmp_entries = compat_ptr(tmp.entries); entries 262 drivers/usb/core/message.c while (io->entries--) entries 263 drivers/usb/core/message.c usb_free_urb(io->urbs[io->entries]); entries 311 drivers/usb/core/message.c for (i = 0, found = 0; i < io->entries; i++) { entries 387 drivers/usb/core/message.c io->entries = 1; entries 390 drivers/usb/core/message.c io->entries = nents; entries 394 drivers/usb/core/message.c io->urbs = kmalloc_array(io->entries, sizeof(*io->urbs), mem_flags); entries 402 drivers/usb/core/message.c for_each_sg(sg, sg, io->entries, i) { entries 408 drivers/usb/core/message.c io->entries = i; entries 451 drivers/usb/core/message.c io->entries = i + 1; entries 459 drivers/usb/core/message.c io->count = io->entries; entries 518 drivers/usb/core/message.c int entries = io->entries; entries 523 drivers/usb/core/message.c while (i < entries && !io->status) { entries 562 drivers/usb/core/message.c io->count -= entries - i; entries 600 drivers/usb/core/message.c for (i = io->entries - 1; i >= 0; --i) { entries 1808 drivers/usb/host/xhci-mem.c erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev, entries 1810 drivers/usb/host/xhci-mem.c if (!erst->entries) entries 1817 drivers/usb/host/xhci-mem.c entry = &erst->entries[val]; entries 1833 drivers/usb/host/xhci-mem.c if (erst->entries) entries 1835 drivers/usb/host/xhci-mem.c erst->entries, entries 1837 drivers/usb/host/xhci-mem.c erst->entries = NULL; entries 1628 drivers/usb/host/xhci.h struct xhci_erst_entry *entries; entries 140 drivers/vfio/vfio_iommu_spapr_tce.c unsigned long entries = size >> PAGE_SHIFT; entries 146 drivers/vfio/vfio_iommu_spapr_tce.c mem = mm_iommu_get(container->mm, vaddr, entries); entries 155 drivers/vfio/vfio_iommu_spapr_tce.c ret = mm_iommu_new(container->mm, 
vaddr, entries, &mem); entries 494 drivers/video/fbdev/aty/aty128fb.c static void do_wait_for_fifo(u16 entries, struct aty128fb_par *par); entries 495 drivers/video/fbdev/aty/aty128fb.c static void wait_for_fifo(u16 entries, struct aty128fb_par *par); entries 641 drivers/video/fbdev/aty/aty128fb.c static void do_wait_for_fifo(u16 entries, struct aty128fb_par *par) entries 648 drivers/video/fbdev/aty/aty128fb.c if (par->fifo_slots >= entries) entries 675 drivers/video/fbdev/aty/aty128fb.c static void wait_for_fifo(u16 entries, struct aty128fb_par *par) entries 677 drivers/video/fbdev/aty/aty128fb.c if (par->fifo_slots < entries) entries 679 drivers/video/fbdev/aty/aty128fb.c par->fifo_slots -= entries; entries 350 drivers/video/fbdev/aty/atyfb.h static inline void wait_for_fifo(u16 entries, struct atyfb_par *par) entries 353 drivers/video/fbdev/aty/atyfb.h while (entries > fifo_space) { entries 356 drivers/video/fbdev/aty/atyfb.h par->fifo_space = fifo_space - entries; entries 358 drivers/video/fbdev/aty/radeon_base.c void _radeon_fifo_wait(struct radeonfb_info *rinfo, int entries) entries 363 drivers/video/fbdev/aty/radeon_base.c if ((INREG(RBBM_STATUS) & 0x7f) >= entries) entries 471 drivers/video/fbdev/aty/radeonfb.h void _radeon_fifo_wait(struct radeonfb_info *rinfo, int entries); entries 476 drivers/video/fbdev/aty/radeonfb.h #define radeon_fifo_wait(entries) _radeon_fifo_wait(rinfo,entries) entries 922 drivers/video/fbdev/uvesafb.c static int uvesafb_setpalette(struct uvesafb_pal_entry *entries, int count, entries 945 drivers/video/fbdev/uvesafb.c outb_p(entries[i].red, dac_val); entries 946 drivers/video/fbdev/uvesafb.c outb_p(entries[i].green, dac_val); entries 947 drivers/video/fbdev/uvesafb.c outb_p(entries[i].blue, dac_val); entries 959 drivers/video/fbdev/uvesafb.c "D" (entries), /* EDI */ entries 976 drivers/video/fbdev/uvesafb.c task->buf = entries; entries 1040 drivers/video/fbdev/uvesafb.c struct uvesafb_pal_entry *entries; entries 1049 drivers/video/fbdev/uvesafb.c entries = kmalloc_array(cmap->len, sizeof(*entries), entries 1051 drivers/video/fbdev/uvesafb.c if (!entries) entries 1055 drivers/video/fbdev/uvesafb.c entries[i].red = cmap->red[i] >> shift; entries 1056 drivers/video/fbdev/uvesafb.c entries[i].green = cmap->green[i] >> shift; entries 1057 drivers/video/fbdev/uvesafb.c entries[i].blue = cmap->blue[i] >> shift; entries 1058 drivers/video/fbdev/uvesafb.c entries[i].pad = 0; entries 1060 drivers/video/fbdev/uvesafb.c err = uvesafb_setpalette(entries, cmap->len, cmap->start, info); entries 1061 drivers/video/fbdev/uvesafb.c kfree(entries); entries 263 drivers/video/fbdev/w100fb.c static void w100_fifo_wait(int entries) entries 270 drivers/video/fbdev/w100fb.c if (status.f.cmdfifo_avail >= entries) entries 1140 drivers/vme/bridges/vme_ca91cx42.c list_add_tail(&entry->list, &list->entries); entries 1143 drivers/vme/bridges/vme_ca91cx42.c if (entry->list.prev != &list->entries) { entries 1208 drivers/vme/bridges/vme_ca91cx42.c entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry, entries 1277 drivers/vme/bridges/vme_ca91cx42.c list_for_each_safe(pos, temp, &list->entries) { entries 1748 drivers/vme/bridges/vme_tsi148.c list_add_tail(&entry->list, &list->entries); entries 1761 drivers/vme/bridges/vme_tsi148.c if (entry->list.prev != &list->entries) { entries 1841 drivers/vme/bridges/vme_tsi148.c entry = list_first_entry(&list->entries, struct tsi148_dma_entry, entries 1907 drivers/vme/bridges/vme_tsi148.c list_for_each_safe(pos, temp, &list->entries) { 
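
Note on the vme_ca91cx42.c/vme_tsi148.c rows above and the vme.c/vme_bridge.h rows that follow: they show the kernel's intrusive-list shape, where the container exposes a struct list_head named entries as the list head and each element embeds its own link, so one allocation per element carries both data and chaining. A self-contained userspace sketch of that shape, with simplified stand-ins for the <linux/list.h> helpers:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

struct dma_list  { struct list_head entries; };        /* list head, as in vme_bridge.h */
struct dma_entry { struct list_head list; int num; };  /* link kept first for the cast below */

int main(void)
{
        struct dma_list dl;
        struct dma_entry e[3];
        struct list_head *pos;

        INIT_LIST_HEAD(&dl.entries);
        for (int i = 0; i < 3; i++) {
                e[i].num = i;
                list_add_tail(&e[i].list, &dl.entries);
        }
        /* walk the ring; the kernel would use container_of() here */
        for (pos = dl.entries.next; pos != &dl.entries; pos = pos->next)
                printf("entry %d\n", ((struct dma_entry *)pos)->num);
        return 0;
}
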
entries 955 drivers/vme/vme.c INIT_LIST_HEAD(&dma_list->entries); entries 57 drivers/vme/vme_bridge.h struct list_head entries; entries 316 drivers/watchdog/wdat_wdt.c const struct acpi_wdat_entry *entries; entries 373 drivers/watchdog/wdat_wdt.c entries = (struct acpi_wdat_entry *)(tbl + 1); entries 374 drivers/watchdog/wdat_wdt.c for (i = 0; i < tbl->entries; i++) { entries 382 drivers/watchdog/wdat_wdt.c action = entries[i].action; entries 393 drivers/watchdog/wdat_wdt.c instr->entry = entries[i]; entries 395 drivers/watchdog/wdat_wdt.c gas = &entries[i].register_region; entries 213 drivers/xen/xen-pciback/pciback_ops.c struct msix_entry *entries; entries 235 drivers/xen/xen-pciback/pciback_ops.c entries = kmalloc_array(op->value, sizeof(*entries), GFP_KERNEL); entries 236 drivers/xen/xen-pciback/pciback_ops.c if (entries == NULL) entries 240 drivers/xen/xen-pciback/pciback_ops.c entries[i].entry = op->msix_entries[i].entry; entries 241 drivers/xen/xen-pciback/pciback_ops.c entries[i].vector = op->msix_entries[i].vector; entries 244 drivers/xen/xen-pciback/pciback_ops.c result = pci_enable_msix_exact(dev, entries, op->value); entries 247 drivers/xen/xen-pciback/pciback_ops.c op->msix_entries[i].entry = entries[i].entry; entries 248 drivers/xen/xen-pciback/pciback_ops.c if (entries[i].vector) { entries 250 drivers/xen/xen-pciback/pciback_ops.c xen_pirq_from_irq(entries[i].vector); entries 262 drivers/xen/xen-pciback/pciback_ops.c kfree(entries); entries 43 fs/binfmt_misc.c static LIST_HEAD(entries); entries 96 fs/binfmt_misc.c list_for_each(l, &entries) { entries 749 fs/binfmt_misc.c list_add(&e->list, &entries); entries 800 fs/binfmt_misc.c while (!list_empty(&entries)) entries 801 fs/binfmt_misc.c kill_node(list_first_entry(&entries, Node, list)); entries 923 fs/btrfs/free-space-cache.c int *entries, int *bitmaps, entries 951 fs/btrfs/free-space-cache.c *entries += 1; entries 986 fs/btrfs/free-space-cache.c *entries += 1; entries 1001 fs/btrfs/free-space-cache.c int entries, int bitmaps) entries 1037 fs/btrfs/free-space-cache.c btrfs_set_free_space_entries(leaf, header, entries); entries 1052 fs/btrfs/free-space-cache.c int *entries) entries 1089 fs/btrfs/free-space-cache.c *entries += 1; entries 1167 fs/btrfs/free-space-cache.c io_ctl->entries, io_ctl->bitmaps); entries 1246 fs/btrfs/free-space-cache.c int entries = 0; entries 1288 fs/btrfs/free-space-cache.c block_group, &entries, &bitmaps, entries 1301 fs/btrfs/free-space-cache.c ret = write_pinned_extent_entries(block_group, io_ctl, &entries); entries 1341 fs/btrfs/free-space-cache.c io_ctl->entries = entries; entries 48 fs/btrfs/free-space-cache.h int entries; entries 6071 fs/btrfs/inode.c static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) entries 6073 fs/btrfs/inode.c while (entries--) { entries 6106 fs/btrfs/inode.c int entries = 0; entries 6163 fs/btrfs/inode.c ret = btrfs_filldir(private->filldir_buf, entries, ctx); entries 6167 fs/btrfs/inode.c entries = 0; entries 6182 fs/btrfs/inode.c entries++; entries 6190 fs/btrfs/inode.c ret = btrfs_filldir(private->filldir_buf, entries, ctx); entries 349 fs/ceph/xattr.c XATTR_NAME_CEPH(dir, entries, 0), entries 395 fs/ext4/extents.c unsigned short entries; entries 399 fs/ext4/extents.c entries = le16_to_cpu(eh->eh_entries); entries 409 fs/ext4/extents.c while (entries) { entries 422 fs/ext4/extents.c entries--; entries 427 fs/ext4/extents.c while (entries) { entries 431 fs/ext4/extents.c entries--; entries 229 fs/ext4/namei.c struct dx_entry entries[0]; entries 
235 fs/ext4/namei.c struct dx_entry entries[0]; entries 242 fs/ext4/namei.c struct dx_entry *entries; entries 265 fs/ext4/namei.c static unsigned dx_get_count(struct dx_entry *entries); entries 266 fs/ext4/namei.c static unsigned dx_get_limit(struct dx_entry *entries); entries 267 fs/ext4/namei.c static void dx_set_count(struct dx_entry *entries, unsigned value); entries 268 fs/ext4/namei.c static void dx_set_limit(struct dx_entry *entries, unsigned value); entries 552 fs/ext4/namei.c static inline unsigned dx_get_count(struct dx_entry *entries) entries 554 fs/ext4/namei.c return le16_to_cpu(((struct dx_countlimit *) entries)->count); entries 557 fs/ext4/namei.c static inline unsigned dx_get_limit(struct dx_entry *entries) entries 559 fs/ext4/namei.c return le16_to_cpu(((struct dx_countlimit *) entries)->limit); entries 562 fs/ext4/namei.c static inline void dx_set_count(struct dx_entry *entries, unsigned value) entries 564 fs/ext4/namei.c ((struct dx_countlimit *) entries)->count = cpu_to_le16(value); entries 567 fs/ext4/namei.c static inline void dx_set_limit(struct dx_entry *entries, unsigned value) entries 569 fs/ext4/namei.c ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value); entries 595 fs/ext4/namei.c static void dx_show_index(char * label, struct dx_entry *entries) entries 597 fs/ext4/namei.c int i, n = dx_get_count (entries); entries 601 fs/ext4/namei.c i ? dx_get_hash(entries + i) : 0, entries 602 fs/ext4/namei.c (unsigned long)dx_get_block(entries + i)); entries 706 fs/ext4/namei.c struct dx_entry *entries, int levels) entries 709 fs/ext4/namei.c unsigned count = dx_get_count(entries), names = 0, space = 0, i; entries 713 fs/ext4/namei.c for (i = 0; i < count; i++, entries++) entries 715 fs/ext4/namei.c ext4_lblk_t block = dx_get_block(entries); entries 716 fs/ext4/namei.c ext4_lblk_t hash = i ? dx_get_hash(entries): 0; entries 717 fs/ext4/namei.c u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash; entries 724 fs/ext4/namei.c dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1): entries 754 fs/ext4/namei.c struct dx_entry *at, *entries, *p, *q, *m; entries 802 fs/ext4/namei.c entries = (struct dx_entry *)(((char *)&root->info) + entries 805 fs/ext4/namei.c if (dx_get_limit(entries) != dx_root_limit(dir, entries 808 fs/ext4/namei.c dx_get_limit(entries), entries 815 fs/ext4/namei.c count = dx_get_count(entries); entries 816 fs/ext4/namei.c if (!count || count > dx_get_limit(entries)) { entries 819 fs/ext4/namei.c count, dx_get_limit(entries)); entries 823 fs/ext4/namei.c p = entries + 1; entries 824 fs/ext4/namei.c q = entries + count - 1; entries 836 fs/ext4/namei.c at = entries; entries 851 fs/ext4/namei.c at == entries ? 
0 : dx_get_hash(at), entries 853 fs/ext4/namei.c frame->entries = entries; entries 864 fs/ext4/namei.c entries = ((struct dx_node *) frame->bh->b_data)->entries; entries 866 fs/ext4/namei.c if (dx_get_limit(entries) != dx_node_limit(dir)) { entries 869 fs/ext4/namei.c dx_get_limit(entries), dx_node_limit(dir)); entries 941 fs/ext4/namei.c if (++(p->at) < p->entries + dx_get_count(p->entries)) entries 974 fs/ext4/namei.c p->at = p->entries = ((struct dx_node *) bh->b_data)->entries; entries 1261 fs/ext4/namei.c struct dx_entry *entries = frame->entries; entries 1263 fs/ext4/namei.c int count = dx_get_count(entries); entries 1265 fs/ext4/namei.c assert(count < dx_get_limit(entries)); entries 1266 fs/ext4/namei.c assert(old < entries + count); entries 1267 fs/ext4/namei.c memmove(new + 1, new, (char *)(entries + count) - (char *)(new)); entries 1270 fs/ext4/namei.c dx_set_count(entries, count + 1); entries 1906 fs/ext4/namei.c dxtrace(dx_show_index("frame", frame->entries)); entries 2044 fs/ext4/namei.c struct dx_entry *entries; entries 2106 fs/ext4/namei.c entries = root->entries; entries 2107 fs/ext4/namei.c dx_set_block(entries, 1); entries 2108 fs/ext4/namei.c dx_set_count(entries, 1); entries 2109 fs/ext4/namei.c dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info))); entries 2120 fs/ext4/namei.c frame->entries = entries; entries 2121 fs/ext4/namei.c frame->at = entries; entries 2277 fs/ext4/namei.c struct dx_entry *entries, *at; entries 2289 fs/ext4/namei.c entries = frame->entries; entries 2310 fs/ext4/namei.c dx_get_count(entries), dx_get_limit(entries))); entries 2312 fs/ext4/namei.c if (dx_get_count(entries) == dx_get_limit(entries)) { entries 2322 fs/ext4/namei.c if (dx_get_count((frame - 1)->entries) < entries 2323 fs/ext4/namei.c dx_get_limit((frame - 1)->entries)) { entries 2329 fs/ext4/namei.c entries = frame->entries; entries 2344 fs/ext4/namei.c icount = dx_get_count(entries); entries 2351 fs/ext4/namei.c entries2 = node2->entries; entries 2361 fs/ext4/namei.c unsigned hash2 = dx_get_hash(entries + icount1); entries 2371 fs/ext4/namei.c memcpy((char *) entries2, (char *) (entries + icount1), entries 2373 fs/ext4/namei.c dx_set_count(entries, icount1); entries 2378 fs/ext4/namei.c if (at - entries >= icount1) { entries 2379 fs/ext4/namei.c frame->at = at = at - entries - icount1 + entries2; entries 2380 fs/ext4/namei.c frame->entries = entries = entries2; entries 2384 fs/ext4/namei.c dxtrace(dx_show_index("node", frame->entries)); entries 2386 fs/ext4/namei.c ((struct dx_node *) bh2->b_data)->entries)); entries 2402 fs/ext4/namei.c memcpy((char *) entries2, (char *) entries, entries 2407 fs/ext4/namei.c dx_set_count(entries, 1); entries 2408 fs/ext4/namei.c dx_set_block(entries + 0, newblock); entries 362 fs/f2fs/f2fs.h #define nat_in_journal(jnl, i) ((jnl)->nat_j.entries[i].ne) entries 363 fs/f2fs/f2fs.h #define nid_in_journal(jnl, i) ((jnl)->nat_j.entries[i].nid) entries 364 fs/f2fs/f2fs.h #define sit_in_journal(jnl, i) ((jnl)->sit_j.entries[i].se) entries 365 fs/f2fs/f2fs.h #define segno_in_journal(jnl, i) ((jnl)->sit_j.entries[i].segno) entries 1221 fs/f2fs/gc.c submitted += gc_node_segment(sbi, sum->entries, segno, entries 1224 fs/f2fs/gc.c submitted += gc_data_segment(sbi, sum->entries, gc_list, entries 568 fs/f2fs/node.c ne = nat_blk->entries[nid - start_nid]; entries 2238 fs/f2fs/node.c blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); entries 2672 fs/f2fs/node.c sum_entry = &sum->entries[0]; entries 2774 fs/f2fs/node.c if 
(le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR) entries 2836 fs/f2fs/node.c raw_ne = &nat_blk->entries[nid - start_nid]; entries 421 fs/f2fs/recovery.c sum = curseg->sum_blk->entries[blkoff]; entries 430 fs/f2fs/recovery.c sum = sum_node->entries[blkoff]; entries 1844 fs/f2fs/segment.c int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); entries 1868 fs/f2fs/segment.c for (i = 0; i < entries; i++) entries 2349 fs/f2fs/segment.c memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE); entries 2531 fs/f2fs/segment.c int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); entries 2537 fs/f2fs/segment.c for (i = 0; i < entries; i++) entries 3472 fs/f2fs/segment.c seg_i->sum_blk->entries[j] = *s; entries 3531 fs/f2fs/segment.c struct f2fs_summary *ns = &sum->entries[0]; entries 3553 fs/f2fs/segment.c memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE); entries 3647 fs/f2fs/segment.c *summary = seg_i->sum_blk->entries[j]; entries 3910 fs/f2fs/segment.c &raw_sit->entries[sit_offset]); entries 3912 fs/f2fs/segment.c &raw_sit->entries[sit_offset]); entries 4142 fs/f2fs/segment.c sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; entries 389 fs/f2fs/segment.h rs = &raw_sit->entries[i]; entries 1269 fs/gfs2/dir.c struct gfs2_dirent **darr, u32 entries, entries 1277 fs/gfs2/dir.c if (sort_start < entries) entries 1278 fs/gfs2/dir.c sort(&darr[sort_start], entries - sort_start, entries 1284 fs/gfs2/dir.c for (x = 0, y = 1; x < entries; x++, y++) { entries 1288 fs/gfs2/dir.c if (y < entries) { entries 1340 fs/gfs2/dir.c unsigned entries) entries 1345 fs/gfs2/dir.c for (i = 0; i < entries; i++) { entries 1379 fs/gfs2/dir.c unsigned entries = 0, entries2 = 0; entries 1394 fs/gfs2/dir.c entries += be16_to_cpu(lf->lf_entries); entries 1405 fs/gfs2/dir.c if (!entries) entries 1415 fs/gfs2/dir.c larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *)); entries 1461 fs/gfs2/dir.c BUG_ON(entries2 != entries); entries 1462 fs/gfs2/dir.c error = do_filldir_main(ip, ctx, darr, entries, need_sort ? entries 1463 fs/gfs2/dir.c sort_offset : entries, copied); entries 1902 fs/gfs2/dir.c u16 entries = be16_to_cpu(leaf->lf_entries); entries 1903 fs/gfs2/dir.c if (!entries) entries 1905 fs/gfs2/dir.c leaf->lf_entries = cpu_to_be16(--entries); entries 3886 fs/io_uring.c static int io_uring_create(unsigned entries, struct io_uring_params *p) entries 3893 fs/io_uring.c if (!entries || entries > IORING_MAX_ENTRIES) entries 3902 fs/io_uring.c p->sq_entries = roundup_pow_of_two(entries); entries 3980 fs/io_uring.c static long io_uring_setup(u32 entries, struct io_uring_params __user *params) entries 3997 fs/io_uring.c ret = io_uring_create(entries, &p); entries 4007 fs/io_uring.c SYSCALL_DEFINE2(io_uring_setup, u32, entries, entries 4010 fs/io_uring.c return io_uring_setup(entries, params); entries 425 fs/nfs/mount_clnt.c u32 entries, i; entries 434 fs/nfs/mount_clnt.c entries = be32_to_cpup(p); entries 435 fs/nfs/mount_clnt.c dprintk("NFS: received %u auth flavors\n", entries); entries 436 fs/nfs/mount_clnt.c if (entries > NFS_MAX_SECFLAVORS) entries 437 fs/nfs/mount_clnt.c entries = NFS_MAX_SECFLAVORS; entries 439 fs/nfs/mount_clnt.c p = xdr_inline_decode(xdr, 4 * entries); entries 443 fs/nfs/mount_clnt.c if (entries > *count) entries 444 fs/nfs/mount_clnt.c entries = *count; entries 446 fs/nfs/mount_clnt.c for (i = 0; i < entries; i++) { entries 95 fs/nfs_common/nfsacl.c int entries = (acl && acl->a_count) ? 
max_t(int, acl->a_count, 4) : 0; entries 99 fs/nfs_common/nfsacl.c .array_len = encode_entries ? entries : 0, entries 110 fs/nfs_common/nfsacl.c if (entries > NFS_ACL_MAX_ENTRIES || entries 111 fs/nfs_common/nfsacl.c xdr_encode_word(buf, base, entries)) entries 274 fs/nfs_common/nfsacl.c u32 entries; entries 277 fs/nfs_common/nfsacl.c if (xdr_decode_word(buf, base, &entries) || entries 278 fs/nfs_common/nfsacl.c entries > NFS_ACL_MAX_ENTRIES) entries 280 fs/nfs_common/nfsacl.c nfsacl_desc.desc.array_maxlen = entries; entries 285 fs/nfs_common/nfsacl.c if (entries != nfsacl_desc.desc.array_len || entries 293 fs/nfs_common/nfsacl.c *aclcnt = entries; entries 42 fs/nfsd/acl.h int nfs4_acl_bytes(int entries); entries 827 fs/nfsd/nfs4acl.c int nfs4_acl_bytes(int entries) entries 829 fs/nfsd/nfs4acl.c return sizeof(struct nfs4_acl) + entries * sizeof(struct nfs4_ace); entries 810 fs/nfsd/nfs4state.c int entries, old_entries; entries 821 fs/nfsd/nfs4state.c if (bd->entries == 0) entries 826 fs/nfsd/nfs4state.c bd->entries -= bd->old_entries; entries 827 fs/nfsd/nfs4state.c bd->old_entries = bd->entries; entries 860 fs/nfsd/nfs4state.c if (bd->entries == 0) entries 862 fs/nfsd/nfs4state.c bd->entries += 1; entries 344 fs/nfsd/nfscache.c unsigned int entries = 0; entries 348 fs/nfsd/nfscache.c ++entries; entries 366 fs/nfsd/nfscache.c if (entries > nn->longest_chain) { entries 367 fs/nfsd/nfscache.c nn->longest_chain = entries; entries 369 fs/nfsd/nfscache.c } else if (entries == nn->longest_chain) { entries 4392 fs/ocfs2/xattr.c char *entries, *buf, *bucket_buf = NULL; entries 4422 fs/ocfs2/xattr.c entries = (char *)xh->xh_entries; entries 4434 fs/ocfs2/xattr.c sort(entries, le16_to_cpu(xh->xh_count), entries 4476 fs/ocfs2/xattr.c sort(entries, le16_to_cpu(xh->xh_count), entries 4585 fs/ocfs2/xattr.c struct ocfs2_xattr_entry *entries = xh->xh_entries; entries 4597 fs/ocfs2/xattr.c if (cmp_xe(&entries[middle - delta - 1], entries 4598 fs/ocfs2/xattr.c &entries[middle - delta])) entries 4606 fs/ocfs2/xattr.c if (cmp_xe(&entries[middle + delta], entries 4607 fs/ocfs2/xattr.c &entries[middle + delta + 1])) entries 34 fs/overlayfs/readdir.c struct list_head entries; entries 233 fs/overlayfs/readdir.c ovl_cache_free(&cache->entries); entries 248 fs/overlayfs/readdir.c ovl_cache_free(&cache->entries); entries 396 fs/overlayfs/readdir.c list_for_each(p, &od->cache->entries) { entries 423 fs/overlayfs/readdir.c INIT_LIST_HEAD(&cache->entries); entries 426 fs/overlayfs/readdir.c res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root); entries 428 fs/overlayfs/readdir.c ovl_cache_free(&cache->entries); entries 614 fs/overlayfs/readdir.c res = ovl_dir_read_impure(path, &cache->entries, &cache->root); entries 616 fs/overlayfs/readdir.c ovl_cache_free(&cache->entries); entries 620 fs/overlayfs/readdir.c if (list_empty(&cache->entries)) { entries 766 fs/overlayfs/readdir.c while (od->cursor != &od->cache->entries) { entries 155 fs/proc/base.c static unsigned int __init pid_entry_nlink(const struct pid_entry *entries, entries 163 fs/proc/base.c if (S_ISDIR(entries[i].mode)) entries 428 fs/proc/base.c unsigned long *entries; entries 445 fs/proc/base.c entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries), entries 447 fs/proc/base.c if (!entries) entries 454 fs/proc/base.c nr_entries = stack_trace_save_tsk(task, entries, entries 458 fs/proc/base.c seq_printf(m, "[<0>] %pB\n", (void *)entries[i]); entries 463 fs/proc/base.c kfree(entries); entries 529 fs/reiserfs/item_ops.c int entries = 0; 
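[Editor's note: nfs4_acl_bytes() just above sizes a header-plus-array object by hand, as sizeof(struct nfs4_acl) + entries * sizeof(struct nfs4_ace); other hits in this listing (fs/select.c, net/core/drop_monitor.c, mm/memcontrol.c) do the same computation with struct_size(), which saturates instead of wrapping on overflow. Below is a sketch of that allocation pattern with hypothetical demo_* names; struct_size() itself is real kernel API from <linux/overflow.h>.]

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_row {
            u32 key;
            u32 val;
    };

    struct demo_table {
            unsigned int nr;
            struct demo_row rows[];         /* flexible array member; older hits
                                             * in this listing spell this rows[0] */
    };

    static struct demo_table *demo_table_alloc(unsigned int entries)
    {
            struct demo_table *t;

            /*
             * struct_size(t, rows, entries) is sizeof(*t) plus
             * entries * sizeof(t->rows[0]), except that it saturates to
             * SIZE_MAX on overflow, so an absurd "entries" count makes
             * kzalloc() fail instead of returning a short buffer.
             */
            t = kzalloc(struct_size(t, rows, entries), GFP_KERNEL);
            if (t)
                    t->nr = entries;
            return t;
    }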
entries 538 fs/reiserfs/item_ops.c entries++; entries 541 fs/reiserfs/item_ops.c if (entries == dir_u->entry_count) { entries 549 fs/reiserfs/item_ops.c && entries < 2) entries 550 fs/reiserfs/item_ops.c entries = 0; entries 552 fs/reiserfs/item_ops.c return entries ? : -1; entries 558 fs/reiserfs/item_ops.c int entries = 0; entries 567 fs/reiserfs/item_ops.c entries++; entries 569 fs/reiserfs/item_ops.c BUG_ON(entries == dir_u->entry_count); entries 573 fs/reiserfs/item_ops.c && entries > dir_u->entry_count - 2) entries 574 fs/reiserfs/item_ops.c entries = dir_u->entry_count - 2; entries 576 fs/reiserfs/item_ops.c return entries ? : -1; entries 100 fs/select.c struct poll_table_entry entries[0]; entries 152 fs/select.c } while (entry > p->entries); entries 175 fs/select.c new_table->entry = new_table->entries; entries 829 fs/select.c struct pollfd entries[0]; entries 897 fs/select.c pfd = walk->entries; entries 983 fs/select.c if (copy_from_user(walk->entries, ufds + nfds-todo, entries 992 fs/select.c walk = walk->next = kmalloc(struct_size(walk, entries, len), entries 1005 fs/select.c struct pollfd *fds = walk->entries; entries 61 fs/squashfs/cache.c for (i = cache->curr_blk, n = 0; n < cache->entries; n++) { entries 66 fs/squashfs/cache.c i = (i + 1) % cache->entries; entries 69 fs/squashfs/cache.c if (n == cache->entries) { entries 89 fs/squashfs/cache.c for (n = 0; n < cache->entries; n++) { entries 92 fs/squashfs/cache.c i = (i + 1) % cache->entries; entries 95 fs/squashfs/cache.c cache->next_blk = (i + 1) % cache->entries; entries 204 fs/squashfs/cache.c for (i = 0; i < cache->entries; i++) { entries 223 fs/squashfs/cache.c struct squashfs_cache *squashfs_cache_init(char *name, int entries, entries 234 fs/squashfs/cache.c cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); entries 242 fs/squashfs/cache.c cache->unused = entries; entries 243 fs/squashfs/cache.c cache->entries = entries; entries 252 fs/squashfs/cache.c for (i = 0; i < entries; i++) { entries 138 fs/squashfs/file.c meta->entries = 0; entries 253 fs/squashfs/file.c offset = index < meta->offset + meta->entries ? 
index : entries 254 fs/squashfs/file.c meta->offset + meta->entries - 1; entries 262 fs/squashfs/file.c meta->entries); entries 273 fs/squashfs/file.c for (i = meta->offset + meta->entries; i <= index && entries 280 fs/squashfs/file.c if (meta->entries == 0) entries 296 fs/squashfs/file.c meta->entries++; entries 301 fs/squashfs/file.c meta->offset, meta->entries); entries 221 fs/squashfs/squashfs_fs.h unsigned short entries; entries 17 fs/squashfs/squashfs_fs_sb.h int entries; entries 242 fs/xfs/libxfs/xfs_attr_leaf.c struct xfs_attr_leaf_entry *entries; entries 271 fs/xfs/libxfs/xfs_attr_leaf.c entries = xfs_attr3_leaf_entryp(bp->b_addr); entries 272 fs/xfs/libxfs/xfs_attr_leaf.c if ((char *)&entries[ichdr.count] > entries 1082 fs/xfs/libxfs/xfs_attr_leaf.c struct xfs_attr_leaf_entry *entries; entries 1128 fs/xfs/libxfs/xfs_attr_leaf.c entries = xfs_attr3_leaf_entryp(leaf); entries 1131 fs/xfs/libxfs/xfs_attr_leaf.c btree[0].hashval = entries[icleafhdr.count - 1].hashval; entries 2272 fs/xfs/libxfs/xfs_attr_leaf.c struct xfs_attr_leaf_entry *entries; entries 2283 fs/xfs/libxfs/xfs_attr_leaf.c entries = xfs_attr3_leaf_entryp(leaf); entries 2292 fs/xfs/libxfs/xfs_attr_leaf.c for (entry = &entries[probe]; span > 4; entry = &entries[probe]) { entries 2576 fs/xfs/libxfs/xfs_attr_leaf.c struct xfs_attr_leaf_entry *entries; entries 2580 fs/xfs/libxfs/xfs_attr_leaf.c entries = xfs_attr3_leaf_entryp(bp->b_addr); entries 2585 fs/xfs/libxfs/xfs_attr_leaf.c return be32_to_cpu(entries[ichdr.count - 1].hashval); entries 2595 fs/xfs/libxfs/xfs_attr_leaf.c struct xfs_attr_leaf_entry *entries; entries 2600 fs/xfs/libxfs/xfs_attr_leaf.c entries = xfs_attr3_leaf_entryp(leaf); entries 2601 fs/xfs/libxfs/xfs_attr_leaf.c if (entries[index].flags & XFS_ATTR_LOCAL) { entries 668 fs/xfs/libxfs/xfs_da_format.h xfs_attr_leaf_entry_t entries[1]; /* sorted on key, not name */ entries 700 fs/xfs/libxfs/xfs_da_format.h struct xfs_attr_leaf_entry entries[1]; entries 786 fs/xfs/libxfs/xfs_da_format.h return &((struct xfs_attr3_leafblock *)leafp)->entries[0]; entries 787 fs/xfs/libxfs/xfs_da_format.h return &leafp->entries[0]; entries 796 fs/xfs/libxfs/xfs_da_format.h struct xfs_attr_leaf_entry *entries = xfs_attr3_leaf_entryp(leafp); entries 798 fs/xfs/libxfs/xfs_da_format.h return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)]; entries 1613 fs/xfs/libxfs/xfs_dir2_leaf.c int entries; entries 1616 fs/xfs/libxfs/xfs_dir2_leaf.c entries = hdr->count - hdr->stale; entries 1623 fs/xfs/libxfs/xfs_dir2_leaf.c return hdrsize + entries * sizeof(xfs_dir2_leaf_entry_t) entries 603 fs/xfs/scrub/agheader.c xfs_agblock_t *entries; entries 635 fs/xfs/scrub/agheader.c sai->entries[sai->nr_entries++] = agbno; entries 723 fs/xfs/scrub/agheader.c sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, entries 725 fs/xfs/scrub/agheader.c if (!sai.entries) { entries 746 fs/xfs/scrub/agheader.c sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]), entries 749 fs/xfs/scrub/agheader.c if (sai.entries[i] == sai.entries[i - 1]) { entries 756 fs/xfs/scrub/agheader.c kmem_free(sai.entries); entries 313 fs/xfs/scrub/attr.c struct xfs_attr_leaf_entry *entries; entries 365 fs/xfs/scrub/attr.c entries = xfs_attr3_leaf_entryp(leaf); entries 366 fs/xfs/scrub/attr.c if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused) entries 370 fs/xfs/scrub/attr.c for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) { entries 306 fs/xfs/xfs_attr_list.c struct xfs_attr_leaf_entry *entries; entries 321 fs/xfs/xfs_attr_list.c entries 
= xfs_attr3_leaf_entryp(leaf); entries 323 fs/xfs/xfs_attr_list.c entries[leafhdr.count - 1].hashval)) { entries 328 fs/xfs/xfs_attr_list.c entries[0].hashval)) { entries 386 fs/xfs/xfs_attr_list.c struct xfs_attr_leaf_entry *entries; entries 395 fs/xfs/xfs_attr_list.c entries = xfs_attr3_leaf_entryp(leaf); entries 404 fs/xfs/xfs_attr_list.c entry = &entries[0]; entries 423 fs/xfs/xfs_attr_list.c entry = &entries[0]; entries 695 include/acpi/actbl1.h u32 entries; entries 801 include/acpi/actbl1.h u32 entries; entries 544 include/acpi/actbl3.h u32 entries; /* Number of watchdog entries that follow */ entries 57 include/drm/drm_debugfs_crc.h struct drm_crtc_crc_entry *entries; entries 58 include/linux/can/can-ml.h int entries; entries 50 include/linux/dqblk_qtree.h unsigned long long entries = epb; entries 53 include/linux/dqblk_qtree.h for (i = 1; entries < (1ULL << 32); i++) entries 54 include/linux/dqblk_qtree.h entries *= epb; entries 330 include/linux/f2fs_fs.h struct f2fs_nat_entry entries[NAT_ENTRY_PER_BLOCK]; entries 369 include/linux/f2fs_fs.h struct f2fs_sit_entry entries[SIT_ENTRY_PER_BLOCK]; entries 444 include/linux/f2fs_fs.h struct nat_journal_entry entries[NAT_JOURNAL_ENTRIES]; entries 454 include/linux/f2fs_fs.h struct sit_journal_entry entries[SIT_JOURNAL_ENTRIES]; entries 478 include/linux/f2fs_fs.h struct f2fs_summary entries[ENTRIES_IN_SUM]; entries 62 include/linux/iova.h struct iova_fq_entry entries[IOVA_FQ_SIZE]; entries 104 include/linux/jump_label.h struct jump_entry *entries; entries 242 include/linux/jump_label.h { .entries = (void *)JUMP_TYPE_TRUE } } entries 245 include/linux/jump_label.h { .entries = (void *)JUMP_TYPE_FALSE } } entries 983 include/linux/kvm_host.h struct kvm_kernel_irq_routing_entry *entries, int gsi); entries 1140 include/linux/kvm_host.h const struct kvm_irq_routing_entry *entries, entries 156 include/linux/memcontrol.h struct mem_cgroup_threshold entries[0]; entries 180 include/linux/mlx4/cq.h int entries, struct mlx4_mtt *mtt); entries 267 include/linux/netfilter/x_tables.h unsigned char entries[0] __aligned(8); entries 87 include/linux/netfilter_bridge/ebtables.h char *entries; entries 776 include/linux/nvme.h __le64 entries[32]; entries 1133 include/linux/nvme.h struct nvmf_disc_rsp_page_entry entries[0]; entries 84 include/linux/nvmem-consumer.h void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, entries 86 include/linux/nvmem-consumer.h void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, entries 193 include/linux/nvmem-consumer.h nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {} entries 195 include/linux/nvmem-consumer.h nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {} entries 1339 include/linux/of.h struct list_head entries; entries 349 include/linux/pagemap.h unsigned int nr_entries, struct page **entries, entries 1440 include/linux/pci.h int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, entries 1443 include/linux/pci.h struct msix_entry *entries, int nvec) entries 1445 include/linux/pci.h int rc = pci_enable_msix_range(dev, entries, nvec, nvec); entries 1469 include/linux/pci.h struct msix_entry *entries, int minvec, int maxvec) entries 1472 include/linux/pci.h struct msix_entry *entries, int nvec) entries 103 include/linux/perf_event.h struct perf_branch_entry entries[0]; entries 411 include/linux/rio.h int mbox, int entries); entries 414 include/linux/rio.h int mbox, int entries); entries 16 include/linux/stackdepot.h 
depot_stack_handle_t stack_depot_save(unsigned long *entries, entries 20 include/linux/stackdepot.h unsigned long **entries); entries 14 include/linux/stacktrace.h int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, entries 66 include/linux/stacktrace.h unsigned long *entries; entries 106 include/linux/sunrpc/cache.h int entries; entries 458 include/linux/swap.h extern void swapcache_free_entries(swp_entry_t *entries, int n); entries 317 include/linux/syscalls.h asmlinkage long sys_io_uring_setup(u32 entries, entries 1866 include/linux/usb.h int entries; entries 221 include/net/flow_offload.h struct flow_action_entry entries[0]; entries 241 include/net/flow_offload.h for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i]) entries 220 include/net/neighbour.h atomic_t entries; entries 184 include/net/pkt_sched.h struct tc_taprio_sched_entry entries[0]; entries 226 include/rdma/rdma_vt.h unsigned long *entries; entries 34 include/trace/events/neigh.h __field(int, entries) entries 47 include/trace/events/neigh.h __entry->entries = atomic_read(&tbl->gc_entries); entries 67 include/trace/events/neigh.h __entry->family, __get_str(dev), __entry->entries, entries 463 include/trace/events/xen.h TP_PROTO(const void *addr, unsigned entries), entries 464 include/trace/events/xen.h TP_ARGS(addr, entries), entries 467 include/trace/events/xen.h __field(unsigned, entries) entries 470 include/trace/events/xen.h __entry->entries = entries), entries 472 include/trace/events/xen.h __entry->addr, __entry->entries) entries 1045 include/uapi/drm/amdgpu_drm.h struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES]; entries 68 include/uapi/linux/kd.h struct unipair __user *entries; entries 1057 include/uapi/linux/kvm.h struct kvm_irq_routing_entry entries[0]; entries 31 include/uapi/linux/net_dropmon.h __u32 entries; entries 36 include/uapi/linux/net_dropmon.h __u32 entries; entries 148 include/uapi/linux/netfilter/x_tables.h #define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \ entries 156 include/uapi/linux/netfilter/x_tables.h __entry = (void *)(entries) + __i; \ entries 168 include/uapi/linux/netfilter/x_tables.h #define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \ entries 169 include/uapi/linux/netfilter/x_tables.h XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args) entries 32 include/uapi/linux/netfilter_arp/arp_tables.h #define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \ entries 33 include/uapi/linux/netfilter_arp/arp_tables.h XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args) entries 184 include/uapi/linux/netfilter_arp/arp_tables.h struct arpt_entry entries[0]; entries 57 include/uapi/linux/netfilter_bridge/ebtables.h char __user *entries; entries 73 include/uapi/linux/netfilter_bridge/ebtables.h char *entries; entries 262 include/uapi/linux/netfilter_bridge/ebtables.h #define EBT_ENTRY_ITERATE(entries, size, fn, args...) \ entries 269 include/uapi/linux/netfilter_bridge/ebtables.h __entry = (void *)(entries) + __i; \ entries 66 include/uapi/linux/netfilter_ipv4/ip_tables.h #define IPT_ENTRY_ITERATE(entries, size, fn, args...) 
\ entries 67 include/uapi/linux/netfilter_ipv4/ip_tables.h XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args) entries 206 include/uapi/linux/netfilter_ipv4/ip_tables.h struct ipt_entry entries[0]; entries 59 include/uapi/linux/netfilter_ipv6/ip6_tables.h #define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ entries 60 include/uapi/linux/netfilter_ipv6/ip6_tables.h XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args) entries 246 include/uapi/linux/netfilter_ipv6/ip6_tables.h struct ip6t_entry entries[0]; entries 430 include/uapi/linux/raid/md_p.h struct ppl_header_entry entries[PPL_HDR_MAX_ENTRIES]; entries 1948 include/uapi/linux/videodev2.h __u32 entries; entries 197 include/uapi/linux/virtio_net.h __virtio32 entries; entries 55 include/uapi/linux/xdp_diag.h __u32 entries; /*num descs */ entries 47 kernel/backtracetest.c unsigned long entries[8]; entries 53 kernel/backtracetest.c nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); entries 54 kernel/backtracetest.c stack_trace_print(entries, nr_entries, 0); entries 91 kernel/bpf/devmap.c static struct hlist_head *dev_map_create_hash(unsigned int entries) entries 96 kernel/bpf/devmap.c hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL); entries 98 kernel/bpf/devmap.c for (i = 0; i < entries; i++) entries 50 kernel/events/callchain.c struct callchain_cpus_entries *entries; entries 53 kernel/events/callchain.c entries = container_of(head, struct callchain_cpus_entries, rcu_head); entries 56 kernel/events/callchain.c kfree(entries->cpu_entries[cpu]); entries 58 kernel/events/callchain.c kfree(entries); entries 63 kernel/events/callchain.c struct callchain_cpus_entries *entries; entries 65 kernel/events/callchain.c entries = callchain_cpus_entries; entries 67 kernel/events/callchain.c call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); entries 74 kernel/events/callchain.c struct callchain_cpus_entries *entries; entries 83 kernel/events/callchain.c entries = kzalloc(size, GFP_KERNEL); entries 84 kernel/events/callchain.c if (!entries) entries 90 kernel/events/callchain.c entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, entries 92 kernel/events/callchain.c if (!entries->cpu_entries[cpu]) entries 96 kernel/events/callchain.c rcu_assign_pointer(callchain_cpus_entries, entries); entries 102 kernel/events/callchain.c kfree(entries->cpu_entries[cpu]); entries 103 kernel/events/callchain.c kfree(entries); entries 155 kernel/events/callchain.c struct callchain_cpus_entries *entries; entries 161 kernel/events/callchain.c entries = rcu_dereference(callchain_cpus_entries); entries 162 kernel/events/callchain.c if (!entries) entries 167 kernel/events/callchain.c return (((void *)entries->cpu_entries[cpu]) + entries 6447 kernel/events/core.c perf_output_copy(handle, data->br_stack->entries, size); entries 381 kernel/jump_label.c struct jump_entry *entries) entries 385 kernel/jump_label.c WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK); entries 387 kernel/jump_label.c key->entries = entries; entries 512 kernel/jump_label.c struct jump_entry *entries; entries 569 kernel/jump_label.c if (!mod->entries) entries 577 kernel/jump_label.c __jump_label_update(key, mod->entries, stop, entries 649 kernel/jump_label.c jlm2->entries = static_key_entries(key); entries 655 kernel/jump_label.c jlm->entries = iter; entries 711 kernel/jump_label.c static_key_set_entries(key, jlm->entries); entries 193 kernel/livepatch/transition.c static int klp_check_stack_func(struct klp_func *func, unsigned long *entries, 
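[Editor's note: the stacktrace/stackdepot declarations above (include/linux/stacktrace.h, include/linux/stackdepot.h) and the call sites in kernel/backtracetest.c here and mm/page_owner.c later in this listing fit together as capture, deduplicate, replay. Below is a sketch of that flow; the four API calls are taken from the listed declarations, while the demo_* wrappers are hypothetical.]

    #include <linux/gfp.h>
    #include <linux/kernel.h>
    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>

    static depot_stack_handle_t demo_record_stack(gfp_t flags)
    {
            unsigned long entries[16];
            unsigned int nr_entries;

            /* fill entries[] with the current task's return addresses,
             * skipping 0 frames */
            nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

            /* hash the trace and store it once; identical traces share
             * one handle */
            return stack_depot_save(entries, nr_entries, flags);
    }

    static void demo_print_stack(depot_stack_handle_t handle)
    {
            unsigned long *entries;
            unsigned int nr_entries;

            /* turn the handle back into a pointer to the depot's copy */
            nr_entries = stack_depot_fetch(handle, &entries);
            stack_trace_print(entries, nr_entries, 0); /* one %pS line per frame */
    }

[This is why the mm/page_owner.c hits below store only a small depot_stack_handle_t per page rather than a full array of return addresses: the depot deduplicates the traces behind the handle.]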
entries 201 kernel/livepatch/transition.c address = entries[i]; entries 244 kernel/livepatch/transition.c static unsigned long entries[MAX_STACK_ENTRIES]; entries 249 kernel/livepatch/transition.c ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries)); entries 262 kernel/livepatch/transition.c ret = klp_check_stack_func(func, entries, nr_entries); entries 463 kernel/locking/lockdep.c unsigned long entries[0] __aligned(sizeof(unsigned long)); entries 476 kernel/locking/lockdep.c memcmp(t1->entries, t2->entries, entries 477 kernel/locking/lockdep.c t1->nr_entries * sizeof(t1->entries[0])) == 0; entries 503 kernel/locking/lockdep.c trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); entries 505 kernel/locking/lockdep.c hash = jhash(trace->entries, trace->nr_entries * entries 506 kernel/locking/lockdep.c sizeof(trace->entries[0]), 0); entries 1543 kernel/locking/lockdep.c stack_trace_print(trace->entries, trace->nr_entries, spaces); entries 81 kernel/power/swap.c sector_t entries[MAP_PAGE_ENTRIES]; entries 452 kernel/power/swap.c handle->cur->entries[handle->k++] = offset; entries 1016 kernel/power/swap.c offset = handle->cur->entries[handle->k]; entries 1300 kernel/power/swap.c handle->cur->entries[handle->k]) { entries 23 kernel/stacktrace.c void stack_trace_print(const unsigned long *entries, unsigned int nr_entries, entries 28 kernel/stacktrace.c if (WARN_ON(!entries)) entries 32 kernel/stacktrace.c printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]); entries 46 kernel/stacktrace.c int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, entries 51 kernel/stacktrace.c if (WARN_ON(!entries)) entries 56 kernel/stacktrace.c (void *)entries[i]); entries 276 kernel/stacktrace.c .entries = store, entries 300 kernel/stacktrace.c .entries = store, entries 323 kernel/stacktrace.c .entries = store, entries 349 kernel/stacktrace.c .entries = store, entries 369 kernel/stacktrace.c .entries = store, entries 323 kernel/trace/ring_buffer.c local_t entries; /* entries on this page */ entries 462 kernel/trace/ring_buffer.c local_t entries; entries 1079 kernel/trace/ring_buffer.c old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); entries 1109 kernel/trace/ring_buffer.c (void)local_cmpxchg(&next_page->entries, old_entries, eval); entries 1486 kernel/trace/ring_buffer.c return local_read(&bpage->entries) & RB_WRITE_MASK; entries 1980 kernel/trace/ring_buffer.c int entries; entries 1984 kernel/trace/ring_buffer.c entries = rb_page_entries(next_page); entries 2012 kernel/trace/ring_buffer.c local_add(entries, &cpu_buffer->overrun); entries 2606 kernel/trace/ring_buffer.c local_inc(&cpu_buffer->entries); entries 2855 kernel/trace/ring_buffer.c local_inc(&tail_page->entries); entries 3024 kernel/trace/ring_buffer.c local_dec(&bpage->entries); entries 3036 kernel/trace/ring_buffer.c local_dec(&bpage->entries); entries 3339 kernel/trace/ring_buffer.c return local_read(&cpu_buffer->entries) - entries 3508 kernel/trace/ring_buffer.c unsigned long entries = 0; entries 3514 kernel/trace/ring_buffer.c entries += rb_num_of_entries(cpu_buffer); entries 3517 kernel/trace/ring_buffer.c return entries; entries 3719 kernel/trace/ring_buffer.c local_set(&cpu_buffer->reader_page->entries, 0); entries 4358 kernel/trace/ring_buffer.c local_set(&cpu_buffer->head_page->entries, 0); entries 4369 kernel/trace/ring_buffer.c local_set(&cpu_buffer->reader_page->entries, 0); entries 4377 kernel/trace/ring_buffer.c local_set(&cpu_buffer->entries, 0); entries 
4817 kernel/trace/ring_buffer.c local_set(&reader->entries, 0); entries 234 kernel/trace/ring_buffer_benchmark.c unsigned long long entries; entries 299 kernel/trace/ring_buffer_benchmark.c entries = ring_buffer_entries(buffer); entries 332 kernel/trace/ring_buffer_benchmark.c trace_printk("Entries: %lld\n", entries); entries 333 kernel/trace/ring_buffer_benchmark.c trace_printk("Total: %lld\n", entries + overruns + read); entries 3412 kernel/trace/trace.c unsigned long entries = 0; entries 3431 kernel/trace/trace.c entries++; entries 3435 kernel/trace/trace.c per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; entries 3521 kernel/trace/trace.c unsigned long *entries, int cpu) entries 3538 kernel/trace/trace.c *entries = count; entries 3543 kernel/trace/trace.c unsigned long *total, unsigned long *entries) entries 3549 kernel/trace/trace.c *entries = 0; entries 3554 kernel/trace/trace.c *entries += e; entries 3560 kernel/trace/trace.c unsigned long total, entries; entries 3565 kernel/trace/trace.c get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu); entries 3567 kernel/trace/trace.c return entries; entries 3572 kernel/trace/trace.c unsigned long total, entries; entries 3577 kernel/trace/trace.c get_total_entries(&tr->trace_buffer, &total, &entries); entries 3579 kernel/trace/trace.c return entries; entries 3597 kernel/trace/trace.c unsigned long entries; entries 3599 kernel/trace/trace.c get_total_entries(buf, &total, &entries); entries 3601 kernel/trace/trace.c entries, total, num_online_cpus()); entries 3641 kernel/trace/trace.c unsigned long entries; entries 3647 kernel/trace/trace.c get_total_entries(buf, &total, &entries); entries 3656 kernel/trace/trace.c entries, entries 5459 kernel/trace/trace.c per_cpu_ptr(buf->data, cpu)->entries = val; entries 5472 kernel/trace/trace.c per_cpu_ptr(size_buf->data, cpu)->entries, cpu); entries 5475 kernel/trace/trace.c per_cpu_ptr(trace_buf->data, cpu)->entries = entries 5476 kernel/trace/trace.c per_cpu_ptr(size_buf->data, cpu)->entries; entries 5480 kernel/trace/trace.c per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); entries 5482 kernel/trace/trace.c per_cpu_ptr(trace_buf->data, cpu_id)->entries = entries 5483 kernel/trace/trace.c per_cpu_ptr(size_buf->data, cpu_id)->entries; entries 5543 kernel/trace/trace.c per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size; entries 5551 kernel/trace/trace.c per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size; entries 6272 kernel/trace/trace.c size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries; entries 6273 kernel/trace/trace.c if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) { entries 6289 kernel/trace/trace.c r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); entries 6336 kernel/trace/trace.c size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10; entries 7422 kernel/trace/trace.c int entries, i; entries 7444 kernel/trace/trace.c entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); entries 7446 kernel/trace/trace.c for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { entries 7485 kernel/trace/trace.c entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); entries 153 kernel/trace/trace.h unsigned long entries; entries 5357 kernel/trace/trace_events_hist.c unsigned long entries[HIST_STACKTRACE_DEPTH]; entries 5372 kernel/trace/trace_events_hist.c memset(entries, 0, HIST_STACKTRACE_SIZE); entries 5373 kernel/trace/trace_events_hist.c 
stack_trace_save(entries, HIST_STACKTRACE_DEPTH, entries 5375 kernel/trace/trace_events_hist.c key = entries; entries 923 kernel/trace/tracing_map.c void tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries, entries 929 kernel/trace/tracing_map.c destroy_sort_entry(entries[i]); entries 931 kernel/trace/tracing_map.c vfree(entries); entries 987 kernel/trace/tracing_map.c const struct tracing_map_sort_entry **entries, entries 1009 kernel/trace/tracing_map.c const struct tracing_map_sort_entry **a = &entries[i]; entries 1010 kernel/trace/tracing_map.c const struct tracing_map_sort_entry **b = &entries[i + 1]; entries 1025 kernel/trace/tracing_map.c sort(&entries[start], n_sub, entries 1066 kernel/trace/tracing_map.c struct tracing_map_sort_entry *sort_entry, **entries; entries 1069 kernel/trace/tracing_map.c entries = vmalloc(array_size(sizeof(sort_entry), map->max_elts)); entries 1070 kernel/trace/tracing_map.c if (!entries) entries 1081 kernel/trace/tracing_map.c entries[n_entries] = create_sort_entry(entry->val->key, entries 1083 kernel/trace/tracing_map.c if (!entries[n_entries++]) { entries 1095 kernel/trace/tracing_map.c *sort_entries = entries; entries 1099 kernel/trace/tracing_map.c detect_dups(entries, n_entries, map->key_size); entries 1108 kernel/trace/tracing_map.c sort(entries, n_entries, sizeof(struct tracing_map_sort_entry *), entries 1113 kernel/trace/tracing_map.c (const struct tracing_map_sort_entry **)entries, entries 1118 kernel/trace/tracing_map.c *sort_entries = entries; entries 1122 kernel/trace/tracing_map.c tracing_map_destroy_sort_entries(entries, n_entries); entries 286 kernel/trace/tracing_map.h tracing_map_destroy_sort_entries(struct tracing_map_sort_entry **entries, entries 1013 lib/dynamic_debug.c int n = 0, entries = 0, modct = 0; entries 1024 lib/dynamic_debug.c entries++; entries 1045 lib/dynamic_debug.c modct, entries, (int)(modct * sizeof(struct ddebug_table)), entries 70 lib/fault-inject.c unsigned long entries[MAX_STACK_TRACE_DEPTH]; entries 77 lib/fault-inject.c nr_entries = stack_trace_save(entries, depth, 1); entries 79 lib/fault-inject.c if (attr->reject_start <= entries[n] && entries 80 lib/fault-inject.c entries[n] < attr->reject_end) entries 82 lib/fault-inject.c if (attr->require_start <= entries[n] && entries 83 lib/fault-inject.c entries[n] < attr->require_end) entries 64 lib/stackdepot.c unsigned long entries[1]; /* Variable-sized array of entries. 
*/ entries 103 lib/stackdepot.c static struct stack_record *depot_alloc_stack(unsigned long *entries, int size, entries 106 lib/stackdepot.c int required_size = offsetof(struct stack_record, entries) + entries 138 lib/stackdepot.c memcpy(stack->entries, entries, size * sizeof(unsigned long)); entries 154 lib/stackdepot.c static inline u32 hash_stack(unsigned long *entries, unsigned int size) entries 156 lib/stackdepot.c return jhash2((u32 *)entries, entries 178 lib/stackdepot.c unsigned long *entries, int size, entries 186 lib/stackdepot.c !stackdepot_memcmp(entries, found->entries, size)) entries 202 lib/stackdepot.c unsigned long **entries) entries 209 lib/stackdepot.c *entries = stack->entries; entries 223 lib/stackdepot.c depot_stack_handle_t stack_depot_save(unsigned long *entries, entries 237 lib/stackdepot.c hash = hash_stack(entries, nr_entries); entries 245 lib/stackdepot.c found = find_stack(smp_load_acquire(bucket), entries, entries 274 lib/stackdepot.c found = find_stack(*bucket, entries, nr_entries, hash); entries 277 lib/stackdepot.c depot_alloc_stack(entries, nr_entries, entries 73 lib/test_rhashtable.c unsigned int entries; entries 138 lib/test_rhashtable.c unsigned int entries) entries 142 lib/test_rhashtable.c for (i = 0; i < entries; i++) { entries 175 lib/test_rhashtable.c static void test_bucket_stats(struct rhashtable *ht, unsigned int entries) entries 202 lib/test_rhashtable.c total, atomic_read(&ht->nelems), entries, chain_len); entries 204 lib/test_rhashtable.c if (total != atomic_read(&ht->nelems) || total != entries) entries 209 lib/test_rhashtable.c unsigned int entries) entries 220 lib/test_rhashtable.c pr_info(" Adding %d keys\n", entries); entries 222 lib/test_rhashtable.c for (i = 0; i < entries; i++) { entries 237 lib/test_rhashtable.c test_bucket_stats(ht, entries); entries 239 lib/test_rhashtable.c test_rht_lookup(ht, array, entries); entries 242 lib/test_rhashtable.c test_bucket_stats(ht, entries); entries 244 lib/test_rhashtable.c pr_info(" Deleting %d keys\n", entries); entries 245 lib/test_rhashtable.c for (i = 0; i < entries; i++) { entries 269 lib/test_rhashtable.c static int __init test_rhltable(unsigned int entries) entries 276 lib/test_rhashtable.c if (entries == 0) entries 277 lib/test_rhashtable.c entries = 1; entries 279 lib/test_rhashtable.c rhl_test_objects = vzalloc(array_size(entries, entries 286 lib/test_rhashtable.c BITS_TO_LONGS(entries))); entries 296 lib/test_rhashtable.c for (i = 0; i < entries; i++) { entries 309 lib/test_rhashtable.c pr_info("test %d add/delete pairs into rhlist\n", entries); entries 310 lib/test_rhashtable.c for (i = 0; i < entries; i++) { entries 320 lib/test_rhashtable.c if (WARN(!h, "key not found during iteration %d of %d", i, entries)) { entries 358 lib/test_rhashtable.c for (i = 0; i < entries; i++) { entries 369 lib/test_rhashtable.c pr_info("test %d random rhlist add/delete operations\n", entries); entries 370 lib/test_rhashtable.c for (j = 0; j < entries; j++) { entries 371 lib/test_rhashtable.c u32 i = prandom_u32_max(entries); entries 414 lib/test_rhashtable.c i = prandom_u32_max(entries); entries 428 lib/test_rhashtable.c for (i = 0; i < entries; i++) { entries 449 lib/test_rhashtable.c unsigned int entries) entries 454 lib/test_rhashtable.c test_rht_params.max_size = roundup_pow_of_two(entries / 8); entries 603 lib/test_rhashtable.c unsigned int entries = tdata->entries; entries 606 lib/test_rhashtable.c for (i = 0; i < entries; i++) { entries 643 lib/test_rhashtable.c for (i = 0; i < tdata->entries; 
i++) { entries 667 lib/test_rhashtable.c for (i = 0; i < tdata->entries; i += step) { entries 698 lib/test_rhashtable.c unsigned int entries; entries 707 lib/test_rhashtable.c entries = min(parm_entries, MAX_ENTRIES); entries 710 lib/test_rhashtable.c test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries); entries 734 lib/test_rhashtable.c time = test_rhashtable(&ht, objs, entries); entries 746 lib/test_rhashtable.c test_rht_params.max_size, test_rhashtable_max(objs, entries) == 0 ? entries 764 lib/test_rhashtable.c objs = vzalloc(array3_size(sizeof(struct test_obj), tcount, entries)); entries 771 lib/test_rhashtable.c roundup_pow_of_two(tcount * entries); entries 782 lib/test_rhashtable.c tdata[i].entries = entries; entries 783 lib/test_rhashtable.c tdata[i].objs = objs + i * entries; entries 815 lib/test_rhashtable.c err = test_rhltable(entries / 16); entries 1726 mm/filemap.c struct page **entries, pgoff_t *indices) entries 1757 mm/filemap.c entries[ret] = page; entries 50 mm/kasan/common.c static inline unsigned int filter_irq_stacks(unsigned long *entries, entries 56 mm/kasan/common.c if (in_irqentry_text(entries[i])) { entries 66 mm/kasan/common.c unsigned long entries[KASAN_STACK_DEPTH]; entries 69 mm/kasan/common.c nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); entries 70 mm/kasan/common.c nr_entries = filter_irq_stacks(entries, nr_entries); entries 71 mm/kasan/common.c return stack_depot_save(entries, nr_entries, flags); entries 104 mm/kasan/report.c unsigned long *entries; entries 107 mm/kasan/report.c nr_entries = stack_depot_fetch(track->stack, &entries); entries 108 mm/kasan/report.c stack_trace_print(entries, nr_entries, 0); entries 2964 mm/memcontrol.c memcg_cachep = READ_ONCE(arr->entries[kmemcg_id]); entries 4052 mm/memcontrol.c for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) entries 4053 mm/memcontrol.c eventfd_signal(t->entries[i].eventfd, 1); entries 4064 mm/memcontrol.c for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) entries 4065 mm/memcontrol.c eventfd_signal(t->entries[i].eventfd, 1); entries 4150 mm/memcontrol.c new = kmalloc(struct_size(new, entries, size), GFP_KERNEL); entries 4159 mm/memcontrol.c memcpy(new->entries, thresholds->primary->entries, (size - 1) * entries 4164 mm/memcontrol.c new->entries[size - 1].eventfd = eventfd; entries 4165 mm/memcontrol.c new->entries[size - 1].threshold = threshold; entries 4168 mm/memcontrol.c sort(new->entries, size, sizeof(struct mem_cgroup_threshold), entries 4174 mm/memcontrol.c if (new->entries[i].threshold <= usage) { entries 4218 mm/memcontrol.c int i, j, size, entries; entries 4238 mm/memcontrol.c size = entries = 0; entries 4240 mm/memcontrol.c if (thresholds->primary->entries[i].eventfd != eventfd) entries 4243 mm/memcontrol.c entries++; entries 4249 mm/memcontrol.c if (!entries) entries 4264 mm/memcontrol.c if (thresholds->primary->entries[i].eventfd == eventfd) entries 4267 mm/memcontrol.c new->entries[j] = thresholds->primary->entries[i]; entries 4268 mm/memcontrol.c if (new->entries[j].threshold <= usage) { entries 77 mm/page_ext.c int entries = ARRAY_SIZE(page_ext_ops); entries 80 mm/page_ext.c for (i = 0; i < entries; i++) { entries 94 mm/page_ext.c int entries = ARRAY_SIZE(page_ext_ops); entries 96 mm/page_ext.c for (i = 0; i < entries; i++) { entries 58 mm/page_owner.c unsigned long entries[4]; entries 61 mm/page_owner.c nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); entries 62 mm/page_owner.c return 
stack_depot_save(entries, nr_entries, GFP_KERNEL); entries 103 mm/page_owner.c static inline bool check_recursive_alloc(unsigned long *entries, entries 110 mm/page_owner.c if (entries[i] == ip) entries 118 mm/page_owner.c unsigned long entries[PAGE_OWNER_STACK_DEPTH]; entries 122 mm/page_owner.c nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2); entries 132 mm/page_owner.c if (check_recursive_alloc(entries, nr_entries, _RET_IP_)) entries 135 mm/page_owner.c handle = stack_depot_save(entries, nr_entries, flags); entries 343 mm/page_owner.c unsigned long *entries; entries 374 mm/page_owner.c nr_entries = stack_depot_fetch(handle, &entries); entries 375 mm/page_owner.c ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0); entries 407 mm/page_owner.c unsigned long *entries; entries 438 mm/page_owner.c nr_entries = stack_depot_fetch(handle, &entries); entries 439 mm/page_owner.c stack_trace_print(entries, nr_entries, 0); entries 446 mm/page_owner.c nr_entries = stack_depot_fetch(handle, &entries); entries 448 mm/page_owner.c stack_trace_print(entries, nr_entries, 0); entries 1123 mm/shmem.c struct page **entries, pgoff_t *indices, entries 1150 mm/shmem.c entries[ret] = page; entries 532 mm/slab.c static struct array_cache *alloc_arraycache(int node, int entries, entries 535 mm/slab.c size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache); entries 547 mm/slab.c init_arraycache(ac, entries, batchcount); entries 633 mm/slab.c static struct alien_cache *__alloc_alien_cache(int node, int entries, entries 636 mm/slab.c size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache); entries 642 mm/slab.c init_arraycache(&alc->ac, entries, batch); entries 1722 mm/slab.c struct kmem_cache *cachep, int entries, int batchcount) entries 1728 mm/slab.c size = sizeof(void *) * entries + sizeof(struct array_cache); entries 1736 mm/slab.c entries, batchcount); entries 37 mm/slab.h struct kmem_cache *entries[0]; entries 210 mm/slab_common.c memcpy(new->entries, old->entries, entries 671 mm/slab_common.c if (arr->entries[idx]) entries 701 mm/slab_common.c arr->entries[idx] = s; entries 809 mm/slab_common.c c = arr->entries[idx]; entries 814 mm/slab_common.c arr->entries[idx] = NULL; entries 850 mm/slab_common.c c = arr->entries[i]; entries 867 mm/slab_common.c arr->entries[i] = NULL; entries 1396 mm/swapfile.c void swapcache_free_entries(swp_entry_t *entries, int n) entries 1413 mm/swapfile.c sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL); entries 1415 mm/swapfile.c p = swap_info_get_cont(entries[i], prev); entries 1417 mm/swapfile.c swap_entry_free(p, entries[i]); entries 36 net/bridge/netfilter/ebtable_broute.c .entries = (char *)&initial_chain, entries 43 net/bridge/netfilter/ebtable_filter.c .entries = (char *)initial_chains, entries 43 net/bridge/netfilter/ebtable_nat.c .entries = (char *)initial_chains, entries 211 net/bridge/netfilter/ebtables.c base = private->entries; entries 443 net/bridge/netfilter/ebtables.c struct ebt_entry *e = (void *)newinfo->entries + offset; entries 452 net/bridge/netfilter/ebtables.c repl->entries + offset) entries 841 net/bridge/netfilter/ebtables.c if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) entries 863 net/bridge/netfilter/ebtables.c ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, entries 904 net/bridge/netfilter/ebtables.c EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, entries 917 net/bridge/netfilter/ebtables.c cl_s, udc_cnt, i, newinfo->entries)) 
{ entries 935 net/bridge/netfilter/ebtables.c ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, entries 938 net/bridge/netfilter/ebtables.c EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, entries 1041 net/bridge/netfilter/ebtables.c EBT_ENTRY_ITERATE(table->entries, table->entries_size, entries 1044 net/bridge/netfilter/ebtables.c vfree(table->entries); entries 1062 net/bridge/netfilter/ebtables.c EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, entries 1106 net/bridge/netfilter/ebtables.c newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT, entries 1108 net/bridge/netfilter/ebtables.c if (!newinfo->entries) { entries 1113 net/bridge/netfilter/ebtables.c newinfo->entries, tmp.entries, tmp.entries_size) != 0) { entries 1122 net/bridge/netfilter/ebtables.c vfree(newinfo->entries); entries 1133 net/bridge/netfilter/ebtables.c EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, entries 1137 net/bridge/netfilter/ebtables.c vfree(table->private->entries); entries 1153 net/bridge/netfilter/ebtables.c repl->entries == NULL || repl->entries_size == 0 || entries 1174 net/bridge/netfilter/ebtables.c memcpy(p, repl->entries, repl->entries_size); entries 1175 net/bridge/netfilter/ebtables.c newinfo->entries = p; entries 1190 net/bridge/netfilter/ebtables.c ((char *)repl->hook_entry[i] - repl->entries); entries 1231 net/bridge/netfilter/ebtables.c vfree(newinfo->entries); entries 1421 net/bridge/netfilter/ebtables.c char *entries; entries 1426 net/bridge/netfilter/ebtables.c entries = t->private->entries; entries 1431 net/bridge/netfilter/ebtables.c entries = t->table->entries; entries 1454 net/bridge/netfilter/ebtables.c return EBT_ENTRY_ITERATE(entries, entries_size, entries 1455 net/bridge/netfilter/ebtables.c ebt_entry_to_user, entries, tmp.entries); entries 1551 net/bridge/netfilter/ebtables.c compat_uptr_t entries; entries 1788 net/bridge/netfilter/ebtables.c const void *entries = info->entries; entries 1796 net/bridge/netfilter/ebtables.c return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, entries 1797 net/bridge/netfilter/ebtables.c entries, newinfo); entries 1814 net/bridge/netfilter/ebtables.c tinfo.entries = t->private->entries; entries 1819 net/bridge/netfilter/ebtables.c tinfo.entries = t->table->entries; entries 1851 net/bridge/netfilter/ebtables.c pos = compat_ptr(tmp.entries); entries 1852 net/bridge/netfilter/ebtables.c return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size, entries 2198 net/bridge/netfilter/ebtables.c repl->entries = compat_ptr(tmp.entries); entries 2229 net/bridge/netfilter/ebtables.c newinfo->entries = vmalloc(tmp.entries_size); entries 2230 net/bridge/netfilter/ebtables.c if (!newinfo->entries) { entries 2235 net/bridge/netfilter/ebtables.c newinfo->entries, tmp.entries, tmp.entries_size) != 0) { entries 2240 net/bridge/netfilter/ebtables.c entries_tmp = newinfo->entries; entries 2257 net/bridge/netfilter/ebtables.c newinfo->entries = vmalloc(size64); entries 2258 net/bridge/netfilter/ebtables.c if (!newinfo->entries) { entries 2265 net/bridge/netfilter/ebtables.c state.buf_kern_start = newinfo->entries; entries 2282 net/bridge/netfilter/ebtables.c delta = usrptr - tmp.entries; entries 2295 net/bridge/netfilter/ebtables.c vfree(newinfo->entries); entries 477 net/can/af_can.c dev_rcv_lists->entries++; entries 550 net/can/af_can.c dev_rcv_lists->entries--; entries 580 net/can/af_can.c if (dev_rcv_lists->entries == 0) entries 70 net/core/drop_monitor.c struct net_dm_hw_entry 
entries[0]; entries 235 net/core/drop_monitor.c for (i = 0; i < msg->entries; i++) { entries 242 net/core/drop_monitor.c if (msg->entries == dm_hit_limit) entries 251 net/core/drop_monitor.c msg->entries++; entries 304 net/core/drop_monitor.c hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit), entries 358 net/core/drop_monitor.c rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]); entries 451 net/core/drop_monitor.c hw_entry = &hw_entries->entries[i]; entries 461 net/core/drop_monitor.c hw_entry = &hw_entries->entries[hw_entries->num_entries]; entries 3089 net/core/ethtool.c act = &flow->rule->action.entries[0]; entries 12 net/core/flow_offload.c rule = kzalloc(struct_size(rule, action.entries, num_actions), entries 380 net/core/neighbour.c int entries; entries 385 net/core/neighbour.c entries = atomic_inc_return(&tbl->gc_entries) - 1; entries 386 net/core/neighbour.c if (entries >= tbl->gc_thresh3 || entries 387 net/core/neighbour.c (entries >= tbl->gc_thresh2 && entries 390 net/core/neighbour.c entries >= tbl->gc_thresh3) { entries 419 net/core/neighbour.c atomic_inc(&tbl->entries); entries 621 net/core/neighbour.c if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) entries 856 net/core/neighbour.c atomic_dec(&neigh->tbl->entries); entries 911 net/core/neighbour.c if (atomic_read(&tbl->entries) < tbl->gc_thresh1) entries 1733 net/core/neighbour.c if (atomic_read(&tbl->entries)) entries 2058 net/core/neighbour.c .ndtc_entries = atomic_read(&tbl->entries), entries 3313 net/core/neighbour.c atomic_read(&tbl->entries), entries 880 net/dsa/slave.c act = &cls->rule->action.entries[0]; entries 208 net/ipv4/netfilter/arp_tables.c table_base = private->entries; entries 610 net/ipv4/netfilter/arp_tables.c xt_entry_foreach(iter, t->entries, t->size) { entries 637 net/ipv4/netfilter/arp_tables.c xt_entry_foreach(iter, t->entries, t->size) { entries 684 net/ipv4/netfilter/arp_tables.c loc_cpu_entry = private->entries; entries 775 net/ipv4/netfilter/arp_tables.c memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); entries 777 net/ipv4/netfilter/arp_tables.c loc_cpu_entry = info->entries; entries 929 net/ipv4/netfilter/arp_tables.c loc_cpu_old_entry = oldinfo->entries; entries 975 net/ipv4/netfilter/arp_tables.c loc_cpu_entry = newinfo->entries; entries 1032 net/ipv4/netfilter/arp_tables.c xt_entry_foreach(iter, private->entries, private->size) { entries 1060 net/ipv4/netfilter/arp_tables.c struct compat_arpt_entry entries[0]; entries 1204 net/ipv4/netfilter/arp_tables.c entry1 = newinfo->entries; entries 1273 net/ipv4/netfilter/arp_tables.c loc_cpu_entry = newinfo->entries; entries 1373 net/ipv4/netfilter/arp_tables.c xt_entry_foreach(iter, private->entries, total_size) { entries 1528 net/ipv4/netfilter/arp_tables.c loc_cpu_entry = private->entries; entries 1552 net/ipv4/netfilter/arp_tables.c loc_cpu_entry = newinfo->entries; entries 1553 net/ipv4/netfilter/arp_tables.c memcpy(loc_cpu_entry, repl->entries, repl->size); entries 201 net/ipv4/netfilter/ip_tables.c root = get_entry(private->entries, private->hook_entry[hook]); entries 263 net/ipv4/netfilter/ip_tables.c table_base = private->entries; entries 751 net/ipv4/netfilter/ip_tables.c xt_entry_foreach(iter, t->entries, t->size) { entries 778 net/ipv4/netfilter/ip_tables.c xt_entry_foreach(iter, t->entries, t->size) { entries 826 net/ipv4/netfilter/ip_tables.c loc_cpu_entry = private->entries; entries 932 net/ipv4/netfilter/ip_tables.c memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); entries 934 
net/ipv4/netfilter/ip_tables.c loc_cpu_entry = info->entries; entries 1084 net/ipv4/netfilter/ip_tables.c xt_entry_foreach(iter, oldinfo->entries, oldinfo->size) entries 1129 net/ipv4/netfilter/ip_tables.c loc_cpu_entry = newinfo->entries; entries 1186 net/ipv4/netfilter/ip_tables.c xt_entry_foreach(iter, private->entries, private->size) { entries 1214 net/ipv4/netfilter/ip_tables.c struct compat_ipt_entry entries[0]; entries 1438 net/ipv4/netfilter/ip_tables.c entry1 = newinfo->entries; entries 1512 net/ipv4/netfilter/ip_tables.c loc_cpu_entry = newinfo->entries; entries 1586 net/ipv4/netfilter/ip_tables.c xt_entry_foreach(iter, private->entries, total_size) { entries 1747 net/ipv4/netfilter/ip_tables.c loc_cpu_entry = private->entries; entries 1769 net/ipv4/netfilter/ip_tables.c loc_cpu_entry = newinfo->entries; entries 1770 net/ipv4/netfilter/ip_tables.c memcpy(loc_cpu_entry, repl->entries, repl->size); entries 41 net/ipv4/netfilter/ipt_CLUSTERIP.c refcount_t entries; /* number of entries/rules entries 118 net/ipv4/netfilter/ipt_CLUSTERIP.c if (refcount_dec_and_lock(&c->entries, &cn->lock)) { entries 166 net/ipv4/netfilter/ipt_CLUSTERIP.c if (unlikely(!refcount_inc_not_zero(&c->entries))) { entries 292 net/ipv4/netfilter/ipt_CLUSTERIP.c refcount_set(&c->entries, 1); entries 58 net/ipv4/netfilter/iptable_filter.c ((struct ipt_standard *)repl->entries)[1].target.verdict = entries 226 net/ipv6/netfilter/ip6_tables.c root = get_entry(private->entries, private->hook_entry[hook]); entries 285 net/ipv6/netfilter/ip6_tables.c table_base = private->entries; entries 768 net/ipv6/netfilter/ip6_tables.c xt_entry_foreach(iter, t->entries, t->size) { entries 795 net/ipv6/netfilter/ip6_tables.c xt_entry_foreach(iter, t->entries, t->size) { entries 842 net/ipv6/netfilter/ip6_tables.c loc_cpu_entry = private->entries; entries 948 net/ipv6/netfilter/ip6_tables.c memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); entries 950 net/ipv6/netfilter/ip6_tables.c loc_cpu_entry = info->entries; entries 1101 net/ipv6/netfilter/ip6_tables.c xt_entry_foreach(iter, oldinfo->entries, oldinfo->size) entries 1146 net/ipv6/netfilter/ip6_tables.c loc_cpu_entry = newinfo->entries; entries 1202 net/ipv6/netfilter/ip6_tables.c xt_entry_foreach(iter, private->entries, private->size) { entries 1230 net/ipv6/netfilter/ip6_tables.c struct compat_ip6t_entry entries[0]; entries 1453 net/ipv6/netfilter/ip6_tables.c entry1 = newinfo->entries; entries 1521 net/ipv6/netfilter/ip6_tables.c loc_cpu_entry = newinfo->entries; entries 1595 net/ipv6/netfilter/ip6_tables.c xt_entry_foreach(iter, private->entries, total_size) { entries 1756 net/ipv6/netfilter/ip6_tables.c loc_cpu_entry = private->entries; entries 1779 net/ipv6/netfilter/ip6_tables.c loc_cpu_entry = newinfo->entries; entries 1780 net/ipv6/netfilter/ip6_tables.c memcpy(loc_cpu_entry, repl->entries, repl->size); entries 59 net/ipv6/netfilter/ip6table_filter.c ((struct ip6t_standard *)repl->entries)[1].target.verdict = entries 3204 net/ipv6/route.c int entries; entries 3206 net/ipv6/route.c entries = dst_entries_get_fast(ops); entries 3208 net/ipv6/route.c entries <= rt_max_size) entries 3213 net/ipv6/route.c entries = dst_entries_get_slow(ops); entries 3214 net/ipv6/route.c if (entries < ops->gc_thresh) entries 3218 net/ipv6/route.c return entries > rt_max_size; entries 215 net/mac80211/mesh.c int entries = 0; entries 227 net/mac80211/mesh.c ++entries; entries 229 net/mac80211/mesh.c entries == RMC_QUEUE_MAX_LEN) { entries 232 net/mac80211/mesh.c --entries; entries 
147 net/mac80211/mesh.h atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ entries 60 net/mac80211/mesh_pathtbl.c atomic_set(&newtbl->entries, 0); entries 523 net/mac80211/mesh_pathtbl.c atomic_dec(&tbl->entries); entries 106 net/netfilter/ipvs/ip_vs_lblc.c atomic_t entries; /* number of entries */ entries 173 net/netfilter/ipvs/ip_vs_lblc.c atomic_inc(&tbl->entries); entries 241 net/netfilter/ipvs/ip_vs_lblc.c atomic_dec(&tbl->entries); entries 275 net/netfilter/ipvs/ip_vs_lblc.c atomic_dec(&tbl->entries); entries 311 net/netfilter/ipvs/ip_vs_lblc.c if (atomic_read(&tbl->entries) <= tbl->max_size) { entries 316 net/netfilter/ipvs/ip_vs_lblc.c goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3; entries 329 net/netfilter/ipvs/ip_vs_lblc.c atomic_dec(&tbl->entries); entries 370 net/netfilter/ipvs/ip_vs_lblc.c atomic_set(&tbl->entries, 0); entries 274 net/netfilter/ipvs/ip_vs_lblcr.c atomic_t entries; /* number of entries */ entries 336 net/netfilter/ipvs/ip_vs_lblcr.c atomic_inc(&tbl->entries); entries 439 net/netfilter/ipvs/ip_vs_lblcr.c atomic_dec(&tbl->entries); entries 475 net/netfilter/ipvs/ip_vs_lblcr.c if (atomic_read(&tbl->entries) <= tbl->max_size) { entries 480 net/netfilter/ipvs/ip_vs_lblcr.c goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3; entries 493 net/netfilter/ipvs/ip_vs_lblcr.c atomic_dec(&tbl->entries); entries 533 net/netfilter/ipvs/ip_vs_lblcr.c atomic_set(&tbl->entries, 0); entries 66 net/netfilter/nf_dup_netdev.c entry = &flow->rule->action.entries[ctx->num_actions++]; entries 40 net/netfilter/nf_nat_core.c struct nf_hook_entries __rcu *entries; entries 753 net/netfilter/nf_nat_core.c struct nf_hook_entries *e = rcu_dereference(lpriv->entries); entries 1082 net/netfilter/nf_nat_core.c ret = nf_hook_entries_insert_raw(&priv->entries, ops); entries 1121 net/netfilter/nf_nat_core.c nf_hook_entries_delete_raw(&priv->entries, ops); entries 135 net/netfilter/nft_immediate.c entry = &flow->rule->action.entries[ctx->num_actions++]; entries 82 net/netfilter/xt_recent.c unsigned int entries; entries 149 net/netfilter/xt_recent.c t->entries--; entries 178 net/netfilter/xt_recent.c if (t->entries >= ip_list_tot) { entries 198 net/netfilter/xt_recent.c t->entries++; entries 23 net/netfilter/xt_repldata.h struct type##_standard entries[]; \ entries 26 net/netfilter/xt_repldata.h size_t term_offset = (offsetof(typeof(*tbl), entries[nhooks]) + \ entries 43 net/netfilter/xt_repldata.h tbl->entries[i++] = (struct type##_standard) \ entries 3451 net/sched/cls_api.c entry = &flow_action->entries[j]; entries 3518 net/sched/cls_api.c entry = &flow_action->entries[++j]; entries 54 net/sched/sch_taprio.c struct list_head entries; entries 119 net/sched/sch_taprio.c list_for_each_entry_safe(entry, n, &sched->entries, list) { entries 216 net/sched/sch_taprio.c list_for_each_entry(entry, &sched->entries, list) { entries 647 net/sched/sch_taprio.c if (list_is_last(&entry->list, &oper->entries)) entries 717 net/sched/sch_taprio.c next = list_first_entry(&oper->entries, struct sched_entry, entries 724 net/sched/sch_taprio.c next = list_first_entry(&oper->entries, struct sched_entry, entries 856 net/sched/sch_taprio.c list_add_tail(&entry->list, &sched->entries); entries 895 net/sched/sch_taprio.c list_for_each_entry(entry, &new->entries, list) entries 1009 net/sched/sch_taprio.c first = list_first_entry(&sched->entries, entries 1103 net/sched/sch_taprio.c list_for_each_entry(entry, &sched->entries, list) { entries 1192 net/sched/sch_taprio.c list_for_each_entry(entry, &sched->entries, 
list) { entries 1193 net/sched/sch_taprio.c struct tc_taprio_sched_entry *e = &offload->entries[i]; entries 1442 net/sched/sch_taprio.c INIT_LIST_HEAD(&new_admin->entries); entries 1764 net/sched/sch_taprio.c list_for_each_entry(entry, &root->entries, list) { entries 104 net/sunrpc/cache.c detail->entries --; entries 116 net/sunrpc/cache.c detail->entries++; entries 202 net/sunrpc/cache.c detail->entries++; entries 369 net/sunrpc/cache.c cd->entries = 0; entries 458 net/sunrpc/cache.c current_detail->entries--; entries 519 net/sunrpc/cache.c if (!detail->entries) { entries 524 net/sunrpc/cache.c dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name); entries 529 net/sunrpc/cache.c detail->entries--; entries 1890 net/sunrpc/cache.c cd->entries--; entries 460 net/xdp/xsk.c static int xsk_init_queue(u32 entries, struct xsk_queue **queue, entries 465 net/xdp/xsk.c if (entries == 0 || *queue || !is_power_of_2(entries)) entries 468 net/xdp/xsk.c q = xskq_create(entries, umem_queue); entries 749 net/xdp/xsk.c int entries; entries 751 net/xdp/xsk.c if (optlen < sizeof(entries)) entries 753 net/xdp/xsk.c if (copy_from_user(&entries, optval, sizeof(entries))) entries 762 net/xdp/xsk.c err = xsk_init_queue(entries, q, false); entries 805 net/xdp/xsk.c int entries; entries 807 net/xdp/xsk.c if (copy_from_user(&entries, optval, sizeof(entries))) entries 822 net/xdp/xsk.c err = xsk_init_queue(entries, q, true); entries 31 net/xdp/xsk_diag.c dr.entries = queue->nentries; entries 98 net/xdp/xsk_queue.h u32 entries = q->prod_tail - q->cons_tail; entries 100 net/xdp/xsk_queue.h if (entries == 0) { entries 103 net/xdp/xsk_queue.h entries = q->prod_tail - q->cons_tail; entries 106 net/xdp/xsk_queue.h return (entries > dcnt) ? dcnt : entries; entries 123 net/xdp/xsk_queue.h u32 entries = q->prod_tail - q->cons_tail; entries 125 net/xdp/xsk_queue.h if (entries >= cnt) entries 130 net/xdp/xsk_queue.h entries = q->prod_tail - q->cons_tail; entries 132 net/xdp/xsk_queue.h return entries >= cnt; entries 607 net/xfrm/xfrm_algo.c int entries; entries 614 net/xfrm/xfrm_algo.c .entries = ARRAY_SIZE(aead_list), entries 621 net/xfrm/xfrm_algo.c .entries = ARRAY_SIZE(aalg_list), entries 628 net/xfrm/xfrm_algo.c .entries = ARRAY_SIZE(ealg_list), entries 635 net/xfrm/xfrm_algo.c .entries = ARRAY_SIZE(calg_list), entries 648 net/xfrm/xfrm_algo.c for (i = 0; i < algo_list->entries; i++) { entries 90 scripts/kallsyms.c struct addr_range *ranges, int entries) entries 95 scripts/kallsyms.c for (i = 0; i < entries; ++i) { entries 178 scripts/kallsyms.c int entries) entries 183 scripts/kallsyms.c for (i = 0; i < entries; ++i) { entries 279 scripts/kconfig/expr.h struct list_head entries; entries 313 scripts/kconfig/mconf.c struct list_head entries; entries 330 scripts/kconfig/mconf.c list_for_each_entry(sp, &trail, entries) { entries 369 scripts/kconfig/mconf.c list_for_each_entry(pos, data->head, entries) { entries 429 scripts/kconfig/mconf.c list_add_tail(&stpart.entries, &trail); entries 456 scripts/kconfig/mconf.c list_for_each_entry_safe(pos, tmp, &head, entries) entries 656 scripts/kconfig/mconf.c list_add_tail(&stpart.entries, &trail); entries 735 scripts/kconfig/menu.c entries)->index + 1; entries 737 scripts/kconfig/menu.c list_add_tail(&jump->entries, head); entries 574 security/integrity/ima/ima_policy.c static void add_rules(struct ima_rule_entry *entries, int count, entries 583 security/integrity/ima/ima_policy.c list_add_tail(&entries[i].list, &ima_default_rules); entries 586 
security/integrity/ima/ima_policy.c entry = kmemdup(&entries[i], sizeof(*entry), entries 593 security/integrity/ima/ima_policy.c if (entries[i].action == APPRAISE) { entries 594 security/integrity/ima/ima_policy.c temp_ima_appraise |= ima_appraise_flag(entries[i].func); entries 595 security/integrity/ima/ima_policy.c if (entries[i].func == POLICY_CHECK) entries 86 security/selinux/ss/sidtab.c s->roots[l].ptr_inner->entries[0] = s->roots[l - 1]; entries 110 security/selinux/ss/sidtab.c entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift]; entries 128 security/selinux/ss/sidtab.c return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES].context; entries 185 security/selinux/ss/sidtab.c rc = sidtab_find_context(node->entries[i], entries 197 security/selinux/ss/sidtab.c if (context_cmp(&node->entries[i].context, context)) { entries 376 security/selinux/ss/sidtab.c rc = sidtab_convert_tree(&edst->ptr_inner->entries[i], entries 377 security/selinux/ss/sidtab.c &esrc->ptr_inner->entries[i], entries 393 security/selinux/ss/sidtab.c rc = convert->func(&esrc->ptr_leaf->entries[i].context, entries 394 security/selinux/ss/sidtab.c &edst->ptr_leaf->entries[i].context, entries 467 security/selinux/ss/sidtab.c sidtab_destroy_tree(node->entries[i], level - 1); entries 476 security/selinux/ss/sidtab.c context_destroy(&node->entries[i].context); entries 51 security/selinux/ss/sidtab.h struct sidtab_entry_leaf entries[SIDTAB_LEAF_ENTRIES]; entries 55 security/selinux/ss/sidtab.h union sidtab_entry_inner entries[SIDTAB_INNER_ENTRIES]; entries 27 sound/firewire/dice/dice-presonus.c } *entry, entries[] = { entries 43 sound/firewire/dice/dice-presonus.c for (i = 0; i < ARRAY_SIZE(entries); ++i) { entries 44 sound/firewire/dice/dice-presonus.c entry = entries + i; entries 48 sound/firewire/dice/dice-presonus.c if (i == ARRAY_SIZE(entries)) entries 63 sound/firewire/dice/dice-tcelectronic.c } *entry, entries[] = { entries 85 sound/firewire/dice/dice-tcelectronic.c for (i = 0; i < ARRAY_SIZE(entries); ++i) { entries 86 sound/firewire/dice/dice-tcelectronic.c entry = entries + i; entries 90 sound/firewire/dice/dice-tcelectronic.c if (i == ARRAY_SIZE(entries)) entries 42 sound/firewire/tascam/tascam-hwdep.c struct snd_firewire_tascam_change *entries = tscm->queue; entries 46 sound/firewire/tascam/tascam-hwdep.c if (remained < sizeof(type) + sizeof(*entries)) { entries 69 sound/firewire/tascam/tascam-hwdep.c length = (tail_pos - head_pos) * sizeof(*entries); entries 71 sound/firewire/tascam/tascam-hwdep.c length = rounddown(remained, sizeof(*entries)); entries 76 sound/firewire/tascam/tascam-hwdep.c if (copy_to_user(pos, &entries[head_pos], length)) entries 252 sound/pci/trident/trident.h __le32 *entries; /* 16k-aligned TLB table */ entries 815 sound/pci/trident/trident_main.c if (trident->tlb.entries) { entries 908 sound/pci/trident/trident_main.c if (trident->tlb.entries) { entries 2170 sound/pci/trident/trident_main.c if (trident->tlb.entries) { entries 2185 sound/pci/trident/trident_main.c if (trident->tlb.entries) { entries 2226 sound/pci/trident/trident_main.c if (trident->tlb.entries) entries 2244 sound/pci/trident/trident_main.c if (trident->tlb.entries) entries 3298 sound/pci/trident/trident_main.c if (trident->tlb.entries) { entries 3346 sound/pci/trident/trident_main.c trident->tlb.entries = (__le32 *)ALIGN((unsigned long)trident->tlb.buffer.area, SNDRV_TRIDENT_MAX_PAGES * 4); entries 3363 sound/pci/trident/trident_main.c trident->tlb.entries[i] = cpu_to_le32(trident->tlb.silent_page.addr & 
~(SNDRV_TRIDENT_PAGE_SIZE-1)); entries 3467 sound/pci/trident/trident_main.c if (trident->tlb.entries != NULL) { entries 3586 sound/pci/trident/trident_main.c trident->tlb.entries = NULL; entries 23 sound/pci/trident/trident_memory.c do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \ entries 28 sound/pci/trident/trident_memory.c (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1)) entries 87 sound/soc/intel/skylake/skl-sst-utils.c u32 entries; entries 186 tools/arch/x86/include/uapi/asm/kvm.h struct kvm_msr_entry entries[0]; entries 209 tools/arch/x86/include/uapi/asm/kvm.h struct kvm_cpuid_entry entries[0]; entries 231 tools/arch/x86/include/uapi/asm/kvm.h struct kvm_cpuid_entry2 entries[0]; entries 8 tools/build/feature/test-backtrace.c size_t entries; entries 10 tools/build/feature/test-backtrace.c entries = backtrace(backtrace_fns, 10); entries 11 tools/build/feature/test-backtrace.c backtrace_symbols_fd(backtrace_fns, entries, 1); entries 9 tools/include/linux/stacktrace.h unsigned long *entries; entries 15 tools/include/linux/stacktrace.h backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1); entries 20 tools/include/linux/stacktrace.h backtrace((void **)(trace)->entries, (trace)->max_entries)) entries 1057 tools/include/uapi/linux/kvm.h struct kvm_irq_routing_entry entries[0]; entries 34 tools/io_uring/io_uring-cp.c static int setup_context(unsigned entries, struct io_uring *ring) entries 38 tools/io_uring/io_uring-cp.c ret = io_uring_queue_init(entries, ring, 0); entries 54 tools/io_uring/liburing.h extern int io_uring_setup(unsigned entries, struct io_uring_params *p); entries 63 tools/io_uring/liburing.h extern int io_uring_queue_init(unsigned entries, struct io_uring *ring, entries 79 tools/io_uring/setup.c int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags) entries 87 tools/io_uring/setup.c fd = io_uring_setup(entries, &p); entries 42 tools/io_uring/syscall.c int io_uring_setup(unsigned int entries, struct io_uring_params *p) entries 44 tools/io_uring/syscall.c return syscall(__NR_io_uring_setup, entries, p); entries 14 tools/lib/api/fd/array.c fda->entries = NULL; entries 26 tools/lib/api/fd/array.c struct pollfd *entries = realloc(fda->entries, size); entries 28 tools/lib/api/fd/array.c if (entries == NULL) entries 33 tools/lib/api/fd/array.c free(entries); entries 38 tools/lib/api/fd/array.c fda->entries = entries; entries 61 tools/lib/api/fd/array.c free(fda->entries); entries 80 tools/lib/api/fd/array.c fda->entries[fda->nr].fd = fd; entries 81 tools/lib/api/fd/array.c fda->entries[fda->nr].events = revents; entries 96 tools/lib/api/fd/array.c if (fda->entries[fd].revents & revents) { entries 104 tools/lib/api/fd/array.c fda->entries[nr] = fda->entries[fd]; entries 116 tools/lib/api/fd/array.c return poll(fda->entries, fda->nr, timeout); entries 124 tools/lib/api/fd/array.c printed += fprintf(fp, "%s%d", fd ? ", " : "", fda->entries[fd].fd); entries 23 tools/lib/api/fd/array.h struct pollfd *entries; entries 106 tools/lib/bpf/xsk.h __u32 entries = r->cached_prod - r->cached_cons; entries 108 tools/lib/bpf/xsk.h if (entries == 0) { entries 110 tools/lib/bpf/xsk.h entries = r->cached_prod - r->cached_cons; entries 113 tools/lib/bpf/xsk.h return (entries > nb) ? 
nb : entries; entries 141 tools/lib/bpf/xsk.h size_t entries = xsk_cons_nb_avail(cons, nb); entries 143 tools/lib/bpf/xsk.h if (entries > 0) { entries 150 tools/lib/bpf/xsk.h cons->cached_cons += entries; entries 153 tools/lib/bpf/xsk.h return entries; entries 44 tools/objtool/special.c struct special_entry entries[] = { entries 169 tools/objtool/special.c for (entry = entries; entry->sec; entry++) { entries 313 tools/perf/builtin-annotate.c struct rb_node *nd = rb_first_cached(&hists->entries), *next; entries 2106 tools/perf/builtin-c2c.c struct rb_node *next = rb_first_cached(&hists->entries); entries 2233 tools/perf/builtin-c2c.c nd = rb_first_cached(&c2c.hists.hists.entries); entries 2301 tools/perf/builtin-c2c.c struct rb_node *nd = rb_first_cached(&hb->hists->entries); entries 996 tools/perf/builtin-kvm.c if (fda->entries[nr_stdin].revents & POLLIN) entries 1044 tools/perf/builtin-record.c if (!have_tracepoints(&rec->evlist->core.entries)) entries 1251 tools/perf/builtin-record.c if (have_tracepoints(&rec->evlist->core.entries)) { entries 731 tools/perf/builtin-report.c for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) { entries 795 tools/perf/builtin-report.c for (nd = rb_first_cached(&threads->entries); nd; entries 2938 tools/perf/builtin-sched.c list_for_each_entry(evsel, &evlist->core.entries, core.node) { entries 746 tools/perf/builtin-script.c from = br->entries[i].from; entries 747 tools/perf/builtin-script.c to = br->entries[i].to; entries 771 tools/perf/builtin-script.c mispred_str( br->entries + i), entries 772 tools/perf/builtin-script.c br->entries[i].flags.in_tx? 'X' : '-', entries 773 tools/perf/builtin-script.c br->entries[i].flags.abort? 'A' : '-', entries 774 tools/perf/builtin-script.c br->entries[i].flags.cycles); entries 796 tools/perf/builtin-script.c from = br->entries[i].from; entries 797 tools/perf/builtin-script.c to = br->entries[i].to; entries 816 tools/perf/builtin-script.c mispred_str( br->entries + i), entries 817 tools/perf/builtin-script.c br->entries[i].flags.in_tx? 'X' : '-', entries 818 tools/perf/builtin-script.c br->entries[i].flags.abort? 'A' : '-', entries 819 tools/perf/builtin-script.c br->entries[i].flags.cycles); entries 841 tools/perf/builtin-script.c from = br->entries[i].from; entries 842 tools/perf/builtin-script.c to = br->entries[i].to; entries 865 tools/perf/builtin-script.c mispred_str(br->entries + i), entries 866 tools/perf/builtin-script.c br->entries[i].flags.in_tx ? 'X' : '-', entries 867 tools/perf/builtin-script.c br->entries[i].flags.abort ? 
'A' : '-', entries 868 tools/perf/builtin-script.c br->entries[i].flags.cycles); entries 1034 tools/perf/builtin-script.c len = grab_bb(buffer, br->entries[nr-1].from, entries 1035 tools/perf/builtin-script.c br->entries[nr-1].from, entries 1038 tools/perf/builtin-script.c printed += ip__fprintf_sym(br->entries[nr - 1].from, thread, entries 1040 tools/perf/builtin-script.c printed += ip__fprintf_jump(br->entries[nr - 1].from, &br->entries[nr - 1], entries 1043 tools/perf/builtin-script.c printed += print_srccode(thread, x.cpumode, br->entries[nr - 1].from); entries 1048 tools/perf/builtin-script.c if (br->entries[i].from || br->entries[i].to) entries 1050 tools/perf/builtin-script.c br->entries[i].from, entries 1051 tools/perf/builtin-script.c br->entries[i].to); entries 1052 tools/perf/builtin-script.c start = br->entries[i + 1].to; entries 1053 tools/perf/builtin-script.c end = br->entries[i].from; entries 1058 tools/perf/builtin-script.c end = br->entries[--i].from; entries 1071 tools/perf/builtin-script.c printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, ++insn, fp, entries 1095 tools/perf/builtin-script.c if (br->entries[0].from == sample->ip) entries 1097 tools/perf/builtin-script.c if (br->entries[0].flags.abort) entries 1108 tools/perf/builtin-script.c start = br->entries[0].to; entries 409 tools/perf/builtin-top.c next = rb_first_cached(&hists->entries); entries 124 tools/perf/builtin-trace.c int *entries; entries 128 tools/perf/builtin-trace.c pid_t *entries; entries 399 tools/perf/builtin-trace.c if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) { entries 406 tools/perf/builtin-trace.c return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]); entries 435 tools/perf/builtin-trace.c struct strarray *sa = sas->entries[i]; entries 439 tools/perf/builtin-trace.c if (sa->entries[idx] == NULL) entries 441 tools/perf/builtin-trace.c return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]); entries 447 tools/perf/builtin-trace.c printed += scnprintf(bf + printed, size - printed, " /* %s??? 
*/", sas->entries[0]->prefix); entries 1569 tools/perf/builtin-trace.c trace->ev_qualifier_ids.entries = malloc(nr_allocated * entries 1570 tools/perf/builtin-trace.c sizeof(trace->ev_qualifier_ids.entries[0])); entries 1572 tools/perf/builtin-trace.c if (trace->ev_qualifier_ids.entries == NULL) { entries 1599 tools/perf/builtin-trace.c trace->ev_qualifier_ids.entries[nr_used++] = id; entries 1608 tools/perf/builtin-trace.c void *entries; entries 1611 tools/perf/builtin-trace.c entries = realloc(trace->ev_qualifier_ids.entries, entries 1612 tools/perf/builtin-trace.c nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); entries 1613 tools/perf/builtin-trace.c if (entries == NULL) { entries 1618 tools/perf/builtin-trace.c trace->ev_qualifier_ids.entries = entries; entries 1620 tools/perf/builtin-trace.c trace->ev_qualifier_ids.entries[nr_used++] = id; entries 1625 tools/perf/builtin-trace.c qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); entries 1631 tools/perf/builtin-trace.c zfree(&trace->ev_qualifier_ids.entries); entries 1643 tools/perf/builtin-trace.c in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, entries 2769 tools/perf/builtin-trace.c trace->ev_qualifier_ids.entries); entries 2882 tools/perf/builtin-trace.c int key = trace->ev_qualifier_ids.entries[i]; entries 3196 tools/perf/builtin-trace.c trace->filter_pids.entries); entries 3199 tools/perf/builtin-trace.c trace->filter_pids.entries); entries 3811 tools/perf/builtin-trace.c trace->filter_pids.entries = calloc(i, sizeof(pid_t)); entries 3813 tools/perf/builtin-trace.c if (trace->filter_pids.entries == NULL) entries 3816 tools/perf/builtin-trace.c trace->filter_pids.entries[0] = getpid(); entries 3819 tools/perf/builtin-trace.c trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i; entries 3998 tools/perf/builtin-trace.c if (!list_empty(&trace->evlist->core.entries)) entries 28 tools/perf/lib/evlist.c INIT_LIST_HEAD(&evlist->entries); entries 62 tools/perf/lib/evlist.c list_add_tail(&evsel->node, &evlist->entries); entries 90 tools/perf/lib/evlist.c next = list_first_entry(&evlist->entries, entries 98 tools/perf/lib/evlist.c if (&next->node == &evlist->entries) entries 16 tools/perf/lib/include/internal/evlist.h struct list_head entries; entries 45 tools/perf/lib/include/internal/evlist.h __perf_evlist__for_each_entry(&(evlist)->entries, evsel) entries 61 tools/perf/lib/include/internal/evlist.h __perf_evlist__for_each_entry_reverse(&(evlist)->entries, evsel) entries 65 tools/perf/lib/include/internal/evlist.h return list_entry(evlist->entries.next, struct perf_evsel, node); entries 70 tools/perf/lib/include/internal/evlist.h return list_entry(evlist->entries.prev, struct perf_evsel, node); entries 11 tools/perf/lib/include/internal/xyarray.h size_t entries; entries 207 tools/perf/lib/include/perf/event.h struct id_index_entry entries[0]; entries 264 tools/perf/lib/include/perf/event.h struct perf_record_thread_map_entry entries[]; entries 15 tools/perf/lib/xyarray.c xy->entries = xlen * ylen; entries 25 tools/perf/lib/xyarray.c size_t n = xy->entries * xy->entry_size; entries 14 tools/perf/tests/fdarray.c fda->entries[fd].fd = fda->nr - fd; entries 15 tools/perf/tests/fdarray.c fda->entries[fd].revents = revents; entries 57 tools/perf/tests/fdarray.c fda->entries[2].revents = POLLIN; entries 58 tools/perf/tests/fdarray.c expected_fd[0] = fda->entries[2].fd; entries 69 tools/perf/tests/fdarray.c if (fda->entries[0].fd != expected_fd[0]) { entries 71 tools/perf/tests/fdarray.c 
fda->entries[0].fd, expected_fd[0]); entries 76 tools/perf/tests/fdarray.c fda->entries[0].revents = POLLIN; entries 77 tools/perf/tests/fdarray.c expected_fd[0] = fda->entries[0].fd; entries 78 tools/perf/tests/fdarray.c fda->entries[3].revents = POLLIN; entries 79 tools/perf/tests/fdarray.c expected_fd[1] = fda->entries[3].fd; entries 92 tools/perf/tests/fdarray.c if (fda->entries[fd].fd != expected_fd[fd]) { entries 94 tools/perf/tests/fdarray.c fda->entries[fd].fd, expected_fd[fd]); entries 119 tools/perf/tests/fdarray.c if (fda->entries[_idx].fd != _fd) { \ entries 121 tools/perf/tests/fdarray.c __LINE__, _idx, fda->entries[1].fd, _fd); \ entries 124 tools/perf/tests/fdarray.c if (fda->entries[_idx].events != (_revents)) { \ entries 126 tools/perf/tests/fdarray.c __LINE__, _idx, fda->entries[_idx].fd, _revents); \ entries 150 tools/perf/tests/fdarray.c if (fda->entries == NULL) { entries 199 tools/perf/tests/hists_common.c root = &hists->entries; entries 138 tools/perf/tests/hists_cumulate.c root_out = &hists->entries; entries 202 tools/perf/tests/hists_cumulate.c root = &hists->entries.rb_root; entries 104 tools/perf/tests/hists_output.c root_out = &hists->entries; entries 165 tools/perf/tests/hists_output.c root = &hists->entries; entries 265 tools/perf/tests/hists_output.c root = &hists->entries; entries 319 tools/perf/tests/hists_output.c root = &hists->entries; entries 397 tools/perf/tests/hists_output.c root = &hists->entries; entries 500 tools/perf/tests/hists_output.c root = &hists->entries; entries 103 tools/perf/tests/sample-parsing.c MCOMP(branch_stack->entries[i]); entries 71 tools/perf/tests/thread-map.c TEST_ASSERT_VAL("wrong pid", map->entries[0].pid == (u64) getpid()); entries 72 tools/perf/tests/thread-map.c TEST_ASSERT_VAL("wrong comm", !strcmp(map->entries[0].comm, NAME)); entries 13 tools/perf/trace/beauty/beauty.h const char **entries; entries 18 tools/perf/trace/beauty/beauty.h .entries = array, \ entries 25 tools/perf/trace/beauty/beauty.h .entries = array, \ entries 44 tools/perf/trace/beauty/beauty.h struct strarray **entries; entries 49 tools/perf/trace/beauty/beauty.h .entries = array, \ entries 41 tools/perf/trace/beauty/ioctl.c if (nr < strarray__ioctl_tty_cmd.nr_entries && strarray__ioctl_tty_cmd.entries[nr] != NULL) entries 42 tools/perf/trace/beauty/ioctl.c return scnprintf(bf, size, "%s", strarray__ioctl_tty_cmd.entries[nr]); entries 52 tools/perf/trace/beauty/ioctl.c if (nr < strarray__drm_ioctl_cmds.nr_entries && strarray__drm_ioctl_cmds.entries[nr] != NULL) entries 53 tools/perf/trace/beauty/ioctl.c return scnprintf(bf, size, "DRM_%s", strarray__drm_ioctl_cmds.entries[nr]); entries 63 tools/perf/trace/beauty/ioctl.c if (nr < strarray__sndrv_pcm_ioctl_cmds.nr_entries && strarray__sndrv_pcm_ioctl_cmds.entries[nr] != NULL) entries 64 tools/perf/trace/beauty/ioctl.c return scnprintf(bf, size, "SNDRV_PCM_%s", strarray__sndrv_pcm_ioctl_cmds.entries[nr]); entries 74 tools/perf/trace/beauty/ioctl.c if (nr < strarray__sndrv_ctl_ioctl_cmds.nr_entries && strarray__sndrv_ctl_ioctl_cmds.entries[nr] != NULL) entries 75 tools/perf/trace/beauty/ioctl.c return scnprintf(bf, size, "SNDRV_CTL_%s", strarray__sndrv_ctl_ioctl_cmds.entries[nr]); entries 85 tools/perf/trace/beauty/ioctl.c if (nr < strarray__kvm_ioctl_cmds.nr_entries && strarray__kvm_ioctl_cmds.entries[nr] != NULL) entries 86 tools/perf/trace/beauty/ioctl.c return scnprintf(bf, size, "KVM_%s", strarray__kvm_ioctl_cmds.entries[nr]); entries 98 tools/perf/trace/beauty/ioctl.c if (nr < s->nr_entries && 
s->entries[nr] != NULL) entries 99 tools/perf/trace/beauty/ioctl.c return scnprintf(bf, size, "VHOST_%s", s->entries[nr]); entries 109 tools/perf/trace/beauty/ioctl.c if (nr < strarray__perf_ioctl_cmds.nr_entries && strarray__perf_ioctl_cmds.entries[nr] != NULL) entries 110 tools/perf/trace/beauty/ioctl.c return scnprintf(bf, size, "PERF_%s", strarray__perf_ioctl_cmds.entries[nr]); entries 120 tools/perf/trace/beauty/ioctl.c if (nr < strarray__usbdevfs_ioctl_cmds.nr_entries && strarray__usbdevfs_ioctl_cmds.entries[nr] != NULL) entries 121 tools/perf/trace/beauty/ioctl.c return scnprintf(bf, size, "USBDEVFS_%s", strarray__usbdevfs_ioctl_cmds.entries[nr]); entries 87 tools/perf/trace/beauty/mmap.c if (behavior < strarray__madvise_advices.nr_entries && strarray__madvise_advices.entries[behavior] != NULL) entries 88 tools/perf/trace/beauty/mmap.c return scnprintf(bf, size, "MADV_%s", strarray__madvise_advices.entries[behavior]); entries 17 tools/perf/trace/beauty/pkey_alloc.c const char *s = sa->entries[0]; entries 32 tools/perf/trace/beauty/pkey_alloc.c if (sa->entries[i] != NULL) entries 33 tools/perf/trace/beauty/pkey_alloc.c printed += scnprintf(bf + printed, size - printed, "%s%s", show_prefix ? sa->prefix : "", sa->entries[i]); entries 85 tools/perf/ui/browser.c } while (pos != browser->entries); entries 98 tools/perf/ui/browser.c } while (pos != browser->entries); entries 105 tools/perf/ui/browser.c struct list_head *head = browser->entries; entries 140 tools/perf/ui/browser.c struct rb_root *root = browser->entries; entries 174 tools/perf/ui/browser.c browser->top = rb_first(browser->entries); entries 501 tools/perf/ui/browser.c struct list_head *head = browser->entries; entries 504 tools/perf/ui/browser.c if (browser->top == NULL || browser->top == browser->entries) entries 612 tools/perf/ui/browser.c browser->top = browser->entries; entries 618 tools/perf/ui/browser.c browser->top = (char **)browser->entries + browser->nr_entries - 1 + offset; entries 623 tools/perf/ui/browser.c assert((char **)browser->top < (char **)browser->entries + browser->nr_entries); entries 624 tools/perf/ui/browser.c assert((char **)browser->top >= (char **)browser->entries); entries 633 tools/perf/ui/browser.c browser->top = browser->entries; entries 638 tools/perf/ui/browser.c assert(pos < (char **)browser->entries + browser->nr_entries); entries 19 tools/perf/ui/browser.h void *top, *entries; entries 31 tools/perf/ui/browsers/annotate.c struct rb_root entries; entries 247 tools/perf/ui/browsers/annotate.c struct rb_root *root = &browser->entries; entries 310 tools/perf/ui/browsers/annotate.c browser->entries = RB_ROOT; entries 343 tools/perf/ui/browsers/annotate.c browser->curr_hot = rb_last(&browser->entries); entries 715 tools/perf/ui/browsers/annotate.c nd = rb_last(&browser->entries); entries 723 tools/perf/ui/browsers/annotate.c nd = rb_first(&browser->entries); entries 933 tools/perf/ui/browsers/annotate.c browser.b.entries = ¬es->src->source, entries 85 tools/perf/ui/browsers/header.c .entries = (void *)argv, entries 64 tools/perf/ui/browsers/hists.c for (nd = rb_first_cached(&hists->entries); entries 581 tools/perf/ui/browsers/hists.c nd = rb_first_cached(&browser->hists->entries); entries 634 tools/perf/ui/browsers/hists.c browser->b.entries = &browser->hists->entries; entries 1759 tools/perf/ui/browsers/hists.c browser->top = rb_first_cached(&hb->hists->entries); entries 1867 tools/perf/ui/browsers/hists.c nd = hists__filter_entries(rb_first(browser->entries), entries 1874 
tools/perf/ui/browsers/hists.c nd = rb_hierarchy_last(rb_last(browser->entries)); entries 2097 tools/perf/ui/browsers/hists.c struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries), entries 2765 tools/perf/ui/browsers/hists.c struct rb_node *nd = rb_first_cached(&hb->hists->entries); entries 2785 tools/perf/ui/browsers/hists.c struct rb_node *nd = rb_first_cached(&hb->hists->entries); entries 3322 tools/perf/ui/browsers/hists.c if (pos->core.node.next == &evlist->core.entries) entries 3328 tools/perf/ui/browsers/hists.c if (pos->core.node.prev == &evlist->core.entries) entries 3383 tools/perf/ui/browsers/hists.c .entries = &evlist->core.entries, entries 109 tools/perf/ui/browsers/map.c .entries = &map->dso->symbols, entries 120 tools/perf/ui/browsers/map.c for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) { entries 358 tools/perf/ui/gtk/hists.c for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) { entries 583 tools/perf/ui/gtk/hists.c perf_gtk__add_hierarchy_entries(hists, &hists->entries, store, entries 829 tools/perf/ui/stdio/hist.c for (nd = rb_first_cached(&hists->entries); nd; entries 61 tools/perf/ui/tui/util.c .entries = (void *)argv, entries 668 tools/perf/util/auxtrace.c auxtrace_index->entries[nr].file_offset = file_offset; entries 669 tools/perf/util/auxtrace.c auxtrace_index->entries[nr].sz = event->header.size; entries 682 tools/perf/util/auxtrace.c ent.file_offset = auxtrace_index->entries[i].file_offset; entries 683 tools/perf/util/auxtrace.c ent.sz = auxtrace_index->entries[i].sz; entries 727 tools/perf/util/auxtrace.c auxtrace_index->entries[nr].file_offset = entries 729 tools/perf/util/auxtrace.c auxtrace_index->entries[nr].sz = bswap_64(ent.sz); entries 731 tools/perf/util/auxtrace.c auxtrace_index->entries[nr].file_offset = ent.file_offset; entries 732 tools/perf/util/auxtrace.c auxtrace_index->entries[nr].sz = ent.sz; entries 787 tools/perf/util/auxtrace.c ent = &auxtrace_index->entries[i]; entries 137 tools/perf/util/auxtrace.h struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT]; entries 1444 tools/perf/util/bpf-loader.c events = xy->entries / (xy->row_size / xy->entry_size); entries 42 tools/perf/util/branch.h struct branch_entry entries[0]; entries 212 tools/perf/util/cgroup.c if (list_empty(&evlist->core.entries)) { entries 881 tools/perf/util/cs-etm.c memcpy(&bs_dst->entries[0], entries 882 tools/perf/util/cs-etm.c &bs_src->entries[tidq->last_branch_pos], entries 893 tools/perf/util/cs-etm.c memcpy(&bs_dst->entries[nr], entries 894 tools/perf/util/cs-etm.c &bs_src->entries[0], entries 977 tools/perf/util/cs-etm.c be = &bs->entries[tidq->last_branch_pos]; entries 1175 tools/perf/util/cs-etm.c struct branch_entry entries; entries 1205 tools/perf/util/cs-etm.c .entries = { entries 207 tools/perf/util/evlist.c __perf_evlist__set_leader(&evlist->core.entries); entries 909 tools/perf/util/evlist.c if (evlist->core.pollfd.entries == NULL && perf_evlist__alloc_pollfd(&evlist->core) < 0) entries 1523 tools/perf/util/evlist.c list_splice(&move, &evlist->core.entries); entries 234 tools/perf/util/evlist.h return list_empty(&evlist->core.entries); entries 272 tools/perf/util/evlist.h __evlist__for_each_entry(&(evlist)->core.entries, evsel) entries 288 tools/perf/util/evlist.h __evlist__for_each_entry_continue(&(evlist)->core.entries, evsel) entries 304 tools/perf/util/evlist.h __evlist__for_each_entry_reverse(&(evlist)->core.entries, evsel) entries 322 tools/perf/util/evlist.h 
__evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel) entries 302 tools/perf/util/header.c return read_tracing_data(ff->fd, &evlist->core.entries); entries 226 tools/perf/util/hist.c struct rb_node *next = rb_first_cached(&hists->entries); entries 347 tools/perf/util/hist.c root_out = &hists->entries; entries 362 tools/perf/util/hist.c struct rb_node *next = rb_first_cached(&hists->entries); entries 378 tools/perf/util/hist.c struct rb_node *next = rb_first_cached(&hists->entries); entries 391 tools/perf/util/hist.c struct rb_node *next = rb_first_cached(&hists->entries); entries 1680 tools/perf/util/hist.c node = rb_first_cached(&hists->entries); entries 1785 tools/perf/util/hist.c static void __hists__insert_output_entry(struct rb_root_cached *entries, entries 1790 tools/perf/util/hist.c struct rb_node **p = &entries->rb_root.rb_node; entries 1822 tools/perf/util/hist.c rb_insert_color_cached(&he->rb_node, entries, leftmost); entries 1853 tools/perf/util/hist.c &hists->entries, entries 1866 tools/perf/util/hist.c hists->entries = RB_ROOT_CACHED; entries 1875 tools/perf/util/hist.c __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain); entries 2100 tools/perf/util/hist.c for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) { entries 2161 tools/perf/util/hist.c nd = rb_first_cached(&hists->entries); entries 2205 tools/perf/util/hist.c nd = rb_first_cached(&hists->entries); entries 2210 tools/perf/util/hist.c rb_erase_cached(&h->rb_node, &hists->entries); entries 2215 tools/perf/util/hist.c hists->entries = new_root; entries 2577 tools/perf/util/hist.c if (bs && bs->nr && bs->entries[0].flags.cycles) { entries 2733 tools/perf/util/hist.c hists->entries = RB_ROOT_CACHED; entries 81 tools/perf/util/hist.h struct rb_root_cached entries; entries 1145 tools/perf/util/intel-pt.c memcpy(&bs_dst->entries[0], entries 1146 tools/perf/util/intel-pt.c &bs_src->entries[ptq->last_branch_pos], entries 1150 tools/perf/util/intel-pt.c memcpy(&bs_dst->entries[nr], entries 1151 tools/perf/util/intel-pt.c &bs_src->entries[0], entries 1173 tools/perf/util/intel-pt.c be = &bs->entries[ptq->last_branch_pos]; entries 1276 tools/perf/util/intel-pt.c struct branch_entry entries; entries 1297 tools/perf/util/intel-pt.c .entries = { entries 1680 tools/perf/util/intel-pt.c to = &br_stack->entries[0].from; entries 1777 tools/perf/util/intel-pt.c struct branch_entry entries[LBRS_MAX]; entries 48 tools/perf/util/intlist.h struct rb_node *rn = rb_first_cached(&ilist->rblist.entries); entries 311 tools/perf/util/jitdump.c jr->info.entries[n].addr = bswap_64(jr->info.entries[n].addr); entries 312 tools/perf/util/jitdump.c jr->info.entries[n].lineno = bswap_32(jr->info.entries[n].lineno); entries 313 tools/perf/util/jitdump.c jr->info.entries[n].discrim = bswap_32(jr->info.entries[n].discrim); entries 604 tools/perf/util/jitdump.c memcpy(data, &jr->info.entries, sz); entries 104 tools/perf/util/jitdump.h struct debug_entry entries[0]; entries 58 tools/perf/util/machine.c threads->entries = RB_ROOT_CACHED; entries 196 tools/perf/util/machine.c nd = rb_first_cached(&threads->entries); entries 498 tools/perf/util/machine.c struct rb_node **p = &threads->entries.rb_root.rb_node; entries 531 tools/perf/util/machine.c rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost); entries 542 tools/perf/util/machine.c rb_erase_cached(&th->rb_node, &threads->entries); entries 868 tools/perf/util/machine.c for (nd = rb_first_cached(&threads->entries); nd; entries 1766 
tools/perf/util/machine.c rb_erase_cached(&th->rb_node, &threads->entries); entries 2090 tools/perf/util/machine.c ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to); entries 2091 tools/perf/util/machine.c ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from); entries 2092 tools/perf/util/machine.c bi[i].flags = bs->entries[i].flags; entries 2213 tools/perf/util/machine.c ip = lbr_stack->entries[k].from; entries 2215 tools/perf/util/machine.c flags = &lbr_stack->entries[k].flags; entries 2217 tools/perf/util/machine.c ip = lbr_stack->entries[0].to; entries 2219 tools/perf/util/machine.c flags = &lbr_stack->entries[0].flags; entries 2221 tools/perf/util/machine.c lbr_stack->entries[0].from; entries 2226 tools/perf/util/machine.c ip = lbr_stack->entries[k].from; entries 2228 tools/perf/util/machine.c flags = &lbr_stack->entries[k].flags; entries 2233 tools/perf/util/machine.c ip = lbr_stack->entries[0].to; entries 2235 tools/perf/util/machine.c flags = &lbr_stack->entries[0].flags; entries 2237 tools/perf/util/machine.c lbr_stack->entries[0].from; entries 2331 tools/perf/util/machine.c be[i] = branch->entries[i]; entries 2350 tools/perf/util/machine.c be[i] = branch->entries[branch->nr - i - 1]; entries 2550 tools/perf/util/machine.c for (nd = rb_first_cached(&threads->entries); nd; entries 33 tools/perf/util/machine.h struct rb_root_cached entries; entries 577 tools/perf/util/map.c maps->entries = RB_ROOT; entries 597 tools/perf/util/map.c struct rb_root *root = &maps->entries; entries 694 tools/perf/util/map.c for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) { entries 746 tools/perf/util/map.c for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) { entries 781 tools/perf/util/map.c root = &maps->entries; entries 913 tools/perf/util/map.c struct rb_node **p = &maps->entries.rb_node; entries 928 tools/perf/util/map.c rb_insert_color(&map->rb_node, &maps->entries); entries 963 tools/perf/util/map.c rb_erase_init(&map->rb_node, &maps->entries); entries 984 tools/perf/util/map.c p = maps->entries.rb_node; entries 1003 tools/perf/util/map.c struct rb_node *first = rb_first(&maps->entries); entries 18 tools/perf/util/map_groups.h struct rb_root entries; entries 49 tools/perf/util/mem2node.c struct phys_entry *entries, *tmp_entries; entries 61 tools/perf/util/mem2node.c entries = zalloc(sizeof(*entries) * max); entries 62 tools/perf/util/mem2node.c if (!entries) entries 83 tools/perf/util/mem2node.c struct phys_entry *prev = &entries[j - 1]; entries 92 tools/perf/util/mem2node.c phys_entry__init(&entries[j++], start, bsize, n->node); entries 97 tools/perf/util/mem2node.c tmp_entries = realloc(entries, sizeof(*entries) * j); entries 99 tools/perf/util/mem2node.c entries = tmp_entries; entries 103 tools/perf/util/mem2node.c entries[i].node, entries[i].start, entries[i].end); entries 105 tools/perf/util/mem2node.c phys_entry__insert(&entries[i], &map->root); entries 108 tools/perf/util/mem2node.c map->entries = entries; entries 114 tools/perf/util/mem2node.c zfree(&map->entries); entries 12 tools/perf/util/mem2node.h struct phys_entry *entries; entries 387 tools/perf/util/metricgroup.c for (node = rb_first_cached(&groups.entries); node; node = next) { entries 2075 tools/perf/util/parse-events.c if (last->core.node.prev == &evlist->core.entries) entries 2392 tools/perf/util/parse-events.c list_for_each_entry(ent, &pcache->entries, node) { entries 511 tools/perf/util/probe-file.c list_add_tail(&entry->node, &pcache->entries); entries 530 tools/perf/util/probe-file.c 
INIT_LIST_HEAD(&pcache->entries); entries 540 tools/perf/util/probe-file.c list_for_each_entry_safe(entry, n, &pcache->entries, node) { entries 681 tools/perf/util/probe-file.c list_add_tail(&entry->node, &pcache->entries); entries 851 tools/perf/util/probe-file.c list_add_tail(&entry->node, &pcache->entries); entries 953 tools/perf/util/probe-file.c list_for_each_entry_safe(entry, tmp, &pcache->entries, node) { entries 995 tools/perf/util/probe-file.c if (!list_empty(&pcache->entries)) { entries 21 tools/perf/util/probe-file.h struct list_head entries; entries 36 tools/perf/util/probe-file.h list_for_each_entry(entry, &pcache->entries, node) entries 18 tools/perf/util/pstack.c void *entries[0]; entries 45 tools/perf/util/pstack.c if (pstack->entries[i] == key) { entries 47 tools/perf/util/pstack.c memmove(pstack->entries + i, entries 48 tools/perf/util/pstack.c pstack->entries + i + 1, entries 63 tools/perf/util/pstack.c pstack->entries[pstack->top++] = key; entries 75 tools/perf/util/pstack.c ret = pstack->entries[--pstack->top]; entries 76 tools/perf/util/pstack.c pstack->entries[pstack->top] = NULL; entries 84 tools/perf/util/pstack.c return pstack->entries[pstack->top - 1]; entries 947 tools/perf/util/python.c FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r"); entries 954 tools/perf/util/python.c file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1, entries 72 tools/perf/util/rb_resort.h struct rb_root entries; \ entries 79 tools/perf/util/rb_resort.h struct rb_node **p = &sorted->entries.rb_node, *parent = NULL; \ entries 88 tools/perf/util/rb_resort.h rb_insert_color(sorted_nd, &sorted->entries); \ entries 92 tools/perf/util/rb_resort.h struct rb_root *entries) \ entries 96 tools/perf/util/rb_resort.h for (nd = rb_first(entries); nd; nd = rb_next(nd)) { \ entries 103 tools/perf/util/rb_resort.h static struct __name##_sorted *__name##_sorted__new(struct rb_root *entries, \ entries 109 tools/perf/util/rb_resort.h sorted->entries = RB_ROOT; \ entries 110 tools/perf/util/rb_resort.h __name##_sorted__sort(sorted, entries); \ entries 128 tools/perf/util/rb_resort.h for (__nd = rb_first(&__name->entries); \ entries 143 tools/perf/util/rb_resort.h DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \ entries 148 tools/perf/util/rb_resort.h DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries.rb_root, \ entries 15 tools/perf/util/rblist.c struct rb_node **p = &rblist->entries.rb_root.rb_node; entries 40 tools/perf/util/rblist.c rb_insert_color_cached(new_node, &rblist->entries, leftmost); entries 48 tools/perf/util/rblist.c rb_erase_cached(rb_node, &rblist->entries); entries 57 tools/perf/util/rblist.c struct rb_node **p = &rblist->entries.rb_root.rb_node; entries 82 tools/perf/util/rblist.c &rblist->entries, leftmost); entries 103 tools/perf/util/rblist.c rblist->entries = RB_ROOT_CACHED; entries 112 tools/perf/util/rblist.c struct rb_node *pos, *next = rb_first_cached(&rblist->entries); entries 133 tools/perf/util/rblist.c for (node = rb_first_cached(&rblist->entries); node; entries 23 tools/perf/util/rblist.h struct rb_root_cached entries; entries 487 tools/perf/util/scripting-engines/trace-event-python.c PyLong_FromUnsignedLongLong(br->entries[i].from)); entries 489 tools/perf/util/scripting-engines/trace-event-python.c PyLong_FromUnsignedLongLong(br->entries[i].to)); entries 491 tools/perf/util/scripting-engines/trace-event-python.c PyBool_FromLong(br->entries[i].flags.mispred)); entries 493 
tools/perf/util/scripting-engines/trace-event-python.c PyBool_FromLong(br->entries[i].flags.predicted)); entries 495 tools/perf/util/scripting-engines/trace-event-python.c PyBool_FromLong(br->entries[i].flags.in_tx)); entries 497 tools/perf/util/scripting-engines/trace-event-python.c PyBool_FromLong(br->entries[i].flags.abort)); entries 499 tools/perf/util/scripting-engines/trace-event-python.c PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles)); entries 502 tools/perf/util/scripting-engines/trace-event-python.c br->entries[i].from, &al); entries 508 tools/perf/util/scripting-engines/trace-event-python.c br->entries[i].to, &al); entries 584 tools/perf/util/scripting-engines/trace-event-python.c br->entries[i].from, &al); entries 590 tools/perf/util/scripting-engines/trace-event-python.c br->entries[i].to, &al); entries 595 tools/perf/util/scripting-engines/trace-event-python.c get_br_mspred(&br->entries[i].flags, bf, sizeof(bf)); entries 599 tools/perf/util/scripting-engines/trace-event-python.c if (br->entries[i].flags.in_tx) { entries 607 tools/perf/util/scripting-engines/trace-event-python.c if (br->entries[i].flags.abort) { entries 842 tools/perf/util/session.c event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid); entries 1041 tools/perf/util/session.c (int)(kernel_callchain_nr), lbr_stack->entries[0].to); entries 1044 tools/perf/util/session.c (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from); entries 1073 tools/perf/util/session.c struct branch_entry *e = &sample->branch_stack->entries[i]; entries 2414 tools/perf/util/session.c struct id_index_entry *e = &ie->entries[i]; entries 552 tools/perf/util/stat-display.c alias = list_prepare_entry(counter, &(evlist->core.entries), core.node); entries 553 tools/perf/util/stat-display.c list_for_each_entry_continue (alias, &evlist->core.entries, core.node) { entries 173 tools/perf/util/stat-shadow.c next = rb_first_cached(&rblist->entries); entries 60 tools/perf/util/strlist.h struct rb_node *rn = rb_first_cached(&slist->rblist.entries); entries 891 tools/perf/util/synthetic-events.c size += threads->nr * sizeof(event->thread_map.entries[0]); entries 902 tools/perf/util/synthetic-events.c struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i]; entries 1443 tools/perf/util/synthetic-events.c e = &ev->id_index.entries[i++]; entries 1727 tools/perf/util/synthetic-events.c tdata = tracing_data_get(&evlist->core.entries, fd, true); entries 62 tools/perf/util/syscalltbl.c struct syscall *entries; entries 68 tools/perf/util/syscalltbl.c entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries); entries 69 tools/perf/util/syscalltbl.c if (tbl->syscalls.entries == NULL) entries 74 tools/perf/util/syscalltbl.c entries[j].name = syscalltbl_native[i]; entries 75 tools/perf/util/syscalltbl.c entries[j].id = i; entries 80 tools/perf/util/syscalltbl.c qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp); entries 100 tools/perf/util/syscalltbl.c zfree(&tbl->syscalls.entries); entries 111 tools/perf/util/syscalltbl.c struct syscall *sc = bsearch(name, tbl->syscalls.entries, entries 121 tools/perf/util/syscalltbl.c struct syscall *syscalls = tbl->syscalls.entries; entries 11 tools/perf/util/syscalltbl.h void *entries; entries 379 tools/perf/util/thread_map.c perf_thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid); entries 380 tools/perf/util/thread_map.c threads->map[i].comm = strndup(event->entries[i].comm, 16); entries 77 
tools/perf/util/unwind-libdw.c struct unwind_entry *e = &ui->entries[ui->idx++]; entries 213 tools/perf/util/unwind-libdw.c ui = zalloc(sizeof(ui_buf) + sizeof(ui_buf.entries[0]) * max_stack); entries 249 tools/perf/util/unwind-libdw.c err = ui->entries[j].ip ? ui->cb(&ui->entries[j], ui->arg) : 0; entries 23 tools/perf/util/unwind-libdw.h struct unwind_entry entries[]; entries 15 tools/perf/util/xyarray.c xy->entries = xlen * ylen; entries 25 tools/perf/util/xyarray.c size_t n = xy->entries * xy->entry_size; entries 108 tools/testing/selftests/bpf/progs/strobemeta.h struct strobe_map_entry entries[STROBE_MAX_MAP_ENTRIES]; entries 422 tools/testing/selftests/bpf/progs/strobemeta.h map.entries[i].key); entries 429 tools/testing/selftests/bpf/progs/strobemeta.h map.entries[i].val); entries 757 tools/testing/selftests/kvm/lib/x86_64/processor.c if (cpuid->entries[i].function == function && entries 758 tools/testing/selftests/kvm/lib/x86_64/processor.c cpuid->entries[i].index == index) { entries 759 tools/testing/selftests/kvm/lib/x86_64/processor.c entry = &cpuid->entries[i]; entries 1046 tools/testing/selftests/kvm/lib/x86_64/processor.c state = malloc(sizeof(*state) + nmsrs * sizeof(state->msrs.entries[0])); entries 1086 tools/testing/selftests/kvm/lib/x86_64/processor.c state->msrs.entries[i].index = list->indices[i]; entries 1120 tools/testing/selftests/kvm/lib/x86_64/processor.c r, r == state->msrs.nmsrs ? -1 : state->msrs.entries[r].index); entries 65 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c struct kvm_cpuid_entry2 *entry = &hv_cpuid_entries->entries[i]; entries 111 tools/thermal/tmon/tui.c int entries = ptdata.nr_cooling_dev + 1; entries 112 tools/thermal/tmon/tui.c int rows = max(DIAG_DEV_ROWS, (entries + 1) / 2); entries 113 tools/thermal/tmon/tui.c return min(rows, entries); entries 123 virt/kvm/arm/vgic/vgic-irqfd.c struct kvm_irq_routing_entry *entries; entries 128 virt/kvm/arm/vgic/vgic-irqfd.c entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL); entries 129 virt/kvm/arm/vgic/vgic-irqfd.c if (!entries) entries 133 virt/kvm/arm/vgic/vgic-irqfd.c entries[i].gsi = i; entries 134 virt/kvm/arm/vgic/vgic-irqfd.c entries[i].type = KVM_IRQ_ROUTING_IRQCHIP; entries 135 virt/kvm/arm/vgic/vgic-irqfd.c entries[i].u.irqchip.irqchip = 0; entries 136 virt/kvm/arm/vgic/vgic-irqfd.c entries[i].u.irqchip.pin = i; entries 138 virt/kvm/arm/vgic/vgic-irqfd.c ret = kvm_set_irq_routing(kvm, entries, nr, 0); entries 139 virt/kvm/arm/vgic/vgic-irqfd.c kfree(entries); entries 246 virt/kvm/eventfd.c struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS]; entries 249 virt/kvm/eventfd.c n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi); entries 253 virt/kvm/eventfd.c e = entries; entries 23 virt/kvm/irqchip.c struct kvm_kernel_irq_routing_entry *entries, int gsi) entries 33 virt/kvm/irqchip.c entries[n] = *e; entries 3478 virt/kvm/kvm_main.c struct kvm_irq_routing_entry *entries = NULL; entries 3492 virt/kvm/kvm_main.c entries = vmalloc(array_size(sizeof(*entries), entries 3494 virt/kvm/kvm_main.c if (!entries) entries 3498 virt/kvm/kvm_main.c if (copy_from_user(entries, urouting->entries, entries 3499 virt/kvm/kvm_main.c routing.nr * sizeof(*entries))) entries 3502 virt/kvm/kvm_main.c r = kvm_set_irq_routing(kvm, entries, routing.nr, entries 3505 virt/kvm/kvm_main.c vfree(entries);
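
Several recurring idioms behind the hits above, sketched as small standalone C examples.

The net/core/drop_monitor.c and net/core/flow_offload.c hits size their allocations with struct_size(ptr, entries, n): a fixed header followed by a flexible entries[] array. A minimal userspace sketch of that layout, assuming plain calloc() and an invented demo_table type; note the kernel's struct_size() additionally guards the multiplication against overflow, which this open-coded expression does not:

#include <stdlib.h>
#include <stddef.h>

struct demo_table {
	size_t num_entries;
	int entries[];	/* flexible array member, sized at allocation time */
};

static struct demo_table *demo_table_alloc(size_t n)
{
	/* offsetof() + n * sizeof(element) is what the kernel's
	 * struct_size() computes, minus its overflow checking */
	struct demo_table *t = calloc(1, offsetof(struct demo_table, entries)
					 + n * sizeof(t->entries[0]));

	if (t)
		t->num_entries = n;
	return t;
}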
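
The net/core/neighbour.c, net/ipv6/route.c and ipvs lblc/lblcr hits all keep an entries counter and compare it against size thresholds to decide when to garbage-collect. The sketch below is a paraphrase of that shape, not the exact kernel policy; demo_cache and its fields are invented:

#include <stdbool.h>

struct demo_cache {
	int entries;				/* current population */
	int gc_thresh1, gc_thresh2, gc_thresh3;	/* soft .. hard limits */
	bool under_pressure;			/* extra pressure signal */
};

/* Below thresh1 never collect, at or above thresh3 always collect,
 * in between collect only when some extra pressure signal is set. */
static bool demo_needs_gc(const struct demo_cache *c)
{
	if (c->entries < c->gc_thresh1)
		return false;
	if (c->entries >= c->gc_thresh3)
		return true;
	return c->entries >= c->gc_thresh2 && c->under_pressure;
}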
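
In the net/sched/sch_taprio.c and tools/perf/util/probe-file.c hits, entries is the head of an intrusive linked list walked with list_for_each_entry(). The sketch below re-implements just enough of list_head, list_add_tail() and container_of() to build in userspace; sched_entry here is a stand-in, not the taprio structure:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *item, struct list_head *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

struct sched_entry {
	struct list_head list;	/* embedded node, no separate allocation */
	int interval;
};

int main(void)
{
	struct list_head entries = LIST_HEAD_INIT(entries);
	struct sched_entry a = { .interval = 1000 }, b = { .interval = 2000 };
	struct list_head *pos;

	list_add_tail(&a.list, &entries);
	list_add_tail(&b.list, &entries);
	for (pos = entries.next; pos != &entries; pos = pos->next)
		printf("%d\n", container_of(pos, struct sched_entry, list)->interval);
	return 0;
}

The container_of() arithmetic is what lets the same list_head machinery link any structure that embeds a node.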
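
The net/xdp/xsk_queue.h and tools/lib/bpf/xsk.h hits compute the number of filled ring entries as the unsigned difference between producer and consumer positions, capped at the caller's batch size. A compilable sketch of that idiom (demo_ring and demo_nb_avail are invented names); unsigned wraparound keeps the subtraction correct even after the 32-bit counters overflow:

#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint32_t cached_prod;
	uint32_t cached_cons;
};

/* Cap the reported count at nb, mirroring "(entries > nb) ? nb : entries". */
static uint32_t demo_nb_avail(const struct demo_ring *r, uint32_t nb)
{
	uint32_t entries = r->cached_prod - r->cached_cons; /* wraps safely */

	return entries > nb ? nb : entries;
}

int main(void)
{
	struct demo_ring r = { .cached_prod = 5, .cached_cons = 2 };

	printf("%u\n", demo_nb_avail(&r, 16)); /* prints 3 */
	return 0;
}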
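
tools/lib/api/fd/array.c grows its pollfd entries array with realloc() through a temporary pointer, so a failed grow leaves the old array intact. A reduced sketch under invented demo_ names:

#include <poll.h>
#include <stdlib.h>

struct demo_fdarray {
	struct pollfd *entries;
	int nr, nr_alloc;
};

static int demo_fdarray__grow(struct demo_fdarray *fda, int nr)
{
	int nr_alloc = fda->nr_alloc + nr;
	size_t size = sizeof(struct pollfd) * nr_alloc;
	struct pollfd *entries = realloc(fda->entries, size);

	if (entries == NULL)
		return -1;

	fda->nr_alloc = nr_alloc;
	fda->entries = entries;	/* only commit on success */
	return 0;
}

Assigning realloc()'s result straight to fda->entries would leak the old array on failure; the temporary is the point of the pattern.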
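
The tools/perf/trace/beauty hits index a string table only after checking nr_entries and a non-NULL slot, falling back to the raw number otherwise. Sketch with snprintf() standing in for the tools' scnprintf(); demo_strarray is illustrative:

#include <stdio.h>

struct demo_strarray {
	const char *prefix;	/* e.g. "KVM_" */
	int nr_entries;
	const char **entries;	/* may contain NULL holes */
};

static int demo_strarray__scnprintf(char *bf, size_t size,
				    const struct demo_strarray *sa, int idx)
{
	if (idx >= 0 && idx < sa->nr_entries && sa->entries[idx] != NULL)
		return snprintf(bf, size, "%s%s", sa->prefix, sa->entries[idx]);
	return snprintf(bf, size, "%d", idx);	/* unknown value: print raw */
}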
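
The cs-etm and intel-pt hits flush a circular last-branch buffer with two memcpy() calls: first the tail from the current position to the end of the buffer, then the wrapped head from index 0. A generic sketch over plain 64-bit entries rather than struct branch_entry; all names are invented:

#include <string.h>

/* dst receives nr entries, oldest first, from a circular buffer src of
 * `size` slots whose oldest entry sits at index pos (nr <= size). */
static void demo_copy_circular(unsigned long long *dst,
			       const unsigned long long *src,
			       size_t pos, size_t size, size_t nr)
{
	size_t first = size - pos < nr ? size - pos : nr;

	memcpy(dst, src + pos, first * sizeof(*src));
	if (nr > first)
		memcpy(dst + first, src, (nr - first) * sizeof(*src));
}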
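
tools/perf/util/pstack.c implements a tiny pointer stack whose entries[] is a flexible array: push stores at entries[top++], pop returns entries[--top], peek reads entries[top - 1]. A minimal sketch with bounds checks in place of the tools' error reporting:

#include <stdlib.h>

struct demo_pstack {
	unsigned short top, max;
	void *entries[];
};

static struct demo_pstack *demo_pstack__new(unsigned short max)
{
	struct demo_pstack *p = calloc(1, sizeof(*p) + max * sizeof(void *));

	if (p)
		p->max = max;
	return p;
}

static int demo_pstack__push(struct demo_pstack *p, void *key)
{
	if (p->top >= p->max)
		return -1;
	p->entries[p->top++] = key;
	return 0;
}

static void *demo_pstack__pop(struct demo_pstack *p)
{
	return p->top ? p->entries[--p->top] : NULL;
}

static void *demo_pstack__peek(const struct demo_pstack *p)
{
	return p->top ? p->entries[p->top - 1] : NULL;
}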
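
tools/perf/util/syscalltbl.c fills an entries array from the generated native table, qsort()s it by name, and resolves names with bsearch(). The same flow in a self-contained program; the three syscall numbers are examples, not taken from the table:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_syscall { const char *name; int id; };

static int demo_cmp(const void *va, const void *vb)
{
	return strcmp(((const struct demo_syscall *)va)->name,
		      ((const struct demo_syscall *)vb)->name);
}

/* bsearch() passes the key as the first comparator argument */
static int demo_keycmp(const void *vkey, const void *ventry)
{
	return strcmp(vkey, ((const struct demo_syscall *)ventry)->name);
}

int main(void)
{
	struct demo_syscall entries[] = {
		{ "write", 1 }, { "read", 0 }, { "openat", 257 },
	};
	size_t n = sizeof(entries) / sizeof(entries[0]);
	struct demo_syscall *sc;

	qsort(entries, n, sizeof(entries[0]), demo_cmp);
	sc = bsearch("read", entries, n, sizeof(entries[0]), demo_keycmp);
	if (sc)
		printf("%s -> %d\n", sc->name, sc->id); /* read -> 0 */
	return 0;
}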